diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 00000000000..e4008c6940b --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,31 @@ +# This CITATION.cff file was generated with cffinit. + +cff-version: 1.2.0 +title: "ClickHouse" +message: "If you use this software, please cite it as below." +type: software +authors: + - family-names: "Milovidov" + given-names: "Alexey" +repository-code: 'https://github.com/ClickHouse/ClickHouse' +url: 'https://clickhouse.com' +license: Apache-2.0 +preferred-citation: + type: article + authors: + - family-names: "Schulze" + given-names: "Robert" + - family-names: "Schreiber" + given-names: "Tom" + - family-names: "Yatsishin" + given-names: "Ilya" + - family-names: "Dahimene" + given-names: "Ryadh" + - family-names: "Milovidov" + given-names: "Alexey" + journal: "Proceedings of the VLDB Endowment" + title: "ClickHouse - Lightning Fast Analytics for Everyone" + year: 2024 + volume: 17 + issue: 12 + doi: 10.14778/3685800.3685802 diff --git a/README.md b/README.md index 652f5e0751d..5fa04fe29e7 100644 --- a/README.md +++ b/README.md @@ -42,21 +42,19 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else The following upcoming meetups are featuring creator of ClickHouse & CTO, Alexey Milovidov: -* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25 -* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5 * [Raleigh Meetup (Deutsche Bank)](https://www.meetup.com/triangletechtalks/events/302723486/) - September 9 * [New York Meetup (Rokt)](https://www.meetup.com/clickhouse-new-york-user-group/events/302575342) - September 10 * [Chicago Meetup (Jump Capital)](https://lu.ma/43tvmrfw) - September 12 Other upcoming meetups -* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27 -* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27 -* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5 -* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5 + * [Toronto Meetup (Shopify)](https://www.meetup.com/clickhouse-toronto-user-group/events/301490855/) - September 10 * [Austin Meetup](https://www.meetup.com/clickhouse-austin-user-group/events/302558689/) - September 17 * [London Meetup](https://www.meetup.com/clickhouse-london-user-group/events/302977267) - September 17 +* [Bangalore Meetup](https://www.meetup.com/clickhouse-bangalore-user-group/events/303208274/) - September 18 * [Tel Aviv Meetup](https://www.meetup.com/clickhouse-meetup-israel/events/303095121) - September 22 +* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1 +* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22 * [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - October 29 * [Oslo Meetup](https://www.meetup.com/open-source-real-time-data-warehouse-real-time-analytics/events/302938622) - October 31 @@ -64,7 +62,13 @@ Other upcoming meetups * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21 * [Paris 
Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26 - +Recently completed events +* [ClickHouse Guangzhou User Group Meetup](https://mp.weixin.qq.com/s/GSvo-7xUoVzCsuUvlLTpCw) - August 25 +* [Seattle Meetup (Statsig)](https://www.meetup.com/clickhouse-seattle-user-group/events/302518075/) - August 27 +* [Melbourne Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302732666/) - August 27 +* [Sydney Meetup](https://www.meetup.com/clickhouse-australia-user-group/events/302862966/) - September 5 +* [Zurich Meetup](https://www.meetup.com/clickhouse-switzerland-meetup-group/events/302267429/) - September 5 +* [San Francisco Meetup (Cloudflare)](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/302540575) - September 5 ## Recent Recordings * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments" diff --git a/contrib/curl b/contrib/curl index de7b3e89218..83bedbd730d 160000 --- a/contrib/curl +++ b/contrib/curl @@ -1 +1 @@ -Subproject commit de7b3e89218467159a7af72d58cea8425946e97d +Subproject commit 83bedbd730d62b83744cc26fa0433d3f6e2e4cd6 diff --git a/contrib/grpc b/contrib/grpc index 1716359d2e2..7bc3abe952a 160000 --- a/contrib/grpc +++ b/contrib/grpc @@ -1 +1 @@ -Subproject commit 1716359d2e28d304a250f9df0e6c0ccad03de8db +Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt index adeaa7dcf33..5714fef8347 100644 --- a/contrib/icu-cmake/CMakeLists.txt +++ b/contrib/icu-cmake/CMakeLists.txt @@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/") # These lists of sources were generated from build log of the original ICU build system (configure + make). 
set(ICUUC_SOURCES -"${ICU_SOURCE_DIR}/common/errorcode.cpp" -"${ICU_SOURCE_DIR}/common/putil.cpp" -"${ICU_SOURCE_DIR}/common/umath.cpp" -"${ICU_SOURCE_DIR}/common/utypes.cpp" -"${ICU_SOURCE_DIR}/common/uinvchar.cpp" -"${ICU_SOURCE_DIR}/common/umutex.cpp" -"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp" -"${ICU_SOURCE_DIR}/common/uinit.cpp" -"${ICU_SOURCE_DIR}/common/uobject.cpp" -"${ICU_SOURCE_DIR}/common/cmemory.cpp" -"${ICU_SOURCE_DIR}/common/charstr.cpp" -"${ICU_SOURCE_DIR}/common/cstr.cpp" -"${ICU_SOURCE_DIR}/common/udata.cpp" -"${ICU_SOURCE_DIR}/common/ucmndata.cpp" -"${ICU_SOURCE_DIR}/common/udatamem.cpp" -"${ICU_SOURCE_DIR}/common/umapfile.cpp" -"${ICU_SOURCE_DIR}/common/udataswp.cpp" -"${ICU_SOURCE_DIR}/common/utrie_swap.cpp" -"${ICU_SOURCE_DIR}/common/ucol_swp.cpp" -"${ICU_SOURCE_DIR}/common/utrace.cpp" -"${ICU_SOURCE_DIR}/common/uhash.cpp" -"${ICU_SOURCE_DIR}/common/uhash_us.cpp" -"${ICU_SOURCE_DIR}/common/uenum.cpp" -"${ICU_SOURCE_DIR}/common/ustrenum.cpp" -"${ICU_SOURCE_DIR}/common/uvector.cpp" -"${ICU_SOURCE_DIR}/common/ustack.cpp" -"${ICU_SOURCE_DIR}/common/uvectr32.cpp" -"${ICU_SOURCE_DIR}/common/uvectr64.cpp" -"${ICU_SOURCE_DIR}/common/ucnv.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_io.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_err.cpp" -"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp" -"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp" -"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp" -"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp" -"${ICU_SOURCE_DIR}/common/ucnv2022.cpp" -"${ICU_SOURCE_DIR}/common/ucnvhz.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp" -"${ICU_SOURCE_DIR}/common/ucnvisci.cpp" -"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_set.cpp" -"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp" -"${ICU_SOURCE_DIR}/common/resource.cpp" -"${ICU_SOURCE_DIR}/common/uresbund.cpp" -"${ICU_SOURCE_DIR}/common/ures_cnv.cpp" -"${ICU_SOURCE_DIR}/common/uresdata.cpp" -"${ICU_SOURCE_DIR}/common/resbund.cpp" -"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp" -"${ICU_SOURCE_DIR}/common/ucurr.cpp" -"${ICU_SOURCE_DIR}/common/localebuilder.cpp" -"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp" -"${ICU_SOURCE_DIR}/common/messagepattern.cpp" -"${ICU_SOURCE_DIR}/common/ucat.cpp" -"${ICU_SOURCE_DIR}/common/locmap.cpp" -"${ICU_SOURCE_DIR}/common/uloc.cpp" -"${ICU_SOURCE_DIR}/common/locid.cpp" -"${ICU_SOURCE_DIR}/common/locutil.cpp" -"${ICU_SOURCE_DIR}/common/locavailable.cpp" -"${ICU_SOURCE_DIR}/common/locdispnames.cpp" -"${ICU_SOURCE_DIR}/common/locdspnm.cpp" -"${ICU_SOURCE_DIR}/common/loclikely.cpp" -"${ICU_SOURCE_DIR}/common/locresdata.cpp" -"${ICU_SOURCE_DIR}/common/lsr.cpp" -"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp" -"${ICU_SOURCE_DIR}/common/locdistance.cpp" -"${ICU_SOURCE_DIR}/common/localematcher.cpp" -"${ICU_SOURCE_DIR}/common/bytestream.cpp" -"${ICU_SOURCE_DIR}/common/stringpiece.cpp" -"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp" -"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp" -"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp" -"${ICU_SOURCE_DIR}/common/bytestrie.cpp" -"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp" -"${ICU_SOURCE_DIR}/common/ucharstrie.cpp" -"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp" -"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp" -"${ICU_SOURCE_DIR}/common/dictionarydata.cpp" 
-"${ICU_SOURCE_DIR}/common/edits.cpp" "${ICU_SOURCE_DIR}/common/appendable.cpp" -"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp" -"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp" -"${ICU_SOURCE_DIR}/common/unistr.cpp" -"${ICU_SOURCE_DIR}/common/unistr_case.cpp" -"${ICU_SOURCE_DIR}/common/unistr_props.cpp" -"${ICU_SOURCE_DIR}/common/utf_impl.cpp" -"${ICU_SOURCE_DIR}/common/ustring.cpp" -"${ICU_SOURCE_DIR}/common/ustrcase.cpp" -"${ICU_SOURCE_DIR}/common/ucasemap.cpp" -"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp" -"${ICU_SOURCE_DIR}/common/cstring.cpp" -"${ICU_SOURCE_DIR}/common/ustrfmt.cpp" -"${ICU_SOURCE_DIR}/common/ustrtrns.cpp" -"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp" -"${ICU_SOURCE_DIR}/common/utext.cpp" -"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp" -"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp" -"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp" -"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp" -"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" -"${ICU_SOURCE_DIR}/common/normalizer2.cpp" -"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp" -"${ICU_SOURCE_DIR}/common/normlzr.cpp" -"${ICU_SOURCE_DIR}/common/unorm.cpp" -"${ICU_SOURCE_DIR}/common/unormcmp.cpp" -"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp" -"${ICU_SOURCE_DIR}/common/chariter.cpp" -"${ICU_SOURCE_DIR}/common/schriter.cpp" -"${ICU_SOURCE_DIR}/common/uchriter.cpp" -"${ICU_SOURCE_DIR}/common/uiter.cpp" -"${ICU_SOURCE_DIR}/common/patternprops.cpp" -"${ICU_SOURCE_DIR}/common/uchar.cpp" -"${ICU_SOURCE_DIR}/common/uprops.cpp" -"${ICU_SOURCE_DIR}/common/ucase.cpp" -"${ICU_SOURCE_DIR}/common/propname.cpp" -"${ICU_SOURCE_DIR}/common/ubidi_props.cpp" -"${ICU_SOURCE_DIR}/common/characterproperties.cpp" -"${ICU_SOURCE_DIR}/common/ubidi.cpp" -"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp" -"${ICU_SOURCE_DIR}/common/ubidiln.cpp" -"${ICU_SOURCE_DIR}/common/ushape.cpp" -"${ICU_SOURCE_DIR}/common/uscript.cpp" -"${ICU_SOURCE_DIR}/common/uscript_props.cpp" -"${ICU_SOURCE_DIR}/common/usc_impl.cpp" -"${ICU_SOURCE_DIR}/common/unames.cpp" -"${ICU_SOURCE_DIR}/common/utrie.cpp" -"${ICU_SOURCE_DIR}/common/utrie2.cpp" -"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp" -"${ICU_SOURCE_DIR}/common/ucptrie.cpp" -"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" "${ICU_SOURCE_DIR}/common/bmpset.cpp" -"${ICU_SOURCE_DIR}/common/unisetspan.cpp" -"${ICU_SOURCE_DIR}/common/uset_props.cpp" -"${ICU_SOURCE_DIR}/common/uniset_props.cpp" -"${ICU_SOURCE_DIR}/common/uniset_closure.cpp" -"${ICU_SOURCE_DIR}/common/uset.cpp" -"${ICU_SOURCE_DIR}/common/uniset.cpp" -"${ICU_SOURCE_DIR}/common/usetiter.cpp" -"${ICU_SOURCE_DIR}/common/ruleiter.cpp" -"${ICU_SOURCE_DIR}/common/caniter.cpp" -"${ICU_SOURCE_DIR}/common/unifilt.cpp" -"${ICU_SOURCE_DIR}/common/unifunct.cpp" -"${ICU_SOURCE_DIR}/common/uarrsort.cpp" -"${ICU_SOURCE_DIR}/common/brkiter.cpp" -"${ICU_SOURCE_DIR}/common/ubrk.cpp" "${ICU_SOURCE_DIR}/common/brkeng.cpp" +"${ICU_SOURCE_DIR}/common/brkiter.cpp" +"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp" +"${ICU_SOURCE_DIR}/common/bytestream.cpp" +"${ICU_SOURCE_DIR}/common/bytestrie.cpp" +"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/caniter.cpp" +"${ICU_SOURCE_DIR}/common/characterproperties.cpp" +"${ICU_SOURCE_DIR}/common/chariter.cpp" +"${ICU_SOURCE_DIR}/common/charstr.cpp" +"${ICU_SOURCE_DIR}/common/cmemory.cpp" +"${ICU_SOURCE_DIR}/common/cstr.cpp" +"${ICU_SOURCE_DIR}/common/cstring.cpp" +"${ICU_SOURCE_DIR}/common/cwchar.cpp" "${ICU_SOURCE_DIR}/common/dictbe.cpp" 
+"${ICU_SOURCE_DIR}/common/dictionarydata.cpp" +"${ICU_SOURCE_DIR}/common/dtintrv.cpp" +"${ICU_SOURCE_DIR}/common/edits.cpp" +"${ICU_SOURCE_DIR}/common/emojiprops.cpp" +"${ICU_SOURCE_DIR}/common/errorcode.cpp" "${ICU_SOURCE_DIR}/common/filteredbrk.cpp" +"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp" +"${ICU_SOURCE_DIR}/common/icudataver.cpp" +"${ICU_SOURCE_DIR}/common/icuplug.cpp" +"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/localebuilder.cpp" +"${ICU_SOURCE_DIR}/common/localematcher.cpp" +"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp" +"${ICU_SOURCE_DIR}/common/locavailable.cpp" +"${ICU_SOURCE_DIR}/common/locbased.cpp" +"${ICU_SOURCE_DIR}/common/locdispnames.cpp" +"${ICU_SOURCE_DIR}/common/locdistance.cpp" +"${ICU_SOURCE_DIR}/common/locdspnm.cpp" +"${ICU_SOURCE_DIR}/common/locid.cpp" +"${ICU_SOURCE_DIR}/common/loclikely.cpp" +"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp" +"${ICU_SOURCE_DIR}/common/locmap.cpp" +"${ICU_SOURCE_DIR}/common/locresdata.cpp" +"${ICU_SOURCE_DIR}/common/locutil.cpp" +"${ICU_SOURCE_DIR}/common/lsr.cpp" +"${ICU_SOURCE_DIR}/common/lstmbe.cpp" +"${ICU_SOURCE_DIR}/common/messagepattern.cpp" +"${ICU_SOURCE_DIR}/common/mlbe.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2.cpp" +"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp" +"${ICU_SOURCE_DIR}/common/normlzr.cpp" +"${ICU_SOURCE_DIR}/common/parsepos.cpp" +"${ICU_SOURCE_DIR}/common/patternprops.cpp" +"${ICU_SOURCE_DIR}/common/pluralmap.cpp" +"${ICU_SOURCE_DIR}/common/propname.cpp" +"${ICU_SOURCE_DIR}/common/propsvec.cpp" +"${ICU_SOURCE_DIR}/common/punycode.cpp" +"${ICU_SOURCE_DIR}/common/putil.cpp" "${ICU_SOURCE_DIR}/common/rbbi.cpp" +"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp" "${ICU_SOURCE_DIR}/common/rbbidata.cpp" "${ICU_SOURCE_DIR}/common/rbbinode.cpp" "${ICU_SOURCE_DIR}/common/rbbirb.cpp" @@ -178,166 +80,180 @@ set(ICUUC_SOURCES "${ICU_SOURCE_DIR}/common/rbbisetb.cpp" "${ICU_SOURCE_DIR}/common/rbbistbl.cpp" "${ICU_SOURCE_DIR}/common/rbbitblb.cpp" -"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp" +"${ICU_SOURCE_DIR}/common/resbund.cpp" +"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp" +"${ICU_SOURCE_DIR}/common/resource.cpp" +"${ICU_SOURCE_DIR}/common/restrace.cpp" +"${ICU_SOURCE_DIR}/common/ruleiter.cpp" +"${ICU_SOURCE_DIR}/common/schriter.cpp" "${ICU_SOURCE_DIR}/common/serv.cpp" -"${ICU_SOURCE_DIR}/common/servnotf.cpp" -"${ICU_SOURCE_DIR}/common/servls.cpp" "${ICU_SOURCE_DIR}/common/servlk.cpp" "${ICU_SOURCE_DIR}/common/servlkf.cpp" +"${ICU_SOURCE_DIR}/common/servls.cpp" +"${ICU_SOURCE_DIR}/common/servnotf.cpp" "${ICU_SOURCE_DIR}/common/servrbf.cpp" "${ICU_SOURCE_DIR}/common/servslkf.cpp" -"${ICU_SOURCE_DIR}/common/uidna.cpp" -"${ICU_SOURCE_DIR}/common/usprep.cpp" -"${ICU_SOURCE_DIR}/common/uts46.cpp" -"${ICU_SOURCE_DIR}/common/punycode.cpp" -"${ICU_SOURCE_DIR}/common/util.cpp" -"${ICU_SOURCE_DIR}/common/util_props.cpp" -"${ICU_SOURCE_DIR}/common/parsepos.cpp" -"${ICU_SOURCE_DIR}/common/locbased.cpp" -"${ICU_SOURCE_DIR}/common/cwchar.cpp" -"${ICU_SOURCE_DIR}/common/wintz.cpp" -"${ICU_SOURCE_DIR}/common/dtintrv.cpp" -"${ICU_SOURCE_DIR}/common/ucnvsel.cpp" -"${ICU_SOURCE_DIR}/common/propsvec.cpp" -"${ICU_SOURCE_DIR}/common/ulist.cpp" -"${ICU_SOURCE_DIR}/common/uloc_tag.cpp" -"${ICU_SOURCE_DIR}/common/icudataver.cpp" -"${ICU_SOURCE_DIR}/common/icuplug.cpp" "${ICU_SOURCE_DIR}/common/sharedobject.cpp" "${ICU_SOURCE_DIR}/common/simpleformatter.cpp" -"${ICU_SOURCE_DIR}/common/unifiedcache.cpp" -"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" -"${ICU_SOURCE_DIR}/common/ubiditransform.cpp" 
-"${ICU_SOURCE_DIR}/common/pluralmap.cpp" "${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp" -"${ICU_SOURCE_DIR}/common/restrace.cpp" -"${ICU_SOURCE_DIR}/common/emojiprops.cpp" -"${ICU_SOURCE_DIR}/common/lstmbe.cpp") +"${ICU_SOURCE_DIR}/common/stringpiece.cpp" +"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/uarrsort.cpp" +"${ICU_SOURCE_DIR}/common/ubidi.cpp" +"${ICU_SOURCE_DIR}/common/ubidi_props.cpp" +"${ICU_SOURCE_DIR}/common/ubidiln.cpp" +"${ICU_SOURCE_DIR}/common/ubiditransform.cpp" +"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp" +"${ICU_SOURCE_DIR}/common/ubrk.cpp" +"${ICU_SOURCE_DIR}/common/ucase.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap.cpp" +"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ucat.cpp" +"${ICU_SOURCE_DIR}/common/uchar.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrie.cpp" +"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp" +"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp" +"${ICU_SOURCE_DIR}/common/uchriter.cpp" +"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp" +"${ICU_SOURCE_DIR}/common/ucmndata.cpp" +"${ICU_SOURCE_DIR}/common/ucnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv2022.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_err.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_io.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_set.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp" +"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp" +"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp" +"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp" +"${ICU_SOURCE_DIR}/common/ucnvhz.cpp" +"${ICU_SOURCE_DIR}/common/ucnvisci.cpp" +"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp" +"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp" +"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp" +"${ICU_SOURCE_DIR}/common/ucnvsel.cpp" +"${ICU_SOURCE_DIR}/common/ucol_swp.cpp" +"${ICU_SOURCE_DIR}/common/ucptrie.cpp" +"${ICU_SOURCE_DIR}/common/ucurr.cpp" +"${ICU_SOURCE_DIR}/common/udata.cpp" +"${ICU_SOURCE_DIR}/common/udatamem.cpp" +"${ICU_SOURCE_DIR}/common/udataswp.cpp" +"${ICU_SOURCE_DIR}/common/uenum.cpp" +"${ICU_SOURCE_DIR}/common/uhash.cpp" +"${ICU_SOURCE_DIR}/common/uhash_us.cpp" +"${ICU_SOURCE_DIR}/common/uidna.cpp" +"${ICU_SOURCE_DIR}/common/uinit.cpp" +"${ICU_SOURCE_DIR}/common/uinvchar.cpp" +"${ICU_SOURCE_DIR}/common/uiter.cpp" +"${ICU_SOURCE_DIR}/common/ulist.cpp" +"${ICU_SOURCE_DIR}/common/uloc.cpp" +"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp" +"${ICU_SOURCE_DIR}/common/uloc_tag.cpp" +"${ICU_SOURCE_DIR}/common/ulocale.cpp" +"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp" +"${ICU_SOURCE_DIR}/common/umapfile.cpp" +"${ICU_SOURCE_DIR}/common/umath.cpp" +"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp" +"${ICU_SOURCE_DIR}/common/umutex.cpp" +"${ICU_SOURCE_DIR}/common/unames.cpp" +"${ICU_SOURCE_DIR}/common/unifiedcache.cpp" +"${ICU_SOURCE_DIR}/common/unifilt.cpp" +"${ICU_SOURCE_DIR}/common/unifunct.cpp" +"${ICU_SOURCE_DIR}/common/uniset.cpp" +"${ICU_SOURCE_DIR}/common/uniset_closure.cpp" +"${ICU_SOURCE_DIR}/common/uniset_props.cpp" +"${ICU_SOURCE_DIR}/common/unisetspan.cpp" +"${ICU_SOURCE_DIR}/common/unistr.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case.cpp" +"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp" +"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/unistr_props.cpp" +"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp" 
+"${ICU_SOURCE_DIR}/common/unorm.cpp" +"${ICU_SOURCE_DIR}/common/unormcmp.cpp" +"${ICU_SOURCE_DIR}/common/uobject.cpp" +"${ICU_SOURCE_DIR}/common/uprops.cpp" +"${ICU_SOURCE_DIR}/common/ures_cnv.cpp" +"${ICU_SOURCE_DIR}/common/uresbund.cpp" +"${ICU_SOURCE_DIR}/common/uresdata.cpp" +"${ICU_SOURCE_DIR}/common/usc_impl.cpp" +"${ICU_SOURCE_DIR}/common/uscript.cpp" +"${ICU_SOURCE_DIR}/common/uscript_props.cpp" +"${ICU_SOURCE_DIR}/common/uset.cpp" +"${ICU_SOURCE_DIR}/common/uset_props.cpp" +"${ICU_SOURCE_DIR}/common/usetiter.cpp" +"${ICU_SOURCE_DIR}/common/ushape.cpp" +"${ICU_SOURCE_DIR}/common/usprep.cpp" +"${ICU_SOURCE_DIR}/common/ustack.cpp" +"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp" +"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp" +"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase.cpp" +"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp" +"${ICU_SOURCE_DIR}/common/ustrenum.cpp" +"${ICU_SOURCE_DIR}/common/ustrfmt.cpp" +"${ICU_SOURCE_DIR}/common/ustring.cpp" +"${ICU_SOURCE_DIR}/common/ustrtrns.cpp" +"${ICU_SOURCE_DIR}/common/utext.cpp" +"${ICU_SOURCE_DIR}/common/utf_impl.cpp" +"${ICU_SOURCE_DIR}/common/util.cpp" +"${ICU_SOURCE_DIR}/common/util_props.cpp" +"${ICU_SOURCE_DIR}/common/utrace.cpp" +"${ICU_SOURCE_DIR}/common/utrie.cpp" +"${ICU_SOURCE_DIR}/common/utrie2.cpp" +"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp" +"${ICU_SOURCE_DIR}/common/utrie_swap.cpp" +"${ICU_SOURCE_DIR}/common/uts46.cpp" +"${ICU_SOURCE_DIR}/common/utypes.cpp" +"${ICU_SOURCE_DIR}/common/uvector.cpp" +"${ICU_SOURCE_DIR}/common/uvectr32.cpp" +"${ICU_SOURCE_DIR}/common/uvectr64.cpp" +"${ICU_SOURCE_DIR}/common/wintz.cpp") set(ICUI18N_SOURCES -"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" -"${ICU_SOURCE_DIR}/i18n/fmtable.cpp" -"${ICU_SOURCE_DIR}/i18n/format.cpp" -"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/umsg.cpp" -"${ICU_SOURCE_DIR}/i18n/numfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/unum.cpp" -"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp" -"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp" -"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/datefmt.cpp" -"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp" -"${ICU_SOURCE_DIR}/i18n/udat.cpp" -"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp" -"${ICU_SOURCE_DIR}/i18n/udatpg.cpp" -"${ICU_SOURCE_DIR}/i18n/nfrs.cpp" -"${ICU_SOURCE_DIR}/i18n/nfrule.cpp" -"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp" -"${ICU_SOURCE_DIR}/i18n/rbnf.cpp" -"${ICU_SOURCE_DIR}/i18n/numsys.cpp" -"${ICU_SOURCE_DIR}/i18n/unumsys.cpp" -"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp" -"${ICU_SOURCE_DIR}/i18n/ucal.cpp" -"${ICU_SOURCE_DIR}/i18n/calendar.cpp" -"${ICU_SOURCE_DIR}/i18n/gregocal.cpp" -"${ICU_SOURCE_DIR}/i18n/timezone.cpp" -"${ICU_SOURCE_DIR}/i18n/simpletz.cpp" -"${ICU_SOURCE_DIR}/i18n/olsontz.cpp" +"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp" +"${ICU_SOURCE_DIR}/i18n/anytrans.cpp" "${ICU_SOURCE_DIR}/i18n/astro.cpp" -"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp" +"${ICU_SOURCE_DIR}/i18n/basictz.cpp" +"${ICU_SOURCE_DIR}/i18n/bocsu.cpp" +"${ICU_SOURCE_DIR}/i18n/brktrans.cpp" "${ICU_SOURCE_DIR}/i18n/buddhcal.cpp" -"${ICU_SOURCE_DIR}/i18n/persncal.cpp" -"${ICU_SOURCE_DIR}/i18n/islamcal.cpp" -"${ICU_SOURCE_DIR}/i18n/japancal.cpp" -"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp" -"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp" -"${ICU_SOURCE_DIR}/i18n/indiancal.cpp" -"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp" +"${ICU_SOURCE_DIR}/i18n/calendar.cpp" +"${ICU_SOURCE_DIR}/i18n/casetrn.cpp" "${ICU_SOURCE_DIR}/i18n/cecal.cpp" -"${ICU_SOURCE_DIR}/i18n/coptccal.cpp" 
-"${ICU_SOURCE_DIR}/i18n/dangical.cpp" -"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp" +"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp" +"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp" "${ICU_SOURCE_DIR}/i18n/coleitr.cpp" "${ICU_SOURCE_DIR}/i18n/coll.cpp" -"${ICU_SOURCE_DIR}/i18n/sortkey.cpp" -"${ICU_SOURCE_DIR}/i18n/bocsu.cpp" -"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp" -"${ICU_SOURCE_DIR}/i18n/ucol.cpp" -"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp" -"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp" "${ICU_SOURCE_DIR}/i18n/collation.cpp" -"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp" +"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp" +"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp" "${ICU_SOURCE_DIR}/i18n/collationdata.cpp" -"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp" +"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp" "${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp" "${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp" +"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp" "${ICU_SOURCE_DIR}/i18n/collationfcd.cpp" "${ICU_SOURCE_DIR}/i18n/collationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/collationsets.cpp" -"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp" -"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp" "${ICU_SOURCE_DIR}/i18n/collationkeys.cpp" -"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp" "${ICU_SOURCE_DIR}/i18n/collationroot.cpp" "${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp" -"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp" -"${ICU_SOURCE_DIR}/i18n/collationweights.cpp" "${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp" -"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp" -"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp" -"${ICU_SOURCE_DIR}/i18n/listformatter.cpp" -"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp" -"${ICU_SOURCE_DIR}/i18n/strmatch.cpp" -"${ICU_SOURCE_DIR}/i18n/usearch.cpp" -"${ICU_SOURCE_DIR}/i18n/search.cpp" -"${ICU_SOURCE_DIR}/i18n/stsearch.cpp" -"${ICU_SOURCE_DIR}/i18n/translit.cpp" -"${ICU_SOURCE_DIR}/i18n/utrans.cpp" -"${ICU_SOURCE_DIR}/i18n/esctrn.cpp" -"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp" -"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp" -"${ICU_SOURCE_DIR}/i18n/strrepl.cpp" -"${ICU_SOURCE_DIR}/i18n/tridpars.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsets.cpp" +"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp" +"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp" +"${ICU_SOURCE_DIR}/i18n/collationweights.cpp" +"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/coptccal.cpp" "${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp" -"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp" -"${ICU_SOURCE_DIR}/i18n/nultrans.cpp" -"${ICU_SOURCE_DIR}/i18n/remtrans.cpp" -"${ICU_SOURCE_DIR}/i18n/casetrn.cpp" -"${ICU_SOURCE_DIR}/i18n/titletrn.cpp" -"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp" -"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp" -"${ICU_SOURCE_DIR}/i18n/anytrans.cpp" -"${ICU_SOURCE_DIR}/i18n/name2uni.cpp" -"${ICU_SOURCE_DIR}/i18n/uni2name.cpp" -"${ICU_SOURCE_DIR}/i18n/nortrans.cpp" -"${ICU_SOURCE_DIR}/i18n/quant.cpp" -"${ICU_SOURCE_DIR}/i18n/transreg.cpp" -"${ICU_SOURCE_DIR}/i18n/brktrans.cpp" -"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp" -"${ICU_SOURCE_DIR}/i18n/rematch.cpp" -"${ICU_SOURCE_DIR}/i18n/repattrn.cpp" -"${ICU_SOURCE_DIR}/i18n/regexst.cpp" -"${ICU_SOURCE_DIR}/i18n/regextxt.cpp" -"${ICU_SOURCE_DIR}/i18n/regeximp.cpp" 
-"${ICU_SOURCE_DIR}/i18n/uregex.cpp" -"${ICU_SOURCE_DIR}/i18n/uregexc.cpp" -"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp" -"${ICU_SOURCE_DIR}/i18n/measfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/currfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/curramt.cpp" -"${ICU_SOURCE_DIR}/i18n/currunit.cpp" -"${ICU_SOURCE_DIR}/i18n/measure.cpp" -"${ICU_SOURCE_DIR}/i18n/utmscale.cpp" "${ICU_SOURCE_DIR}/i18n/csdetect.cpp" "${ICU_SOURCE_DIR}/i18n/csmatch.cpp" "${ICU_SOURCE_DIR}/i18n/csr2022.cpp" @@ -346,60 +262,80 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp" "${ICU_SOURCE_DIR}/i18n/csrucode.cpp" "${ICU_SOURCE_DIR}/i18n/csrutf8.cpp" -"${ICU_SOURCE_DIR}/i18n/inputext.cpp" -"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp" -"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/basictz.cpp" -"${ICU_SOURCE_DIR}/i18n/dtrule.cpp" -"${ICU_SOURCE_DIR}/i18n/rbtz.cpp" -"${ICU_SOURCE_DIR}/i18n/tzrule.cpp" -"${ICU_SOURCE_DIR}/i18n/tztrans.cpp" -"${ICU_SOURCE_DIR}/i18n/vtzone.cpp" -"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp" -"${ICU_SOURCE_DIR}/i18n/standardplural.cpp" -"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp" -"${ICU_SOURCE_DIR}/i18n/plurrule.cpp" -"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/selfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/curramt.cpp" +"${ICU_SOURCE_DIR}/i18n/currfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/currpinf.cpp" +"${ICU_SOURCE_DIR}/i18n/currunit.cpp" +"${ICU_SOURCE_DIR}/i18n/dangical.cpp" +"${ICU_SOURCE_DIR}/i18n/datefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp" +"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp" +"${ICU_SOURCE_DIR}/i18n/decContext.cpp" +"${ICU_SOURCE_DIR}/i18n/decNumber.cpp" +"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp" +"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp" +"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp" "${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp" "${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp" -"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp" -"${ICU_SOURCE_DIR}/i18n/tmunit.cpp" -"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp" -"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/currpinf.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp" -"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp" -"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp" -"${ICU_SOURCE_DIR}/i18n/ztrans.cpp" -"${ICU_SOURCE_DIR}/i18n/zrule.cpp" -"${ICU_SOURCE_DIR}/i18n/vzone.cpp" +"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp" +"${ICU_SOURCE_DIR}/i18n/dtrule.cpp" +"${ICU_SOURCE_DIR}/i18n/erarules.cpp" +"${ICU_SOURCE_DIR}/i18n/esctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable.cpp" +"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp" +"${ICU_SOURCE_DIR}/i18n/format.cpp" +"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp" "${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp" "${ICU_SOURCE_DIR}/i18n/fpositer.cpp" -"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp" -"${ICU_SOURCE_DIR}/i18n/decNumber.cpp" -"${ICU_SOURCE_DIR}/i18n/decContext.cpp" -"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp" -"${ICU_SOURCE_DIR}/i18n/tznames.cpp" 
-"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp" -"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp" -"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp" "${ICU_SOURCE_DIR}/i18n/gender.cpp" -"${ICU_SOURCE_DIR}/i18n/region.cpp" -"${ICU_SOURCE_DIR}/i18n/scriptset.cpp" -"${ICU_SOURCE_DIR}/i18n/uregion.cpp" -"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp" -"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/gregocal.cpp" +"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp" +"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp" +"${ICU_SOURCE_DIR}/i18n/indiancal.cpp" +"${ICU_SOURCE_DIR}/i18n/inputext.cpp" +"${ICU_SOURCE_DIR}/i18n/islamcal.cpp" +"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp" +"${ICU_SOURCE_DIR}/i18n/japancal.cpp" +"${ICU_SOURCE_DIR}/i18n/listformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/measfmt.cpp" "${ICU_SOURCE_DIR}/i18n/measunit.cpp" -"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp" -"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp" -"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp" +"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp" +"${ICU_SOURCE_DIR}/i18n/measure.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp" +"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp" +"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/name2uni.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrs.cpp" +"${ICU_SOURCE_DIR}/i18n/nfrule.cpp" +"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp" +"${ICU_SOURCE_DIR}/i18n/nortrans.cpp" +"${ICU_SOURCE_DIR}/i18n/nultrans.cpp" "${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp" +"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp" +"${ICU_SOURCE_DIR}/i18n/number_capi.cpp" "${ICU_SOURCE_DIR}/i18n/number_compact.cpp" +"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp" "${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp" "${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp" "${ICU_SOURCE_DIR}/i18n/number_fluent.cpp" @@ -407,7 +343,9 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/number_grouping.cpp" "${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp" "${ICU_SOURCE_DIR}/i18n/number_longnames.cpp" +"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp" "${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp" +"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp" "${ICU_SOURCE_DIR}/i18n/number_notation.cpp" "${ICU_SOURCE_DIR}/i18n/number_output.cpp" "${ICU_SOURCE_DIR}/i18n/number_padding.cpp" @@ -415,46 +353,125 @@ set(ICUI18N_SOURCES "${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp" "${ICU_SOURCE_DIR}/i18n/number_rounding.cpp" "${ICU_SOURCE_DIR}/i18n/number_scientific.cpp" -"${ICU_SOURCE_DIR}/i18n/number_utils.cpp" -"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp" -"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp" -"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp" -"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp" +"${ICU_SOURCE_DIR}/i18n/number_simple.cpp" "${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp" -"${ICU_SOURCE_DIR}/i18n/number_capi.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp" 
-"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp" -"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp" -"${ICU_SOURCE_DIR}/i18n/string_segment.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp" -"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp" -"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp" -"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp" -"${ICU_SOURCE_DIR}/i18n/erarules.cpp" -"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp" -"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp" -"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp" -"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp" -"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp" "${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp" "${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp" +"${ICU_SOURCE_DIR}/i18n/number_utils.cpp" +"${ICU_SOURCE_DIR}/i18n/numfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp" +"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp" "${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp" +"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/numsys.cpp" +"${ICU_SOURCE_DIR}/i18n/olsontz.cpp" +"${ICU_SOURCE_DIR}/i18n/persncal.cpp" "${ICU_SOURCE_DIR}/i18n/pluralranges.cpp" +"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/plurrule.cpp" +"${ICU_SOURCE_DIR}/i18n/quant.cpp" +"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/rbnf.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp" +"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp" +"${ICU_SOURCE_DIR}/i18n/rbtz.cpp" +"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp" +"${ICU_SOURCE_DIR}/i18n/regeximp.cpp" +"${ICU_SOURCE_DIR}/i18n/regexst.cpp" +"${ICU_SOURCE_DIR}/i18n/regextxt.cpp" +"${ICU_SOURCE_DIR}/i18n/region.cpp" +"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp" +"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/rematch.cpp" +"${ICU_SOURCE_DIR}/i18n/remtrans.cpp" +"${ICU_SOURCE_DIR}/i18n/repattrn.cpp" +"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp" +"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/scriptset.cpp" +"${ICU_SOURCE_DIR}/i18n/search.cpp" +"${ICU_SOURCE_DIR}/i18n/selfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/simpletz.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp" +"${ICU_SOURCE_DIR}/i18n/sortkey.cpp" +"${ICU_SOURCE_DIR}/i18n/standardplural.cpp" +"${ICU_SOURCE_DIR}/i18n/string_segment.cpp" +"${ICU_SOURCE_DIR}/i18n/strmatch.cpp" +"${ICU_SOURCE_DIR}/i18n/strrepl.cpp" +"${ICU_SOURCE_DIR}/i18n/stsearch.cpp" +"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp" +"${ICU_SOURCE_DIR}/i18n/timezone.cpp" +"${ICU_SOURCE_DIR}/i18n/titletrn.cpp" 
+"${ICU_SOURCE_DIR}/i18n/tmunit.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp" +"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp" +"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp" +"${ICU_SOURCE_DIR}/i18n/translit.cpp" +"${ICU_SOURCE_DIR}/i18n/transreg.cpp" +"${ICU_SOURCE_DIR}/i18n/tridpars.cpp" +"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames.cpp" +"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/tzrule.cpp" +"${ICU_SOURCE_DIR}/i18n/tztrans.cpp" +"${ICU_SOURCE_DIR}/i18n/ucal.cpp" +"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp" +"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp" +"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp" +"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp" +"${ICU_SOURCE_DIR}/i18n/udat.cpp" +"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp" +"${ICU_SOURCE_DIR}/i18n/udatpg.cpp" +"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp" +"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp" +"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp" +"${ICU_SOURCE_DIR}/i18n/umsg.cpp" +"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp" +"${ICU_SOURCE_DIR}/i18n/uni2name.cpp" "${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp" "${ICU_SOURCE_DIR}/i18n/units_converter.cpp" "${ICU_SOURCE_DIR}/i18n/units_data.cpp" -"${ICU_SOURCE_DIR}/i18n/units_router.cpp") +"${ICU_SOURCE_DIR}/i18n/units_router.cpp" +"${ICU_SOURCE_DIR}/i18n/unum.cpp" +"${ICU_SOURCE_DIR}/i18n/unumsys.cpp" +"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp" +"${ICU_SOURCE_DIR}/i18n/uregex.cpp" +"${ICU_SOURCE_DIR}/i18n/uregexc.cpp" +"${ICU_SOURCE_DIR}/i18n/uregion.cpp" +"${ICU_SOURCE_DIR}/i18n/usearch.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp" +"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp" +"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp" +"${ICU_SOURCE_DIR}/i18n/utmscale.cpp" +"${ICU_SOURCE_DIR}/i18n/utrans.cpp" +"${ICU_SOURCE_DIR}/i18n/vtzone.cpp" +"${ICU_SOURCE_DIR}/i18n/vzone.cpp" +"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp" +"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp" +"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp" +"${ICU_SOURCE_DIR}/i18n/zrule.cpp" +"${ICU_SOURCE_DIR}/i18n/ztrans.cpp") file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ") enable_language(ASM) @@ -464,6 +481,11 @@ if (ARCH_S390X) else() set(ICUDATA_SOURCE_FILE "${ICUDATA_SOURCE_DIR}/icudt75l_dat.S" ) endif() +# ^^ you might be confused how for different little endian platforms (x86, ARM) the same assembly files can be used. +# These files are indeed assembly but they only contain data ('.long' directive), which makes them portable accross CPUs. +# Only the endianness and the character set (ASCII, EBCDIC) makes a difference, also see +# https://unicode-org.github.io/icu/userguide/icu_data/#sharing-icu-data-between-platforms, 'Sharing ICU Data Between Platforms') +# (and as an experiment, try re-generating the data files on x86 vs. ARM, ... 
you'll get exactly the same files) set(ICUDATA_SOURCES "${ICUDATA_SOURCE_FILE}" diff --git a/contrib/libarchive b/contrib/libarchive index ee457961713..313aa1fa10b 160000 --- a/contrib/libarchive +++ b/contrib/libarchive @@ -1 +1 @@ -Subproject commit ee45796171324519f0c0bfd012018dd099296336 +Subproject commit 313aa1fa10b657de791e3202c168a6c833bc3543 diff --git a/contrib/libarchive-cmake/CMakeLists.txt b/contrib/libarchive-cmake/CMakeLists.txt index e89770da5f6..aa6dd9638b6 100644 --- a/contrib/libarchive-cmake/CMakeLists.txt +++ b/contrib/libarchive-cmake/CMakeLists.txt @@ -1,6 +1,6 @@ set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libarchive") -set(SRCS +set(SRCS "${LIBRARY_DIR}/libarchive/archive_acl.c" "${LIBRARY_DIR}/libarchive/archive_blake2sp_ref.c" "${LIBRARY_DIR}/libarchive/archive_blake2s_ref.c" @@ -135,7 +135,7 @@ set(SRCS ) add_library(_libarchive ${SRCS}) -target_include_directories(_libarchive PUBLIC +target_include_directories(_libarchive PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} "${LIBRARY_DIR}/libarchive" ) @@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib) endif() if (TARGET ch_contrib::zstd) - target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1) + target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_ZSTD_compressStream=1) target_link_libraries(_libarchive PRIVATE ch_contrib::zstd) endif() @@ -179,4 +179,4 @@ if (OS_LINUX) ) endif() -add_library(ch_contrib::libarchive ALIAS _libarchive) \ No newline at end of file +add_library(ch_contrib::libarchive ALIAS _libarchive) diff --git a/contrib/libarchive-cmake/config.h b/contrib/libarchive-cmake/config.h index 0b0cab47a52..9696cfb112d 100644 --- a/contrib/libarchive-cmake/config.h +++ b/contrib/libarchive-cmake/config.h @@ -334,13 +334,16 @@ typedef uint64_t uintmax_t; /* #undef ARCHIVE_XATTR_LINUX */ /* Version number of bsdcpio */ -#define BSDCPIO_VERSION_STRING "3.7.0" +#define BSDCPIO_VERSION_STRING "3.7.4" /* Version number of bsdtar */ -#define BSDTAR_VERSION_STRING "3.7.0" +#define BSDTAR_VERSION_STRING "3.7.4" /* Version number of bsdcat */ -#define BSDCAT_VERSION_STRING "3.7.0" +#define BSDCAT_VERSION_STRING "3.7.4" + +/* Version number of bsdunzip */ +#define BSDUNZIP_VERSION_STRING "3.7.4" /* Define to 1 if you have the `acl_create_entry' function. */ /* #undef HAVE_ACL_CREATE_ENTRY */ @@ -642,8 +645,8 @@ typedef uint64_t uintmax_t; /* Define to 1 if you have the `getgrnam_r' function. */ #define HAVE_GETGRNAM_R 1 -/* Define to 1 if platform uses `optreset` to reset `getopt` */ -#define HAVE_GETOPT_OPTRESET 1 +/* Define to 1 if you have the `getline' function. */ +#define HAVE_GETLINE 1 /* Define to 1 if you have the `getpid' function. */ #define HAVE_GETPID 1 @@ -750,6 +753,12 @@ typedef uint64_t uintmax_t; /* Define to 1 if you have the `pcreposix' library (-lpcreposix). */ /* #undef HAVE_LIBPCREPOSIX */ +/* Define to 1 if you have the `pcre2-8' library (-lpcre2-8). */ +/* #undef HAVE_LIBPCRE2 */ + +/* Define to 1 if you have the `pcreposix' library (-lpcre2posix). */ +/* #undef HAVE_LIBPCRE2POSIX */ + /* Define to 1 if you have the `xml2' library (-lxml2). */ #define HAVE_LIBXML2 1 @@ -765,9 +774,8 @@ typedef uint64_t uintmax_t; /* Define to 1 if you have the `zstd' library (-lzstd). */ /* #undef HAVE_LIBZSTD */ -/* Define to 1 if you have the `zstd' library (-lzstd) with compression - support. */ -/* #undef HAVE_LIBZSTD_COMPRESSOR */ +/* Define to 1 if you have the ZSTD_compressStream function. 
*/ +/* #undef HAVE_ZSTD_compressStream */ /* Define to 1 if you have the header file. */ #define HAVE_LIMITS_H 1 @@ -923,6 +931,9 @@ typedef uint64_t uintmax_t; /* Define to 1 if you have the header file. */ /* #undef HAVE_PCREPOSIX_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PCRE2POSIX_H */ + /* Define to 1 if you have the `pipe' function. */ #define HAVE_PIPE 1 @@ -1029,6 +1040,12 @@ typedef uint64_t uintmax_t; /* Define to 1 if you have the `strrchr' function. */ #define HAVE_STRRCHR 1 +/* Define to 1 if the system has the type `struct statfs'. */ +/* #undef HAVE_STRUCT_STATFS */ + +/* Define to 1 if `f_iosize' is a member of `struct statfs'. */ +/* #undef HAVE_STRUCT_STATFS_F_IOSIZE */ + /* Define to 1 if `f_namemax' is a member of `struct statfs'. */ /* #undef HAVE_STRUCT_STATFS_F_NAMEMAX */ @@ -1077,6 +1094,9 @@ typedef uint64_t uintmax_t; /* Define to 1 if you have the `symlink' function. */ #define HAVE_SYMLINK 1 +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + /* Define to 1 if you have the header file. */ /* #undef HAVE_SYS_ACL_H */ @@ -1273,13 +1293,13 @@ typedef uint64_t uintmax_t; /* #undef HAVE__MKGMTIME */ /* Define as const if the declaration of iconv() needs const. */ -#define ICONV_CONST +#define ICONV_CONST /* Version number of libarchive as a single integer */ -#define LIBARCHIVE_VERSION_NUMBER "3007000" +#define LIBARCHIVE_VERSION_NUMBER "3007004" /* Version number of libarchive */ -#define LIBARCHIVE_VERSION_STRING "3.7.0" +#define LIBARCHIVE_VERSION_STRING "3.7.4" /* Define to 1 if `lstat' dereferences a symlink specified with a trailing slash. */ @@ -1333,7 +1353,7 @@ typedef uint64_t uintmax_t; #endif /* SAFE_TO_DEFINE_EXTENSIONS */ /* Version number of package */ -#define VERSION "3.7.0" +#define VERSION "3.7.4" /* Number of bits in a file offset, on hosts where this is settable. 
*/ /* #undef _FILE_OFFSET_BITS */ diff --git a/contrib/libuv b/contrib/libuv index 4482964660c..714b58b9849 160000 --- a/contrib/libuv +++ b/contrib/libuv @@ -1 +1 @@ -Subproject commit 4482964660c77eec1166cd7d14fb915e3dbd774a +Subproject commit 714b58b9849568211ade86b44dd91d37f8a2175e diff --git a/contrib/libuv-cmake/CMakeLists.txt b/contrib/libuv-cmake/CMakeLists.txt index 928fdcdd7e6..22df1e9a102 100644 --- a/contrib/libuv-cmake/CMakeLists.txt +++ b/contrib/libuv-cmake/CMakeLists.txt @@ -10,6 +10,7 @@ set(uv_sources src/random.c src/strscpy.c src/strtok.c + src/thread-common.c src/threadpool.c src/timer.c src/uv-common.c @@ -70,10 +71,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux") list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112) list(APPEND uv_libraries rt) list(APPEND uv_sources - src/unix/epoll.c - src/unix/linux-core.c - src/unix/linux-inotify.c - src/unix/linux-syscalls.c + src/unix/linux.c src/unix/procfs-exepath.c src/unix/random-getrandom.c src/unix/random-sysctl-linux.c) diff --git a/contrib/openssl b/contrib/openssl index 66deddc1e53..b3e62c440f3 160000 --- a/contrib/openssl +++ b/contrib/openssl @@ -1 +1 @@ -Subproject commit 66deddc1e53cda8706604a019777259372d1bd62 +Subproject commit b3e62c440f390e12e77c80675f883af82ad3d5ed diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 39b9fa7c395..19c5903461c 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.8.3.59" +ARG VERSION="24.8.4.13" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 60b038d0da9..e74dcf6d73d 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.8.3.59" +ARG VERSION="24.8.4.13" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 77c403bb046..456bed14a7e 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.8.3.59" +ARG VERSION="24.8.4.13" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh index e544397dd0c..a39f96867be 100755 --- a/docker/test/base/setup_export_logs.sh +++ b/docker/test/base/setup_export_logs.sh @@ -124,6 +124,8 @@ function setup_logs_replication check_logs_credentials || return 0 __set_connection_args + echo "My hostname is ${HOSTNAME}" + echo 'Create all configured system logs' clickhouse-client --query "SYSTEM FLUSH LOGS" @@ -184,7 +186,17 @@ function setup_logs_replication /^TTL /d ') - echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2 + echo -e "Creating remote destination table ${table}_${hash} with statement:" >&2 + + 
echo "::group::${table}" + # there's the only way big "$statement" can be printed without causing EAGAIN error + # cat: write error: Resource temporarily unavailable + statement_print="${statement}" + if [ "${#statement_print}" -gt 4000 ]; then + statement_print="${statement::1999}\n…\n${statement:${#statement}-1999}" + fi + echo -e "$statement_print" + echo "::endgroup::" echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \ --distributed_ddl_task_timeout=30 --distributed_ddl_output_mode=throw_only_active \ diff --git a/docker/test/integration/helper_container/Dockerfile b/docker/test/integration/helper_container/Dockerfile index 49a3d3cd84b..1084d087e53 100644 --- a/docker/test/integration/helper_container/Dockerfile +++ b/docker/test/integration/helper_container/Dockerfile @@ -3,6 +3,8 @@ FROM alpine:3.18 RUN apk add --no-cache -U iproute2 \ - && for bin in iptables iptables-restore iptables-save; \ + && for bin in \ + iptables iptables-restore iptables-save \ + ip6tables ip6tables-restore ip6tables-save; \ do ln -sf xtables-nft-multi "/sbin/$bin"; \ done diff --git a/docs/changelogs/v24.3.11.7-lts.md b/docs/changelogs/v24.3.11.7-lts.md new file mode 100644 index 00000000000..2d97dfbaad2 --- /dev/null +++ b/docs/changelogs/v24.3.11.7-lts.md @@ -0,0 +1,17 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.11.7-lts (28795d0a47e) FIXME as compared to v24.3.10.33-lts (37b6502ebf0) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#67479](https://github.com/ClickHouse/ClickHouse/issues/67479): In rare cases ClickHouse could consider parts as broken because of some unexpected projections on disk. Now it's fixed. [#66898](https://github.com/ClickHouse/ClickHouse/pull/66898) ([alesapin](https://github.com/alesapin)). +* Backported in [#69243](https://github.com/ClickHouse/ClickHouse/issues/69243): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#69221](https://github.com/ClickHouse/ClickHouse/issues/69221): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)). + diff --git a/docs/changelogs/v24.5.8.10-stable.md b/docs/changelogs/v24.5.8.10-stable.md new file mode 100644 index 00000000000..c7a2baa4e4c --- /dev/null +++ b/docs/changelogs/v24.5.8.10-stable.md @@ -0,0 +1,18 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.5.8.10-stable (f11729638ea) FIXME as compared to v24.5.7.31-stable (6c185e9aec1) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#69295](https://github.com/ClickHouse/ClickHouse/issues/69295): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#69245](https://github.com/ClickHouse/ClickHouse/issues/69245): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)). 
+* Fix crash when using `s3` table function with GLOB paths and filters. [#69176](https://github.com/ClickHouse/ClickHouse/pull/69176) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#69223](https://github.com/ClickHouse/ClickHouse/issues/69223): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)). + diff --git a/docs/changelogs/v24.6.6.6-stable.md b/docs/changelogs/v24.6.6.6-stable.md new file mode 100644 index 00000000000..8cf83e8fc18 --- /dev/null +++ b/docs/changelogs/v24.6.6.6-stable.md @@ -0,0 +1,16 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.6.6.6-stable (a4c4580e639) FIXME as compared to v24.6.5.30-stable (e6e196c92d6) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#69197](https://github.com/ClickHouse/ClickHouse/issues/69197): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#69225](https://github.com/ClickHouse/ClickHouse/issues/69225): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)). + diff --git a/docs/changelogs/v24.7.6.8-stable.md b/docs/changelogs/v24.7.6.8-stable.md new file mode 100644 index 00000000000..13102a97b40 --- /dev/null +++ b/docs/changelogs/v24.7.6.8-stable.md @@ -0,0 +1,17 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.7.6.8-stable (7779883593a) FIXME as compared to v24.7.5.37-stable (f2533ca97be) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#69198](https://github.com/ClickHouse/ClickHouse/issues/69198): TODO. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#69249](https://github.com/ClickHouse/ClickHouse/issues/69249): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#69227](https://github.com/ClickHouse/ClickHouse/issues/69227): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)). + diff --git a/docs/changelogs/v24.8.4.13-lts.md b/docs/changelogs/v24.8.4.13-lts.md new file mode 100644 index 00000000000..66385172cf8 --- /dev/null +++ b/docs/changelogs/v24.8.4.13-lts.md @@ -0,0 +1,22 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.8.4.13-lts (53195bc189b) FIXME as compared to v24.8.3.59-lts (e729b9fa40e) + +#### Improvement +* Backported in [#68699](https://github.com/ClickHouse/ClickHouse/issues/68699): Delete old code of named collections from dictionaries and substitute it to the new, which allows to use DDL created named collections in dictionaries. Closes [#60936](https://github.com/ClickHouse/ClickHouse/issues/60936), closes [#36890](https://github.com/ClickHouse/ClickHouse/issues/36890). 
[#68412](https://github.com/ClickHouse/ClickHouse/pull/68412) ([Kseniia Sumarokova](https://github.com/kssenii)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#69231](https://github.com/ClickHouse/ClickHouse/issues/69231): Fix parsing error when null should be inserted as default in some cases during JSON type parsing. [#68955](https://github.com/ClickHouse/ClickHouse/pull/68955) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#69251](https://github.com/ClickHouse/ClickHouse/issues/69251): `UNION` clause in subqueries wasn't handled correctly in queries with parallel replicas and lead to LOGICAL_ERROR `Duplicate announcement received for replica`. [#69146](https://github.com/ClickHouse/ClickHouse/pull/69146) ([Igor Nikonov](https://github.com/devcrafter)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#69189](https://github.com/ClickHouse/ClickHouse/issues/69189): Don't create Object type if use_json_alias_for_old_object_type=1 but allow_experimental_object_type=0. [#69150](https://github.com/ClickHouse/ClickHouse/pull/69150) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#69229](https://github.com/ClickHouse/ClickHouse/issues/69229): Disable memory test with sanitizer. [#69193](https://github.com/ClickHouse/ClickHouse/pull/69193) ([alesapin](https://github.com/alesapin)). +* Backported in [#69219](https://github.com/ClickHouse/ClickHouse/issues/69219): Disable perf-like test with sanitizers. [#69194](https://github.com/ClickHouse/ClickHouse/pull/69194) ([alesapin](https://github.com/alesapin)). + diff --git a/docs/en/engines/table-engines/integrations/azure-queue.md b/docs/en/engines/table-engines/integrations/azure-queue.md new file mode 100644 index 00000000000..b5259336a8b --- /dev/null +++ b/docs/en/engines/table-engines/integrations/azure-queue.md @@ -0,0 +1,72 @@ +--- +slug: /en/engines/table-engines/integrations/azure-queue +sidebar_position: 181 +sidebar_label: AzureQueue +--- + +# AzureQueue Table Engine + +This engine provides an integration with [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs) ecosystem, allowing streaming data import. + +## Create Table {#creating-a-table} + +``` sql +CREATE TABLE test (name String, value UInt32) + ENGINE = AzureQueue(...) + [SETTINGS] + [mode = '',] + [after_processing = 'keep',] + [keeper_path = '',] + ... +``` + +**Engine parameters** + +`AzureQueue` parameters are the same as `AzureBlobStorage` table engine supports. See parameters section [here](../../../engines/table-engines/integrations/azureBlobStorage.md). + +**Example** + +```sql +CREATE TABLE azure_queue_engine_table (name String, value UInt32) +ENGINE=AzureQueue('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/data/') +SETTINGS + mode = 'unordered' +``` + +## Settings {#settings} + +The set of supported settings is the same as for `S3Queue` table engine, but without `s3queue_` prefix. See [full list of settings settings](../../../engines/table-engines/integrations/s3queue.md#settings). + +## Description {#description} + +`SELECT` is not particularly useful for streaming import (except for debugging), because each file can be imported only once. It is more practical to create real-time threads using [materialized views](../../../sql-reference/statements/create/view.md). To do this: + +1. 
Use the engine to create a table for consuming from the specified path in Azure Blob Storage and consider it a data stream.
+2. Create a table with the desired structure.
+3. Create a materialized view that converts data from the engine and puts it into a previously created table.
+
+When the `MATERIALIZED VIEW` is attached to the engine, it starts collecting data in the background.
+
+Example:
+
+``` sql
+  CREATE TABLE azure_queue_engine_table (name String, value UInt32)
+    ENGINE=AzureQueue('', 'CSV', 'gzip')
+    SETTINGS
+        mode = 'unordered';
+
+  CREATE TABLE stats (name String, value UInt32)
+    ENGINE = MergeTree() ORDER BY name;
+
+  CREATE MATERIALIZED VIEW consumer TO stats
+    AS SELECT name, value FROM azure_queue_engine_table;
+
+  SELECT * FROM stats ORDER BY name;
+```
+
+## Virtual columns {#virtual-columns}
+
+- `_path` — Path to the file.
+- `_file` — Name of the file.
+
+For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md
index 48a08dfa499..f02d0563491 100644
--- a/docs/en/engines/table-engines/integrations/s3.md
+++ b/docs/en/engines/table-engines/integrations/s3.md
@@ -35,7 +35,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
[SETTINGS ...]
```
-### Engine parameters
+### Engine parameters {#parameters}
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
diff --git a/docs/en/engines/table-engines/integrations/s3queue.md b/docs/en/engines/table-engines/integrations/s3queue.md
index 06325fa15fb..f1957cfe1ce 100644
--- a/docs/en/engines/table-engines/integrations/s3queue.md
+++ b/docs/en/engines/table-engines/integrations/s3queue.md
@@ -5,6 +5,7 @@ sidebar_label: S3Queue
---
# S3Queue Table Engine
+
This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ecosystem and allows streaming import. This engine is similar to the [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) engines, but provides S3-specific features.
## Create Table {#creating-a-table}
@@ -16,27 +17,25 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
    [mode = '',]
    [after_processing = 'keep',]
    [keeper_path = '',]
-    [s3queue_loading_retries = 0,]
-    [s3queue_processing_threads_num = 1,]
-    [s3queue_enable_logging_to_s3queue_log = 0,]
-    [s3queue_polling_min_timeout_ms = 1000,]
-    [s3queue_polling_max_timeout_ms = 10000,]
-    [s3queue_polling_backoff_ms = 0,]
-    [s3queue_tracked_file_ttl_sec = 0,]
-    [s3queue_tracked_files_limit = 1000,]
-    [s3queue_cleanup_interval_min_ms = 10000,]
-    [s3queue_cleanup_interval_max_ms = 30000,]
+    [loading_retries = 0,]
+    [processing_threads_num = 1,]
+    [enable_logging_to_s3queue_log = 0,]
+    [polling_min_timeout_ms = 1000,]
+    [polling_max_timeout_ms = 10000,]
+    [polling_backoff_ms = 0,]
+    [tracked_file_ttl_sec = 0,]
+    [tracked_files_limit = 1000,]
+    [cleanup_interval_min_ms = 10000,]
+    [cleanup_interval_max_ms = 30000,]
```
-Starting with `24.7` settings without `s3queue_` prefix are also supported.
+:::warning +Before `24.7`, it is required to use `s3queue_` prefix for all settings apart from `mode`, `after_processing` and `keeper_path`. +::: **Engine parameters** -- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path). -- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed. -- `format` — The [format](../../../interfaces/formats.md#formats) of the file. -- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3). -- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension. +`S3Queue` parameters are the same as `S3` table engine supports. See parameters section [here](../../../engines/table-engines/integrations/s3.md#parameters). **Example** diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 0b693775dde..0bbee5f86f3 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -989,19 +989,52 @@ ALTER TABLE tab DROP STATISTICS a; These lightweight statistics aggregate information about distribution of values in columns. Statistics are stored in every part and updated when every insert comes. They can be used for prewhere optimization only if we enable `set allow_statistics_optimize = 1`. -#### Available Types of Column Statistics {#available-types-of-column-statistics} +### Available Types of Column Statistics {#available-types-of-column-statistics} + +- `MinMax` + + The minimum and maximum column value which allows to estimate the selectivity of range filters on numeric columns. + + Syntax: `minmax` - `TDigest` [TDigest](https://github.com/tdunning/t-digest) sketches which allow to compute approximate percentiles (e.g. the 90th percentile) for numeric columns. + Syntax: `tdigest` + - `Uniq` [HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation how many distinct values a column contains. -- `count_min` + Syntax: `uniq` + +- `CountMin` + + [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column. 
+
+  Syntax: `countmin`
+
+
+### Supported Data Types {#supported-data-types}
+
+| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
+|-----------|----------------------------------------------------|-----------------------|
+| CountMin | ✔ | ✔ |
+| MinMax | ✔ | ✗ |
+| TDigest | ✔ | ✗ |
+| Uniq | ✔ | ✔ |
+
+
+### Supported Operations {#supported-operations}
+
+| | Equality filters (==) | Range filters (>, >=, <, <=) |
+|-----------|-----------------------|------------------------------|
+| CountMin | ✔ | ✗ |
+| MinMax | ✗ | ✔ |
+| TDigest | ✗ | ✔ |
+| Uniq | ✔ | ✗ |
- [Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
## Column-level Settings {#column-level-settings}
diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md
index 8892c6d8d3f..df96b8129f1 100644
--- a/docs/en/interfaces/formats.md
+++ b/docs/en/interfaces/formats.md
@@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
+| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
@@ -988,6 +989,59 @@ Example:
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
+## JSONCompactWithProgress {#jsoncompactwithprogress}
+
+In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object.
+
+Each row is either a metadata object, a data object, progress information, a statistics object, an exception, or a totals object:
+
+1. **Metadata Object (`meta`)**
+   - Describes the structure of the data rows.
+   - Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
+   - Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
+   - Appears before any data objects.
+
+2. **Data Object (`data`)**
+   - Represents a row of query results.
+   - Fields: An array with values corresponding to the columns defined in the metadata.
+   - Example: `{"data":["1", "John Doe"]}`
+   - Appears after the metadata object, one per row.
+
+3. **Progress Information Object (`progress`)**
+   - Provides real-time progress feedback during query execution.
+   - Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
+   - Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
+   - May appear intermittently.
+
+4. **Statistics Object (`statistics`)**
+   - Summarizes query execution statistics.
+   - Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
+   - Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
+   - Appears at the end.
+
+5. **Exception Object (`exception`)**
+   - Represents an error that occurred during query execution.
+   - Fields: A single text field containing the error message.
+   - Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
+   - Appears when an error is encountered.
+
+6. **Totals Object (`totals`)**
+   - Provides the totals for each numeric column in the result set.
+ - Fields: An array with total values corresponding to the columns defined in the metadata. + - Example: `{"totals": ["", "3"]}` + - Appears at the end of the data rows, if applicable. + +Example: + +```json +{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]} +{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}} +{"data":["1", "John Doe"]} +{"data":["2", "Joe Doe"]} +{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}} +``` + + ## JSONEachRow {#jsoneachrow} In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object. diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index 8d9dce983bc..16d9b66e17f 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -233,6 +233,16 @@ Features: - Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc. - Visualization metric charts: queries and resource usage, number of merges/mutation, merge performance, query performance, etc. +### CKibana {#ckibana} + +[CKibana](https://github.com/TongchengOpenSource/ckibana) is a lightweight service that allows you to effortlessly search, explore, and visualize ClickHouse data using the native Kibana UI. + +Features: + +- Translates chart requests from the native Kibana UI into ClickHouse query syntax. +- Supports advanced features such as sampling and caching to enhance query performance. +- Minimizes the learning cost for users after migrating from ElasticSearch to ClickHouse. + ## Commercial {#commercial} ### DataGrip {#datagrip} diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 9fce83a0dc4..ccc8cf017ca 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1463,26 +1463,29 @@ Examples: ## logger {#logger} -Logging settings. +The location and format of log messages. Keys: -- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`. -- `log` – The log file. Contains all the entries according to `level`. -- `errorlog` – Error log file. -- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place. -- `count` – The number of archived log files that ClickHouse stores. -- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`. -- `console_log_level` – Logging level for console. Default to `level`. -- `use_syslog` - Log to syslog as well. -- `syslog_level` - Logging level for logging to syslog. -- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`. -- `formatting` – Specify log format to be printed in console log (currently only `json` supported). +- `level` – Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`, + `debug`, `trace`, `test` +- `log` – The path to the log file. +- `errorlog` – The path to the error log file. +- `size` – Rotation policy: Maximum size of the log files in bytes. 
Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
+- `count` – Rotation policy: The maximum number of historical log files ClickHouse keeps.
+- `stream_compress` – Compress log messages using LZ4. Set to `1` or `true` to enable.
+- `console` – Do not write log messages to the log files; print them to the console instead. Set to `1` or `true` to enable. Default is
+  `1` if ClickHouse does not run in daemon mode, `0` otherwise.
+- `console_log_level` – Log level for console output. Defaults to `level`.
+- `formatting` – Log format for console output. Currently, only `json` is supported.
+- `use_syslog` - Also forward log output to syslog.
+- `syslog_level` - Log level for logging to syslog.
-Both log and error log file names (only file names, not directories) support date and time format specifiers.
+**Log format specifiers**
-**Format specifiers**
-Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
+File names in `log` and `errorlog` paths support the format specifiers below for the resulting file name (the directory part does not support them).
+
+The “Example” column shows the output for `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
@@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin
```
-Writing to the console can be configured. Config example:
+To print log messages only in the console:
``` xml
<logger>
    <level>information</level>
-    <console>1</console>
+    <console>true</console>
</logger>
```
+
+**Per-level Overrides**
+
+The log level of individual loggers can be overridden. For example, to mute all messages of the loggers "Backup" and "RBAC":
+
+```xml
+<logger>
+    <levels>
+        <logger>
+            <name>Backup</name>
+            <level>none</level>
+        </logger>
+        <logger>
+            <name>RBAC</name>
+            <level>none</level>
+        </logger>
+    </levels>
+</logger>
+```
### syslog
-Writing to the syslog is also supported. Config example:
+To write log messages additionally to syslog:
``` xml
@@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example:
```
-Keys for syslog:
+Keys for `<syslog>`:
-- use_syslog — Required setting if you want to write to the syslog.
-- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
-- hostname — Optional. The name of the host that logs are sent from.
-- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
- Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format – Message format. Possible values: `bsd` and `syslog.`
+- `address` — The address of syslog in format `host\[:port\]`. If omitted, the local daemon is used.
+- `hostname` — The name of the host from which logs are sent. Optional.
+- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
+- `format` – Log message format. Possible values: `bsd` and `syslog`.
### Log formats
@@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log.
Curren "source_line": "192" } ``` + To enable JSON logging support, use the following snippet: ```xml diff --git a/docs/en/operations/utilities/clickhouse-keeper-client.md b/docs/en/operations/utilities/clickhouse-keeper-client.md index a66ecbc1372..fbfdd66d1a3 100644 --- a/docs/en/operations/utilities/clickhouse-keeper-client.md +++ b/docs/en/operations/utilities/clickhouse-keeper-client.md @@ -47,6 +47,8 @@ keeper foo bar - `ls '[path]'` -- Lists the nodes for the given path (default: cwd) - `cd '[path]'` -- Changes the working path (default `.`) +- `cp '' ''` -- Copies 'src' node to 'dest' path +- `mv '' ''` -- Moves 'src' node to the 'dest' path - `exists ''` -- Returns `1` if node exists, `0` otherwise - `set '' [version]` -- Updates the node's value. Only updates if version matches (default: -1) - `create '' [mode]` -- Creates new node with the set value diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index b65fb3d7e95..d3db0e43041 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -1617,45 +1617,348 @@ The calculation is performed relative to specific points in time: If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday. -**See Also** +**Syntax** +```sql +toStartOfInterval(value, INTERVAL x unit[, time_zone]) +toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]]) +``` + +The second overload emulates TimescaleDB's `time_bucket()` function, respectively PostgreSQL's `date_bin()` function, e.g. + +``` SQL +SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30')); +``` +**See Also** - [date_trunc](#date_trunc) ## toTime Converts a date with time to a certain fixed date, while preserving the time. +**Syntax** + +```sql +toTime(date[,timezone]) +``` + +**Arguments** + +- `date` — Date to convert to a time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). +- `timezone` (optional) — Timezone for the returned value. [String](../data-types/string.md). + +**Returned value** + +- DateTime with date equated to `1970-01-02` while preserving the time. [DateTime](../data-types/datetime.md). + +:::note +If the `date` input argument contained sub-second components, +they will be dropped in the returned `DateTime` value with second-accuracy. +::: + +**Example** + +Query: + +```sql +SELECT toTime(toDateTime64('1970-12-10 01:20:30.3000',3)) AS result, toTypeName(result); +``` + +Result: + +```response +┌──────────────result─┬─toTypeName(result)─┐ +│ 1970-01-02 01:20:30 │ DateTime │ +└─────────────────────┴────────────────────┘ +``` + ## toRelativeYearNum -Converts a date, or date with time, to the number of the year, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of years elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeYearNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of years from a fixed reference point in the past. [UInt16](../data-types/int-uint.md). 
+ +**Example** + +Query: + +```sql +SELECT + toRelativeYearNum(toDate('2002-12-08')) AS y1, + toRelativeYearNum(toDate('2010-10-26')) AS y2 +``` + +Result: + +```response +┌───y1─┬───y2─┐ +│ 2002 │ 2010 │ +└──────┴──────┘ +``` ## toRelativeQuarterNum -Converts a date, or date with time, to the number of the quarter, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of quarters elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeQuarterNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of quarters from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toRelativeQuarterNum(toDate('1993-11-25')) AS q1, + toRelativeQuarterNum(toDate('2005-01-05')) AS q2 +``` + +Result: + +```response +┌───q1─┬───q2─┐ +│ 7975 │ 8020 │ +└──────┴──────┘ +``` ## toRelativeMonthNum -Converts a date, or date with time, to the number of the month, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of months elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeMonthNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of months from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toRelativeMonthNum(toDate('2001-04-25')) AS m1, + toRelativeMonthNum(toDate('2009-07-08')) AS m2 +``` + +Result: + +```response +┌────m1─┬────m2─┐ +│ 24016 │ 24115 │ +└───────┴───────┘ +``` ## toRelativeWeekNum -Converts a date, or date with time, to the number of the week, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of weeks elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeWeekNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of weeks from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toRelativeWeekNum(toDate('2000-02-29')) AS w1, + toRelativeWeekNum(toDate('2001-01-12')) AS w2 +``` + +Result: + +```response +┌───w1─┬───w2─┐ +│ 1574 │ 1619 │ +└──────┴──────┘ +``` ## toRelativeDayNum -Converts a date, or date with time, to the number of the day, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of days elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeDayNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of days from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). 
+ +**Example** + +Query: + +```sql +SELECT + toRelativeDayNum(toDate('1993-10-05')) AS d1, + toRelativeDayNum(toDate('2000-09-20')) AS d2 +``` + +Result: + +```response +┌───d1─┬────d2─┐ +│ 8678 │ 11220 │ +└──────┴───────┘ +``` ## toRelativeHourNum -Converts a date, or date with time, to the number of the hour, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of hours elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeHourNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of hours from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toRelativeHourNum(toDateTime('1993-10-05 05:20:36')) AS h1, + toRelativeHourNum(toDateTime('2000-09-20 14:11:29')) AS h2 +``` + +Result: + +```response +┌─────h1─┬─────h2─┐ +│ 208276 │ 269292 │ +└────────┴────────┘ +``` ## toRelativeMinuteNum -Converts a date, or date with time, to the number of the minute, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of minutes elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeMinuteNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of minutes from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toRelativeMinuteNum(toDateTime('1993-10-05 05:20:36')) AS m1, + toRelativeMinuteNum(toDateTime('2000-09-20 14:11:29')) AS m2 +``` + +Result: + +```response +┌───────m1─┬───────m2─┐ +│ 12496580 │ 16157531 │ +└──────────┴──────────┘ +``` ## toRelativeSecondNum -Converts a date, or date with time, to the number of the second, starting from a certain fixed point in the past. +Converts a date, or date with time, to the number of the seconds elapsed since a certain fixed point in the past. + +**Syntax** + +```sql +toRelativeSecondNum(date) +``` + +**Arguments** + +- `date` — Date or date with time. [Date](../data-types/date.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md). + +**Returned value** + +- The number of seconds from a fixed reference point in the past. [UInt32](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toRelativeSecondNum(toDateTime('1993-10-05 05:20:36')) AS s1, + toRelativeSecondNum(toDateTime('2000-09-20 14:11:29')) AS s2 +``` + +Result: + +```response +┌────────s1─┬────────s2─┐ +│ 749794836 │ 969451889 │ +└───────────┴───────────┘ +``` ## toISOYear @@ -3884,19 +4187,29 @@ Result: └───────────────────────────────────────────────────────────────────────┘ ``` -## timeSlots(StartTime, Duration,\[, Size\]) +## timeSlots For a time interval starting at ‘StartTime’ and continuing for ‘Duration’ seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the ‘Size’ in seconds. ‘Size’ is an optional parameter set to 1800 (30 minutes) by default. This is necessary, for example, when searching for pageviews in the corresponding session. Accepts DateTime and DateTime64 as ’StartTime’ argument. For DateTime, ’Duration’ and ’Size’ arguments must be `UInt32`. 
For ’DateTime64’ they must be `Decimal64`. Returns an array of DateTime/DateTime64 (return type matches the type of ’StartTime’). For DateTime64, the return value's scale can differ from the scale of ’StartTime’ --- the highest scale among all given arguments is taken. -Example: +**Syntax** + +```sql +timeSlots(StartTime, Duration,\[, Size\]) +``` + +**Example** + ```sql SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600)); SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299); SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0)); ``` + +Result: + ``` text ┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐ │ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │ diff --git a/docs/en/sql-reference/functions/string-replace-functions.md b/docs/en/sql-reference/functions/string-replace-functions.md index 0cc6b0b27d5..3f50cd24f93 100644 --- a/docs/en/sql-reference/functions/string-replace-functions.md +++ b/docs/en/sql-reference/functions/string-replace-functions.md @@ -20,10 +20,10 @@ overlay(s, replace, offset[, length]) **Parameters** -- `input`: A string type [String](../data-types/string.md). +- `s`: A string type [String](../data-types/string.md). - `replace`: A string type [String](../data-types/string.md). -- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string. -- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of bytes removed from `input` equals the length of `replace`; otherwise `length` bytes are removed. +- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the string `s`. +- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of bytes removed from `s` equals the length of `replace`; otherwise `length` bytes are removed. **Returned value** @@ -32,22 +32,35 @@ overlay(s, replace, offset[, length]) **Example** ```sql -SELECT overlay('ClickHouse SQL', 'CORE', 12) AS res; +SELECT overlay('My father is from Mexico.', 'mother', 4) AS res; ``` Result: ```text -┌─res─────────────┐ -│ ClickHouse CORE │ -└─────────────────┘ +┌─res──────────────────────┐ +│ My mother is from Mexico.│ +└──────────────────────────┘ +``` + +```sql +SELECT overlay('My father is from Mexico.', 'dad', 4, 6) AS res; +``` + +Result: + +```text +┌─res───────────────────┐ +│ My dad is from Mexico.│ +└───────────────────────┘ ``` ## overlayUTF8 Replace part of the string `input` with another string `replace`, starting at the 1-based index `offset`. -Assumes that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined. +Assumes that the string contains valid UTF-8 encoded text. +If this assumption is violated, no exception is thrown and the result is undefined. **Syntax** @@ -59,8 +72,8 @@ overlayUTF8(s, replace, offset[, length]) - `s`: A string type [String](../data-types/string.md). - `replace`: A string type [String](../data-types/string.md). -- `offset`: An integer type [Int](../data-types/int-uint.md). If `offset` is negative, it is counted from the end of the `input` string. -- `length`: Optional. 
An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within input to be replaced. If `length` is not specified, the number of characters removed from `input` equals the length of `replace`; otherwise `length` characters are removed.
+- `offset`: An integer type [Int](../data-types/int-uint.md) (1-based). If `offset` is negative, it is counted from the end of the input string `s`.
+- `length`: Optional. An integer type [Int](../data-types/int-uint.md). `length` specifies the length of the snippet within the input string `s` to be replaced. If `length` is not specified, the number of characters removed from `s` equals the length of `replace`; otherwise `length` characters are removed.
**Returned value**
@@ -69,15 +82,15 @@ overlayUTF8(s, replace, offset[, length])
**Example**
```sql
-SELECT overlayUTF8('ClickHouse是一款OLAP数据库', '开源', 12, 2) AS res;
+SELECT overlayUTF8('Mein Vater ist aus Österreich.', 'der Türkei', 20) AS res;
```
Result:
```text
-┌─res────────────────────────┐
-│ ClickHouse是开源OLAP数据库 │
-└────────────────────────────┘
+┌─res───────────────────────────┐
+│ Mein Vater ist aus der Türkei.│
+└───────────────────────────────┘
```
## replaceOne
diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md
index f7fd2d68cf7..0a40ddcc2c2 100644
--- a/docs/en/sql-reference/functions/type-conversion-functions.md
+++ b/docs/en/sql-reference/functions/type-conversion-functions.md
@@ -3906,7 +3906,7 @@ Result:
## toDateTime64
-Converts the argument to the [DateTime64](../data-types/datetime64.md) data type.
+Converts an input value to a value of type [DateTime64](../data-types/datetime64.md).
**Syntax**
@@ -3918,7 +3918,7 @@ toDateTime64(expr, scale, [timezone])
- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
-- `timezone` - Time zone of the specified datetime64 object.
+- `timezone` (optional) - Time zone of the specified datetime64 object.
**Returned value**
@@ -3977,10 +3977,137 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN
## toDateTime64OrZero
+Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns the min value of [DateTime64](../data-types/datetime64.md) if an invalid argument is received.
+
+**Syntax**
+
+``` sql
+toDateTime64OrZero(expr, scale, [timezone])
+```
+
+**Arguments**
+
+- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
+- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
+- `timezone` (optional) - Time zone of the specified DateTime64 object.
+
+**Returned value**
+
+- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64`: `1970-01-01 01:00:00.000`. [DateTime64](../data-types/datetime64.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT toDateTime64OrZero('2008-10-12 00:00:00 00:30:30', 3) AS invalid_arg
+```
+
+Result:
+
+```response
+┌─────────────invalid_arg─┐
+│ 1970-01-01 01:00:00.000 │
+└─────────────────────────┘
+```
+
+**See also**
+
+- [toDateTime64](#todatetime64).
+- [toDateTime64OrNull](#todatetime64ornull).
+- [toDateTime64OrDefault](#todatetime64ordefault).
+
## toDateTime64OrNull
+Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md) but returns `NULL` if an invalid argument is received.
+
+**Syntax**
+
+``` sql
+toDateTime64OrNull(expr, scale, [timezone])
+```
+
+**Arguments**
+
+- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
+- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
+- `timezone` (optional) - Time zone of the specified DateTime64 object.
+
+**Returned value**
+
+- A calendar date and time of day, with sub-second precision, otherwise `NULL`. [DateTime64](../data-types/datetime64.md)/[NULL](../data-types/nullable.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+    toDateTime64OrNull('1976-10-18 00:00:00.30', 3) AS valid_arg,
+    toDateTime64OrNull('1976-10-18 00:00:00 30', 3) AS invalid_arg
+```
+
+Result:
+
+```response
+┌───────────────valid_arg─┬─invalid_arg─┐
+│ 1976-10-18 00:00:00.300 │        ᴺᵁᴸᴸ │
+└─────────────────────────┴─────────────┘
+```
+
+**See also**
+
+- [toDateTime64](#todatetime64).
+- [toDateTime64OrZero](#todatetime64orzero).
+- [toDateTime64OrDefault](#todatetime64ordefault).
+
## toDateTime64OrDefault
+Like [toDateTime64](#todatetime64), this function converts an input value to a value of type [DateTime64](../data-types/datetime64.md),
+but returns either the default value of [DateTime64](../data-types/datetime64.md)
+or the provided default if an invalid argument is received.
+
+**Syntax**
+
+``` sql
+toDateTime64OrDefault(expr, scale, [timezone, default])
+```
+
+**Arguments**
+
+- `expr` — The value. [String](../data-types/string.md), [UInt32](../data-types/int-uint.md), [Float](../data-types/float.md) or [DateTime](../data-types/datetime.md).
+- `scale` - Tick size (precision): 10<sup>-precision</sup> seconds. Valid range: [ 0 : 9 ].
+- `timezone` (optional) - Time zone of the specified DateTime64 object.
+- `default` (optional) - Default value to return if an invalid argument is received. [DateTime64](../data-types/datetime64.md).
+
+**Returned value**
+
+- A calendar date and time of day, with sub-second precision, otherwise the minimum value of `DateTime64` or the `default` value if provided. [DateTime64](../data-types/datetime64.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+    toDateTime64OrDefault('1976-10-18 00:00:00 30', 3) AS invalid_arg,
+    toDateTime64OrDefault('1976-10-18 00:00:00 30', 3, 'UTC', toDateTime64('2001-01-01 00:00:00.00',3)) AS invalid_arg_with_default
+```
+
+Result:
+
+```response
+┌─────────────invalid_arg─┬─invalid_arg_with_default─┐
+│ 1970-01-01 01:00:00.000 │  2000-12-31 23:00:00.000 │
+└─────────────────────────┴──────────────────────────┘
+```
+
+**See also**
+
+- [toDateTime64](#todatetime64).
+- [toDateTime64OrZero](#todatetime64orzero).
+- [toDateTime64OrNull](#todatetime64ornull).
+
## toDecimal32
Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error.
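The `toDecimal32` paragraph above appears only as diff context, so the function's own examples are not part of this patch. As a hedged, illustrative sketch of the conversion that sentence describes (the query below is an editorial addition, not taken from the patch):

```sql
-- Illustrative only (not from the patch): converts a string to Decimal(9, 2).
-- A malformed input value would raise an exception instead of returning a result.
SELECT toDecimal32('42.73', 2) AS value, toTypeName(value) AS type;
```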
diff --git a/docs/en/sql-reference/statements/delete.md b/docs/en/sql-reference/statements/delete.md
index 8ddb5840f2e..5733efbf8f2 100644
--- a/docs/en/sql-reference/statements/delete.md
+++ b/docs/en/sql-reference/statements/delete.md
@@ -24,9 +24,11 @@ DELETE FROM hits WHERE Title LIKE '%hello%';
## Lightweight `DELETE` does not delete data immediately
-Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations), which is executed asynchronously in the background by default. The statement is going to return almost immediately, but the data can still be visible to queries until the mutation is finished.
+Lightweight `DELETE` is implemented as a [mutation](/en/sql-reference/statements/alter#mutations) that marks rows as deleted but does not immediately physically delete them.
-The mutation marks rows as deleted, and at that point, they will no longer show up in query results. It does not physically delete the data, this will happen during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
+By default, `DELETE` statements wait until marking the rows as deleted is completed before returning. This can take a long time if the amount of data is large. Alternatively, you can run it asynchronously in the background using the setting [`lightweight_deletes_sync`](/en/operations/settings/settings#lightweight_deletes_sync). If that setting is disabled, the `DELETE` statement returns immediately, but the data can still be visible to queries until the background mutation is finished.
+
+The mutation does not physically delete the rows that have been marked as deleted; this only happens during the next merge. As a result, it is possible that for an unspecified period, data is not actually deleted from storage and is only marked as deleted.
If you need to guarantee that your data is deleted from storage in a predictable time, consider using the table setting [`min_age_to_force_merge_seconds`](https://clickhouse.com/docs/en/operations/settings/merge-tree-settings#min_age_to_force_merge_seconds). Or you can use the [ALTER TABLE ... DELETE](/en/sql-reference/statements/alter/delete) command. Note that deleting data using `ALTER TABLE ... DELETE` may consume significant resources as it recreates all affected parts.
diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md
index 7a6e2ab054c..f319e7b1357 100644
--- a/docs/en/sql-reference/statements/select/from.md
+++ b/docs/en/sql-reference/statements/select/from.md
@@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:
Subquery is another `SELECT` query that may be specified in parenthesis inside `FROM` clause.
-`FROM` clause can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
+The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
+
+`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read.
Example: + +```sql +FROM table +SELECT * +``` ## FINAL Modifier @@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu ### Example Usage -**Using the `FINAL` keyword** +Using the `FINAL` keyword ```sql SELECT x, y FROM mytable FINAL WHERE x > 1; ``` -**Using `FINAL` as a query-level setting** +Using `FINAL` as a query-level setting ```sql SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1; ``` -**Using `FINAL` as a session-level setting** +Using `FINAL` as a session-level setting ```sql SET final = 1; diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index ced661d9772..3007df60765 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -384,6 +385,9 @@ try LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds()); }); + MemoryWorker memory_worker(config().getUInt64("memory_worker_period_ms", 0)); + memory_worker.start(); + static ServerErrorHandler error_handler; Poco::ErrorHandler::set(&error_handler); @@ -425,8 +429,9 @@ try for (const auto & server : *servers) metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()}); return metrics; - } - ); + }, + /*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc, + /*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None); std::vector listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host"); @@ -655,7 +660,6 @@ try GWPAsan::initFinished(); #endif - LOG_INFO(log, "Ready for connections."); waitForTerminationRequest(); diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index fb5717ba33f..c69d822e383 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -25,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -111,6 +111,8 @@ #include #include +#include + #include "config.h" #include @@ -449,9 +451,12 @@ void checkForUsersNotInMainConfig( } } +namespace +{ + /// Unused in other builds #if defined(OS_LINUX) -static String readLine(const String & path) +String readLine(const String & path) { ReadBufferFromFile in(path); String contents; @@ -459,7 +464,7 @@ static String readLine(const String & path) return contents; } -static int readNumber(const String & path) +int readNumber(const String & path) { ReadBufferFromFile in(path); int result; @@ -469,7 +474,7 @@ static int readNumber(const String & path) #endif -static void sanityChecks(Server & server) +void sanityChecks(Server & server) { std::string data_path = getCanonicalPath(server.config().getString("path", DBMS_DEFAULT_PATH)); std::string logs_path = server.config().getString("logger.log", ""); @@ -590,6 +595,8 @@ static void sanityChecks(Server & server) } } +} + void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, ContextMutablePtr context, Poco::Logger * log) { try @@ -906,6 +913,8 @@ try LOG_INFO(log, "Background threads finished in {} ms", watch.elapsedMilliseconds()); }); + MemoryWorker memory_worker(global_context->getServerSettings().memory_worker_period_ms); + /// This object will periodically calculate some metrics. 
ServerAsynchronousMetrics async_metrics( global_context, @@ -924,8 +933,9 @@ try for (const auto & server : servers) metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads(), server.refusedConnections()}); return metrics; - } - ); + }, + /*update_jemalloc_epoch_=*/memory_worker.getSource() != MemoryWorker::MemoryUsageSource::Jemalloc, + /*update_rss_=*/memory_worker.getSource() == MemoryWorker::MemoryUsageSource::None); /// NOTE: global context should be destroyed *before* GlobalThreadPool::shutdown() /// Otherwise GlobalThreadPool::shutdown() will hang, since Context holds some threads. @@ -1204,6 +1214,8 @@ try FailPointInjection::enableFromGlobalConfig(config()); + memory_worker.start(); + int default_oom_score = 0; #if !defined(NDEBUG) @@ -1547,15 +1559,6 @@ try total_memory_tracker.setDescription("(total)"); total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking); - if (cgroups_memory_usage_observer) - { - double hard_limit_ratio = new_server_settings.cgroup_memory_watcher_hard_limit_ratio; - double soft_limit_ratio = new_server_settings.cgroup_memory_watcher_soft_limit_ratio; - cgroups_memory_usage_observer->setMemoryUsageLimits( - static_cast(max_server_memory_usage * hard_limit_ratio), - static_cast(max_server_memory_usage * soft_limit_ratio)); - } - size_t merges_mutations_memory_usage_soft_limit = new_server_settings.merges_mutations_memory_usage_soft_limit; size_t default_merges_mutations_server_memory_usage = static_cast(current_physical_server_memory * new_server_settings.merges_mutations_memory_usage_to_ram_ratio); @@ -1584,8 +1587,6 @@ try background_memory_tracker.setDescription("(background)"); background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking); - total_memory_tracker.setAllowUseJemallocMemory(new_server_settings.allow_use_jemalloc_memory); - auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker(); total_memory_tracker.setOvercommitTracker(global_overcommit_tracker); diff --git a/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp b/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp index 636ac80e350..8fb0b645096 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp @@ -116,15 +116,17 @@ class GroupConcatImpl final SerializationPtr serialization; UInt64 limit; const String delimiter; + const DataTypePtr type; public: GroupConcatImpl(const DataTypePtr & data_type_, const Array & parameters_, UInt64 limit_, const String & delimiter_) : IAggregateFunctionDataHelper, GroupConcatImpl>( {data_type_}, parameters_, std::make_shared()) - , serialization(this->argument_types[0]->getDefaultSerialization()) , limit(limit_) , delimiter(delimiter_) + , type(data_type_) { + serialization = isFixedString(type) ? 
std::make_shared()->getDefaultSerialization() : this->argument_types[0]->getDefaultSerialization(); } String getName() const override { return name; } @@ -140,7 +142,14 @@ public: if (cur_data.data_size != 0) cur_data.insertChar(delimiter.c_str(), delimiter.size(), arena); - cur_data.insert(columns[0], serialization, row_num, arena); + if (isFixedString(type)) + { + ColumnWithTypeAndName col = {columns[0]->getPtr(), type, "column"}; + const auto & col_str = castColumn(col, std::make_shared()); + cur_data.insert(col_str.get(), serialization, row_num, arena); + } + else + cur_data.insert(columns[0], serialization, row_num, arena); } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index cef23f766c7..35d6e599e38 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -459,6 +459,8 @@ public: bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; } + constexpr static bool parallelizeMergeWithKey() { return true; } + void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic & is_cancelled) const override { if constexpr (is_parallelize_merge_prepare_needed) diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index ee227db6d9d..f8e7051d635 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -145,6 +145,8 @@ public: virtual bool isParallelizeMergePrepareNeeded() const { return false; } + constexpr static bool parallelizeMergeWithKey() { return false; } + virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic & /*is_cancelled*/) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName()); @@ -169,7 +171,7 @@ public: /// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function /// then destroy states (on which src places points to). - virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0; + virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic & is_cancelled, Arena * arena) const = 0; /// Serializes state (to transmit it over the network, for example). 
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional version = std::nullopt) const = 0; /// NOLINT @@ -499,11 +501,15 @@ public: static_cast(this)->merge(places[i] + place_offset, rhs[i], arena); } - void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override + void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic & is_cancelled, Arena * arena) const override { for (size_t i = 0; i < size; ++i) { - static_cast(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena); + if constexpr (Derived::parallelizeMergeWithKey()) + static_cast(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena); + else + static_cast(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena); + static_cast(this)->destroy(rhs_places[i] + offset); } } diff --git a/src/AggregateFunctions/UniqExactSet.h b/src/AggregateFunctions/UniqExactSet.h index 2ae8c3a8386..25c6f7ac55f 100644 --- a/src/AggregateFunctions/UniqExactSet.h +++ b/src/AggregateFunctions/UniqExactSet.h @@ -101,6 +101,13 @@ public: auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic * is_cancelled = nullptr) { + /// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel. + if (other.size() > 40000) + { + if (isSingleLevel()) + convertToTwoLevel(); + } + if (isSingleLevel() && other.isTwoLevel()) convertToTwoLevel(); diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp index 278af9d4eb3..5e6beec791d 100644 --- a/src/Backups/RestorerFromBackup.cpp +++ b/src/Backups/RestorerFromBackup.cpp @@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name) table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database); DatabasePtr database = table_info.database; + auto query_context = Context::createCopy(context); + query_context->setSetting("database_replicated_allow_explicit_uuid", 3); + query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3); + /// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some /// database-specific things). 
database->createTableRestoredFromBackup( create_table_query, - context, + query_context, restore_coordination, std::chrono::duration_cast(create_table_timeout).count()); } diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1889bba3b39..f0410eee9fe 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -176,7 +176,7 @@ add_library (clickhouse_new_delete STATIC Common/new_delete.cpp) target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io) if (TARGET ch_contrib::jemalloc) target_link_libraries (clickhouse_new_delete PRIVATE ch_contrib::jemalloc) - target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::jemalloc) + target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::jemalloc) target_link_libraries (clickhouse_storages_system PRIVATE ch_contrib::jemalloc) endif() diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 008fcee6a83..717a9bbe95a 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -1896,6 +1896,21 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin /// Temporarily apply query settings to context. std::optional old_settings; SCOPE_EXIT_SAFE({ + try + { + /// We need to park ParallelFormating threads, + /// because they can use settings from global context + /// and it can lead to data race with `setSettings` + resetOutput(); + } + catch (...) + { + if (!have_error) + { + client_exception = std::make_unique(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode()); + have_error = true; + } + } if (old_settings) client_context->setSettings(*old_settings); }); diff --git a/src/Client/ConnectionPoolWithFailover.cpp b/src/Client/ConnectionPoolWithFailover.cpp index fb895d17763..a5c14dc9957 100644 --- a/src/Client/ConnectionPoolWithFailover.cpp +++ b/src/Client/ConnectionPoolWithFailover.cpp @@ -168,7 +168,7 @@ std::vector ConnectionPoolWithFailover::g { return tryGetEntry(pool, timeouts, fail_message, settings, &table_to_check, /*async_callback=*/ {}); }; return getManyImpl(settings, pool_mode, try_get_entry, - /*skip_unavailable_endpoints=*/ std::nullopt, + /*skip_unavailable_endpoints=*/ false, /// skip_unavailable_endpoints is used to get the min number of entries, and we need at least one /*priority_func=*/ {}, settings.distributed_insert_skip_read_only_replicas); } diff --git a/src/Client/ConnectionPoolWithFailover.h b/src/Client/ConnectionPoolWithFailover.h index a2dc188eb7d..6db52140854 100644 --- a/src/Client/ConnectionPoolWithFailover.h +++ b/src/Client/ConnectionPoolWithFailover.h @@ -42,7 +42,7 @@ public: size_t max_error_cap = DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT); using Entry = IConnectionPool::Entry; - using PoolWithFailoverBase::isTryResultInvalid; + using PoolWithFailoverBase::getValidTryResult; /** Allocates connection to work. 
*/ Entry get(const ConnectionTimeouts & timeouts) override; @@ -98,7 +98,7 @@ public: std::vector getShuffledPools(const Settings & settings, GetPriorityFunc priority_func = {}, bool use_slowdown_count = false); - size_t getMaxErrorCup() const { return Base::max_error_cap; } + size_t getMaxErrorCap() const { return Base::max_error_cap; } void updateSharedError(std::vector & shuffled_pools) { diff --git a/src/Client/HedgedConnectionsFactory.cpp b/src/Client/HedgedConnectionsFactory.cpp index be7397b0fad..df63a124539 100644 --- a/src/Client/HedgedConnectionsFactory.cpp +++ b/src/Client/HedgedConnectionsFactory.cpp @@ -327,7 +327,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect ShuffledPool & shuffled_pool = shuffled_pools[index]; LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message); - shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1); + shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1); shuffled_pool.slowdown_count = 0; if (shuffled_pool.error_count >= max_tries) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 9b6a7428411..a92d321f8aa 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -1,5 +1,3 @@ -#include - #include #include #include @@ -8,8 +6,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -69,10 +69,14 @@ static void openCgroupv2MetricFile(const std::string & filename, std::optional(jemalloc_full_name.c_str()); values[clickhouse_full_name] = AsynchronousMetricValue(value, "An internal metric of the low-level memory allocator (jemalloc). See https://jemalloc.net/jemalloc.3.html"); return value; } @@ -768,8 +770,11 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) // 'epoch' is a special mallctl -- it updates the statistics. Without it, all // the following calls will return stale values. It increments and returns // the current epoch number, which might be useful to log as a sanity check. - auto epoch = updateJemallocEpoch(); - new_values["jemalloc.epoch"] = { epoch, "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other `jemalloc` metrics." }; + auto epoch = update_jemalloc_epoch ? updateJemallocEpoch() : getJemallocValue("epoch"); + new_values["jemalloc.epoch"] + = {epoch, + "An internal incremental update number of the statistics of jemalloc (Jason Evans' memory allocator), used in all other " + "`jemalloc` metrics."}; // Collect the statistics themselves. 
saveJemallocMetric(new_values, "allocated"); @@ -782,10 +787,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) saveJemallocMetric(new_values, "background_thread.num_threads"); saveJemallocMetric(new_values, "background_thread.num_runs"); saveJemallocMetric(new_values, "background_thread.run_intervals"); - saveJemallocProf(new_values, "active"); + saveJemallocProf(new_values, "active"); saveAllArenasMetric(new_values, "pactive"); - [[maybe_unused]] size_t je_malloc_pdirty = saveAllArenasMetric(new_values, "pdirty"); - [[maybe_unused]] size_t je_malloc_pmuzzy = saveAllArenasMetric(new_values, "pmuzzy"); + saveAllArenasMetric(new_values, "pdirty"); + saveAllArenasMetric(new_values, "pmuzzy"); saveAllArenasMetric(new_values, "dirty_purged"); saveAllArenasMetric(new_values, "muzzy_purged"); #endif @@ -814,41 +819,8 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) " It is unspecified whether it includes the per-thread stacks and most of the allocated memory, that is allocated with the 'mmap' system call." " This metric exists only for completeness reasons. I recommend to use the `MemoryResident` metric for monitoring."}; - /// We must update the value of total_memory_tracker periodically. - /// Otherwise it might be calculated incorrectly - it can include a "drift" of memory amount. - /// See https://github.com/ClickHouse/ClickHouse/issues/10293 - { - Int64 amount = total_memory_tracker.get(); - Int64 peak = total_memory_tracker.getPeak(); - Int64 rss = data.resident; - Int64 free_memory_in_allocator_arenas = 0; - -#if USE_JEMALLOC - /// According to jemalloc man, pdirty is: - /// - /// Number of pages within unused extents that are potentially - /// dirty, and for which madvise() or similar has not been called. - /// - /// So they will be subtracted from RSS to make accounting more - /// accurate, since those pages are not really RSS but a memory - /// that can be used at anytime via jemalloc. - free_memory_in_allocator_arenas = je_malloc_pdirty * getPageSize(); -#endif - - Int64 difference = rss - amount; - - /// Log only if difference is high. This is for convenience. The threshold is arbitrary. 
- if (difference >= 1048576 || difference <= -1048576) - LOG_TRACE(log, - "MemoryTracking: was {}, peak {}, free memory in arenas {}, will set to {} (RSS), difference: {}", - ReadableSize(amount), - ReadableSize(peak), - ReadableSize(free_memory_in_allocator_arenas), - ReadableSize(rss), - ReadableSize(difference)); - - MemoryTracker::setRSS(rss, free_memory_in_allocator_arenas); - } + if (update_rss) + MemoryTracker::updateRSS(data.resident); } { diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 78d07ef4b6c..215dc6e1337 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -1,15 +1,14 @@ #pragma once +#include #include #include #include #include #include -#include #include #include -#include #include #include #include @@ -69,7 +68,9 @@ public: AsynchronousMetrics( unsigned update_period_seconds, - const ProtocolServerMetricsFunc & protocol_server_metrics_func_); + const ProtocolServerMetricsFunc & protocol_server_metrics_func_, + bool update_jemalloc_epoch_, + bool update_rss_); virtual ~AsynchronousMetrics(); @@ -112,6 +113,9 @@ private: MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex); #endif + [[maybe_unused]] const bool update_jemalloc_epoch; + [[maybe_unused]] const bool update_rss; + #if defined(OS_LINUX) std::optional meminfo TSA_GUARDED_BY(data_mutex); std::optional loadavg TSA_GUARDED_BY(data_mutex); diff --git a/src/Common/CgroupsMemoryUsageObserver.cpp b/src/Common/CgroupsMemoryUsageObserver.cpp index 83b04360164..28bb861865a 100644 --- a/src/Common/CgroupsMemoryUsageObserver.cpp +++ b/src/Common/CgroupsMemoryUsageObserver.cpp @@ -14,239 +14,21 @@ #include #include -#include -#include -#include - -#include "config.h" -#if USE_JEMALLOC -# include -#define STRINGIFY_HELPER(x) #x -#define STRINGIFY(x) STRINGIFY_HELPER(x) -#endif using namespace DB; -namespace fs = std::filesystem; - -namespace DB -{ - -namespace ErrorCodes -{ -extern const int FILE_DOESNT_EXIST; -extern const int INCORRECT_DATA; -} - -} - -namespace -{ - -/// Format is -/// kernel 5 -/// rss 15 -/// [...] 
-using Metrics = std::map; - -Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf) -{ - Metrics metrics; - while (!buf.eof()) - { - std::string current_key; - readStringUntilWhitespace(current_key, buf); - - assertChar(' ', buf); - - uint64_t value = 0; - readIntText(value, buf); - assertChar('\n', buf); - - auto [_, inserted] = metrics.emplace(std::move(current_key), value); - chassert(inserted, "Duplicate keys in stat file"); - } - return metrics; -} - -uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & key) -{ - const auto all_metrics = readAllMetricsFromStatFile(buf); - if (const auto it = all_metrics.find(key); it != all_metrics.end()) - return it->second; - throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot find '{}' in '{}'", key, buf.getFileName()); -} - -struct CgroupsV1Reader : ICgroupsReader -{ - explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { } - - uint64_t readMemoryUsage() override - { - std::lock_guard lock(mutex); - buf.rewind(); - return readMetricFromStatFile(buf, "rss"); - } - - std::string dumpAllStats() override - { - std::lock_guard lock(mutex); - buf.rewind(); - return fmt::format("{}", readAllMetricsFromStatFile(buf)); - } - -private: - std::mutex mutex; - ReadBufferFromFile buf TSA_GUARDED_BY(mutex); -}; - -struct CgroupsV2Reader : ICgroupsReader -{ - explicit CgroupsV2Reader(const fs::path & stat_file_dir) - : current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat") - { - } - - uint64_t readMemoryUsage() override - { - std::lock_guard lock(mutex); - current_buf.rewind(); - stat_buf.rewind(); - - int64_t mem_usage = 0; - /// memory.current contains a single number - /// the reason why we subtract it described here: https://github.com/ClickHouse/ClickHouse/issues/64652#issuecomment-2149630667 - readIntText(mem_usage, current_buf); - mem_usage -= readMetricFromStatFile(stat_buf, "inactive_file"); - chassert(mem_usage >= 0, "Negative memory usage"); - return mem_usage; - } - - std::string dumpAllStats() override - { - std::lock_guard lock(mutex); - stat_buf.rewind(); - return fmt::format("{}", readAllMetricsFromStatFile(stat_buf)); - } - -private: - std::mutex mutex; - ReadBufferFromFile current_buf TSA_GUARDED_BY(mutex); - ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex); -}; - -/// Caveats: -/// - All of the logic in this file assumes that the current process is the only process in the -/// containing cgroup (or more precisely: the only process with significant memory consumption). -/// If this is not the case, then other processe's memory consumption may affect the internal -/// memory tracker ... -/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 is deprecated for over half a -/// decade and will go away at some point, hierarchical detection is only implemented for v2. -/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such -/// systems existed only for a short transition period. 
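The readers removed above (and their replacements in MemoryWorker.cpp later in this diff) all boil down to scanning a cgroup `memory.stat` file of `key value` lines. A rough standalone sketch of that parsing, using plain `std::ifstream` instead of ClickHouse's `ReadBufferFromFile`; the path is illustrative and depends on where the process's cgroup is mounted:

```cpp
// Rough sketch: scan a cgroup "memory.stat" file (lines of "<key> <value>") for one key.
// The path below is illustrative; the real code resolves the process's own cgroup directory.
#include <cstdint>
#include <fstream>
#include <iostream>
#include <optional>
#include <string>

std::optional<uint64_t> readStatKey(const std::string & path, const std::string & key)
{
    std::ifstream in(path);
    std::string name;
    uint64_t value = 0;
    while (in >> name >> value)
        if (name == key)
            return value;
    return std::nullopt;  // key not present or file unreadable
}

int main()
{
    // cgroups v2 reports anonymous memory under "anon"; v1 uses "rss" in memory/memory.stat.
    if (auto anon = readStatKey("/sys/fs/cgroup/memory.stat", "anon"))
        std::cout << "anon bytes: " << *anon << '\n';
    return 0;
}
```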
- -std::optional getCgroupsV1Path() -{ - auto path = default_cgroups_mount / "memory/memory.stat"; - if (!fs::exists(path)) - return {}; - return {default_cgroups_mount / "memory"}; -} - -std::pair getCgroupsPath() -{ - auto v2_path = getCgroupsV2PathContainingFile("memory.current"); - if (v2_path.has_value()) - return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2}; - - auto v1_path = getCgroupsV1Path(); - if (v1_path.has_value()) - return {*v1_path, CgroupsMemoryUsageObserver::CgroupsVersion::V1}; - - throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file"); -} - -} namespace DB { CgroupsMemoryUsageObserver::CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_) : log(getLogger("CgroupsMemoryUsageObserver")), wait_time(wait_time_) -{ - const auto [cgroup_path, version] = getCgroupsPath(); - - cgroup_reader = createCgroupsReader(version, cgroup_path); - - LOG_INFO( - log, - "Will read the current memory usage from '{}' (cgroups version: {}), wait time is {} sec", - cgroup_path, - (version == CgroupsVersion::V1) ? "v1" : "v2", - wait_time.count()); -} +{} CgroupsMemoryUsageObserver::~CgroupsMemoryUsageObserver() { stopThread(); } -void CgroupsMemoryUsageObserver::setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_) -{ - std::lock_guard limit_lock(limit_mutex); - - if (hard_limit_ == hard_limit && soft_limit_ == soft_limit) - return; - - hard_limit = hard_limit_; - soft_limit = soft_limit_; - - on_hard_limit = [this, hard_limit_](bool up) - { - if (up) - { - LOG_WARNING(log, "Exceeded hard memory limit ({})", ReadableSize(hard_limit_)); - - /// Update current usage in memory tracker. Also reset free_memory_in_allocator_arenas to zero though we don't know if they are - /// really zero. Trying to avoid OOM ... - MemoryTracker::setRSS(hard_limit_, 0); - } - else - { - LOG_INFO(log, "Dropped below hard memory limit ({})", ReadableSize(hard_limit_)); - } - }; - - on_soft_limit = [this, soft_limit_](bool up) - { - if (up) - { - LOG_WARNING(log, "Exceeded soft memory limit ({})", ReadableSize(soft_limit_)); - -# if USE_JEMALLOC - LOG_INFO(log, "Purging jemalloc arenas"); - mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0); -# endif - /// Reset current usage in memory tracker. Expect zero for free_memory_in_allocator_arenas as we just purged them. - uint64_t memory_usage = cgroup_reader->readMemoryUsage(); - LOG_TRACE( - log, - "Read current memory usage {} bytes ({}) from cgroups, full available stats: {}", - memory_usage, - ReadableSize(memory_usage), - cgroup_reader->dumpAllStats()); - MemoryTracker::setRSS(memory_usage, 0); - - LOG_INFO(log, "Purged jemalloc arenas. 
Current memory usage is {}", ReadableSize(memory_usage)); - } - else - { - LOG_INFO(log, "Dropped below soft memory limit ({})", ReadableSize(soft_limit_)); - } - }; - - LOG_INFO(log, "Set new limits, soft limit: {}, hard limit: {}", ReadableSize(soft_limit_), ReadableSize(hard_limit_)); -} - void CgroupsMemoryUsageObserver::setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_) { std::lock_guard memory_amount_available_changed_lock(memory_amount_available_changed_mutex); @@ -300,35 +82,6 @@ void CgroupsMemoryUsageObserver::runThread() std::lock_guard memory_amount_available_changed_lock(memory_amount_available_changed_mutex); on_memory_amount_available_changed(); } - - std::lock_guard limit_lock(limit_mutex); - if (soft_limit > 0 && hard_limit > 0) - { - uint64_t memory_usage = cgroup_reader->readMemoryUsage(); - LOG_TRACE(log, "Read current memory usage {} bytes ({}) from cgroups", memory_usage, ReadableSize(memory_usage)); - if (memory_usage > hard_limit) - { - if (last_memory_usage <= hard_limit) - on_hard_limit(true); - } - else - { - if (last_memory_usage > hard_limit) - on_hard_limit(false); - } - - if (memory_usage > soft_limit) - { - if (last_memory_usage <= soft_limit) - on_soft_limit(true); - } - else - { - if (last_memory_usage > soft_limit) - on_soft_limit(false); - } - last_memory_usage = memory_usage; - } } catch (...) { @@ -337,13 +90,6 @@ void CgroupsMemoryUsageObserver::runThread() } } -std::unique_ptr createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const fs::path & cgroup_path) -{ - if (version == CgroupsMemoryUsageObserver::CgroupsVersion::V2) - return std::make_unique(cgroup_path); - else - return std::make_unique(cgroup_path); -} } #endif diff --git a/src/Common/CgroupsMemoryUsageObserver.h b/src/Common/CgroupsMemoryUsageObserver.h index 7f888fe631b..3de83d6b437 100644 --- a/src/Common/CgroupsMemoryUsageObserver.h +++ b/src/Common/CgroupsMemoryUsageObserver.h @@ -3,53 +3,27 @@ #include #include -#include #include namespace DB { -struct ICgroupsReader -{ - virtual ~ICgroupsReader() = default; - - virtual uint64_t readMemoryUsage() = 0; - - virtual std::string dumpAllStats() = 0; -}; - -/// Does two things: -/// 1. Periodically reads the memory usage of the process from Linux cgroups. -/// You can specify soft or hard memory limits: -/// - When the soft memory limit is hit, drop jemalloc cache. -/// - When the hard memory limit is hit, update MemoryTracking metric to throw memory exceptions faster. -/// The goal of this is to avoid that the process hits the maximum allowed memory limit at which there is a good -/// chance that the Limux OOM killer terminates it. All of this is done is because internal memory tracking in -/// ClickHouse can unfortunately under-estimate the actually used memory. -/// 2. Periodically reads the the maximum memory available to the process (which can change due to cgroups settings). -/// You can specify a callback to react on changes. The callback typically reloads the configuration, i.e. Server -/// or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit' -/// (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.). -/// The goal of this is to provide elasticity when the container is scaled-up/scaled-down. 
The mechanism (polling -/// cgroups) is quite implicit, unfortunately there is currently no better way to communicate memory threshold changes -/// to the database. +/// Periodically reads the the maximum memory available to the process (which can change due to cgroups settings). +/// You can specify a callback to react on changes. The callback typically reloads the configuration, i.e. Server +/// or Keeper configuration file. This reloads settings 'max_server_memory_usage' (Server) and 'max_memory_usage_soft_limit' +/// (Keeper) from which various other internal limits are calculated, including the soft and hard limits for (1.). +/// The goal of this is to provide elasticity when the container is scaled-up/scaled-down. The mechanism (polling +/// cgroups) is quite implicit, unfortunately there is currently no better way to communicate memory threshold changes +/// to the database. #if defined(OS_LINUX) class CgroupsMemoryUsageObserver { public: - using OnMemoryLimitFn = std::function; using OnMemoryAmountAvailableChangedFn = std::function; - enum class CgroupsVersion : uint8_t - { - V1, - V2 - }; - explicit CgroupsMemoryUsageObserver(std::chrono::seconds wait_time_); ~CgroupsMemoryUsageObserver(); - void setMemoryUsageLimits(uint64_t hard_limit_, uint64_t soft_limit_); void setOnMemoryAmountAvailableChangedFn(OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed_); void startThread(); @@ -60,32 +34,22 @@ private: const std::chrono::seconds wait_time; std::mutex limit_mutex; - size_t hard_limit TSA_GUARDED_BY(limit_mutex) = 0; - size_t soft_limit TSA_GUARDED_BY(limit_mutex) = 0; - OnMemoryLimitFn on_hard_limit TSA_GUARDED_BY(limit_mutex); - OnMemoryLimitFn on_soft_limit TSA_GUARDED_BY(limit_mutex); std::mutex memory_amount_available_changed_mutex; OnMemoryAmountAvailableChangedFn on_memory_amount_available_changed TSA_GUARDED_BY(memory_amount_available_changed_mutex); - uint64_t last_memory_usage = 0; /// how much memory does the process use uint64_t last_available_memory_amount; /// how much memory can the process use void stopThread(); void runThread(); - std::unique_ptr cgroup_reader; - std::mutex thread_mutex; std::condition_variable cond; ThreadFromGlobalPool thread; bool quit = false; }; -std::unique_ptr -createCgroupsReader(CgroupsMemoryUsageObserver::CgroupsVersion version, const std::filesystem::path & cgroup_path); - #else class CgroupsMemoryUsageObserver { diff --git a/src/Common/Jemalloc.cpp b/src/Common/Jemalloc.cpp index d7cc246db6a..d8ff9268cca 100644 --- a/src/Common/Jemalloc.cpp +++ b/src/Common/Jemalloc.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) @@ -26,7 +25,6 @@ namespace ErrorCodes void purgeJemallocArenas() { - LOG_TRACE(getLogger("SystemJemalloc"), "Purging unused memory"); Stopwatch watch; mallctl("arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0); ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge); @@ -46,20 +44,6 @@ void checkJemallocProfilingEnabled() "set: MALLOC_CONF=background_thread:true,prof:true"); } -template -void setJemallocValue(const char * name, T value) -{ - T old_value; - size_t old_value_size = sizeof(T); - if (mallctl(name, &old_value, &old_value_size, reinterpret_cast(&value), sizeof(T))) - { - LOG_WARNING(getLogger("Jemalloc"), "mallctl for {} failed", name); - return; - } - - LOG_INFO(getLogger("Jemalloc"), "Value for {} set to {} (from {})", name, value, old_value); -} - void setJemallocProfileActive(bool value) { checkJemallocProfilingEnabled(); diff --git a/src/Common/Jemalloc.h b/src/Common/Jemalloc.h index 499a906fd3d..22a94a44eba 100644 --- a/src/Common/Jemalloc.h +++ b/src/Common/Jemalloc.h @@ -5,6 +5,8 @@ #if USE_JEMALLOC #include +#include +#include namespace DB { @@ -21,6 +23,59 @@ void setJemallocBackgroundThreads(bool enabled); void setJemallocMaxBackgroundThreads(size_t max_threads); +template +void setJemallocValue(const char * name, T value) +{ + T old_value; + size_t old_value_size = sizeof(T); + mallctl(name, &old_value, &old_value_size, reinterpret_cast(&value), sizeof(T)); + LOG_INFO(getLogger("Jemalloc"), "Value for {} set to {} (from {})", name, value, old_value); +} + +template +T getJemallocValue(const char * name) +{ + T value; + size_t value_size = sizeof(T); + mallctl(name, &value, &value_size, nullptr, 0); + return value; +} + +/// Each mallctl call consists of string name lookup which can be expensive. +/// This can be avoided by translating name to "Management Information Base" (MIB) +/// and using it in mallctlbymib calls +template +struct JemallocMibCache +{ + explicit JemallocMibCache(const char * name) + { + mallctlnametomib(name, mib, &mib_length); + } + + void setValue(T value) + { + mallctlbymib(mib, mib_length, nullptr, nullptr, reinterpret_cast(&value), sizeof(T)); + } + + T getValue() + { + T value; + size_t value_size = sizeof(T); + mallctlbymib(mib, mib_length, &value, &value_size, nullptr, 0); + return value; + } + + void run() + { + mallctlbymib(mib, mib_length, nullptr, nullptr, nullptr, 0); + } + +private: + static constexpr size_t max_mib_length = 4; + size_t mib[max_mib_length]; + size_t mib_length = max_mib_length; +}; + } #endif diff --git a/src/Common/MemoryTracker.cpp b/src/Common/MemoryTracker.cpp index 7c0115467c6..7bf665ea7a0 100644 --- a/src/Common/MemoryTracker.cpp +++ b/src/Common/MemoryTracker.cpp @@ -20,13 +20,9 @@ #if USE_JEMALLOC # include -#define STRINGIFY_HELPER(x) #x -#define STRINGIFY(x) STRINGIFY_HELPER(x) - #endif #include -#include #include #include #include @@ -115,8 +111,6 @@ void AllocationTrace::onFreeImpl(void * ptr, size_t size) const namespace ProfileEvents { extern const Event QueryMemoryLimitExceeded; - extern const Event MemoryAllocatorPurge; - extern const Event MemoryAllocatorPurgeTimeMicroseconds; } using namespace std::chrono_literals; @@ -126,15 +120,13 @@ static constexpr size_t log_peak_memory_usage_every = 1ULL << 30; MemoryTracker total_memory_tracker(nullptr, VariableContext::Global); MemoryTracker background_memory_tracker(&total_memory_tracker, VariableContext::User, false); -std::atomic MemoryTracker::free_memory_in_allocator_arenas; - MemoryTracker::MemoryTracker(VariableContext level_) : parent(&total_memory_tracker), level(level_) {} MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_) : parent(parent_), 
level(level_) {} + MemoryTracker::MemoryTracker(MemoryTracker * parent_, VariableContext level_, bool log_peak_memory_usage_in_destructor_) - : parent(parent_) - , log_peak_memory_usage_in_destructor(log_peak_memory_usage_in_destructor_) - , level(level_) -{} + : parent(parent_), log_peak_memory_usage_in_destructor(log_peak_memory_usage_in_destructor_), level(level_) +{ +} MemoryTracker::~MemoryTracker() { @@ -204,10 +196,14 @@ void MemoryTracker::debugLogBigAllocationWithoutCheck(Int64 size [[maybe_unused] return; MemoryTrackerBlockerInThread blocker(VariableContext::Global); - LOG_TEST(getLogger("MemoryTracker"), "Too big allocation ({} bytes) without checking memory limits, " - "it may lead to OOM. Stack trace: {}", size, StackTrace().toString()); + LOG_TEST( + getLogger("MemoryTracker"), + "Too big allocation ({} bytes) without checking memory limits, " + "it may lead to OOM. Stack trace: {}", + size, + StackTrace().toString()); #else - return; /// Avoid trash logging in release builds + /// Avoid trash logging in release builds #endif } @@ -228,6 +224,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed { /// For global memory tracker always update memory usage. amount.fetch_add(size, std::memory_order_relaxed); + rss.fetch_add(size, std::memory_order_relaxed); auto metric_loaded = metric.load(std::memory_order_relaxed); if (metric_loaded != CurrentMetrics::end()) @@ -249,6 +246,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed * So, we allow over-allocations. */ Int64 will_be = size ? size + amount.fetch_add(size, std::memory_order_relaxed) : amount.load(std::memory_order_relaxed); + Int64 will_be_rss = size ? size + rss.fetch_add(size, std::memory_order_relaxed) : rss.load(std::memory_order_relaxed); auto metric_loaded = metric.load(std::memory_order_relaxed); if (metric_loaded != CurrentMetrics::end() && size) @@ -275,6 +273,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed { /// Revert amount.fetch_sub(size, std::memory_order_relaxed); + rss.fetch_sub(size, std::memory_order_relaxed); /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global); @@ -297,33 +296,8 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed } } - Int64 limit_to_check = current_hard_limit; - -#if USE_JEMALLOC - if (level == VariableContext::Global && allow_use_jemalloc_memory.load(std::memory_order_relaxed)) - { - /// Jemalloc arenas may keep some extra memory. - /// This memory was substucted from RSS to decrease memory drift. - /// In case memory is close to limit, try to pugre the arenas. - /// This is needed to avoid OOM, because some allocations are directly done with mmap. - Int64 current_free_memory_in_allocator_arenas = free_memory_in_allocator_arenas.load(std::memory_order_relaxed); - - if (current_free_memory_in_allocator_arenas > 0 && current_hard_limit && current_free_memory_in_allocator_arenas + will_be > current_hard_limit) - { - if (free_memory_in_allocator_arenas.exchange(-current_free_memory_in_allocator_arenas) > 0) - { - Stopwatch watch; - mallctl("arena." 
STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", nullptr, nullptr, nullptr, 0); - ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge); - ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, watch.elapsedMicroseconds()); - } - } - - limit_to_check += abs(current_free_memory_in_allocator_arenas); - } -#endif - - if (unlikely(current_hard_limit && will_be > limit_to_check)) + if (unlikely( + current_hard_limit && (will_be > current_hard_limit || (level == VariableContext::Global && will_be_rss > current_hard_limit)))) { if (memoryTrackerCanThrow(level, false) && throw_if_memory_exceeded) { @@ -335,6 +309,7 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed { /// Revert amount.fetch_sub(size, std::memory_order_relaxed); + rss.fetch_sub(size, std::memory_order_relaxed); /// Prevent recursion. Exception::ctor -> std::string -> new[] -> MemoryTracker::alloc MemoryTrackerBlockerInThread untrack_lock(VariableContext::Global); @@ -343,12 +318,13 @@ AllocationTrace MemoryTracker::allocImpl(Int64 size, bool throw_if_memory_exceed throw DB::Exception( DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED, "Memory limit{}{} exceeded: " - "would use {} (attempt to allocate chunk of {} bytes), maximum: {}." + "would use {} (attempt to allocate chunk of {} bytes), current RSS {}, maximum: {}." "{}{}", description ? " " : "", description ? description : "", formatReadableSizeWithBinarySuffix(will_be), size, + formatReadableSizeWithBinarySuffix(rss.load(std::memory_order_relaxed)), formatReadableSizeWithBinarySuffix(current_hard_limit), overcommit_result == OvercommitResult::NONE ? "" : " OvercommitTracker decision: ", toDescription(overcommit_result)); @@ -442,6 +418,7 @@ AllocationTrace MemoryTracker::free(Int64 size, double _sample_probability) { /// For global memory tracker always update memory usage. 
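The allocImpl changes above replace the jemalloc-arena correction with a second counter: the global tracker now carries both the internally accounted amount and an externally corrected `rss`, and an allocation is reverted and rejected when either projected value crosses the hard limit. A toy sketch of that dual check, not ClickHouse's actual MemoryTracker:

```cpp
// Toy model (not the real MemoryTracker): the global tracker keeps two counters,
// internally accounted bytes and externally corrected RSS, and an allocation is
// reverted and rejected if either projected value crosses the hard limit.
#include <atomic>
#include <cstdint>
#include <iostream>
#include <stdexcept>

struct ToyGlobalTracker
{
    std::atomic<int64_t> amount{0};  // bytes accounted by alloc/free hooks
    std::atomic<int64_t> rss{0};     // periodically overwritten from cgroups/jemalloc
    int64_t hard_limit = 0;

    void alloc(int64_t size)
    {
        int64_t will_be = size + amount.fetch_add(size, std::memory_order_relaxed);
        int64_t will_be_rss = size + rss.fetch_add(size, std::memory_order_relaxed);
        if (hard_limit && (will_be > hard_limit || will_be_rss > hard_limit))
        {
            amount.fetch_sub(size, std::memory_order_relaxed);  // revert before throwing
            rss.fetch_sub(size, std::memory_order_relaxed);
            throw std::runtime_error("memory limit exceeded");
        }
    }
};

int main()
{
    ToyGlobalTracker tracker;
    tracker.hard_limit = 1 << 20;       // 1 MiB
    tracker.alloc(512 * 1024);          // fits
    try
    {
        tracker.alloc(600 * 1024);      // would exceed the limit
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
    return 0;
}
```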
amount.fetch_sub(size, std::memory_order_relaxed); + rss.fetch_sub(size, std::memory_order_relaxed); auto metric_loaded = metric.load(std::memory_order_relaxed); if (metric_loaded != CurrentMetrics::end()) CurrentMetrics::sub(metric_loaded, size); @@ -455,7 +432,12 @@ AllocationTrace MemoryTracker::free(Int64 size, double _sample_probability) } Int64 accounted_size = size; - if (level == VariableContext::Thread || level == VariableContext::Global) + if (level == VariableContext::Global) + { + amount.fetch_sub(accounted_size, std::memory_order_relaxed); + rss.fetch_sub(accounted_size, std::memory_order_relaxed); + } + else if (level == VariableContext::Thread) { /// Could become negative if memory allocated in this thread is freed in another one amount.fetch_sub(accounted_size, std::memory_order_relaxed); @@ -529,21 +511,29 @@ void MemoryTracker::reset() } -void MemoryTracker::setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_) +void MemoryTracker::updateRSS(Int64 rss_) { - Int64 new_amount = rss_; + total_memory_tracker.rss.store(rss_, std::memory_order_relaxed); +} + +void MemoryTracker::updateAllocated(Int64 allocated_) +{ + Int64 new_amount = allocated_; + LOG_INFO( + getLogger("MemoryTracker"), + "Correcting the value of global memory tracker from {} to {}", + ReadableSize(total_memory_tracker.amount.load(std::memory_order_relaxed)), + ReadableSize(allocated_)); total_memory_tracker.amount.store(new_amount, std::memory_order_relaxed); - free_memory_in_allocator_arenas.store(free_memory_in_allocator_arenas_, std::memory_order_relaxed); auto metric_loaded = total_memory_tracker.metric.load(std::memory_order_relaxed); if (metric_loaded != CurrentMetrics::end()) CurrentMetrics::set(metric_loaded, new_amount); bool log_memory_usage = true; - total_memory_tracker.updatePeak(rss_, log_memory_usage); + total_memory_tracker.updatePeak(new_amount, log_memory_usage); } - void MemoryTracker::setSoftLimit(Int64 value) { soft_limit.store(value, std::memory_order_relaxed); diff --git a/src/Common/MemoryTracker.h b/src/Common/MemoryTracker.h index fd32b631774..f15465a20c1 100644 --- a/src/Common/MemoryTracker.h +++ b/src/Common/MemoryTracker.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -57,9 +56,8 @@ private: std::atomic soft_limit {0}; std::atomic hard_limit {0}; std::atomic profiler_limit {0}; - std::atomic_bool allow_use_jemalloc_memory {true}; - static std::atomic free_memory_in_allocator_arenas; + std::atomic rss{0}; Int64 profiler_step = 0; @@ -122,6 +120,11 @@ public: return amount.load(std::memory_order_relaxed); } + Int64 getRSS() const + { + return rss.load(std::memory_order_relaxed); + } + // Merges and mutations may pass memory ownership to other threads thus in the end of execution // MemoryTracker for background task may have a non-zero counter. // This method is intended to fix the counter inside of background_memory_tracker. @@ -154,14 +157,6 @@ public: { return soft_limit.load(std::memory_order_relaxed); } - void setAllowUseJemallocMemory(bool value) - { - allow_use_jemalloc_memory.store(value, std::memory_order_relaxed); - } - bool getAllowUseJemallocMmemory() const - { - return allow_use_jemalloc_memory.load(std::memory_order_relaxed); - } /** Set limit if it was not set. * Otherwise, set limit to new value, if new value is greater than previous limit. @@ -249,10 +244,9 @@ public: /// Reset the accumulated data. void reset(); - /// Reset current counter to an RSS value. - /// Jemalloc may have pre-allocated arenas, they are accounted in RSS. 
- /// We can free this arenas in case of exception to avoid OOM. - static void setRSS(Int64 rss_, Int64 free_memory_in_allocator_arenas_); + /// update values based on external information (e.g. jemalloc's stat) + static void updateRSS(Int64 rss_); + static void updateAllocated(Int64 allocated_); /// Prints info about peak memory consumption into log. void logPeakMemoryUsage(); diff --git a/src/Common/MemoryWorker.cpp b/src/Common/MemoryWorker.cpp new file mode 100644 index 00000000000..11f3bff348c --- /dev/null +++ b/src/Common/MemoryWorker.cpp @@ -0,0 +1,333 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace fs = std::filesystem; + +namespace ProfileEvents +{ + extern const Event MemoryAllocatorPurge; + extern const Event MemoryAllocatorPurgeTimeMicroseconds; + extern const Event MemoryWorkerRun; + extern const Event MemoryWorkerRunElapsedMicroseconds; +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int FILE_DOESNT_EXIST; + extern const int LOGICAL_ERROR; +} + +#if defined(OS_LINUX) +namespace +{ + +using Metrics = std::map; + +/// Format is +/// kernel 5 +/// rss 15 +/// [...] +Metrics readAllMetricsFromStatFile(ReadBufferFromFile & buf) +{ + Metrics metrics; + while (!buf.eof()) + { + std::string current_key; + readStringUntilWhitespace(current_key, buf); + + assertChar(' ', buf); + + uint64_t value = 0; + readIntText(value, buf); + assertChar('\n', buf); + + auto [_, inserted] = metrics.emplace(std::move(current_key), value); + chassert(inserted, "Duplicate keys in stat file"); + } + return metrics; +} + +uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, std::string_view key) +{ + while (!buf.eof()) + { + std::string current_key; + readStringUntilWhitespace(current_key, buf); + if (current_key != key) + { + std::string dummy; + readStringUntilNewlineInto(dummy, buf); + buf.ignore(); + continue; + } + + assertChar(' ', buf); + uint64_t value = 0; + readIntText(value, buf); + return value; + } + LOG_ERROR(getLogger("CgroupsReader"), "Cannot find '{}' in '{}'", key, buf.getFileName()); + return 0; +} + +struct CgroupsV1Reader : ICgroupsReader +{ + explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { } + + uint64_t readMemoryUsage() override + { + std::lock_guard lock(mutex); + buf.rewind(); + return readMetricFromStatFile(buf, "rss"); + } + + std::string dumpAllStats() override + { + std::lock_guard lock(mutex); + buf.rewind(); + return fmt::format("{}", readAllMetricsFromStatFile(buf)); + } + +private: + std::mutex mutex; + ReadBufferFromFile buf TSA_GUARDED_BY(mutex); +}; + +struct CgroupsV2Reader : ICgroupsReader +{ + explicit CgroupsV2Reader(const fs::path & stat_file_dir) : stat_buf(stat_file_dir / "memory.stat") { } + + uint64_t readMemoryUsage() override + { + std::lock_guard lock(mutex); + stat_buf.rewind(); + return readMetricFromStatFile(stat_buf, "anon"); + } + + std::string dumpAllStats() override + { + std::lock_guard lock(mutex); + stat_buf.rewind(); + return fmt::format("{}", readAllMetricsFromStatFile(stat_buf)); + } + +private: + std::mutex mutex; + ReadBufferFromFile stat_buf TSA_GUARDED_BY(mutex); +}; + +/// Caveats: +/// - All of the logic in this file assumes that the current process is the only process in the +/// containing cgroup (or more precisely: the only process with significant memory consumption). 
+/// If this is not the case, then other processe's memory consumption may affect the internal +/// memory tracker ... +/// - Cgroups v1 and v2 allow nested cgroup hierarchies. As v1 is deprecated for over half a +/// decade and will go away at some point, hierarchical detection is only implemented for v2. +/// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such +/// systems existed only for a short transition period. + +std::optional getCgroupsV1Path() +{ + auto path = default_cgroups_mount / "memory/memory.stat"; + if (!fs::exists(path)) + return {}; + return {default_cgroups_mount / "memory"}; +} + +std::pair getCgroupsPath() +{ + auto v2_path = getCgroupsV2PathContainingFile("memory.current"); + if (v2_path.has_value()) + return {*v2_path, ICgroupsReader::CgroupsVersion::V2}; + + auto v1_path = getCgroupsV1Path(); + if (v1_path.has_value()) + return {*v1_path, ICgroupsReader::CgroupsVersion::V1}; + + throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Cannot find cgroups v1 or v2 current memory file"); +} + +} + +std::shared_ptr ICgroupsReader::createCgroupsReader(ICgroupsReader::CgroupsVersion version, const std::filesystem::path & cgroup_path) +{ + if (version == CgroupsVersion::V2) + return std::make_shared(cgroup_path); + else + { + chassert(version == CgroupsVersion::V1); + return std::make_shared(cgroup_path); + } +} +#endif + +namespace +{ + +std::string_view sourceToString(MemoryWorker::MemoryUsageSource source) +{ + switch (source) + { + case MemoryWorker::MemoryUsageSource::Cgroups: return "Cgroups"; + case MemoryWorker::MemoryUsageSource::Jemalloc: return "Jemalloc"; + case MemoryWorker::MemoryUsageSource::None: return "None"; + } +} + +} + +/// We try to pick the best possible supported source for reading memory usage. +/// Supported sources in order of priority +/// - reading from cgroups' pseudo-files (fastest and most accurate) +/// - reading jemalloc's resident stat (doesn't take into account allocations that didn't use jemalloc) +/// Also, different tick rates are used because not all options are equally fast +MemoryWorker::MemoryWorker(uint64_t period_ms_) + : log(getLogger("MemoryWorker")) + , period_ms(period_ms_) +{ +#if defined(OS_LINUX) + try + { + static constexpr uint64_t cgroups_memory_usage_tick_ms{50}; + + const auto [cgroup_path, version] = getCgroupsPath(); + LOG_INFO( + getLogger("CgroupsReader"), + "Will create cgroup reader from '{}' (cgroups version: {})", + cgroup_path, + (version == ICgroupsReader::CgroupsVersion::V1) ? "v1" : "v2"); + + cgroups_reader = ICgroupsReader::createCgroupsReader(version, cgroup_path); + source = MemoryUsageSource::Cgroups; + if (period_ms == 0) + period_ms = cgroups_memory_usage_tick_ms; + + return; + } + catch (...) 
+ { + tryLogCurrentException(log, "Cannot use cgroups reader"); + } +#endif + +#if USE_JEMALLOC + static constexpr uint64_t jemalloc_memory_usage_tick_ms{100}; + + source = MemoryUsageSource::Jemalloc; + if (period_ms == 0) + period_ms = jemalloc_memory_usage_tick_ms; +#endif +} + +MemoryWorker::MemoryUsageSource MemoryWorker::getSource() +{ + return source; +} + +void MemoryWorker::start() +{ + if (source == MemoryUsageSource::None) + return; + + LOG_INFO( + getLogger("MemoryWorker"), + "Starting background memory thread with period of {}ms, using {} as source", + period_ms, + sourceToString(source)); + background_thread = ThreadFromGlobalPool([this] { backgroundThread(); }); +} + +MemoryWorker::~MemoryWorker() +{ + { + std::unique_lock lock(mutex); + shutdown = true; + } + cv.notify_all(); + + if (background_thread.joinable()) + background_thread.join(); +} + +uint64_t MemoryWorker::getMemoryUsage() +{ + switch (source) + { + case MemoryUsageSource::Cgroups: + return cgroups_reader != nullptr ? cgroups_reader->readMemoryUsage() : 0; + case MemoryUsageSource::Jemalloc: +#if USE_JEMALLOC + return resident_mib.getValue(); +#else + return 0; +#endif + case MemoryUsageSource::None: + throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Trying to fetch memory usage while no memory source can be used"); + } +} + +void MemoryWorker::backgroundThread() +{ + std::chrono::milliseconds chrono_period_ms{period_ms}; + [[maybe_unused]] bool first_run = true; + std::unique_lock lock(mutex); + while (true) + { + cv.wait_for(lock, chrono_period_ms, [this] { return shutdown; }); + if (shutdown) + return; + + Stopwatch total_watch; + +#if USE_JEMALLOC + if (source == MemoryUsageSource::Jemalloc) + epoch_mib.setValue(0); +#endif + + Int64 resident = getMemoryUsage(); + MemoryTracker::updateRSS(resident); + +#if USE_JEMALLOC + if (resident > total_memory_tracker.getHardLimit()) + { + Stopwatch purge_watch; + purge_mib.run(); + ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurge); + ProfileEvents::increment(ProfileEvents::MemoryAllocatorPurgeTimeMicroseconds, purge_watch.elapsedMicroseconds()); + } +#endif + +#if USE_JEMALLOC + if (unlikely(first_run || total_memory_tracker.get() < 0)) + { + if (source != MemoryUsageSource::Jemalloc) + epoch_mib.setValue(0); + + MemoryTracker::updateAllocated(allocated_mib.getValue()); + } +#endif + + ProfileEvents::increment(ProfileEvents::MemoryWorkerRun); + ProfileEvents::increment(ProfileEvents::MemoryWorkerRunElapsedMicroseconds, total_watch.elapsedMicroseconds()); + first_run = false; + } +} + +} diff --git a/src/Common/MemoryWorker.h b/src/Common/MemoryWorker.h new file mode 100644 index 00000000000..f4b0fed23ec --- /dev/null +++ b/src/Common/MemoryWorker.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +struct ICgroupsReader +{ + enum class CgroupsVersion : uint8_t + { + V1, + V2 + }; + +#if defined(OS_LINUX) + static std::shared_ptr + createCgroupsReader(ICgroupsReader::CgroupsVersion version, const std::filesystem::path & cgroup_path); +#endif + + virtual ~ICgroupsReader() = default; + + virtual uint64_t readMemoryUsage() = 0; + + virtual std::string dumpAllStats() = 0; +}; + + +/// Correct MemoryTracker based on external information (e.g. Cgroups or stats.resident from jemalloc) +/// The worker spawns a background thread which periodically reads current resident memory from the source, +/// whose value is sent to global MemoryTracker. 
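MemoryWorker::backgroundThread further down follows the usual periodic-worker shape: wait on a condition variable with a timeout so the loop ticks once per `period_ms` but can be woken immediately on shutdown. A self-contained sketch of that pattern with `std::thread` standing in for `ThreadFromGlobalPool`; the printed tick stands in for reading memory usage and calling `MemoryTracker::updateRSS`:

```cpp
// Self-contained sketch of the periodic-worker loop: wait on a condition variable with
// a timeout so the thread ticks once per period but can be woken immediately for shutdown.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class PeriodicWorker
{
public:
    explicit PeriodicWorker(std::chrono::milliseconds period_)
        : period(period_), thread([this] { run(); })
    {
    }

    ~PeriodicWorker()
    {
        {
            std::lock_guard lock(mutex);
            shutdown = true;
        }
        cv.notify_all();
        thread.join();
    }

private:
    void run()
    {
        std::unique_lock lock(mutex);
        while (true)
        {
            cv.wait_for(lock, period, [this] { return shutdown; });
            if (shutdown)
                return;
            // Here the real worker would read resident memory and update the global tracker.
            std::cout << "tick\n";
        }
    }

    std::chrono::milliseconds period;
    std::mutex mutex;
    std::condition_variable cv;
    bool shutdown = false;
    std::thread thread;  // declared last so the other members exist before run() starts
};

int main()
{
    PeriodicWorker worker(std::chrono::milliseconds(50));
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
    return 0;
}
```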
+/// It can do additional things like purging jemalloc dirty pages if the current memory usage is higher than global hard limit. +class MemoryWorker +{ +public: + explicit MemoryWorker(uint64_t period_ms_); + + enum class MemoryUsageSource : uint8_t + { + None, + Cgroups, + Jemalloc + }; + + MemoryUsageSource getSource(); + + void start(); + + ~MemoryWorker(); +private: + uint64_t getMemoryUsage(); + + void backgroundThread(); + + ThreadFromGlobalPool background_thread; + + std::mutex mutex; + std::condition_variable cv; + bool shutdown = false; + + LoggerPtr log; + + uint64_t period_ms; + + MemoryUsageSource source{MemoryUsageSource::None}; + + std::shared_ptr cgroups_reader; + +#if USE_JEMALLOC + JemallocMibCache epoch_mib{"epoch"}; + JemallocMibCache resident_mib{"stats.resident"}; + JemallocMibCache allocated_mib{"stats.allocated"}; + +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) + JemallocMibCache purge_mib{"arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge"}; +#undef STRINGIFY +#undef STRINGIFY_HELPER +#endif +}; + +} diff --git a/src/Common/PoolWithFailoverBase.h b/src/Common/PoolWithFailoverBase.h index c44ab7df53a..989ffd888f8 100644 --- a/src/Common/PoolWithFailoverBase.h +++ b/src/Common/PoolWithFailoverBase.h @@ -122,6 +122,20 @@ public: return result.entry.isNull() || !result.is_usable || (skip_read_only_replicas && result.is_readonly); } + TryResult getValidTryResult(const std::vector & results, bool skip_read_only_replicas) const + { + if (results.empty()) + throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Cannot get any valid connection because all connection tries failed"); + + auto result = results.front(); + if (isTryResultInvalid(result, skip_read_only_replicas)) + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, + "Got an invalid connection result: entry.isNull {}, is_usable {}, is_up_to_date {}, delay {}, is_readonly {}, skip_read_only_replicas {}", + result.entry.isNull(), result.is_usable, result.is_up_to_date, result.delay, result.is_readonly, skip_read_only_replicas); + + return result; + } + size_t getPoolSize() const { return nested_pools.size(); } protected: diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index af1b7fbeb4a..467dfe60cd7 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -827,6 +827,9 @@ The server successfully detected this situation and will download merged part fr M(GWPAsanAllocateSuccess, "Number of successful allocations done by GWPAsan") \ M(GWPAsanAllocateFailed, "Number of failed allocations done by GWPAsan (i.e. filled pool)") \ M(GWPAsanFree, "Number of free operations done by GWPAsan") \ + \ + M(MemoryWorkerRun, "Number of runs done by MemoryWorker in background") \ + M(MemoryWorkerRunElapsedMicroseconds, "Total time spent by MemoryWorker for background work") \ #ifdef APPLY_FOR_EXTERNAL_EVENTS diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp index bd01b639913..3dce34803b2 100644 --- a/src/Common/StackTrace.cpp +++ b/src/Common/StackTrace.cpp @@ -67,10 +67,18 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc = info.si_addr == nullptr ? "NULL pointer"s : (shouldShowAddress(info.si_addr) ? fmt::format("{}", info.si_addr) : ""s); const std::string_view access = -#if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__) - (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? 
"write" : "read"; +#if defined(__arm__) + ""; +#elif defined(__powerpc__) + ""; +#elif defined(OS_DARWIN) + ""; +#elif defined(OS_FREEBSD) + ""; +#elif !defined(__x86_64__) + ""; #else - ""; + (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read"; #endif std::string_view message; diff --git a/src/Common/tests/gtest_cgroups_reader.cpp b/src/Common/tests/gtest_cgroups_reader.cpp index 2de25bb42ce..e24b91a59b8 100644 --- a/src/Common/tests/gtest_cgroups_reader.cpp +++ b/src/Common/tests/gtest_cgroups_reader.cpp @@ -6,7 +6,7 @@ #include #include -#include +#include #include using namespace DB; @@ -126,7 +126,7 @@ const std::string EXPECTED[2] "\"workingset_restore_anon\": 0, \"workingset_restore_file\": 0, \"zswap\": 0, \"zswapped\": 0, \"zswpin\": 0, \"zswpout\": 0}"}; -class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam +class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParam { void SetUp() override { @@ -138,7 +138,7 @@ class CgroupsMemoryUsageObserverFixture : public ::testing::TestWithParamreadMemoryUsage(), - version == CgroupsMemoryUsageObserver::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184 - : /* value from memory.current - inactive_file */ 20952338432); + version == ICgroupsReader::CgroupsVersion::V1 ? /* rss from memory.stat */ 2232029184 + : /* anon from memory.stat */ 10429399040); } TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest) { const auto version = GetParam(); - auto reader = createCgroupsReader(version, tmp_dir); + auto reader = ICgroupsReader::createCgroupsReader(version, tmp_dir); ASSERT_EQ(reader->dumpAllStats(), EXPECTED[static_cast(version)]); } @@ -173,6 +173,6 @@ TEST_P(CgroupsMemoryUsageObserverFixture, DumpAllStatsTest) INSTANTIATE_TEST_SUITE_P( CgroupsMemoryUsageObserverTests, CgroupsMemoryUsageObserverFixture, - ::testing::Values(CgroupsMemoryUsageObserver::CgroupsVersion::V1, CgroupsMemoryUsageObserver::CgroupsVersion::V2)); + ::testing::Values(ICgroupsReader::CgroupsVersion::V1, ICgroupsReader::CgroupsVersion::V2)); #endif diff --git a/src/Compression/CompressedWriteBuffer.cpp b/src/Compression/CompressedWriteBuffer.cpp index 83c9fbc9573..c3acfcb7da6 100644 --- a/src/Compression/CompressedWriteBuffer.cpp +++ b/src/Compression/CompressedWriteBuffer.cpp @@ -55,10 +55,29 @@ void CompressedWriteBuffer::nextImpl() out.write(compressed_buffer.data(), compressed_size); } + + /// Increase buffer size for next data if adaptive buffer size is used and nextImpl was called because of end of buffer. + if (!available() && use_adaptive_buffer_size && memory.size() < adaptive_buffer_max_size) + { + memory.resize(std::min(memory.size() * 2, adaptive_buffer_max_size)); + BufferBase::set(memory.data(), memory.size(), 0); + } } -CompressedWriteBuffer::CompressedWriteBuffer(WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size) - : BufferWithOwnMemory(buf_size), out(out_), codec(std::move(codec_)) +void CompressedWriteBuffer::finalizeImpl() +{ + /// Don't try to resize buffer in nextImpl. + use_adaptive_buffer_size = false; + next(); +} + +CompressedWriteBuffer::CompressedWriteBuffer( + WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size, bool use_adaptive_buffer_size_, size_t adaptive_buffer_initial_size) + : BufferWithOwnMemory(use_adaptive_buffer_size_ ? 
adaptive_buffer_initial_size : buf_size) + , out(out_) + , codec(std::move(codec_)) + , use_adaptive_buffer_size(use_adaptive_buffer_size_) + , adaptive_buffer_max_size(buf_size) { } diff --git a/src/Compression/CompressedWriteBuffer.h b/src/Compression/CompressedWriteBuffer.h index 6ae1fbee9cc..41596703bfe 100644 --- a/src/Compression/CompressedWriteBuffer.h +++ b/src/Compression/CompressedWriteBuffer.h @@ -19,7 +19,9 @@ public: explicit CompressedWriteBuffer( WriteBuffer & out_, CompressionCodecPtr codec_ = CompressionCodecFactory::instance().getDefaultCodec(), - size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, + bool use_adaptive_buffer_size_ = false, + size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE); ~CompressedWriteBuffer() override; @@ -45,10 +47,17 @@ public: private: void nextImpl() override; + void finalizeImpl() override; WriteBuffer & out; CompressionCodecPtr codec; + /// If true, the size of internal buffer will be exponentially increased up to + /// adaptive_buffer_max_size after each nextImpl call. It can be used to avoid + /// large buffer allocation when actual size of written data is small. + bool use_adaptive_buffer_size; + size_t adaptive_buffer_max_size; + PODArray compressed_buffer; }; diff --git a/src/Coordination/KeeperAsynchronousMetrics.cpp b/src/Coordination/KeeperAsynchronousMetrics.cpp index 86166ffe31b..157858f3c44 100644 --- a/src/Coordination/KeeperAsynchronousMetrics.cpp +++ b/src/Coordination/KeeperAsynchronousMetrics.cpp @@ -114,8 +114,13 @@ void updateKeeperInformation(KeeperDispatcher & keeper_dispatcher, AsynchronousM } KeeperAsynchronousMetrics::KeeperAsynchronousMetrics( - ContextPtr context_, unsigned update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_) - : AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_), context(std::move(context_)) + ContextPtr context_, + unsigned update_period_seconds, + const ProtocolServerMetricsFunc & protocol_server_metrics_func_, + bool update_jemalloc_epoch_, + bool update_rss_) + : AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_, update_jemalloc_epoch_, update_rss_) + , context(std::move(context_)) { } diff --git a/src/Coordination/KeeperAsynchronousMetrics.h b/src/Coordination/KeeperAsynchronousMetrics.h index ec0e60cbb6e..a2ab7cab756 100644 --- a/src/Coordination/KeeperAsynchronousMetrics.h +++ b/src/Coordination/KeeperAsynchronousMetrics.h @@ -13,9 +13,13 @@ class KeeperAsynchronousMetrics : public AsynchronousMetrics { public: KeeperAsynchronousMetrics( - ContextPtr context_, unsigned update_period_seconds, const ProtocolServerMetricsFunc & protocol_server_metrics_func_); - ~KeeperAsynchronousMetrics() override; + ContextPtr context_, + unsigned update_period_seconds, + const ProtocolServerMetricsFunc & protocol_server_metrics_func_, + bool update_jemalloc_epoch_, + bool update_rss_); + ~KeeperAsynchronousMetrics() override; private: ContextPtr context; diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 4a350077596..893bb8e6082 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -148,7 +148,14 @@ void KeeperDispatcher::requestThread() Int64 mem_soft_limit = keeper_context->getKeeperMemorySoftLimit(); if (configuration_and_settings->standalone_keeper && isExceedingMemorySoftLimit() && checkIfRequestIncreaseMem(request.request)) { - LOG_WARNING(log, "Processing requests 
refused because of max_memory_usage_soft_limit {}, the total used memory is {}, request type is {}", ReadableSize(mem_soft_limit), ReadableSize(total_memory_tracker.get()), request.request->getOpNum()); + LOG_WARNING( + log, + "Processing requests refused because of max_memory_usage_soft_limit {}, the total allocated memory is {}, RSS is {}, request type " + "is {}", + ReadableSize(mem_soft_limit), + ReadableSize(total_memory_tracker.get()), + ReadableSize(total_memory_tracker.getRSS()), + request.request->getOpNum()); addErrorResponses({request}, Coordination::Error::ZCONNECTIONLOSS); continue; } diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index f09ea56391a..918f24efb2c 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -602,7 +602,7 @@ bool KeeperServer::isLeaderAlive() const bool KeeperServer::isExceedingMemorySoftLimit() const { Int64 mem_soft_limit = keeper_context->getKeeperMemorySoftLimit(); - return mem_soft_limit > 0 && total_memory_tracker.get() >= mem_soft_limit; + return mem_soft_limit > 0 && std::max(total_memory_tracker.get(), total_memory_tracker.getRSS()) >= mem_soft_limit; } /// TODO test whether taking failed peer in count diff --git a/src/Core/Defines.h b/src/Core/Defines.h index 6df335a9c8f..c6e65f34e90 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -20,6 +20,9 @@ static constexpr auto DBMS_DEFAULT_POLL_INTERVAL = 10; /// The size of the I/O buffer by default. static constexpr auto DBMS_DEFAULT_BUFFER_SIZE = 1048576ULL; +/// The initial size of adaptive I/O buffer by default. +static constexpr auto DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE = 16384ULL; + static constexpr auto PADDING_FOR_SIMD = 64; /** Which blocks by default read the data (by number of rows). @@ -40,7 +43,7 @@ static constexpr auto SHOW_CHARS_ON_SYNTAX_ERROR = ptrdiff_t(160); /// each period reduces the error counter by 2 times /// too short a period can cause errors to disappear immediately after creation. static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_DEFAULT_DECREASE_ERROR_PERIOD = 60; -/// replica error max cap, this is to prevent replica from accumulating too many errors and taking to long to recover. +/// replica error max cap, this is to prevent replica from accumulating too many errors and taking too long to recover. static constexpr auto DBMS_CONNECTION_POOL_WITH_FAILOVER_MAX_ERROR_COUNT = 1000; /// The boundary on which the blocks for asynchronous file operations should be aligned. diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index 79173503f28..18ee096569a 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -148,6 +148,7 @@ namespace DB M(Bool, storage_metadata_write_full_object_key, false, "Write disk metadata files with VERSION_FULL_OBJECT_KEY format", 0) \ M(UInt64, max_materialized_views_count_for_table, 0, "A limit on the number of materialized views attached to a table.", 0) \ M(UInt32, max_database_replicated_create_table_thread_pool_size, 1, "The number of threads to create tables during replica recovery in DatabaseReplicated. 
Zero means number of threads equal number of cores.", 0) \ + M(Bool, database_replicated_allow_detach_permanently, true, "Allow detaching tables permanently in Replicated databases", 0) \ M(Bool, format_alter_operations_with_parentheses, false, "If enabled, each operation in alter queries will be surrounded with parentheses in formatted queries to make them less ambiguous.", 0) \ M(String, default_replica_path, "/clickhouse/tables/{uuid}/{shard}", "The path to the table in ZooKeeper", 0) \ M(String, default_replica_name, "{replica}", "The replica name in ZooKeeper", 0) \ @@ -169,6 +170,7 @@ namespace DB M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \ M(Double, gwp_asan_force_sample_probability, 0.0003, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \ M(UInt64, config_reload_interval_ms, 2000, "How often clickhouse will reload config and check for new changes", 0) \ + M(UInt64, memory_worker_period_ms, 0, "Tick period of background memory worker which corrects memory tracker memory usages and cleans up unused pages during higher memory usage. If set to 0, default value will be used depending on the memory usage source", 0) \ M(Bool, disable_insertion_and_mutation, false, "Disable all insert/alter/delete queries. This setting will be enabled if someone needs read-only nodes to prevent insertion and mutation affect reading performance.", 0) /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 0e58562c745..493752fc3fe 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -710,7 +710,8 @@ class IColumn; M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \ M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \ M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \ - M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \ + M(UInt64, database_replicated_allow_replicated_engine_arguments, 0, "0 - Don't allow to explicitly specify ZooKeeper path and replica name for *MergeTree tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified path and use default one instead.", 0) \ + M(UInt64, database_replicated_allow_explicit_uuid, 0, "0 - Don't allow to explicitly specify UUIDs for tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified UUID and generate a random one instead.", 0) \ M(Bool, database_replicated_allow_heavy_create, false, "Allow long-running DDL queries (CREATE AS SELECT and POPULATE) in Replicated database engine. 
Note that it can block DDL queue for a long time.", 0) \ M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \ M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 258065dcfd4..19f2d5ccdf0 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -76,12 +76,14 @@ static std::initializer_list path.size()) + return false; + + for (size_t i = 0; i != prefix_len; ++i) + { + if (path[i].type == SubstreamType::DynamicData || path[i].type == SubstreamType::DynamicStructure + || path[i].type == SubstreamType::ObjectData || path[i].type == SubstreamType::ObjectStructure) + return true; + } + + return false; +} + ISerialization::SubstreamData ISerialization::createFromPath(const SubstreamPath & path, size_t prefix_len) { assert(prefix_len <= path.size()); diff --git a/src/DataTypes/Serializations/ISerialization.h b/src/DataTypes/Serializations/ISerialization.h index 33575a07177..32f418e9132 100644 --- a/src/DataTypes/Serializations/ISerialization.h +++ b/src/DataTypes/Serializations/ISerialization.h @@ -457,6 +457,9 @@ public: /// for writing/reading data. For example, it's a null-map subcolumn of Variant type (it's always constructed from discriminators);. static bool isEphemeralSubcolumn(const SubstreamPath & path, size_t prefix_len); + /// Returns true if stream with specified path corresponds to dynamic subcolumn. + static bool isDynamicSubcolumn(const SubstreamPath & path, size_t prefix_len); + protected: template State * checkAndGetState(const StatePtr & state) const; diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 8e3378bcc12..3d64c82ba7d 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -63,6 +63,7 @@ namespace ErrorCodes extern const int NO_ACTIVE_REPLICAS; extern const int CANNOT_GET_REPLICATED_DATABASE_SNAPSHOT; extern const int CANNOT_RESTORE_TABLE; + extern const int SUPPORT_IS_DISABLED; } static constexpr const char * REPLICATED_DATABASE_MARK = "DatabaseReplicated"; @@ -441,7 +442,8 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL bool is_create_query = mode == LoadingStrictnessLevel::CREATE; String replica_host_id; - if (current_zookeeper->tryGet(replica_path, replica_host_id)) + bool replica_exists_in_zk = current_zookeeper->tryGet(replica_path, replica_host_id); + if (replica_exists_in_zk) { if (replica_host_id == DROPPED_MARK && !is_create_query) { @@ -454,7 +456,7 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL String host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection); String host_id_default = getHostID(getContext(), db_uuid, false); - if (is_create_query || (replica_host_id != host_id && replica_host_id != host_id_default)) + if (replica_host_id != host_id && replica_host_id != host_id_default) { throw Exception( ErrorCodes::REPLICA_ALREADY_EXISTS, @@ -484,13 +486,20 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL current_zookeeper->set(replica_path + "/replica_group", replica_group_name, -1); createEmptyLogEntry(current_zookeeper); } + + /// Needed to mark all the queries + /// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful. 
+ String max_log_ptr_at_creation_str; + if (current_zookeeper->tryGet(replica_path + "/max_log_ptr_at_creation", max_log_ptr_at_creation_str)) + max_log_ptr_at_creation = parse(max_log_ptr_at_creation_str); } - else if (is_create_query) + + if (is_create_query) { - /// Create new replica. Throws if replica with the same name already exists + /// Create replica nodes in ZooKeeper. If newly initialized nodes already exist, reuse them. createReplicaNodesInZooKeeper(current_zookeeper); } - else + else if (!replica_exists_in_zk) { /// It's not CREATE query, but replica does not exist. Probably it was dropped. /// Do not create anything, continue as readonly. @@ -606,37 +615,84 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt "already contains some data and it does not look like Replicated database path.", zookeeper_path); /// Write host name to replica_path, it will protect from multiple replicas with the same name - auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection); + const auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection); + + const std::vector check_paths = { + replica_path, + replica_path + "/replica_group", + replica_path + "/digest", + }; + bool nodes_exist = true; + auto check_responses = current_zookeeper->tryGet(check_paths); + for (size_t i = 0; i < check_responses.size(); ++i) + { + const auto response = check_responses[i]; + + if (response.error == Coordination::Error::ZNONODE) + { + nodes_exist = false; + break; + } else if (response.error != Coordination::Error::ZOK) + { + throw zkutil::KeeperException::fromPath(response.error, check_paths[i]); + } + } + + if (nodes_exist) + { + const std::vector expected_data = { + host_id, + replica_group_name, + "0", + }; + for (size_t i = 0; i != expected_data.size(); ++i) + { + if (check_responses[i].data != expected_data[i]) + { + throw Exception( + ErrorCodes::REPLICA_ALREADY_EXISTS, + "Replica node {} in ZooKeeper already exists and contains unexpected value: {}", + quoteString(check_paths[i]), quoteString(check_responses[i].data)); + } + } + + LOG_DEBUG(log, "Newly initialized replica nodes found in ZooKeeper, reusing them"); + createEmptyLogEntry(current_zookeeper); + return; + } for (int attempts = 10; attempts > 0; --attempts) { Coordination::Stat stat; - String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat); + const String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat); - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent)); - /// In addition to creating the replica nodes, we record the max_log_ptr at the instant where - /// we declared ourself as an existing replica. We'll need this during recoverLostReplica to - /// notify other nodes that issued new queries while this node was recovering. 
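// A condensed sketch of the idempotent creation pattern introduced above (illustration only;
// error handling and the max_log_ptr version check are omitted): probe all replica znodes with a
// batched tryGet; if every node exists with exactly the payload this replica would have written,
// a previously interrupted CREATE is being retried and the nodes can be reused; otherwise create
// them in a single multi-op.
#include <Common/ZooKeeper/ZooKeeper.h>
#include <string>
#include <vector>

bool createNodesIdempotently(
    const zkutil::ZooKeeperPtr & zookeeper,
    const std::vector<std::string> & paths,
    const std::vector<std::string> & expected_data)
{
    auto get_responses = zookeeper->tryGet(paths);

    bool all_exist = true;
    for (const auto & response : get_responses)
        if (response.error == Coordination::Error::ZNONODE)
            all_exist = false;

    if (all_exist)
    {
        for (size_t i = 0; i < paths.size(); ++i)
            if (get_responses[i].data != expected_data[i])
                return false;   /// nodes belong to another replica (or are half-written); caller must throw
        return true;            /// newly initialized nodes found, reuse them
    }

    Coordination::Requests ops;
    for (size_t i = 0; i < paths.size(); ++i)
        ops.emplace_back(zkutil::makeCreateRequest(paths[i], expected_data[i], zkutil::CreateMode::Persistent));

    Coordination::Responses responses;
    return zookeeper->tryMulti(ops, responses) == Coordination::Error::ZOK;
}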
- ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version)); + const Coordination::Requests ops = { + zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent), + zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent), + + /// Previously, this method was not idempotent and max_log_ptr_at_creation could be stored in memory. + /// we need to store max_log_ptr_at_creation in ZooKeeper to make this method idempotent during replica creation. + zkutil::makeCreateRequest(replica_path + "/max_log_ptr_at_creation", max_log_ptr_str, zkutil::CreateMode::Persistent), + zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version), + }; + + Coordination::Responses ops_responses; + const auto code = current_zookeeper->tryMulti(ops, ops_responses); - Coordination::Responses responses; - const auto code = current_zookeeper->tryMulti(ops, responses); if (code == Coordination::Error::ZOK) { max_log_ptr_at_creation = parse(max_log_ptr_str); - break; + createEmptyLogEntry(current_zookeeper); + return; } - else if (code == Coordination::Error::ZNODEEXISTS || attempts == 1) + + if (attempts == 1) { - /// If its our last attempt, or if the replica already exists, fail immediately. - zkutil::KeeperMultiException::check(code, ops, responses); + zkutil::KeeperMultiException::check(code, ops, ops_responses); } } - createEmptyLogEntry(current_zookeeper); } void DatabaseReplicated::beforeLoadingMetadata(ContextMutablePtr context_, LoadingStrictnessLevel mode) @@ -852,18 +908,6 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora bool maybe_replica_macros = info.expanded_other; bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros"); - if (!enable_functional_tests_helper) - { - if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments) - LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments"); - else - throw Exception(ErrorCodes::INCORRECT_QUERY, - "It's not allowed to specify explicit zookeeper_path and replica_name " - "for ReplicatedMergeTree arguments in Replicated database. If you really want to " - "specify them explicitly, enable setting " - "database_replicated_allow_replicated_engine_arguments."); - } - if (maybe_shard_macros && maybe_replica_macros) return; @@ -876,7 +920,9 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora return; } - throw Exception(ErrorCodes::INCORRECT_QUERY, + /// We will replace it with default arguments if the setting is 2 + if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments != 2) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. 
" "If you really want to specify it explicitly, then you should use some macros " "to distinguish different shards and replicas"); @@ -1145,6 +1191,9 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep /// so we need to allow experimental features that can be used in a CREATE query enableAllExperimentalSettings(query_context); + query_context->setSetting("database_replicated_allow_explicit_uuid", 3); + query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3); + auto txn = std::make_shared(current_zookeeper, zookeeper_path, false, ""); query_context->initZooKeeperMetadataTransaction(txn); return query_context; @@ -1693,6 +1742,9 @@ void DatabaseReplicated::detachTablePermanently(ContextPtr local_context, const { waitDatabaseStarted(); + if (!local_context->getServerSettings().database_replicated_allow_detach_permanently) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Support for DETACH TABLE PERMANENTLY is disabled"); + auto txn = local_context->getZooKeeperMetadataTransaction(); assert(!ddl_worker->isCurrentlyActive() || txn); if (txn && txn->isInitialQuery()) diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp index d1f0a928b1d..12a5b615234 100644 --- a/src/Disks/DiskLocal.cpp +++ b/src/Disks/DiskLocal.cpp @@ -339,7 +339,15 @@ DiskLocal::writeFile(const String & path, size_t buf_size, WriteMode mode, const { int flags = (mode == WriteMode::Append) ? (O_APPEND | O_CREAT | O_WRONLY) : -1; return std::make_unique( - fs::path(disk_path) / path, buf_size, flags, settings.local_throttler); + fs::path(disk_path) / path, + buf_size, + flags, + settings.local_throttler, + 0666, + nullptr, + 0, + settings.use_adaptive_write_buffer, + settings.adaptive_write_buffer_initial_size); } std::vector DiskLocal::getBlobPath(const String & path) const diff --git a/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp b/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp index 29d3cc8ebd2..5315e331dbd 100644 --- a/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp +++ b/src/Disks/IO/WriteBufferFromAzureBlobStorage.cpp @@ -59,7 +59,7 @@ WriteBufferFromAzureBlobStorage::WriteBufferFromAzureBlobStorage( const WriteSettings & write_settings_, std::shared_ptr settings_, ThreadPoolCallbackRunnerUnsafe schedule_) - : WriteBufferFromFileBase(buf_size_, nullptr, 0) + : WriteBufferFromFileBase(std::min(buf_size_, static_cast(DBMS_DEFAULT_BUFFER_SIZE)), nullptr, 0) , log(getLogger("WriteBufferFromAzureBlobStorage")) , buffer_allocation_policy(createBufferAllocationPolicy(*settings_)) , max_single_part_upload_size(settings_->max_single_part_upload_size) @@ -244,11 +244,21 @@ void WriteBufferFromAzureBlobStorage::allocateBuffer() buffer_allocation_policy->nextBuffer(); chassert(0 == hidden_size); - auto size = buffer_allocation_policy->getBufferSize(); - + /// First buffer was already allocated in BufferWithOwnMemory constructor with buffer size provided in constructor. + /// It will be reallocated in subsequent nextImpl calls up to the desired buffer size from buffer_allocation_policy. if (buffer_allocation_policy->getBufferNumber() == 1) - size = std::min(size_t(DBMS_DEFAULT_BUFFER_SIZE), size); + { + /// Reduce memory size if initial size was larger then desired size from buffer_allocation_policy. + /// Usually it doesn't happen but we have it in unit tests. 
+ if (memory.size() > buffer_allocation_policy->getBufferSize()) + { + memory.resize(buffer_allocation_policy->getBufferSize()); + WriteBuffer::set(memory.data(), memory.size()); + } + return; + } + auto size = buffer_allocation_policy->getBufferSize(); memory = Memory(size); WriteBuffer::set(memory.data(), memory.size()); } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index f85b5f45b37..fa48825e1a6 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -289,7 +289,7 @@ std::unique_ptr AzureObjectStorage::writeObject( /// NO return std::make_unique( client.get(), object.remote_path, - buf_size, + write_settings.use_adaptive_write_buffer ? write_settings.adaptive_write_buffer_initial_size : buf_size, patchSettings(write_settings), settings.get(), std::move(scheduler)); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 8de80971238..f26a3a8bd9d 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -282,7 +282,7 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN client.get(), uri.bucket, object.remote_path, - buf_size, + write_settings.use_adaptive_write_buffer ? write_settings.adaptive_write_buffer_initial_size : buf_size, request_settings, std::move(blob_storage_log), attributes, diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp index 9d898cd2470..123f2e4f608 100644 --- a/src/Formats/JSONUtils.cpp +++ b/src/Formats/JSONUtils.cpp @@ -483,6 +483,33 @@ namespace JSONUtils writeArrayEnd(out, 1); } + + void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out) + { + writeCompactArrayStart(out, 0, "meta"); + + for (size_t i = 0; i < names.size(); ++i) + { + writeCompactObjectStart(out); + writeTitle("name", out, 0, ""); + + /// The field names are pre-escaped to be put into JSON string literal. 
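// Usage sketch for the helper above (assumes the ClickHouse headers listed below; illustration only).
// Column names passed to writeCompactMetadata() must already be escaped for a JSON string literal,
// since they are wrapped in quotes verbatim, while the type names go through writeJSONString().
#include <Core/Names.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <Formats/FormatSettings.h>
#include <Formats/JSONUtils.h>
#include <IO/WriteBufferFromString.h>

std::string compactMetadataExample()
{
    using namespace DB;

    Names names{"id", "message"};   /// already JSON-escaped
    DataTypes types{std::make_shared<DataTypeUInt64>(), std::make_shared<DataTypeString>()};

    FormatSettings settings;
    WriteBufferFromOwnString out;
    JSONUtils::writeCompactMetadata(names, types, settings, out);

    /// Expected to look roughly like: "meta": [{"name": "id", "type": "UInt64"}, {"name": "message", "type": "String"}]
    return out.str();
}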
+ writeChar('"', out); + writeString(names[i], out); + writeChar('"', out); + + writeFieldCompactDelimiter(out); + writeTitle("type", out, 0, ""); + writeJSONString(types[i]->getName(), out, settings); + writeCompactObjectEnd(out); + + if (i + 1 < names.size()) + writeFieldCompactDelimiter(out); + } + + writeCompactArrayEnd(out); + } + void writeAdditionalInfo( size_t rows, size_t rows_before_limit, @@ -530,6 +557,45 @@ namespace JSONUtils } } + void writeCompactAdditionalInfo( + size_t rows, + size_t rows_before_limit, + bool applied_limit, + const Stopwatch & watch, + const Progress & progress, + bool write_statistics, + WriteBuffer & out) + { + writeCompactObjectStart(out); + writeCompactObjectStart(out, 0, "statistics"); + writeTitle("rows", out, 0, ""); + writeIntText(rows, out); + + if (applied_limit) + { + writeFieldCompactDelimiter(out); + writeTitle("rows_before_limit_at_least", out, 0, ""); + writeIntText(rows_before_limit, out); + } + + if (write_statistics) + { + writeFieldCompactDelimiter(out); + writeTitle("elapsed", out, 0, ""); + writeText(watch.elapsedSeconds(), out); + writeFieldCompactDelimiter(out); + + writeTitle("rows_read", out, 0, ""); + writeText(progress.read_rows.load(), out); + writeFieldCompactDelimiter(out); + + writeTitle("bytes_read", out, 0, ""); + writeText(progress.read_bytes.load(), out); + } + writeCompactObjectEnd(out); + writeCompactObjectEnd(out); + } + void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent) { writeTitle("exception", out, indent, " "); diff --git a/src/Formats/JSONUtils.h b/src/Formats/JSONUtils.h index e2ac3467971..622703947b9 100644 --- a/src/Formats/JSONUtils.h +++ b/src/Formats/JSONUtils.h @@ -99,6 +99,7 @@ namespace JSONUtils WriteBuffer & out); void writeMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out); + void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out); void writeAdditionalInfo( size_t rows, @@ -111,6 +112,15 @@ namespace JSONUtils bool write_statistics, WriteBuffer & out); + void writeCompactAdditionalInfo( + size_t rows, + size_t rows_before_limit, + bool applied_limit, + const Stopwatch & watch, + const Progress & progress, + bool write_statistics, + WriteBuffer & out); + void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent = 0); void skipColon(ReadBuffer & in); diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp index 57ca1bb49c8..770b747fafd 100644 --- a/src/Formats/registerFormats.cpp +++ b/src/Formats/registerFormats.cpp @@ -95,6 +95,7 @@ void registerOutputFormatMarkdown(FormatFactory & factory); void registerOutputFormatPostgreSQLWire(FormatFactory & factory); void registerOutputFormatPrometheus(FormatFactory & factory); void registerOutputFormatSQLInsert(FormatFactory & factory); +void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory); /// Input only formats. 
@@ -242,6 +243,7 @@ void registerFormats() registerOutputFormatCapnProto(factory); registerOutputFormatPrometheus(factory); registerOutputFormatSQLInsert(factory); + registerOutputFormatJSONCompactWithProgress(factory); registerInputFormatRegexp(factory); registerInputFormatJSONAsString(factory); diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index dfb4b76e5e2..d6d533f16ed 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -492,7 +492,7 @@ struct ToStartOfInterval { throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME); } - static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0) { if (scale_multiplier < 1000000000) { @@ -527,7 +527,7 @@ struct ToStartOfInterval { throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME); } - static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0) { if (scale_multiplier < 1000000) { @@ -570,7 +570,7 @@ struct ToStartOfInterval { throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME); } - static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0) { if (scale_multiplier < 1000) { @@ -613,7 +613,7 @@ struct ToStartOfInterval { return time_zone.toStartOfSecondInterval(t, seconds); } - static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0) { return time_zone.toStartOfSecondInterval(t / scale_multiplier, seconds); } @@ -634,7 +634,7 @@ struct ToStartOfInterval { return time_zone.toStartOfMinuteInterval(t, minutes); } - static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0) { return time_zone.toStartOfMinuteInterval(t / scale_multiplier, minutes); } @@ -655,7 +655,7 @@ struct ToStartOfInterval { return time_zone.toStartOfHourInterval(t, hours); } - static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0) { return time_zone.toStartOfHourInterval(t / scale_multiplier, hours); } @@ -676,7 +676,7 @@ struct ToStartOfInterval { return static_cast(time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days)); } - static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0) { return time_zone.toStartOfDayInterval(time_zone.toDayNum(t / scale_multiplier), days); } @@ -697,9 +697,13 @@ struct ToStartOfInterval { return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks); } - static UInt16 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 
0) { - return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks); + if (origin == 0) + return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks); + else + return ToStartOfInterval::execute(t, weeks * 7, time_zone, scale_multiplier, origin); + } }; @@ -718,9 +722,23 @@ struct ToStartOfInterval { return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months); } - static UInt16 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0) { - return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t / scale_multiplier), months); + const Int64 scaled_time = t / scale_multiplier; + if (origin == 0) + return time_zone.toStartOfMonthInterval(time_zone.toDayNum(scaled_time), months); + else + { + const Int64 scaled_origin = origin / scale_multiplier; + const Int64 days = time_zone.toDayOfMonth(scaled_time + scaled_origin) - time_zone.toDayOfMonth(scaled_origin); + Int64 months_to_add = time_zone.toMonth(scaled_time + scaled_origin) - time_zone.toMonth(scaled_origin); + const Int64 years = time_zone.toYear(scaled_time + scaled_origin) - time_zone.toYear(scaled_origin); + months_to_add = days < 0 ? months_to_add - 1 : months_to_add; + months_to_add += years * 12; + Int64 month_multiplier = (months_to_add / months) * months; + + return (time_zone.addMonths(time_zone.toDate(scaled_origin), month_multiplier) - time_zone.toDate(scaled_origin)); + } } }; @@ -739,9 +757,12 @@ struct ToStartOfInterval { return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters); } - static UInt16 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0) { - return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters); + if (origin == 0) + return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters); + else + return ToStartOfInterval::execute(t, quarters * 3, time_zone, scale_multiplier, origin); } }; @@ -760,9 +781,12 @@ struct ToStartOfInterval { return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years); } - static UInt16 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier) + static Int64 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0) { - return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years); + if (origin == 0) + return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years); + else + return ToStartOfInterval::execute(t, years * 12, time_zone, scale_multiplier, origin); } }; diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index 7d60bd54d2f..f174bcd5f3e 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -6,6 +6,7 @@ # include # include +# include # include # include # include @@ -49,6 +50,11 @@ struct LowerUpperUTF8Impl if (U_FAILURE(error_code)) throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Error calling ucasemap_open: {}", u_errorName(error_code)); + SCOPE_EXIT( + { + ucasemap_close(case_map); + }); + size_t curr_offset = 0; for (size_t row_i = 0; row_i < input_rows_count; ++row_i) { diff --git a/src/Functions/array/array.cpp 
b/src/Functions/array/array.cpp index dfe589fb74f..d2aedd57f99 100644 --- a/src/Functions/array/array.cpp +++ b/src/Functions/array/array.cpp @@ -1,11 +1,15 @@ -#include -#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include +#include +#include #include +#include namespace DB @@ -44,11 +48,13 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { - size_t num_elements = arguments.size(); + const size_t num_elements = arguments.size(); if (num_elements == 0) + { /// We should return constant empty array. return result_type->createColumnConstWithDefaultValue(input_rows_count); + } const DataTypePtr & elem_type = static_cast(*result_type).getNestedType(); @@ -60,7 +66,6 @@ public: Columns columns_holder(num_elements); ColumnRawPtrs column_ptrs(num_elements); - for (size_t i = 0; i < num_elements; ++i) { const auto & arg = arguments[i]; @@ -77,35 +82,199 @@ public: } /// Create and fill the result array. - auto out = ColumnArray::create(elem_type->createColumn()); IColumn & out_data = out->getData(); IColumn::Offsets & out_offsets = out->getOffsets(); - out_data.reserve(input_rows_count * num_elements); - out_offsets.resize(input_rows_count); - + /// Fill out_offsets + out_offsets.resize_exact(input_rows_count); IColumn::Offset current_offset = 0; for (size_t i = 0; i < input_rows_count; ++i) { - for (size_t j = 0; j < num_elements; ++j) - out_data.insertFrom(*column_ptrs[j], i); - current_offset += num_elements; out_offsets[i] = current_offset; } + /// Fill out_data + out_data.reserve(input_rows_count * num_elements); + if (num_elements == 1) + out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count); + else + execute(column_ptrs, out_data, input_rows_count); return out; } private: + bool execute(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + return executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) + || executeNumber(columns, out_data, input_rows_count) || executeString(columns, out_data, input_rows_count) + || executeNullable(columns, out_data, input_rows_count) || executeTuple(columns, out_data, input_rows_count) + || executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count); + } + + template + bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + using Container = ColumnVectorOrDecimal::Container; + std::vector containers(columns.size(), nullptr); + for (size_t i 
= 0; i < columns.size(); ++i) + { + const ColumnVectorOrDecimal * concrete_column = checkAndGetColumn>(columns[i]); + if (!concrete_column) + return false; + + containers[i] = &concrete_column->getData(); + } + + ColumnVectorOrDecimal & concrete_out_data = assert_cast &>(out_data); + Container & out_container = concrete_out_data.getData(); + out_container.resize_exact(columns.size() * input_rows_count); + + for (size_t row_i = 0; row_i < input_rows_count; ++row_i) + { + const size_t base = row_i * columns.size(); + for (size_t col_i = 0; col_i < columns.size(); ++col_i) + out_container[base + col_i] = (*containers[col_i])[row_i]; + } + return true; + } + + bool executeString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + size_t total_bytes = 0; + std::vector concrete_columns(columns.size(), nullptr); + for (size_t i = 0; i < columns.size(); ++i) + { + const ColumnString * concrete_column = checkAndGetColumn(columns[i]); + if (!concrete_column) + return false; + + total_bytes += concrete_column->getChars().size(); + concrete_columns[i] = concrete_column; + } + + ColumnString & concrete_out_data = assert_cast(out_data); + auto & out_chars = concrete_out_data.getChars(); + auto & out_offsets = concrete_out_data.getOffsets(); + out_chars.resize_exact(total_bytes); + out_offsets.resize_exact(input_rows_count * columns.size()); + + size_t cur_out_offset = 0; + for (size_t row_i = 0; row_i < input_rows_count; ++row_i) + { + const size_t base = row_i * columns.size(); + for (size_t col_i = 0; col_i < columns.size(); ++col_i) + { + StringRef ref = concrete_columns[col_i]->getDataAt(row_i); + memcpySmallAllowReadWriteOverflow15(&out_chars[cur_out_offset], ref.data, ref.size); + out_chars[cur_out_offset + ref.size] = 0; + + cur_out_offset += ref.size + 1; + out_offsets[base + col_i] = cur_out_offset; + } + } + return true; + } + + bool executeFixedString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + std::vector concrete_columns(columns.size(), nullptr); + for (size_t i = 0; i < columns.size(); ++i) + { + const ColumnFixedString * concrete_column = checkAndGetColumn(columns[i]); + if (!concrete_column) + return false; + + concrete_columns[i] = concrete_column; + } + + ColumnFixedString & concrete_out_data = assert_cast(out_data); + auto & out_chars = concrete_out_data.getChars(); + + const size_t n = concrete_out_data.getN(); + size_t total_bytes = n * columns.size() * input_rows_count; + out_chars.resize_exact(total_bytes); + + size_t curr_out_offset = 0; + for (size_t row_i = 0; row_i < input_rows_count; ++row_i) + { + for (size_t col_i = 0; col_i < columns.size(); ++col_i) + { + StringRef ref = concrete_columns[col_i]->getDataAt(row_i); + memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, n); + curr_out_offset += n; + } + } + return true; + } + + bool executeNullable(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + ColumnRawPtrs null_maps(columns.size(), nullptr); + ColumnRawPtrs nested_columns(columns.size(), nullptr); + for (size_t i = 0; i < columns.size(); ++i) + { + const ColumnNullable * concrete_column = checkAndGetColumn(columns[i]); + if (!concrete_column) + return false; + + null_maps[i] = &concrete_column->getNullMapColumn(); + nested_columns[i] = &concrete_column->getNestedColumn(); + } + + ColumnNullable & concrete_out_data = assert_cast(out_data); + auto & out_null_map = concrete_out_data.getNullMapColumn(); + auto & out_nested_column = 
concrete_out_data.getNestedColumn(); + execute(null_maps, out_null_map, input_rows_count); + execute(nested_columns, out_nested_column, input_rows_count); + return true; + } + + bool executeTuple(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + ColumnTuple * concrete_out_data = typeid_cast(&out_data); + if (!concrete_out_data) + return false; + + const size_t tuple_size = concrete_out_data->tupleSize(); + for (size_t i = 0; i < tuple_size; ++i) + { + ColumnRawPtrs elem_columns(columns.size(), nullptr); + for (size_t j = 0; j < columns.size(); ++j) + { + const ColumnTuple * concrete_column = assert_cast(columns[j]); + elem_columns[j] = &concrete_column->getColumn(i); + } + execute(elem_columns, concrete_out_data->getColumn(i), input_rows_count); + } + return true; + } + + bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const + { + for (size_t i = 0; i < input_rows_count; ++i) + for (const auto * column : columns) + out_data.insertFrom(*column, i); + return true; + } + + String getName() const override { return name; } - bool addField(DataTypePtr type_res, const Field & f, Array & arr) const; - bool use_variant_as_common_type = false; }; diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp index 534f7c0d8cd..14672cfd568 100644 --- a/src/Functions/map.cpp +++ b/src/Functions/map.cpp @@ -2,6 +2,8 @@ #include #include #include +#include +#include #include #include #include @@ -13,7 +15,6 @@ #include #include #include -#include namespace DB @@ -36,11 +37,18 @@ class FunctionMap : public IFunction public: static constexpr auto name = "map"; - explicit FunctionMap(bool use_variant_as_common_type_) : use_variant_as_common_type(use_variant_as_common_type_) {} + explicit FunctionMap(ContextPtr context_) + : context(context_) + , use_variant_as_common_type( + context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type) + , function_array(FunctionFactory::instance().get("array", context)) + , function_map_from_arrays(FunctionFactory::instance().get("mapFromArrays", context)) + { + } static FunctionPtr create(ContextPtr context) { - return std::make_shared(context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type); + return std::make_shared(context); } String getName() const override @@ -101,62 +109,38 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { size_t num_elements = arguments.size(); - if (num_elements == 0) return result_type->createColumnConstWithDefaultValue(input_rows_count); + ColumnsWithTypeAndName key_args; + ColumnsWithTypeAndName value_args; + for (size_t i = 0; i < num_elements; i += 2) + { + key_args.emplace_back(arguments[i]); + value_args.emplace_back(arguments[i+1]); + } + const auto & result_type_map = static_cast(*result_type); const DataTypePtr & key_type = result_type_map.getKeyType(); const DataTypePtr & value_type = result_type_map.getValueType(); + const DataTypePtr & key_array_type = std::make_shared(key_type); + const DataTypePtr & value_array_type = std::make_shared(value_type); - Columns columns_holder(num_elements); - ColumnRawPtrs column_ptrs(num_elements); + /// key_array = array(args[0], args[2]...) + ColumnPtr key_array = function_array->build(key_args)->execute(key_args, key_array_type, input_rows_count); + /// value_array = array(args[1], args[3]...) 
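// Sketch of the rewrite performed above (illustration only): map(k1, v1, k2, v2, ...) is now
// evaluated as mapFromArrays(array(k1, k2, ...), array(v1, v2, ...)), i.e. the even-positioned
// arguments become the key array and the odd-positioned ones the value array.
#include <cstddef>
#include <utility>
#include <vector>

template <typename Arg>
std::pair<std::vector<Arg>, std::vector<Arg>> splitMapArguments(const std::vector<Arg> & args)
{
    std::vector<Arg> keys;
    std::vector<Arg> values;
    for (size_t i = 0; i + 1 < args.size(); i += 2)
    {
        keys.push_back(args[i]);
        values.push_back(args[i + 1]);
    }
    return {std::move(keys), std::move(values)};
}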
+ ColumnPtr value_array = function_array->build(value_args)->execute(value_args, value_array_type, input_rows_count); - for (size_t i = 0; i < num_elements; ++i) - { - const auto & arg = arguments[i]; - const auto to_type = i % 2 == 0 ? key_type : value_type; - - ColumnPtr preprocessed_column = castColumn(arg, to_type); - preprocessed_column = preprocessed_column->convertToFullColumnIfConst(); - - columns_holder[i] = std::move(preprocessed_column); - column_ptrs[i] = columns_holder[i].get(); - } - - /// Create and fill the result map. - - MutableColumnPtr keys_data = key_type->createColumn(); - MutableColumnPtr values_data = value_type->createColumn(); - MutableColumnPtr offsets = DataTypeNumber().createColumn(); - - size_t total_elements = input_rows_count * num_elements / 2; - keys_data->reserve(total_elements); - values_data->reserve(total_elements); - offsets->reserve(input_rows_count); - - IColumn::Offset current_offset = 0; - for (size_t i = 0; i < input_rows_count; ++i) - { - for (size_t j = 0; j < num_elements; j += 2) - { - keys_data->insertFrom(*column_ptrs[j], i); - values_data->insertFrom(*column_ptrs[j + 1], i); - } - - current_offset += num_elements / 2; - offsets->insert(current_offset); - } - - auto nested_column = ColumnArray::create( - ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}), - std::move(offsets)); - - return ColumnMap::create(nested_column); + /// result = mapFromArrays(key_array, value_array) + ColumnsWithTypeAndName map_args{{key_array, key_array_type, ""}, {value_array, value_array_type, ""}}; + return function_map_from_arrays->build(map_args)->execute(map_args, result_type, input_rows_count); } private: + ContextPtr context; bool use_variant_as_common_type = false; + FunctionOverloadResolverPtr function_array; + FunctionOverloadResolverPtr function_map_from_arrays; }; /// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays or maps @@ -173,6 +157,7 @@ public: bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } bool useDefaultImplementationForNulls() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { diff --git a/src/Functions/materialize.h b/src/Functions/materialize.h index 41994509745..571391faba7 100644 --- a/src/Functions/materialize.h +++ b/src/Functions/materialize.h @@ -2,7 +2,7 @@ #include #include #include -#include +#include namespace DB { @@ -18,11 +18,6 @@ public: return std::make_shared(); } - bool useDefaultImplementationForNulls() const override - { - return false; - } - /// Get the function name. 
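// A small sketch of the contract FunctionMaterialize keeps after this change (illustration only):
// constants are expanded to full columns and sparse wrappers are stripped, so callers always get a
// plain, fully materialized column back.
#include <Columns/ColumnConst.h>
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnsNumber.h>

DB::ColumnPtr materializeSketch(const DB::ColumnPtr & column)
{
    /// Same two steps as the executeImpl further down.
    return DB::recursiveRemoveSparse(column->convertToFullColumnIfConst());
}

DB::ColumnPtr materializeConstExample()
{
    auto nested = DB::ColumnUInt64::create();
    nested->insertValue(42);
    DB::ColumnPtr const_column = DB::ColumnConst::create(std::move(nested), 3); /// 3 rows, each 42
    return materializeSketch(const_column);                                     /// full ColumnUInt64 [42, 42, 42]
}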
String getName() const override { @@ -34,8 +29,16 @@ public: return true; } + bool useDefaultImplementationForNulls() const override { return false; } + + bool useDefaultImplementationForNothing() const override { return false; } + + bool useDefaultImplementationForConstants() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + bool useDefaultImplementationForSparseColumns() const override { return false; } + bool isSuitableForConstantFolding() const override { return false; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } @@ -52,7 +55,7 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override { - return arguments[0].column->convertToFullColumnIfConst(); + return recursiveRemoveSparse(arguments[0].column->convertToFullColumnIfConst()); } bool hasInformationAboutMonotonicity() const override { return true; } diff --git a/src/Functions/overlay.cpp b/src/Functions/overlay.cpp index df8b825eabe..eddb7590cab 100644 --- a/src/Functions/overlay.cpp +++ b/src/Functions/overlay.cpp @@ -1,12 +1,12 @@ #include #include +#include +#include #include #include #include #include #include -#include -#include namespace DB { @@ -16,8 +16,8 @@ namespace /// If 'is_utf8' - measure offset and length in code points instead of bytes. /// Syntax: -/// - overlay(input, replace, offset[, length]) -/// - overlayUTF8(input, replace, offset[, length]) - measure offset and length in code points instead of bytes +/// - overlay(s, replace, offset[, length]) +/// - overlayUTF8(s, replace, offset[, length]) - measure offset and length in code points instead of bytes template class FunctionOverlay : public IFunction { @@ -34,7 +34,7 @@ public: DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { FunctionArgumentDescriptors mandatory_args{ - {"input", static_cast(&isString), nullptr, "String"}, + {"s", static_cast(&isString), nullptr, "String"}, {"replace", static_cast(&isString), nullptr, "String"}, {"offset", static_cast(&isNativeInteger), nullptr, "(U)Int8/16/32/64"}, }; @@ -100,7 +100,6 @@ public: res_data.reserve(col_input_string->getChars().size()); } - #define OVERLAY_EXECUTE_CASE(HAS_FOUR_ARGS, OFFSET_IS_CONST, LENGTH_IS_CONST) \ if (input_is_const && replace_is_const) \ constantConstant( \ @@ -186,7 +185,6 @@ public: return res_col; } - private: /// input offset is 1-based, maybe negative /// output result is 0-based valid offset, within [0, input_size] @@ -229,6 +227,7 @@ private: ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) const { + /// Free us from handling negative length in the code below if (has_four_args && length_is_const && const_length < 0) { constantConstant( @@ -343,6 +342,7 @@ private: ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) const { + /// Free us from handling negative length in the code below if (has_four_args && length_is_const && const_length < 0) { vectorConstant( @@ -461,6 +461,7 @@ private: ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) const { + /// Free us from handling negative length in the code below if (has_four_args && length_is_const && const_length < 0) { constantVector( @@ -577,6 +578,7 @@ private: ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets) const { + /// Free us from handling negative length in the code below if 
(has_four_args && length_is_const && const_length < 0) { vectorVector( diff --git a/src/Functions/sqid.cpp b/src/Functions/sqid.cpp index 0e133590b84..074a34bd083 100644 --- a/src/Functions/sqid.cpp +++ b/src/Functions/sqid.cpp @@ -124,7 +124,7 @@ public: std::string_view sqid = col_non_const->getDataAt(i).toView(); std::vector integers = sqids.decode(String(sqid)); res_nested_data.insert(integers.begin(), integers.end()); - res_offsets_data.push_back(integers.size()); + res_offsets_data.push_back(res_offsets_data.back() + integers.size()); } } else diff --git a/src/Functions/toStartOfInterval.cpp b/src/Functions/toStartOfInterval.cpp index 709f5f86d80..6573fef6634 100644 --- a/src/Functions/toStartOfInterval.cpp +++ b/src/Functions/toStartOfInterval.cpp @@ -10,21 +10,31 @@ #include #include #include +#include namespace DB { namespace ErrorCodes { - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int ARGUMENT_OUT_OF_BOUND; + extern const int BAD_ARGUMENTS; extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int ARGUMENT_OUT_OF_BOUND; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; } class FunctionToStartOfInterval : public IFunction { +private: + enum class Overload + { + Default, /// toStartOfInterval(time, interval) or toStartOfInterval(time, interval, timezone) + Origin /// toStartOfInterval(time, interval, origin) or toStartOfInterval(time, interval, origin, timezone) + }; + mutable Overload overload; + public: static FunctionPtr create(ContextPtr) { return std::make_shared(); } @@ -34,7 +44,7 @@ public: size_t getNumberOfArguments() const override { return 0; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; } + ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2, 3}; } bool hasInformationAboutMonotonicity() const override { return true; } Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override { return { .is_monotonic = true, .is_always_monotonic = true }; } @@ -72,6 +82,9 @@ public: "Illegal type {} of 2nd argument of function {}, expected a time interval", type_arg2->getName(), getName()); + overload = Overload::Default; + + /// Determine result type for default overload (no origin) switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case) { case IntervalKind::Kind::Nanosecond: @@ -97,13 +110,49 @@ public: auto check_third_argument = [&] { const DataTypePtr & type_arg3 = arguments[2].type; - if (!isString(type_arg3)) - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of 3rd argument of function {}, expected a constant timezone string", + if (isString(type_arg3)) + { + if (value_is_date && result_type == ResultType::Date) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64", + getName(), interval_type->getKind().toString()); + } + else if (isDateOrDate32OrDateTimeOrDateTime64(type_arg3)) + { + overload = Overload::Origin; + const DataTypePtr & type_arg1 = arguments[0].type; + if (isDate(type_arg1) && isDate(type_arg3)) + result_type = ResultType::Date; + else if (isDate32(type_arg1) && isDate32(type_arg3)) + result_type = 
ResultType::Date32; + else if (isDateTime(type_arg1) && isDateTime(type_arg3)) + result_type = ResultType::DateTime; + else if (isDateTime64(type_arg1) && isDateTime64(type_arg3)) + result_type = ResultType::DateTime64; + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same type", getName()); + } + else + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. " + "This argument is optional and must be a constant String with timezone name or a Date/Date32/DateTime/DateTime64 with a constant origin", type_arg3->getName(), getName()); - if (value_is_date && result_type == ResultType::Date) /// weird why this is && instead of || but too afraid to change it + }; + + auto check_fourth_argument = [&] + { + if (overload != Overload::Origin) /// sanity check + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. " + "The third argument must a Date/Date32/DateTime/DateTime64 with a constant origin", + arguments[2].type->getName(), getName()); + + const DataTypePtr & type_arg4 = arguments[3].type; + if (!isString(type_arg4)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 4th argument of function {}. " + "This argument is optional and must be a constant String with timezone name", + type_arg4->getName(), getName()); + if (value_is_date && result_type == ResultType::Date) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "The timezone argument of function {} with interval type {} is allowed only when the 1st argument has type DateTime or DateTimt64", + "A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64", getName(), interval_type->getKind().toString()); }; @@ -118,10 +167,17 @@ public: check_second_argument(); check_third_argument(); } + else if (arguments.size() == 4) + { + check_first_argument(); + check_second_argument(); + check_third_argument(); + check_fourth_argument(); + } else { throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, - "Number of arguments for function {} doesn't match: passed {}, should be 2 or 3", + "Number of arguments for function {} doesn't match: passed {}, must be 2, 3 or 4", getName(), arguments.size()); } @@ -132,10 +188,19 @@ public: case ResultType::Date32: return std::make_shared(); case ResultType::DateTime: - return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); + { + const size_t time_zone_arg_num = (overload == Overload::Default) ? 
2 : 3; + return std::make_shared(extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false)); + } case ResultType::DateTime64: { UInt32 scale = 0; + if (isDateTime64(arguments[0].type) && overload == Overload::Origin) + { + scale = assert_cast(*arguments[0].type.get()).getScale(); + if (assert_cast(*arguments[2].type.get()).getScale() != scale) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same scale", getName()); + } if (interval_type->getKind() == IntervalKind::Kind::Nanosecond) scale = 9; else if (interval_type->getKind() == IntervalKind::Kind::Microsecond) @@ -143,69 +208,103 @@ public: else if (interval_type->getKind() == IntervalKind::Kind::Millisecond) scale = 3; - return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false)); + const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3; + return std::make_shared(scale, extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false)); } } std::unreachable(); } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override { const auto & time_column = arguments[0]; const auto & interval_column = arguments[1]; - const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0); - auto result_column = dispatchForTimeColumn(time_column, interval_column, result_type, time_zone, input_rows_count); + + ColumnWithTypeAndName origin_column; + if (overload == Overload::Origin) + origin_column = arguments[2]; + + const size_t time_zone_arg_num = (overload == Overload::Default) ? 
2 : 3; + const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_arg_num, 0); + + ColumnPtr result_column; + if (isDate(result_type)) + result_column = dispatchForTimeColumn(time_column, interval_column, origin_column, result_type, time_zone); + else if (isDate32(result_type)) + result_column = dispatchForTimeColumn(time_column, interval_column, origin_column, result_type, time_zone); + else if (isDateTime(result_type)) + result_column = dispatchForTimeColumn(time_column, interval_column, origin_column, result_type, time_zone); + else if (isDateTime64(result_type)) + result_column = dispatchForTimeColumn(time_column, interval_column, origin_column, result_type, time_zone); return result_column; } private: + template ColumnPtr dispatchForTimeColumn( - const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, - const DataTypePtr & result_type, const DateLUTImpl & time_zone, - size_t input_rows_count) const + const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone) const { const auto & time_column_type = *time_column.type.get(); const auto & time_column_col = *time_column.column.get(); - if (isDateTime64(time_column_type)) - { - const auto * time_column_vec = checkAndGetColumn(&time_column_col); - auto scale = assert_cast(time_column_type).getScale(); - - if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count, scale); - } - else if (isDateTime(time_column_type)) - { - const auto * time_column_vec = checkAndGetColumn(&time_column_col); - if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count); - } - else if (isDate(time_column_type)) + if (isDate(time_column_type)) { const auto * time_column_vec = checkAndGetColumn(&time_column_col); + if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone); } else if (isDate32(time_column_type)) { const auto * time_column_vec = checkAndGetColumn(&time_column_col); if (time_column_vec) - return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count); + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone); + } + else if (isDateTime(time_column_type)) + { + const auto * time_column_vec = checkAndGetColumn(&time_column_col); + if (time_column_vec) + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone); + } + else if (isDateTime64(time_column_type)) + { + const auto * time_column_vec = checkAndGetColumn(&time_column_col); + auto scale = assert_cast(time_column_type).getScale(); + + if (time_column_vec) + return dispatchForIntervalColumn(assert_cast(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone, scale); } throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, 
Date32, DateTime or DateTime64", getName()); } - template + template ColumnPtr dispatchForIntervalColumn( - const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, - const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale = 1) const + const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column, + const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale = 1) const { const auto * interval_type = checkAndGetDataType(interval_column.type.get()); if (!interval_type) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a time interval", getName()); + switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case) + { + case IntervalKind::Kind::Nanosecond: + case IntervalKind::Kind::Microsecond: + case IntervalKind::Kind::Millisecond: + if (isDateOrDate32(time_data_type) || isDateTime(time_data_type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type {}", isDate(time_data_type) ? "Date" : "DateTime"); + break; + case IntervalKind::Kind::Second: + case IntervalKind::Kind::Minute: + case IntervalKind::Kind::Hour: + if (isDateOrDate32(time_data_type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type Date"); + break; + default: + break; + } + const auto * interval_column_const_int64 = checkAndGetColumnConst(interval_column.column.get()); if (!interval_column_const_int64) throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a const time interval", getName()); @@ -217,51 +316,102 @@ private: switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case) { case IntervalKind::Kind::Nanosecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Microsecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Millisecond: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Second: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Minute: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Hour: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Day: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + 
return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Week: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Month: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Quarter: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); case IntervalKind::Kind::Year: - return execute(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale); + return execute(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale); } std::unreachable(); } - template - ColumnPtr execute( - const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, - const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale) const + template + ColumnPtr execute(const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale) const { using ResultColumnType = typename ResultDataType::ColumnType; - using ResultFieldType = typename ResultDataType::FieldType; const auto & time_data = time_column_type.getData(); + size_t size = time_data.size(); auto result_col = result_type->createColumn(); auto * col_to = assert_cast(result_col.get()); auto & result_data = col_to->getData(); - result_data.resize(input_rows_count); + result_data.resize(size); Int64 scale_multiplier = DecimalUtils::scaleMultiplier(scale); - for (size_t i = 0; i != input_rows_count; ++i) - result_data[i] = static_cast(ToStartOfInterval::execute(time_data[i], num_units, time_zone, scale_multiplier)); + if (origin_column.column) // Overload: Origin + { + const bool is_small_interval = (unit == IntervalKind::Kind::Nanosecond || unit == IntervalKind::Kind::Microsecond || unit == IntervalKind::Kind::Millisecond); + const bool is_result_date = isDateOrDate32(result_type); + + Int64 result_scale = scale_multiplier; + Int64 origin_scale = 1; + + if (isDateTime64(result_type)) /// We have origin scale only in case if arguments are DateTime64. + origin_scale = assert_cast(*origin_column.type).getScaleMultiplier(); + else if (!is_small_interval) /// In case of large interval and arguments are not DateTime64, we should not have scale in result. + result_scale = 1; + + if (is_small_interval) + result_scale = assert_cast(*result_type).getScaleMultiplier(); + + /// In case if we have a difference between time arguments and Interval, we need to calculate the difference between them + /// to get the right precision for the result. In case of large intervals, we should not have scale difference. + Int64 scale_diff = is_small_interval ? 
std::max(result_scale / origin_scale, origin_scale / result_scale) : 1; + + static constexpr Int64 SECONDS_PER_DAY = 86'400; + + UInt64 origin = origin_column.column->get64(0); + for (size_t i = 0; i != size; ++i) + { + UInt64 time_arg = time_data[i]; + if (origin > static_cast(time_arg)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The origin must be before the end date / date with time"); + + if (is_result_date) /// All internal calculations of ToStartOfInterval<...> expect arguments to be seconds or milli-, micro-, nanoseconds. + { + time_arg *= SECONDS_PER_DAY; + origin *= SECONDS_PER_DAY; + } + + Int64 offset = ToStartOfInterval::execute(time_arg - origin, num_units, time_zone, result_scale, origin); + + /// In case if arguments are DateTime64 with large interval, we should apply scale on it. + offset *= (!is_small_interval) ? result_scale : 1; + + if (is_result_date) /// Convert back to date after calculations. + { + offset /= SECONDS_PER_DAY; + origin /= SECONDS_PER_DAY; + } + + result_data[i] = 0; + result_data[i] += (result_scale < origin_scale) ? (origin + offset) / scale_diff : (origin + offset) * scale_diff; + } + } + else // Overload: Default + { + for (size_t i = 0; i != size; ++i) + result_data[i] = static_cast(ToStartOfInterval::execute(time_data[i], num_units, time_zone, scale_multiplier)); + } return result_col; } diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index 9c80b377661..73bbba055d0 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -36,7 +36,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) /// Case when bucket name represented in domain name of S3 URL. /// E.g. (https://bucket-name.s3.region.amazonaws.com/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access - static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3express[\-a-z0-9]+|s3|cos|obs|oss|eos)([.\-][a-z0-9\-.:]+))"); + static const RE2 virtual_hosted_style_pattern(R"(([^.]+)\.(s3express[\-a-z0-9]+|s3|cos|obs|.*oss[^\/]*|eos)([.\-][a-z0-9\-.:]+))"); /// Case when AWS Private Link Interface is being used /// E.g. 
(bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/bucket-name/key) diff --git a/src/IO/WriteBufferFromFile.cpp b/src/IO/WriteBufferFromFile.cpp index f1825ce1e22..d68203029c1 100644 --- a/src/IO/WriteBufferFromFile.cpp +++ b/src/IO/WriteBufferFromFile.cpp @@ -32,8 +32,10 @@ WriteBufferFromFile::WriteBufferFromFile( ThrottlerPtr throttler_, mode_t mode, char * existing_memory, - size_t alignment) - : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, throttler_, alignment, file_name_) + size_t alignment, + bool use_adaptive_buffer_size_, + size_t adaptive_buffer_initial_size) + : WriteBufferFromFileDescriptor(-1, buf_size, existing_memory, throttler_, alignment, file_name_, use_adaptive_buffer_size_, adaptive_buffer_initial_size) { ProfileEvents::increment(ProfileEvents::FileOpen); @@ -66,8 +68,10 @@ WriteBufferFromFile::WriteBufferFromFile( size_t buf_size, ThrottlerPtr throttler_, char * existing_memory, - size_t alignment) - : WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, throttler_, alignment, original_file_name) + size_t alignment, + bool use_adaptive_buffer_size_, + size_t adaptive_buffer_initial_size) + : WriteBufferFromFileDescriptor(fd_, buf_size, existing_memory, throttler_, alignment, original_file_name, use_adaptive_buffer_size_, adaptive_buffer_initial_size) { fd_ = -1; } diff --git a/src/IO/WriteBufferFromFile.h b/src/IO/WriteBufferFromFile.h index 57847d893af..c0fa7f0b233 100644 --- a/src/IO/WriteBufferFromFile.h +++ b/src/IO/WriteBufferFromFile.h @@ -36,7 +36,9 @@ public: ThrottlerPtr throttler_ = {}, mode_t mode = 0666, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool use_adaptive_buffer_size_ = false, + size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE); /// Use pre-opened file descriptor. explicit WriteBufferFromFile( @@ -45,7 +47,9 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, ThrottlerPtr throttler_ = {}, char * existing_memory = nullptr, - size_t alignment = 0); + size_t alignment = 0, + bool use_adaptive_buffer_size_ = false, + size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE); ~WriteBufferFromFile() override; diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp index f1207edc55b..b60a792e11c 100644 --- a/src/IO/WriteBufferFromFileDescriptor.cpp +++ b/src/IO/WriteBufferFromFileDescriptor.cpp @@ -83,6 +83,13 @@ void WriteBufferFromFileDescriptor::nextImpl() ProfileEvents::increment(ProfileEvents::DiskWriteElapsedMicroseconds, watch.elapsedMicroseconds()); ProfileEvents::increment(ProfileEvents::WriteBufferFromFileDescriptorWriteBytes, bytes_written); + + /// Increase buffer size for next data if adaptive buffer size is used and nextImpl was called because of end of buffer. + if (!available() && use_adaptive_buffer_size && memory.size() < adaptive_max_buffer_size) + { + memory.resize(std::min(memory.size() * 2, adaptive_max_buffer_size)); + BufferBase::set(memory.data(), memory.size(), 0); + } } /// NOTE: This class can be used as a very low-level building block, for example @@ -94,11 +101,15 @@ WriteBufferFromFileDescriptor::WriteBufferFromFileDescriptor( char * existing_memory, ThrottlerPtr throttler_, size_t alignment, - std::string file_name_) - : WriteBufferFromFileBase(buf_size, existing_memory, alignment) + std::string file_name_, + bool use_adaptive_buffer_size_, + size_t adaptive_buffer_initial_size) + : WriteBufferFromFileBase(use_adaptive_buffer_size_ ? 
adaptive_buffer_initial_size : buf_size, existing_memory, alignment) , fd(fd_) , throttler(throttler_) , file_name(std::move(file_name_)) + , use_adaptive_buffer_size(use_adaptive_buffer_size_) + , adaptive_max_buffer_size(buf_size) { } @@ -124,6 +135,7 @@ void WriteBufferFromFileDescriptor::finalizeImpl() return; } + use_adaptive_buffer_size = false; next(); } diff --git a/src/IO/WriteBufferFromFileDescriptor.h b/src/IO/WriteBufferFromFileDescriptor.h index cb73b1e1d08..e893ecd80fb 100644 --- a/src/IO/WriteBufferFromFileDescriptor.h +++ b/src/IO/WriteBufferFromFileDescriptor.h @@ -18,7 +18,9 @@ public: char * existing_memory = nullptr, ThrottlerPtr throttler_ = {}, size_t alignment = 0, - std::string file_name_ = ""); + std::string file_name_ = "", + bool use_adaptive_buffer_size_ = false, + size_t adaptive_buffer_initial_size = DBMS_DEFAULT_INITIAL_ADAPTIVE_BUFFER_SIZE); /** Could be used before initialization if needed 'fd' was not passed to constructor. * It's not possible to change 'fd' during work. @@ -56,6 +58,12 @@ protected: /// If file has name contains filename, otherwise contains string "(fd=...)" std::string file_name; + /// If true, the size of internal buffer will be exponentially increased up to + /// adaptive_buffer_max_size after each nextImpl call. It can be used to avoid + /// large buffer allocation when actual size of written data is small. + bool use_adaptive_buffer_size; + size_t adaptive_max_buffer_size; + void finalizeImpl() override; }; diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 7a978e951a7..d03b486ec52 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -95,7 +95,7 @@ WriteBufferFromS3::WriteBufferFromS3( std::optional> object_metadata_, ThreadPoolCallbackRunnerUnsafe schedule_, const WriteSettings & write_settings_) - : WriteBufferFromFileBase(buf_size_, nullptr, 0) + : WriteBufferFromFileBase(std::min(buf_size_, static_cast(DBMS_DEFAULT_BUFFER_SIZE)), nullptr, 0) , bucket(bucket_) , key(key_) , request_settings(request_settings_) @@ -351,9 +351,17 @@ void WriteBufferFromS3::allocateBuffer() buffer_allocation_policy->nextBuffer(); chassert(0 == hidden_size); + /// First buffer was already allocated in BufferWithOwnMemory constructor with provided in constructor buffer size. + /// It will be reallocated in subsequent nextImpl calls up to the desired buffer size from buffer_allocation_policy. if (buffer_allocation_policy->getBufferNumber() == 1) { - allocateFirstBuffer(); + /// Reduce memory size if initial size was larger then desired size from buffer_allocation_policy. + /// Usually it doesn't happen but we have it in unit tests. 
+ if (memory.size() > buffer_allocation_policy->getBufferSize()) + { + memory.resize(buffer_allocation_policy->getBufferSize()); + WriteBuffer::set(memory.data(), memory.size()); + } return; } @@ -361,14 +369,6 @@ void WriteBufferFromS3::allocateBuffer() WriteBuffer::set(memory.data(), memory.size()); } -void WriteBufferFromS3::allocateFirstBuffer() -{ - const auto max_first_buffer = buffer_allocation_policy->getBufferSize(); - const auto size = std::min(size_t(DBMS_DEFAULT_BUFFER_SIZE), max_first_buffer); - memory = Memory(size); - WriteBuffer::set(memory.data(), memory.size()); -} - void WriteBufferFromS3::setFakeBufferWhenPreFinalized() { WriteBuffer::set(fake_buffer_when_prefinalized, sizeof(fake_buffer_when_prefinalized)); diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index b026da607c5..604f036fcb8 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -64,7 +64,6 @@ private: void reallocateFirstBuffer(); void detachBuffer(); void allocateBuffer(); - void allocateFirstBuffer(); void setFakeBufferWhenPreFinalized(); S3::UploadPartRequest getUploadRequest(size_t part_number, PartData & data); diff --git a/src/IO/WriteBufferValidUTF8.cpp b/src/IO/WriteBufferValidUTF8.cpp index d611befac37..2a86f8c2801 100644 --- a/src/IO/WriteBufferValidUTF8.cpp +++ b/src/IO/WriteBufferValidUTF8.cpp @@ -54,7 +54,7 @@ inline void WriteBufferValidUTF8::putReplacement() } -inline void WriteBufferValidUTF8::putValid(char *data, size_t len) +inline void WriteBufferValidUTF8::putValid(const char *data, size_t len) { if (len == 0) return; @@ -149,9 +149,34 @@ void WriteBufferValidUTF8::finalizeImpl() /// Write all complete sequences from buffer. nextImpl(); - /// If unfinished sequence at end, then write replacement. + /// Handle remaining bytes if we have an incomplete sequence if (working_buffer.begin() != memory.data()) - putReplacement(); + { + const char * p = memory.data(); + + while (p < pos) + { + UInt8 len = length_of_utf8_sequence[static_cast(*p)]; + if (p + len > pos) + { + /// Incomplete sequence. Skip one byte. + putReplacement(); + ++p; + } + else if (Poco::UTF8Encoding::isLegal(reinterpret_cast(p), len)) + { + /// Valid sequence + putValid(p, len); + p += len; + } + else + { + /// Invalid sequence, skip first byte. 
+ putReplacement(); + ++p; + } + } + } } } diff --git a/src/IO/WriteBufferValidUTF8.h b/src/IO/WriteBufferValidUTF8.h index daaf0427f88..a398b8ded01 100644 --- a/src/IO/WriteBufferValidUTF8.h +++ b/src/IO/WriteBufferValidUTF8.h @@ -26,7 +26,7 @@ public: private: void putReplacement(); - void putValid(char * data, size_t len); + void putValid(const char * data, size_t len); void nextImpl() override; void finalizeImpl() override; diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index cdc75e8c0e9..6b3d04f4e5c 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ -24,6 +24,9 @@ struct WriteSettings bool s3_allow_parallel_part_upload = true; bool azure_allow_parallel_part_upload = true; + bool use_adaptive_write_buffer = false; + size_t adaptive_write_buffer_initial_size = 16 * 1024; + bool operator==(const WriteSettings & other) const = default; }; diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp index c0bf7fcb28a..abe80db7ba5 100644 --- a/src/IO/tests/gtest_s3_uri.cpp +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -204,6 +204,14 @@ TEST(S3UriTest, validPatterns) ASSERT_EQ("", uri.version_id); ASSERT_EQ(true, uri.is_virtual_hosted_style); } + { + S3::URI uri("https://bucket-test.cn-beijing-internal.oss-data-acc.aliyuncs.com/cc-2zeh496zqm0g6e09g"); + ASSERT_EQ("https://cn-beijing-internal.oss-data-acc.aliyuncs.com", uri.endpoint); + ASSERT_EQ("bucket-test", uri.bucket); + ASSERT_EQ("cc-2zeh496zqm0g6e09g", uri.key); + ASSERT_EQ("", uri.version_id); + ASSERT_EQ(true, uri.is_virtual_hosted_style); + } } TEST(S3UriTest, versionIdChecks) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index d1aa8a0fff0..aff06c490c5 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -2371,7 +2371,7 @@ void NO_INLINE Aggregator::mergeDataNullKey( template void NO_INLINE Aggregator::mergeDataImpl( - Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch) const + Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch, ThreadPool & thread_pool, std::atomic & is_cancelled) const { if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization) mergeDataNullKey(table_dst, table_src, arena); @@ -2410,7 +2410,7 @@ void NO_INLINE Aggregator::mergeDataImpl( { if (!is_aggregate_function_compiled[i]) aggregate_functions[i]->mergeAndDestroyBatch( - dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena); + dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena); } return; @@ -2420,7 +2420,7 @@ void NO_INLINE Aggregator::mergeDataImpl( for (size_t i = 0; i < params.aggregates_size; ++i) { aggregate_functions[i]->mergeAndDestroyBatch( - dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena); + dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena); } } @@ -2535,8 +2535,10 @@ void NO_INLINE Aggregator::mergeWithoutKeyDataImpl( template void NO_INLINE Aggregator::mergeSingleLevelDataImpl( - ManyAggregatedDataVariants & non_empty_data) const + ManyAggregatedDataVariants & non_empty_data, std::atomic & is_cancelled) const { + ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, 
params.max_threads}; + AggregatedDataVariantsPtr & res = non_empty_data[0]; bool no_more_keys = false; @@ -2557,13 +2559,13 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl( if (compiled_aggregate_functions_holder) { mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, true, prefetch); + getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, true, prefetch, thread_pool, is_cancelled); } else #endif { mergeDataImpl( - getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, false, prefetch); + getDataVariant(*res).data, getDataVariant(current).data, res->aggregates_pool, false, prefetch, thread_pool, is_cancelled); } } else if (res->without_key) @@ -2589,7 +2591,7 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl( #define M(NAME) \ template void NO_INLINE Aggregator::mergeSingleLevelDataImpl( \ - ManyAggregatedDataVariants & non_empty_data) const; + ManyAggregatedDataVariants & non_empty_data, std::atomic & is_cancelled) const; APPLY_FOR_VARIANTS_SINGLE_LEVEL(M) #undef M @@ -2597,6 +2599,8 @@ template void NO_INLINE Aggregator::mergeBucketImpl( ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic & is_cancelled) const { + ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads}; + /// We merge all aggregation results to the first. AggregatedDataVariantsPtr & res = data[0]; @@ -2613,7 +2617,7 @@ void NO_INLINE Aggregator::mergeBucketImpl( if (compiled_aggregate_functions_holder) { mergeDataImpl( - getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena, true, prefetch); + getDataVariant(*res).data.impls[bucket], getDataVariant(current).data.impls[bucket], arena, true, prefetch, thread_pool, is_cancelled); } else #endif @@ -2623,7 +2627,9 @@ void NO_INLINE Aggregator::mergeBucketImpl( getDataVariant(current).data.impls[bucket], arena, false, - prefetch); + prefetch, + thread_pool, + is_cancelled); } } } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 2cb04fc7c51..4de0a640219 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -467,7 +467,7 @@ private: /// Merge data from hash table `src` into `dst`. template - void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch) const; + void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch, ThreadPool & thread_pool, std::atomic & is_cancelled) const; /// Merge data from hash table `src` into `dst`, but only for keys that already exist in dst. In other cases, merge the data into `overflows`. 
template @@ -490,7 +490,7 @@ private: template void mergeSingleLevelDataImpl( - ManyAggregatedDataVariants & non_empty_data) const; + ManyAggregatedDataVariants & non_empty_data, std::atomic & is_cancelled) const; template using ConvertToBlockRes = std::conditional_t; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 373cc91ebcb..7adfb42fb51 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index fb5337158ba..858b4a78430 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -152,6 +152,7 @@ class ServerType; template class MergeTreeBackgroundExecutor; class AsyncLoader; +struct ICgroupsReader; struct TemporaryTableHolder; using TemporaryTablesMapping = std::map>; diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 80cb0510b35..c1f9b4637f8 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -228,8 +228,8 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) metadata_path = metadata_path / "store" / DatabaseCatalog::getPathForUUID(create.uuid); - if (!create.attach && fs::exists(metadata_path)) - throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists", metadata_path.string()); + if (!create.attach && fs::exists(metadata_path) && !fs::is_empty(metadata_path)) + throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists and is not empty", metadata_path.string()); } else if (create.storage->engine->name == "MaterializeMySQL" || create.storage->engine->name == "MaterializedMySQL") @@ -329,6 +329,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) writeChar('\n', statement_buf); String statement = statement_buf.str(); + /// Needed to make database creation retriable if it fails after the file is created + fs::remove(metadata_file_tmp_path); + /// Exclusive flag guarantees, that database is not created right now in another thread. WriteBufferFromFile out(metadata_file_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL); writeString(statement, out); @@ -350,13 +353,6 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) DatabaseCatalog::instance().attachDatabase(database_name, database); added = true; - if (need_write_metadata) - { - /// Prevents from overwriting metadata of detached database - renameNoReplace(metadata_file_tmp_path, metadata_file_path); - renamed = true; - } - if (!load_database_without_tables) { /// We use global context here, because storages lifetime is bigger than query context lifetime @@ -368,6 +364,13 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) /// Only then prioritize, schedule and wait all the startup tasks waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks); } + + if (need_write_metadata) + { + /// Prevents from overwriting metadata of detached database + renameNoReplace(metadata_file_tmp_path, metadata_file_path); + renamed = true; + } } catch (...) 
{ @@ -781,14 +784,14 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti const auto & settings = getContext()->getSettingsRef(); if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings.allow_experimental_full_text_index) - throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is not enabled (the setting 'allow_experimental_full_text_index')"); + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is disabled. Turn on setting 'allow_experimental_full_text_index'"); /// ---- /// Temporary check during a transition period. Please remove at the end of 2024. if (index_desc.type == INVERTED_INDEX_NAME && !settings.allow_experimental_inverted_index) throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'"); /// ---- if (index_desc.type == "vector_similarity" && !settings.allow_experimental_vector_similarity_index) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index is disabled. Turn on allow_experimental_vector_similarity_index"); + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental vector similarity index is disabled. Turn on setting 'allow_experimental_vector_similarity_index'"); properties.indices.push_back(index_desc); } @@ -1226,6 +1229,27 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data bool from_path = create.attach_from_path.has_value(); bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; + if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !is_on_cluster && !create.attach) + { + if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "It's not allowed to explicitly specify UUIDs for tables in Replicated databases, " + "see database_replicated_allow_explicit_uuid"); + } + else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 1) + { + LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "It's not recommended to explicitly specify UUIDs for tables in Replicated databases"); + } + else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2) + { + UUID old_uuid = create.uuid; + create.uuid = UUIDHelpers::Nil; + create.generateRandomUUIDs(); + LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) " + "to make sure it's unique", old_uuid, create.uuid); + } + } + if (is_replicated_database_internal && !internal) { if (create.uuid == UUIDHelpers::Nil) diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index 6cb50b310ad..f8a808f6c68 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -258,7 +258,7 @@ ProcessList::insert(const String & query_, const IAST * ast, ContextMutablePtr q query_context, query_, client_info, - priorities.insert(static_cast(settings.priority)), + priorities.insert(settings.priority), std::move(thread_group), query_kind, settings, diff --git a/src/Interpreters/QueryPriorities.h b/src/Interpreters/QueryPriorities.h index 9e18e7bcff3..7601c7ba6eb 100644 --- a/src/Interpreters/QueryPriorities.h +++ b/src/Interpreters/QueryPriorities.h @@ -31,7 +31,7 @@ namespace DB class QueryPriorities { public: - using Priority = int; + using Priority = size_t; private: friend struct Handle; diff --git 
a/src/Interpreters/ServerAsynchronousMetrics.cpp b/src/Interpreters/ServerAsynchronousMetrics.cpp index 872a9f864df..079029695c9 100644 --- a/src/Interpreters/ServerAsynchronousMetrics.cpp +++ b/src/Interpreters/ServerAsynchronousMetrics.cpp @@ -55,9 +55,11 @@ ServerAsynchronousMetrics::ServerAsynchronousMetrics( ContextPtr global_context_, unsigned update_period_seconds, unsigned heavy_metrics_update_period_seconds, - const ProtocolServerMetricsFunc & protocol_server_metrics_func_) + const ProtocolServerMetricsFunc & protocol_server_metrics_func_, + bool update_jemalloc_epoch_, + bool update_rss_) : WithContext(global_context_) - , AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_) + , AsynchronousMetrics(update_period_seconds, protocol_server_metrics_func_, update_jemalloc_epoch_, update_rss_) , heavy_metric_update_period(heavy_metrics_update_period_seconds) { /// sanity check diff --git a/src/Interpreters/ServerAsynchronousMetrics.h b/src/Interpreters/ServerAsynchronousMetrics.h index e3c83dc748e..5fab419a32b 100644 --- a/src/Interpreters/ServerAsynchronousMetrics.h +++ b/src/Interpreters/ServerAsynchronousMetrics.h @@ -14,7 +14,10 @@ public: ContextPtr global_context_, unsigned update_period_seconds, unsigned heavy_metrics_update_period_seconds, - const ProtocolServerMetricsFunc & protocol_server_metrics_func_); + const ProtocolServerMetricsFunc & protocol_server_metrics_func_, + bool update_jemalloc_epoch_, + bool update_rss_); + ~ServerAsynchronousMetrics() override; private: diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index 7e1b4e2fb0e..737353095b8 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -164,7 +164,7 @@ Field convertDecimalType(const Field & from, const To & type) } -Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint) +Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint, const FormatSettings & format_settings) { if (from_type_hint && from_type_hint->equals(type)) { @@ -359,7 +359,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID Array res(src_arr_size); for (size_t i = 0; i < src_arr_size; ++i) { - res[i] = convertFieldToType(src_arr[i], element_type); + res[i] = convertFieldToType(src_arr[i], element_type, nullptr, format_settings); if (res[i].isNull() && !canContainNull(element_type)) { // See the comment for Tuples below. 
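Note on the convertFieldToType changes above and below: the point of threading FormatSettings through every call is that nested conversions (array elements, tuple elements, map keys/values, and the string-parsing fallback via deserializeWholeText) now honor the caller's settings instead of a default-constructed FormatSettings{} at each level. A minimal standalone sketch of that forwarding pattern follows; MiniFormatSettings, MiniValue and convertSum are hypothetical stand-ins for illustration, not the real ClickHouse types.

    #include <iostream>
    #include <string>
    #include <vector>

    // Trimmed-down "format settings" and a nested value type, used only to show
    // why settings must be forwarded through the whole recursion.
    struct MiniFormatSettings
    {
        char decimal_separator = '.';   // imagine a format-dependent option
    };

    struct MiniValue
    {
        std::string scalar;              // leaf value as text
        std::vector<MiniValue> elements; // non-empty for array-like values
    };

    // Convert a leaf according to the caller's settings.
    double convertScalar(const std::string & text, const MiniFormatSettings & settings)
    {
        std::string normalized = text;
        for (char & c : normalized)
            if (c == settings.decimal_separator)
                c = '.';
        return std::stod(normalized);
    }

    // The same `settings` object is passed down to every nested element,
    // mirroring how convertFieldToType now forwards FormatSettings instead of
    // constructing defaults per call.
    double convertSum(const MiniValue & value, const MiniFormatSettings & settings)
    {
        if (value.elements.empty())
            return convertScalar(value.scalar, settings);

        double sum = 0.0;
        for (const auto & element : value.elements)
            sum += convertSum(element, settings);   // forwarded, not defaulted
        return sum;
    }

    int main()
    {
        MiniFormatSettings settings;
        settings.decimal_separator = ',';

        MiniValue leaf1;  leaf1.scalar = "1,5";
        MiniValue leaf2;  leaf2.scalar = "2,25";
        MiniValue nested; nested.elements = {leaf1, leaf2};

        std::cout << convertSum(nested, settings) << '\n';   // prints 3.75
    }

If the settings were rebuilt with defaults at each recursion level (the old behavior), the nested leaves in this sketch would be parsed with the wrong decimal separator even though the caller supplied the right one.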
@@ -387,7 +387,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID for (size_t i = 0; i < dst_tuple_size; ++i) { const auto & element_type = *(type_tuple->getElements()[i]); - res[i] = convertFieldToType(src_tuple[i], element_type); + res[i] = convertFieldToType(src_tuple[i], element_type, nullptr, format_settings); if (res[i].isNull() && !canContainNull(element_type)) { /* @@ -435,12 +435,12 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID Tuple updated_entry(2); - updated_entry[0] = convertFieldToType(key, key_type); + updated_entry[0] = convertFieldToType(key, key_type, nullptr, format_settings); if (updated_entry[0].isNull() && !canContainNull(key_type)) have_unconvertible_element = true; - updated_entry[1] = convertFieldToType(value, value_type); + updated_entry[1] = convertFieldToType(value, value_type, nullptr, format_settings); if (updated_entry[1].isNull() && !canContainNull(value_type)) have_unconvertible_element = true; @@ -551,7 +551,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID ReadBufferFromString in_buffer(src.safeGet()); try { - type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{}); + type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, format_settings); } catch (Exception & e) { @@ -563,7 +563,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } Field parsed = (*col)[0]; - return convertFieldToType(parsed, type, from_type_hint); + return convertFieldToType(parsed, type, from_type_hint, format_settings); } throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch in IN or VALUES section. Expected: {}. Got: {}", @@ -573,7 +573,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } -Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint) +Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings) { if (from_value.isNull()) return from_value; @@ -582,7 +582,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co return from_value; if (const auto * low_cardinality_type = typeid_cast(&to_type)) - return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint); + return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint, format_settings); else if (const auto * nullable_type = typeid_cast(&to_type)) { const IDataType & nested_type = *nullable_type->getNestedType(); @@ -593,20 +593,20 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co if (from_type_hint && from_type_hint->equals(nested_type)) return from_value; - return convertFieldToTypeImpl(from_value, nested_type, from_type_hint); + return convertFieldToTypeImpl(from_value, nested_type, from_type_hint, format_settings); } else - return convertFieldToTypeImpl(from_value, to_type, from_type_hint); + return convertFieldToTypeImpl(from_value, to_type, from_type_hint, format_settings); } -Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint) +Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings) { bool is_null = from_value.isNull(); if (is_null 
&& !canContainNull(to_type)) throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName()); - Field converted = convertFieldToType(from_value, to_type, from_type_hint); + Field converted = convertFieldToType(from_value, to_type, from_type_hint, format_settings); if (!is_null && converted.isNull()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, @@ -626,9 +626,9 @@ static bool decimalEqualsFloat(Field field, Float64 float_value) return decimal_to_float == float_value; } -std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type) +std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings) { - Field result_value = convertFieldToType(from_value, to_type, &from_type); + Field result_value = convertFieldToType(from_value, to_type, &from_type, format_settings); if (Field::isDecimal(from_value.getType()) && Field::isDecimal(result_value.getType())) { diff --git a/src/Interpreters/convertFieldToType.h b/src/Interpreters/convertFieldToType.h index 4aa09f8619e..c3c6271a157 100644 --- a/src/Interpreters/convertFieldToType.h +++ b/src/Interpreters/convertFieldToType.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -15,13 +16,13 @@ class IDataType; * Checks for the compatibility of types, checks values fall in the range of valid values of the type, makes type conversion. * If the value does not fall into the range - returns Null. */ -Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr); +Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {}); /// Does the same, but throws ARGUMENT_OUT_OF_BOUND if value does not fall into the range. -Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr); +Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {}); /// Applies stricter rules than convertFieldToType, doesn't allow loss of precision converting to Decimal. /// Returns `Field` if the conversion was successful and the result is equal to the original value, otherwise returns nullopt. 
-std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type); +std::optional convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings = {}); } diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 69a652a74a0..7ed16f17087 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -198,6 +198,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & auto & result_query_plan = planner.getQueryPlan(); auto optimization_settings = QueryPlanOptimizationSettings::fromContext(query_context); + optimization_settings.build_sets = false; // no need to build sets to collect filters result_query_plan.optimize(optimization_settings); FiltersForTableExpressionMap res; diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp new file mode 100644 index 00000000000..e90864ecdf3 --- /dev/null +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp @@ -0,0 +1,154 @@ +#include +#include +#include + +#include + + +namespace DB +{ + +JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat( + WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_) + : JSONRowOutputFormat(out_, header, settings_, yield_strings_) +{ +} + +void JSONCompactWithProgressRowOutputFormat::writePrefix() +{ + JSONUtils::writeCompactObjectStart(*ostr); + JSONUtils::writeCompactMetadata(names, types, settings, *ostr); + JSONUtils::writeCompactObjectEnd(*ostr); + writeCString("\n", *ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) +{ + JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr); + ++field_number; +} + +void JSONCompactWithProgressRowOutputFormat::writeFieldDelimiter() +{ + JSONUtils::writeFieldCompactDelimiter(*ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeRowStartDelimiter() +{ + if (has_progress) + writeProgress(); + writeCString("{\"data\":", *ostr); + JSONUtils::writeCompactArrayStart(*ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeRowEndDelimiter() +{ + JSONUtils::writeCompactArrayEnd(*ostr); + writeCString("}\n", *ostr); + field_number = 0; + ++row_count; +} + +void JSONCompactWithProgressRowOutputFormat::writeRowBetweenDelimiter() +{ +} + +void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals() +{ + JSONUtils::writeCompactObjectStart(*ostr); + JSONUtils::writeCompactArrayStart(*ostr, 0, "totals"); +} + +void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns, size_t row_num) +{ + JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeAfterTotals() +{ + JSONUtils::writeCompactArrayEnd(*ostr); + JSONUtils::writeCompactObjectEnd(*ostr); + writeCString("\n", *ostr); +} + +void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num) +{ + JSONUtils::writeCompactArrayStart(*ostr, 2, title); + JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr); + JSONUtils::writeCompactArrayEnd(*ostr); +} + +void 
JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value) +{ + statistics.progress.incrementPiecewiseAtomically(value); + String progress_line; + WriteBufferFromString buf(progress_line); + writeCString("{\"progress\":", buf); + statistics.progress.writeJSON(buf); + writeCString("}\n", buf); + buf.finalize(); + std::lock_guard lock(progress_lines_mutex); + progress_lines.emplace_back(std::move(progress_line)); + has_progress = true; +} + + +void JSONCompactWithProgressRowOutputFormat::flush() +{ + if (has_progress) + writeProgress(); + JSONRowOutputFormat::flush(); +} + +void JSONCompactWithProgressRowOutputFormat::writeSuffix() +{ + if (has_progress) + writeProgress(); +} + +void JSONCompactWithProgressRowOutputFormat::writeProgress() +{ + std::lock_guard lock(progress_lines_mutex); + for (const auto & progress_line : progress_lines) + writeString(progress_line, *ostr); + progress_lines.clear(); + has_progress = false; +} + +void JSONCompactWithProgressRowOutputFormat::finalizeImpl() +{ + if (exception_message.empty()) + { + JSONUtils::writeCompactAdditionalInfo( + row_count, + statistics.rows_before_limit, + statistics.applied_limit, + statistics.watch, + statistics.progress, + settings.write_statistics, + *ostr); + } + else + { + JSONUtils::writeCompactObjectStart(*ostr); + JSONUtils::writeException(exception_message, *ostr, settings, 0); + JSONUtils::writeCompactObjectEnd(*ostr); + } + writeCString("\n", *ostr); + ostr->next(); +} + +void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory) +{ + factory.registerOutputFormat( + "JSONCompactWithProgress", + [](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings) + { return std::make_shared(buf, sample, format_settings, false); }); + + factory.registerOutputFormat( + "JSONCompactWithProgressStrings", + [](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings) + { return std::make_shared(buf, sample, format_settings, true); }); +} + +} diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h new file mode 100644 index 00000000000..1c21914d8cb --- /dev/null +++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include +#include + + +namespace DB +{ + +struct FormatSettings; + +class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat +{ +public: + JSONCompactWithProgressRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_); + + String getName() const override { return "JSONCompactWithProgressRowOutputFormat"; } + + void onProgress(const Progress & value) override; + void flush() override; + +private: + void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override; + void writeFieldDelimiter() override; + void writeRowStartDelimiter() override; + void writeRowEndDelimiter() override; + void writeRowBetweenDelimiter() override; + bool supportTotals() const override { return true; } + bool supportExtremes() const override { return true; } + void writeBeforeTotals() override; + void writeAfterTotals() override; + void writeExtremesElement(const char * title, const Columns & columns, size_t row_num) override; + void writeTotals(const Columns & columns, size_t row_num) override; + + void writeProgress(); + void writePrefix() override; + void writeSuffix() override; + void 
finalizeImpl() override; + + + std::vector progress_lines; + std::mutex progress_lines_mutex; + /// To not lock mutex and check progress_lines every row, + /// we will use atomic flag that progress_lines is not empty. + std::atomic_bool has_progress = false; +}; + +} diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 10d0e051665..16b88d0b8dc 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -542,7 +542,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx if (format_settings.null_as_default) tryToReplaceNullFieldsInComplexTypesWithDefaultValues(expression_value, type); - Field value = convertFieldToType(expression_value, type, value_raw.second.get()); + Field value = convertFieldToType(expression_value, type, value_raw.second.get(), format_settings); /// Check that we are indeed allowed to insert a NULL. if (value.isNull() && !type.isNullable() && !type.isLowCardinalityNullable()) diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h index c48bdf1552a..43f07ced696 100644 --- a/src/Processors/QueryPlan/Optimizations/Optimizations.h +++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h @@ -16,7 +16,7 @@ void optimizeTreeFirstPass(const QueryPlanOptimizationSettings & settings, Query void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_settings, QueryPlan::Node & root, QueryPlan::Nodes & nodes); /// Third pass is used to apply filters such as key conditions and skip indexes to the storages that support them. /// After that it add CreateSetsStep for the subqueries that has not be used in the filters. -void optimizeTreeThirdPass(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes); +void addStepsToBuildSets(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes); /// Optimization (first pass) is a function applied to QueryPlan::Node. /// It can read and update subtree of specified node. 
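For context on the build_sets flag introduced below: QueryPlan::optimize now skips addStepsToBuildSets (the renamed third pass) when build_sets is false, which collectFiltersForAnalysis relies on because it only needs the filter expressions, not materialized sets for subqueries. A minimal sketch of that gating pattern, assuming hypothetical MiniPlan and MiniOptimizationSettings types that stand in for the real classes:

    #include <iostream>
    #include <string>
    #include <vector>

    struct MiniPlan
    {
        std::vector<std::string> steps;
    };

    struct MiniOptimizationSettings
    {
        bool build_sets = true;   // same default as the new QueryPlanOptimizationSettings flag
    };

    void firstPass(MiniPlan & plan)  { plan.steps.push_back("first-pass rewrites"); }
    void secondPass(MiniPlan & plan) { plan.steps.push_back("second-pass rewrites"); }
    void addStepsToBuildSets(MiniPlan & plan) { plan.steps.push_back("CreateSets"); }

    void optimize(MiniPlan & plan, const MiniOptimizationSettings & settings)
    {
        firstPass(plan);
        secondPass(plan);

        // The set-building pass is optional: callers that only want to collect
        // filters can switch it off and avoid the extra CreateSets steps.
        if (settings.build_sets)
            addStepsToBuildSets(plan);
    }

    int main()
    {
        MiniPlan plan;
        MiniOptimizationSettings settings;
        settings.build_sets = false;   // no need to build sets to collect filters

        optimize(plan, settings);
        for (const auto & step : plan.steps)
            std::cout << step << '\n';   // prints only the two rewrite passes
    }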
diff --git a/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h b/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h index 539ff2eafbb..a2b22495800 100644 --- a/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h +++ b/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h @@ -75,6 +75,8 @@ struct QueryPlanOptimizationSettings String force_projection_name; bool optimize_use_implicit_projections = false; + bool build_sets = true; + static QueryPlanOptimizationSettings fromSettings(const Settings & from); static QueryPlanOptimizationSettings fromContext(ContextPtr from); }; diff --git a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp index 25895788e2e..f8504d84d12 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp @@ -216,7 +216,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s optimization_settings.force_projection_name); } -void optimizeTreeThirdPass(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes) +void addStepsToBuildSets(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::Nodes & nodes) { Stack stack; stack.push_back({.node = &root}); diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index 44d2703e973..aaa1f53b5ee 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -50,6 +50,9 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type) case TypeIndex::Float64: case TypeIndex::Nullable: case TypeIndex::ObjectDeprecated: + case TypeIndex::Object: + case TypeIndex::Variant: + case TypeIndex::Dynamic: return false; case TypeIndex::Array: { @@ -76,16 +79,6 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type) const auto & data_type_map = static_cast(data_type); return isSafePrimaryDataKeyType(*data_type_map.getKeyType()) && isSafePrimaryDataKeyType(*data_type_map.getValueType()); } - case TypeIndex::Variant: - { - const auto & data_type_variant = static_cast(data_type); - const auto & data_type_variant_elements = data_type_variant.getVariants(); - for (const auto & data_type_variant_element : data_type_variant_elements) - if (!isSafePrimaryDataKeyType(*data_type_variant_element)) - return false; - - return false; - } default: { break; diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index b78f7a29cde..9a39df26241 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -504,7 +504,8 @@ void QueryPlan::optimize(const QueryPlanOptimizationSettings & optimization_sett QueryPlanOptimizations::optimizeTreeFirstPass(optimization_settings, *root, nodes); QueryPlanOptimizations::optimizeTreeSecondPass(optimization_settings, *root, nodes); - QueryPlanOptimizations::optimizeTreeThirdPass(*this, *root, nodes); + if (optimization_settings.build_sets) + QueryPlanOptimizations::addStepsToBuildSets(*this, *root, nodes); updateDataStreams(*root); } diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index c9ada32b839..2e21df0f387 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -486,7 +486,7 @@ private: #define M(NAME) \ else if (first->type == AggregatedDataVariants::Type::NAME) \ - 
params->aggregator.mergeSingleLevelDataImplNAME)::element_type>(*data); + params->aggregator.mergeSingleLevelDataImplNAME)::element_type>(*data, shared_data->is_cancelled); if (false) {} // NOLINT APPLY_FOR_VARIANTS_SINGLE_LEVEL(M) #undef M diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 26c719e0263..ef76bc691ec 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1142,6 +1142,16 @@ bool AlterCommands::hasFullTextIndex(const StorageInMemoryMetadata & metadata) return false; } +bool AlterCommands::hasVectorSimilarityIndex(const StorageInMemoryMetadata & metadata) +{ + for (const auto & index : metadata.secondary_indices) + { + if (index.type == "vector_similarity") + return true; + } + return false; +} + void AlterCommands::apply(StorageInMemoryMetadata & metadata, ContextPtr context) const { if (!prepared) diff --git a/src/Storages/AlterCommands.h b/src/Storages/AlterCommands.h index a91bac10214..c4c792e7dec 100644 --- a/src/Storages/AlterCommands.h +++ b/src/Storages/AlterCommands.h @@ -237,6 +237,9 @@ public: /// Check if commands have any full-text index static bool hasFullTextIndex(const StorageInMemoryMetadata & metadata); + + /// Check if commands have any vector similarity index + static bool hasVectorSimilarityIndex(const StorageInMemoryMetadata & metadata); }; } diff --git a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp index 2cf69b9f6b7..625c64128e7 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertBatch.cpp @@ -28,7 +28,6 @@ namespace ErrorCodes extern const int TOO_MANY_PARTITIONS; extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES; extern const int ARGUMENT_OUT_OF_BOUND; - extern const int LOGICAL_ERROR; } /// Can the batch be split and send files from batch one-by-one instead? 
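About the replacement of the manual checks in the hunks below: each call site previously took results.front() and threw LOGICAL_ERROR when the result was invalid; that check is now centralized in the pool's getValidTryResult. Its real implementation is not part of this diff, so the following is only a sketch of the general pattern, with hypothetical MiniTryResult and pickValidResult names.

    #include <stdexcept>
    #include <string>
    #include <vector>

    struct MiniTryResult
    {
        std::string entry;          // stands in for the connection entry
        bool is_usable = false;
        bool is_readonly = false;
    };

    MiniTryResult pickValidResult(const std::vector<MiniTryResult> & results, bool skip_read_only_replicas)
    {
        for (const auto & result : results)
        {
            if (!result.is_usable)
                continue;
            if (skip_read_only_replicas && result.is_readonly)
                continue;
            return result;          // first result that passes all checks
        }
        // One shared, descriptive error instead of a LOGICAL_ERROR repeated at every call site.
        throw std::runtime_error("No valid connection result (no usable replica found)");
    }

    int main()
    {
        std::vector<MiniTryResult> results = {
            {"replica-1", /*is_usable=*/true, /*is_readonly=*/true},
            {"replica-2", /*is_usable=*/true, /*is_readonly=*/false},
        };

        auto result = pickValidResult(results, /*skip_read_only_replicas=*/true);
        return result.entry == "replica-2" ? 0 : 1;   // read-only replica is skipped
    }

Centralizing the check also means the error raised for an unusable replica can be a user-facing one rather than a logical error, which matches the removal of the LOGICAL_ERROR code above.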
@@ -243,10 +242,7 @@ void DistributedAsyncInsertBatch::sendBatch(const SettingsChanges & settings_cha auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings); auto results = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName()); - auto result = results.front(); - if (parent.pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result"); - + auto result = parent.pool->getValidTryResult(results, insert_settings.distributed_insert_skip_read_only_replicas); connection = std::move(result.entry); compression_expected = connection->getCompression() == Protocol::Compression::Enable; @@ -305,10 +301,7 @@ void DistributedAsyncInsertBatch::sendSeparateFiles(const SettingsChanges & sett auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings); auto results = parent.pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, parent.storage.remote_storage.getQualifiedName()); - auto result = results.front(); - if (parent.pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result"); - + auto result = parent.pool->getValidTryResult(results, insert_settings.distributed_insert_skip_read_only_replicas); auto connection = std::move(result.entry); bool compression_expected = connection->getCompression() == Protocol::Compression::Enable; diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index 7616b384860..7f368102dfd 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -415,10 +415,7 @@ void DistributedAsyncInsertDirectoryQueue::processFile(std::string & file_path, auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(insert_settings); auto results = pool->getManyCheckedForInsert(timeouts, insert_settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName()); - auto result = results.front(); - if (pool->isTryResultInvalid(result, insert_settings.distributed_insert_skip_read_only_replicas)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result"); - + auto result = pool->getValidTryResult(results, insert_settings.distributed_insert_skip_read_only_replicas); auto connection = std::move(result.entry); LOG_DEBUG(log, "Sending `{}` to {} ({} rows, {} bytes)", diff --git a/src/Storages/Distributed/DistributedSink.cpp b/src/Storages/Distributed/DistributedSink.cpp index e3e73e42096..f01ea10065c 100644 --- a/src/Storages/Distributed/DistributedSink.cpp +++ b/src/Storages/Distributed/DistributedSink.cpp @@ -347,7 +347,7 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si } const Block & shard_block = (num_shards > 1) ? 
job.current_shard_block : current_block; - const Settings & settings = context->getSettingsRef(); + const Settings settings = context->getSettingsCopy(); size_t rows = shard_block.rows(); @@ -377,10 +377,7 @@ DistributedSink::runWritingJob(JobReplica & job, const Block & current_block, si /// NOTE: INSERT will also take into account max_replica_delay_for_distributed_queries /// (anyway fallback_to_stale_replicas_for_distributed_queries=true by default) auto results = shard_info.pool->getManyCheckedForInsert(timeouts, settings, PoolMode::GET_ONE, storage.remote_storage.getQualifiedName()); - auto result = results.front(); - if (shard_info.pool->isTryResultInvalid(result, settings.distributed_insert_skip_read_only_replicas)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got an invalid connection result"); - + auto result = shard_info.pool->getValidTryResult(results, settings.distributed_insert_skip_read_only_replicas); job.connection_entry = std::move(result.entry); } else diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 37eb94f269b..a7bcaa25ddc 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -986,6 +986,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic indexes_to_recalc = MergeTreeIndexFactory::instance().getMany(indexes_it->second); auto indices_expression_dag = indexes_it->second.getSingleExpressionForIndices(global_ctx->metadata_snapshot->getColumns(), global_ctx->data->getContext())->getActionsDAG().clone(); + indices_expression_dag.addMaterializingOutputActions(); /// Const columns cannot be written without materialization. auto calculate_indices_expression_step = std::make_unique( merge_column_query_plan.getCurrentDataStream(), std::move(indices_expression_dag)); @@ -1352,10 +1353,10 @@ bool MergeTask::execute() /// Apply merge strategy (Ordinary, Colapsing, Aggregating, etc) to the stream -class ApplyMergeStep : public ITransformingStep +class MergePartsStep : public ITransformingStep { public: - ApplyMergeStep( + MergePartsStep( const DataStream & input_stream_, const SortDescription & sort_description_, const Names partition_key_columns_, @@ -1378,7 +1379,7 @@ public: , time_of_merge(time_of_merge_) {} - String getName() const override { return "ApplyMergePolicy"; } + String getName() const override { return "MergeParts"; } void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { @@ -1495,45 +1496,6 @@ private: const time_t time_of_merge{0}; }; - -class MaterializingStep : public ITransformingStep -{ -public: - explicit MaterializingStep( - const DataStream & input_stream_) - : ITransformingStep(input_stream_, input_stream_.header, getTraits()) - {} - - String getName() const override { return "Materializing"; } - - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override - { - pipeline.addTransform(std::make_shared(input_streams.front().header)); - } - - void updateOutputStream() override - { - output_stream = createOutputStream(input_streams.front(), input_streams.front().header, getDataStreamTraits()); - } - -private: - static Traits getTraits() - { - return ITransformingStep::Traits - { - { - .returns_single_stream = true, - .preserves_number_of_streams = true, - .preserves_sorting = true, - }, - { - .preserves_number_of_rows = true, - } - }; - } -}; - - class TTLStep : public ITransformingStep { public: @@ -1618,8 +1580,6 @@ void 
MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const global_ctx->horizontal_stage_progress = std::make_unique( ctx->column_sizes ? ctx->column_sizes->keyColumnsWeight() : 1.0); - auto sorting_key_expression_dag = global_ctx->metadata_snapshot->getSortingKey().expression->getActionsDAG().clone(); - /// Read from all parts std::vector plans; for (size_t i = 0; i < global_ctx->future_part->parts.size(); ++i) @@ -1644,15 +1604,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const global_ctx->context, ctx->log); - if (global_ctx->metadata_snapshot->hasSortingKey()) - { - /// Calculate sorting key expressions so that they are available for merge sorting. - auto calculate_sorting_key_expression_step = std::make_unique( - plan_for_part->getCurrentDataStream(), - sorting_key_expression_dag.clone()); /// TODO: can we avoid cloning here? - plan_for_part->addStep(std::move(calculate_sorting_key_expression_step)); - } - plans.emplace_back(std::move(plan_for_part)); } @@ -1669,6 +1620,16 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const merge_parts_query_plan.unitePlans(std::move(union_step), std::move(plans)); } + if (global_ctx->metadata_snapshot->hasSortingKey()) + { + /// Calculate sorting key expressions so that they are available for merge sorting. + auto sorting_key_expression_dag = global_ctx->metadata_snapshot->getSortingKey().expression->getActionsDAG().clone(); + auto calculate_sorting_key_expression_step = std::make_unique( + merge_parts_query_plan.getCurrentDataStream(), + std::move(sorting_key_expression_dag)); + merge_parts_query_plan.addStep(std::move(calculate_sorting_key_expression_step)); + } + /// Merge { Names sort_columns = global_ctx->metadata_snapshot->getSortingKeyColumns(); @@ -1691,7 +1652,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const if (global_ctx->cleanup && !data_settings->allow_experimental_replacing_merge_with_cleanup) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental merges with CLEANUP are not allowed"); - auto merge_step = std::make_unique( + auto merge_step = std::make_unique( merge_parts_query_plan.getCurrentDataStream(), sort_description, partition_key_columns, @@ -1749,12 +1710,11 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const if (!global_ctx->merging_skip_indexes.empty()) { auto indices_expression_dag = global_ctx->merging_skip_indexes.getSingleExpressionForIndices(global_ctx->metadata_snapshot->getColumns(), global_ctx->data->getContext())->getActionsDAG().clone(); + indices_expression_dag.addMaterializingOutputActions(); /// Const columns cannot be written without materialization. auto calculate_indices_expression_step = std::make_unique( merge_parts_query_plan.getCurrentDataStream(), std::move(indices_expression_dag)); merge_parts_query_plan.addStep(std::move(calculate_indices_expression_step)); - /// TODO: what is the purpose of MaterializingTransform in the original code? 
- merge_parts_query_plan.addStep(std::make_unique(merge_parts_query_plan.getCurrentDataStream())); } if (!subqueries.empty()) diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index de670731d21..ca619d4d208 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3230,6 +3230,10 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is not enabled (turn on setting 'allow_experimental_full_text_index')"); + if (AlterCommands::hasVectorSimilarityIndex(new_metadata) && !settings.allow_experimental_vector_similarity_index) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "Experimental vector similarity index is disabled (turn on setting 'allow_experimental_vector_similarity_index')"); + for (const auto & disk : getDisks()) if (!disk->supportsHardLinks() && !commands.isSettingsAlter() && !commands.isCommentAlter()) throw Exception( diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index b0e70e94b73..9bfc87135d9 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -85,11 +85,11 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( marks_file_extension{marks_file_extension_}, plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), - compressor(plain_hashing, compression_codec_, max_compress_block_size_), + compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), compressed_hashing(compressor), marks_file(data_part_storage->writeFile(marks_path_ + marks_file_extension, 4096, query_write_settings)), marks_hashing(*marks_file), - marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_), + marks_compressor(marks_hashing, marks_compression_codec_, marks_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), marks_compressed_hashing(marks_compressor), compress_marks(MarkType(marks_file_extension).compressed) { @@ -108,7 +108,7 @@ MergeTreeDataPartWriterOnDisk::Stream::Stream( data_file_extension{data_file_extension_}, plain_file(data_part_storage->writeFile(data_path_ + data_file_extension, max_compress_block_size_, query_write_settings)), plain_hashing(*plain_file), - compressor(plain_hashing, compression_codec_, max_compress_block_size_), + compressor(plain_hashing, compression_codec_, max_compress_block_size_, query_write_settings.use_adaptive_write_buffer, query_write_settings.adaptive_write_buffer_initial_size), compressed_hashing(compressor), compress_marks(false) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 8b6735e0fe2..f050accd7a1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -177,6 +177,10 @@ void MergeTreeDataPartWriterWide::addStreams( if (!max_compress_block_size) max_compress_block_size = settings.max_compress_block_size; + WriteSettings query_write_settings = settings.query_write_settings; + query_write_settings.use_adaptive_write_buffer = 
settings.use_adaptive_write_buffer_for_dynamic_subcolumns && ISerialization::isDynamicSubcolumn(substream_path, substream_path.size()); + query_write_settings.adaptive_write_buffer_initial_size = settings.adaptive_write_buffer_initial_size; + column_streams[stream_name] = std::make_unique>( stream_name, data_part_storage, @@ -186,7 +190,7 @@ void MergeTreeDataPartWriterWide::addStreams( max_compress_block_size, marks_compression_codec, settings.marks_compress_block_size, - settings.query_write_settings); + query_write_settings); full_name_to_stream_name.emplace(full_stream_name, stream_name); stream_name_to_full_name.emplace(stream_name, full_stream_name); diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.cpp b/src/Storages/MergeTree/MergeTreeIOSettings.cpp index 24cb25afe47..19365a90a14 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeIOSettings.cpp @@ -30,6 +30,8 @@ MergeTreeWriterSettings::MergeTreeWriterSettings( , low_cardinality_max_dictionary_size(global_settings.low_cardinality_max_dictionary_size) , low_cardinality_use_single_dictionary_for_part(global_settings.low_cardinality_use_single_dictionary_for_part != 0) , use_compact_variant_discriminators_serialization(storage_settings->use_compact_variant_discriminators_serialization) + , use_adaptive_write_buffer_for_dynamic_subcolumns(storage_settings->use_adaptive_write_buffer_for_dynamic_subcolumns) + , adaptive_write_buffer_initial_size(storage_settings->adaptive_write_buffer_initial_size) { } diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index 47b174b2e29..fcc72815d8f 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -80,6 +80,8 @@ struct MergeTreeWriterSettings size_t low_cardinality_max_dictionary_size; bool low_cardinality_use_single_dictionary_for_part; bool use_compact_variant_discriminators_serialization; + bool use_adaptive_write_buffer_for_dynamic_subcolumns; + size_t adaptive_write_buffer_initial_size; }; } diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index ae183d74782..58892d0dbf2 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -195,7 +195,7 @@ void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) LOG_TRACE(logger, "Start writing vector similarity index"); if (empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty minmax index {}", backQuote(index_name)); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty vector similarity index {}", backQuote(index_name)); writeIntBinary(FILE_FORMAT_VERSION, ostr); diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 0769b60dc6b..dcb18155114 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -99,6 +99,8 @@ struct Settings; M(Bool, add_implicit_sign_column_constraint_for_collapsing_engine, false, "If true, add implicit constraint for sign column for CollapsingMergeTree engine.", 0) \ M(Milliseconds, sleep_before_commit_local_part_in_replicated_table_ms, 0, "For testing. 
Do not change it.", 0) \ M(Bool, optimize_row_order, false, "Allow reshuffling of rows during part inserts and merges to improve the compressibility of the new part", 0) \ + M(Bool, use_adaptive_write_buffer_for_dynamic_subcolumns, true, "Allow to use adaptive writer buffers during writing dynamic subcolumns to reduce memory usage", 0) \ + M(UInt64, adaptive_write_buffer_initial_size, 16 * 1024, "Initial size of an adaptive write buffer", 0) \ \ /* Part removal settings. */ \ M(UInt64, simultaneous_parts_removal_limit, 0, "Maximum number of parts to remove during one CleanupThread iteration (0 means unlimited).", 0) \ diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.cpp b/src/Storages/MergeTree/VectorSimilarityCondition.cpp index c8f33857640..641b0037e7b 100644 --- a/src/Storages/MergeTree/VectorSimilarityCondition.cpp +++ b/src/Storages/MergeTree/VectorSimilarityCondition.cpp @@ -40,10 +40,12 @@ void extractReferenceVectorFromLiteral(std::vector & reference_vector, } } -VectorSimilarityCondition::Info::DistanceFunction stringToDistanceFunction(std::string_view distance_function) +VectorSimilarityCondition::Info::DistanceFunction stringToDistanceFunction(const String & distance_function) { if (distance_function == "L2Distance") return VectorSimilarityCondition::Info::DistanceFunction::L2; + else if (distance_function == "cosineDistance") + return VectorSimilarityCondition::Info::DistanceFunction::Cosine; else return VectorSimilarityCondition::Info::DistanceFunction::Unknown; } @@ -57,7 +59,7 @@ VectorSimilarityCondition::VectorSimilarityCondition(const SelectQueryInfo & que , index_is_useful(checkQueryStructure(query_info)) {} -bool VectorSimilarityCondition::alwaysUnknownOrTrue(String distance_function) const +bool VectorSimilarityCondition::alwaysUnknownOrTrue(const String & distance_function) const { if (!index_is_useful) return true; /// query isn't supported diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.h b/src/Storages/MergeTree/VectorSimilarityCondition.h index 2380f8f46b0..2e9e06a31d0 100644 --- a/src/Storages/MergeTree/VectorSimilarityCondition.h +++ b/src/Storages/MergeTree/VectorSimilarityCondition.h @@ -57,7 +57,8 @@ public: enum class DistanceFunction : uint8_t { Unknown, - L2 + L2, + Cosine }; std::vector reference_vector; @@ -68,7 +69,7 @@ public: }; /// Returns false if query can be speeded up by an ANN index, true otherwise. 
- bool alwaysUnknownOrTrue(String distance_function) const; + bool alwaysUnknownOrTrue(const String & distance_function) const; std::vector getReferenceVector() const; size_t getDimensions() const; @@ -141,18 +142,12 @@ private: /// Traverses the AST of ORDERBY section void traverseOrderByAST(const ASTPtr & node, RPN & rpn); - /// Returns true and stores ANNExpr if the query has valid WHERE section - static bool matchRPNWhere(RPN & rpn, Info & info); - /// Returns true and stores ANNExpr if the query has valid ORDERBY section static bool matchRPNOrderBy(RPN & rpn, Info & info); /// Returns true and stores Length if we have valid LIMIT clause in query static bool matchRPNLimit(RPNElement & rpn, UInt64 & limit); - /// Matches dist function, reference vector, column name - static bool matchMainParts(RPN::iterator & iter, const RPN::iterator & end, Info & info); - /// Gets float or int from AST node static float getFloatOrIntLiteralOrPanic(const RPN::iterator& iter); diff --git a/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h b/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h index 5ef5e1db62e..212dc048868 100644 --- a/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h +++ b/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h @@ -14,6 +14,6 @@ using ContextPtr = std::shared_ptr; /// Extracts a zookeeper path from a specified CREATE TABLE query. /// The function checks the table engine and if it is Replicated*MergeTree then it takes the first argument and expands macros in it. /// Returns std::nullopt if the specified CREATE query doesn't describe a Replicated table or its arguments can't be evaluated. -std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & context); +std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & local_context); } diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 9a65d590453..18ed7df9b5d 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -189,7 +190,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( const String & engine_name, ASTs & engine_args, LoadingStrictnessLevel mode, - const ContextPtr & context, + const ContextPtr & local_context, String & zookeeper_path, String & replica_name, RenamingRestrictions & renaming_restrictions) @@ -206,11 +207,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( { /// Allow expressions in engine arguments. /// In new syntax argument can be literal or identifier or array/tuple of identifiers. 
- evaluateEngineArgs(engine_args, context); + evaluateEngineArgs(engine_args, local_context); } - bool is_on_cluster = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; - bool is_replicated_database = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && + bool is_on_cluster = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY; + bool is_replicated_database = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY && DatabaseCatalog::instance().getDatabase(table_id.database_name)->getEngineName() == "Replicated"; /// Allow implicit {uuid} macros only for zookeeper_path in ON CLUSTER queries @@ -230,10 +231,10 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( /// We did unfold it in previous versions to make moving table from Atomic to Ordinary database work correctly, /// but now it's not allowed (and it was the only reason to unfold {uuid} macro). info.table_id.uuid = UUIDHelpers::Nil; - zookeeper_path = context->getMacros()->expand(zookeeper_path, info); + zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info); info.level = 0; - replica_name = context->getMacros()->expand(replica_name, info); + replica_name = local_context->getMacros()->expand(replica_name, info); } ast_zk_path->value = zookeeper_path; @@ -251,11 +252,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( } if (!allow_uuid_macro) info.table_id.uuid = UUIDHelpers::Nil; - zookeeper_path = context->getMacros()->expand(zookeeper_path, info); + zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info); info.level = 0; info.table_id.uuid = UUIDHelpers::Nil; - replica_name = context->getMacros()->expand(replica_name, info); + replica_name = local_context->getMacros()->expand(replica_name, info); /// We do not allow renaming table with these macros in metadata, because zookeeper_path will be broken after RENAME TABLE. /// NOTE: it may happen if table was created by older version of ClickHouse (< 20.10) and macros was not unfolded on table creation @@ -272,9 +273,24 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( bool has_arguments = (arg_num + 2 <= arg_cnt); bool has_valid_arguments = has_arguments && engine_args[arg_num]->as() && engine_args[arg_num + 1]->as(); + const auto & server_settings = local_context->getServerSettings(); if (has_valid_arguments) { + if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "It's not allowed to specify explicit zookeeper_path and replica_name " + "for ReplicatedMergeTree arguments in Replicated database. 
If you really want to " + "specify them explicitly, enable setting " + "database_replicated_allow_replicated_engine_arguments."); + } + else if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1) + { + LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "It's not recommended to explicitly specify " + "zookeeper_path and replica_name in ReplicatedMergeTree arguments"); + } + /// Get path and name from engine arguments auto * ast_zk_path = engine_args[arg_num]->as(); if (ast_zk_path && ast_zk_path->value.getType() == Field::Types::String) @@ -288,6 +304,15 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must be a string literal{}", verbose_help_message); + + if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2) + { + LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) " + "with default arguments", zookeeper_path, replica_name); + engine_args[arg_num]->as()->value = zookeeper_path = server_settings.default_replica_path; + engine_args[arg_num + 1]->as()->value = replica_name = server_settings.default_replica_name; + } + expand_macro(ast_zk_path, ast_replica_name); } else if (is_extended_storage_def @@ -297,7 +322,6 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( { /// Try use default values if arguments are not specified. /// Note: {uuid} macro works for ON CLUSTER queries when database engine is Atomic. - const auto & server_settings = context->getServerSettings(); zookeeper_path = server_settings.default_replica_path; /// TODO maybe use hostname if {replica} is not defined? replica_name = server_settings.default_replica_name; @@ -322,7 +346,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs( } /// Extracts a zookeeper path from a specified CREATE TABLE query. 
-std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & context) +std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & local_context) { if (!query.storage || !query.storage->engine) return {}; @@ -346,7 +370,7 @@ std::optional extractZooKeeperPathFromReplicatedTableDef(const ASTCreate try { - extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, context, + extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, local_context, zookeeper_path, replica_name, renaming_restrictions); } catch (Exception & e) diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index 667a925d11e..f04e868ee5a 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -207,18 +207,28 @@ struct DeltaLakeMetadataImpl Poco::Dynamic::Var json = parser.parse(json_str); Poco::JSON::Object::Ptr object = json.extract(); + if (!object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to parse metadata file"); + +#ifdef ABORT_ON_LOGICAL_ERROR std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM object->stringify(oss); LOG_TEST(log, "Metadata: {}", oss.str()); +#endif if (object->has("metaData")) { const auto metadata_object = object->get("metaData").extract(); + if (!metadata_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `metaData` field"); + const auto schema_object = metadata_object->getValue("schemaString"); Poco::JSON::Parser p; Poco::Dynamic::Var fields_json = parser.parse(schema_object); const Poco::JSON::Object::Ptr & fields_object = fields_json.extract(); + if (!fields_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `fields` field"); auto current_schema = parseMetadata(fields_object); if (file_schema.empty()) @@ -237,6 +247,9 @@ struct DeltaLakeMetadataImpl if (object->has("add")) { auto add_object = object->get("add").extract(); + if (!add_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `add` field"); + auto path = add_object->getValue("path"); result.insert(fs::path(configuration->getPath()) / path); @@ -247,6 +260,9 @@ struct DeltaLakeMetadataImpl if (add_object->has("partitionValues")) { auto partition_values = add_object->get("partitionValues").extract(); + if (!partition_values) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `partitionValues` field"); + if (partition_values->size()) { auto & current_partition_columns = file_partition_columns[filename]; @@ -274,7 +290,11 @@ struct DeltaLakeMetadataImpl } else if (object->has("remove")) { - auto path = object->get("remove").extract()->getValue("path"); + auto remove_object = object->get("remove").extract(); + if (!remove_object) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `remove` field"); + + auto path = remove_object->getValue("path"); result.erase(fs::path(configuration->getPath()) / path); } } diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index 9452ce81e9e..c1ef37e1a48 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -64,9 +64,7 @@ namespace void checkAndAdjustSettings( ObjectStorageQueueSettings & 
queue_settings, - ASTStorage * engine_args, - bool is_attach, - const LoggerPtr & log) + bool is_attach) { if (!is_attach && !queue_settings.mode.changed) { @@ -85,16 +83,6 @@ namespace "Setting `cleanup_interval_min_ms` ({}) must be less or equal to `cleanup_interval_max_ms` ({})", queue_settings.cleanup_interval_min_ms, queue_settings.cleanup_interval_max_ms); } - - if (!is_attach && !queue_settings.processing_threads_num.changed) - { - queue_settings.processing_threads_num = std::max(getNumberOfPhysicalCPUCores(), 16); - engine_args->settings->as()->changes.insertSetting( - "processing_threads_num", - queue_settings.processing_threads_num.value); - - LOG_TRACE(log, "Set `processing_threads_num` to {}", queue_settings.processing_threads_num); - } } std::shared_ptr getQueueLog(const ObjectStoragePtr & storage, const ContextPtr & context, const ObjectStorageQueueSettings & table_settings) @@ -130,7 +118,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue( const String & comment, ContextPtr context_, std::optional format_settings_, - ASTStorage * engine_args, + ASTStorage * /* engine_args */, LoadingStrictnessLevel mode) : IStorage(table_id_) , WithContext(context_) @@ -154,7 +142,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue( throw Exception(ErrorCodes::BAD_QUERY_PARAMETER, "ObjectStorageQueue url must either end with '/' or contain globs"); } - checkAndAdjustSettings(*queue_settings, engine_args, mode > LoadingStrictnessLevel::CREATE, log); + checkAndAdjustSettings(*queue_settings, mode > LoadingStrictnessLevel::CREATE); object_storage = configuration->createObjectStorage(context_, /* is_readonly */true); FormatFactory::instance().checkFormatName(configuration->format); diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp index 6372c804e0e..0557530515f 100644 --- a/src/Storages/Statistics/Statistics.cpp +++ b/src/Storages/Statistics/Statistics.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -101,6 +102,8 @@ Float64 ColumnStatistics::estimateLess(const Field & val) const { if (stats.contains(StatisticsType::TDigest)) return stats.at(StatisticsType::TDigest)->estimateLess(val); + if (stats.contains(StatisticsType::MinMax)) + return stats.at(StatisticsType::MinMax)->estimateLess(val); return rows * ConditionSelectivityEstimator::default_cond_range_factor; } @@ -121,6 +124,14 @@ Float64 ColumnStatistics::estimateEqual(const Field & val) const if (stats.contains(StatisticsType::CountMinSketch)) return stats.at(StatisticsType::CountMinSketch)->estimateEqual(val); #endif + if (stats.contains(StatisticsType::Uniq)) + { + UInt64 cardinality = stats.at(StatisticsType::Uniq)->estimateCardinality(); + if (cardinality == 0 || rows == 0) + return 0; + return 1.0 / cardinality * rows; /// assume uniform distribution + } + return rows * ConditionSelectivityEstimator::default_cond_equal_factor; } @@ -198,6 +209,9 @@ void MergeTreeStatisticsFactory::registerValidator(StatisticsType stats_type, Va MergeTreeStatisticsFactory::MergeTreeStatisticsFactory() { + registerValidator(StatisticsType::MinMax, minMaxStatisticsValidator); + registerCreator(StatisticsType::MinMax, minMaxStatisticsCreator); + registerValidator(StatisticsType::TDigest, tdigestStatisticsValidator); registerCreator(StatisticsType::TDigest, tdigestStatisticsCreator); @@ -234,7 +248,7 @@ ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnDescription & co { auto it = creators.find(type); if (it == creators.end()) - throw 
Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'tdigest' 'uniq' and 'count_min'", type); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'countmin', 'minmax', 'tdigest' and 'uniq'", type); auto stat_ptr = (it->second)(desc, column_desc.type); column_stat->stats[type] = stat_ptr; } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index 6dbd0625d3d..f477181ec2d 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -1,4 +1,3 @@ - #include #include #include @@ -50,7 +49,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const if (isStringOrFixedString(data_type)) return sketch.get_estimate(val.safeGet()); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'countmin' does not support estimate data type of {}", data_type->getName()); } void StatisticsCountMinSketch::update(const ColumnPtr & column) @@ -89,7 +88,7 @@ void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*des DataTypePtr inner_data_type = removeNullable(data_type); inner_data_type = removeLowCardinalityAndNullable(inner_data_type); if (!inner_data_type->isValueRepresentedByNumber() && !isStringOrFixedString(inner_data_type)) - throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName()); + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countmin' does not support type {}", data_type->getName()); } StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type) diff --git a/src/Storages/Statistics/StatisticsMinMax.cpp b/src/Storages/Statistics/StatisticsMinMax.cpp new file mode 100644 index 00000000000..27072d225de --- /dev/null +++ b/src/Storages/Statistics/StatisticsMinMax.cpp @@ -0,0 +1,86 @@ +#include +#include +#include +#include +#include + +#include + + +namespace DB +{ + +namespace ErrorCodes +{ +extern const int ILLEGAL_STATISTICS; +} + +StatisticsMinMax::StatisticsMinMax(const SingleStatisticsDescription & description, const DataTypePtr & data_type_) + : IStatistics(description) + , data_type(data_type_) +{ +} + +void StatisticsMinMax::update(const ColumnPtr & column) +{ + for (size_t row = 0; row < column->size(); ++row) + { + if (column->isNullAt(row)) + continue; + + auto value = column->getFloat64(row); + min = std::min(value, min); + max = std::max(value, max); + } + row_count += column->size(); +} + +void StatisticsMinMax::serialize(WriteBuffer & buf) +{ + writeIntBinary(row_count, buf); + writeFloatBinary(min, buf); + writeFloatBinary(max, buf); +} + +void StatisticsMinMax::deserialize(ReadBuffer & buf) +{ + readIntBinary(row_count, buf); + readFloatBinary(min, buf); + readFloatBinary(max, buf); +} + +Float64 StatisticsMinMax::estimateLess(const Field & val) const +{ + if (row_count == 0) + return 0; + + auto val_as_float = StatisticsUtils::tryConvertToFloat64(val, data_type); + if (!val_as_float.has_value()) + return 0; + + if (val_as_float < min) + return 0; + + if (val_as_float > max) + return row_count; + + if (min == max) + return (val_as_float != max) ? 
0 : row_count; + + return ((*val_as_float - min) / (max - min)) * row_count; +} + +void minMaxStatisticsValidator(const SingleStatisticsDescription & /*description*/, const DataTypePtr & data_type) +{ + auto inner_data_type = removeNullable(data_type); + inner_data_type = removeLowCardinalityAndNullable(inner_data_type); + if (!inner_data_type->isValueRepresentedByNumber()) + throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'minmax' do not support type {}", data_type->getName()); +} + +StatisticsPtr minMaxStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type) +{ + return std::make_shared(description, data_type); +} + +} diff --git a/src/Storages/Statistics/StatisticsMinMax.h b/src/Storages/Statistics/StatisticsMinMax.h new file mode 100644 index 00000000000..c60fa810c47 --- /dev/null +++ b/src/Storages/Statistics/StatisticsMinMax.h @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +class StatisticsMinMax : public IStatistics +{ +public: + StatisticsMinMax(const SingleStatisticsDescription & statistics_description, const DataTypePtr & data_type_); + + void update(const ColumnPtr & column) override; + + void serialize(WriteBuffer & buf) override; + void deserialize(ReadBuffer & buf) override; + + Float64 estimateLess(const Field & val) const override; + +private: + Float64 min = std::numeric_limits::max(); + Float64 max = std::numeric_limits::min(); + UInt64 row_count = 0; + + DataTypePtr data_type; +}; + +void minMaxStatisticsValidator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); +StatisticsPtr minMaxStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type); + +} diff --git a/src/Storages/Statistics/StatisticsUniq.cpp b/src/Storages/Statistics/StatisticsUniq.cpp index 07311b5b86d..5e5b7a67b04 100644 --- a/src/Storages/Statistics/StatisticsUniq.cpp +++ b/src/Storages/Statistics/StatisticsUniq.cpp @@ -56,7 +56,7 @@ void uniqStatisticsValidator(const SingleStatisticsDescription & /*description*/ { DataTypePtr inner_data_type = removeNullable(data_type); inner_data_type = removeLowCardinalityAndNullable(inner_data_type); - if (!inner_data_type->isValueRepresentedByNumber()) + if (!inner_data_type->isValueRepresentedByNumber() && !isStringOrFixedString(inner_data_type)) throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'uniq' do not support type {}", data_type->getName()); } diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp index 64634124758..ac7fa8898de 100644 --- a/src/Storages/StatisticsDescription.cpp +++ b/src/Storages/StatisticsDescription.cpp @@ -48,9 +48,11 @@ static StatisticsType stringToStatisticsType(String type) return StatisticsType::TDigest; if (type == "uniq") return StatisticsType::Uniq; - if (type == "count_min") + if (type == "countmin") return StatisticsType::CountMinSketch; - throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'tdigest', 'uniq' and 'count_min'.", type); + if (type == "minmax") + return StatisticsType::MinMax; + throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. 
Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type); } String SingleStatisticsDescription::getTypeName() const @@ -62,9 +64,11 @@ String SingleStatisticsDescription::getTypeName() const case StatisticsType::Uniq: return "Uniq"; case StatisticsType::CountMinSketch: - return "count_min"; + return "countmin"; + case StatisticsType::MinMax: + return "minmax"; default: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'tdigest', 'uniq' and 'count_min'.", type); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type); } } diff --git a/src/Storages/StatisticsDescription.h b/src/Storages/StatisticsDescription.h index 46927f1418c..3780de3e967 100644 --- a/src/Storages/StatisticsDescription.h +++ b/src/Storages/StatisticsDescription.h @@ -14,6 +14,7 @@ enum class StatisticsType : UInt8 TDigest = 0, Uniq = 1, CountMinSketch = 2, + MinMax = 3, Max = 63, }; diff --git a/src/Storages/System/StorageSystemServerSettings.cpp b/src/Storages/System/StorageSystemServerSettings.cpp index d242b6de4ec..ee99c472620 100644 --- a/src/Storages/System/StorageSystemServerSettings.cpp +++ b/src/Storages/System/StorageSystemServerSettings.cpp @@ -63,7 +63,6 @@ void StorageSystemServerSettings::fillData(MutableColumns & res_columns, Context /// current setting values, one needs to ask the components directly. std::unordered_map> changeable_settings = { {"max_server_memory_usage", {std::to_string(total_memory_tracker.getHardLimit()), ChangeableWithoutRestart::Yes}}, - {"allow_use_jemalloc_memory", {std::to_string(total_memory_tracker.getAllowUseJemallocMmemory()), ChangeableWithoutRestart::Yes}}, {"max_table_size_to_drop", {std::to_string(context->getMaxTableSizeToDrop()), ChangeableWithoutRestart::Yes}}, {"max_partition_size_to_drop", {std::to_string(context->getMaxPartitionSizeToDrop()), ChangeableWithoutRestart::Yes}}, diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index cd499aee581..436fc5b6bb2 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -410,7 +410,9 @@ class CI: num_batches=6, ), JobNames.INTEGRATION_TEST_TSAN: CommonJobConfigs.INTEGRATION_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_TSAN], num_batches=6 + required_builds=[BuildNames.PACKAGE_TSAN], + num_batches=6, + timeout=9000, # the job timed out with default value (7200) ), JobNames.INTEGRATION_TEST_ARM: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], diff --git a/tests/config/users.d/database_replicated.xml b/tests/config/users.d/database_replicated.xml index c049c3559fc..1c2cf2ac22b 100644 --- a/tests/config/users.d/database_replicated.xml +++ b/tests/config/users.d/database_replicated.xml @@ -6,6 +6,7 @@ 120 1 1 + 3 diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 53f4f1e1f26..821bb887435 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -2112,6 +2112,7 @@ class ClickHouseCluster: self.base_cmd + ["up", "--force-recreate", "--no-deps", "-d", node.name] ) node.ip_address = self.get_instance_ip(node.name) + node.ipv6_address = self.get_instance_global_ipv6(node.name) node.client = Client(node.ip_address, command=self.client_bin_path) logging.info("Restart node with ip change") @@ -3182,6 +3183,7 @@ class ClickHouseCluster: for instance in self.instances.values(): instance.docker_client = 
self.docker_client instance.ip_address = self.get_instance_ip(instance.name) + instance.ipv6_address = self.get_instance_global_ipv6(instance.name) logging.debug( f"Waiting for ClickHouse start in {instance.name}, ip: {instance.ip_address}..." diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py index e6e79dc7947..f24b5924e73 100644 --- a/tests/integration/helpers/network.py +++ b/tests/integration/helpers/network.py @@ -3,6 +3,7 @@ import subprocess import time import logging import docker +import ipaddress class PartitionManager: @@ -26,25 +27,76 @@ class PartitionManager: self._check_instance(instance) self._add_rule( - {"source": instance.ip_address, "destination_port": 2181, "action": action} + { + "source": instance.ip_address, + "destination_port": 2181, + "action": action, + } ) self._add_rule( - {"destination": instance.ip_address, "source_port": 2181, "action": action} + { + "destination": instance.ip_address, + "source_port": 2181, + "action": action, + } ) + if instance.ipv6_address: + self._add_rule( + { + "source": instance.ipv6_address, + "destination_port": 2181, + "action": action, + } + ) + self._add_rule( + { + "destination": instance.ipv6_address, + "source_port": 2181, + "action": action, + } + ) + def dump_rules(self): - return _NetworkManager.get().dump_rules() + v4 = _NetworkManager.get().dump_rules() + v6 = _NetworkManager.get().dump_v6_rules() + + return v4 + v6 def restore_instance_zk_connections(self, instance, action="DROP"): self._check_instance(instance) self._delete_rule( - {"source": instance.ip_address, "destination_port": 2181, "action": action} + { + "source": instance.ip_address, + "destination_port": 2181, + "action": action, + } ) self._delete_rule( - {"destination": instance.ip_address, "source_port": 2181, "action": action} + { + "destination": instance.ip_address, + "source_port": 2181, + "action": action, + } ) + if instance.ipv6_address: + self._delete_rule( + { + "source": instance.ipv6_address, + "destination_port": 2181, + "action": action, + } + ) + self._delete_rule( + { + "destination": instance.ipv6_address, + "source_port": 2181, + "action": action, + } + ) + def partition_instances(self, left, right, port=None, action="DROP"): self._check_instance(left) self._check_instance(right) @@ -59,16 +111,34 @@ class PartitionManager: rule["destination_port"] = port return rule + def create_rule_v6(src, dst): + rule = { + "source": src.ipv6_address, + "destination": dst.ipv6_address, + "action": action, + } + if port is not None: + rule["destination_port"] = port + return rule + self._add_rule(create_rule(left, right)) self._add_rule(create_rule(right, left)) + if left.ipv6_address and right.ipv6_address: + self._add_rule(create_rule_v6(left, right)) + self._add_rule(create_rule_v6(right, left)) + def add_network_delay(self, instance, delay_ms): self._add_tc_netem_delay(instance, delay_ms) def heal_all(self): while self._iptables_rules: rule = self._iptables_rules.pop() - _NetworkManager.get().delete_iptables_rule(**rule) + + if self._is_ipv6_rule(rule): + _NetworkManager.get().delete_ip6tables_rule(**rule) + else: + _NetworkManager.get().delete_iptables_rule(**rule) while self._netem_delayed_instances: instance = self._netem_delayed_instances.pop() @@ -90,12 +160,27 @@ class PartitionManager: if instance.ip_address is None: raise Exception("Instance + " + instance.name + " is not launched!") + @staticmethod + def _is_ipv6_rule(rule): + if rule.get("source"): + return ipaddress.ip_address(rule["source"]).version 
== 6 + if rule.get("destination"): + return ipaddress.ip_address(rule["destination"]).version == 6 + + return False + def _add_rule(self, rule): - _NetworkManager.get().add_iptables_rule(**rule) + if self._is_ipv6_rule(rule): + _NetworkManager.get().add_ip6tables_rule(**rule) + else: + _NetworkManager.get().add_iptables_rule(**rule) self._iptables_rules.append(rule) def _delete_rule(self, rule): - _NetworkManager.get().delete_iptables_rule(**rule) + if self._is_ipv6_rule(rule): + _NetworkManager.get().delete_ip6tables_rule(**rule) + else: + _NetworkManager.get().delete_iptables_rule(**rule) self._iptables_rules.remove(rule) def _add_tc_netem_delay(self, instance, delay_ms): @@ -150,35 +235,65 @@ class _NetworkManager: cls._instance = cls(**kwargs) return cls._instance + def setup_ip6tables_docker_user_chain(self): + _rules = subprocess.check_output(f"ip6tables-save", shell=True) + if "DOCKER-USER" in _rules.decode("utf-8"): + return + + setup_cmds = [ + ["ip6tables", "--wait", "-N", "DOCKER-USER"], + ["ip6tables", "--wait", "-I", "FORWARD", "-j", "DOCKER-USER"], + ["ip6tables", "--wait", "-A", "DOCKER-USER", "-j", "RETURN"], + ] + for cmd in setup_cmds: + self._exec_run(cmd, privileged=True) + def add_iptables_rule(self, **kwargs): cmd = ["iptables", "--wait", "-I", "DOCKER-USER", "1"] cmd.extend(self._iptables_cmd_suffix(**kwargs)) self._exec_run(cmd, privileged=True) + def add_ip6tables_rule(self, **kwargs): + self.setup_ip6tables_docker_user_chain() + + cmd = ["ip6tables", "--wait", "-I", "DOCKER-USER", "1"] + cmd.extend(self._iptables_cmd_suffix(**kwargs)) + self._exec_run(cmd, privileged=True) + def delete_iptables_rule(self, **kwargs): cmd = ["iptables", "--wait", "-D", "DOCKER-USER"] cmd.extend(self._iptables_cmd_suffix(**kwargs)) self._exec_run(cmd, privileged=True) + def delete_ip6tables_rule(self, **kwargs): + cmd = ["ip6tables", "--wait", "-D", "DOCKER-USER"] + cmd.extend(self._iptables_cmd_suffix(**kwargs)) + self._exec_run(cmd, privileged=True) + def dump_rules(self): cmd = ["iptables", "-L", "DOCKER-USER"] return self._exec_run(cmd, privileged=True) + def dump_v6_rules(self): + cmd = ["ip6tables", "-L", "DOCKER-USER"] + return self._exec_run(cmd, privileged=True) + @staticmethod def clean_all_user_iptables_rules(): - for i in range(1000): - iptables_iter = i - # when rules will be empty, it will return error - res = subprocess.run("iptables --wait -D DOCKER-USER 1", shell=True) + for iptables in ("iptables", "ip6tables"): + for i in range(1000): + iptables_iter = i + # when rules will be empty, it will return error + res = subprocess.run(f"{iptables} --wait -D DOCKER-USER 1", shell=True) - if res.returncode != 0: - logging.info( - "All iptables rules cleared, " - + str(iptables_iter) - + " iterations, last error: " - + str(res.stderr) - ) - return + if res.returncode != 0: + logging.info( + f"All {iptables} rules cleared, " + + str(iptables_iter) + + " iterations, last error: " + + str(res.stderr) + ) + break @staticmethod def _iptables_cmd_suffix( @@ -188,6 +303,7 @@ class _NetworkManager: destination_port=None, action=None, probability=None, + protocol=None, custom_args=None, ): ret = [] @@ -202,7 +318,7 @@ class _NetworkManager: str(probability), ] ) - ret.extend(["-p", "tcp"]) + ret.extend(["-p", "tcp" if protocol is None else protocol]) if source is not None: ret.extend(["-s", source]) if destination is not None: diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index 4806625f3f0..56e007dcf5d 100644 --- 
a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -343,6 +343,13 @@ def test_increment_backup_without_changes(): def test_incremental_backup_overflow(): + if ( + instance.is_built_with_thread_sanitizer() + or instance.is_built_with_memory_sanitizer() + or instance.is_built_with_address_sanitizer() + ): + pytest.skip("The test is slow in builds with sanitizer") + backup_name = new_backup_name() incremental_backup_name = new_backup_name() diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py index 202a741bfb5..acf4bd28c9b 100644 --- a/tests/integration/test_backward_compatibility/test_functions.py +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -154,6 +154,13 @@ def test_aggregate_states(start_cluster): def test_string_functions(start_cluster): + if ( + upstream.is_built_with_thread_sanitizer() + or upstream.is_built_with_memory_sanitizer() + or upstream.is_built_with_address_sanitizer() + ): + pytest.skip("The test is slow in builds with sanitizer") + functions = backward.query( """ SELECT if(NOT empty(alias_to), alias_to, name) diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index 891ee8f00f5..ec0bef23731 100644 --- a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -311,7 +311,8 @@ def test_replicated_database(cluster): SETTINGS storage_policy = 'web'; """.format( uuids[0] - ) + ), + settings={"database_replicated_allow_explicit_uuid": 3}, ) node2 = cluster.instances["node2"] diff --git a/tests/integration/test_disks_app_func/test.py b/tests/integration/test_disks_app_func/test.py index 56ea5c8846a..a4b2399e117 100644 --- a/tests/integration/test_disks_app_func/test.py +++ b/tests/integration/test_disks_app_func/test.py @@ -13,8 +13,20 @@ def started_cluster(): main_configs=["config.xml"], with_minio=True, ) - cluster.start() + + # local disk requires its `path` directory to exist. 
+ # the two paths below belong to `test1` and `test2` disks + node = cluster.instances["disks_app_test"] + for path in ["path1", "path2"]: + node.exec_in_container( + [ + "bash", + "-c", + f"mkdir -p /var/lib/clickhouse/{path}", + ] + ) + yield cluster finally: diff --git a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml index 25ececea3e8..e71b93379d0 100644 --- a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml +++ b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config2.xml @@ -16,7 +16,7 @@ az-zoo2 1 - 20000000 + 200000000 10000 diff --git a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml index 81e343b77c9..cf4a4686f2c 100644 --- a/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml +++ b/tests/integration/test_keeper_memory_soft_limit/configs/keeper_config3.xml @@ -13,7 +13,7 @@ 2181 3 - 20000000 + 200000000 10000 diff --git a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml index 8459ea3e068..6af17946eec 100644 --- a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml +++ b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config1.xml @@ -5,6 +5,7 @@ minio minio123 + false 9181 1 /var/lib/clickhouse/coordination/log diff --git a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml index dfe73628f66..25f2b0de812 100644 --- a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml +++ b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config2.xml @@ -5,6 +5,7 @@ minio minio123 + false 9181 2 /var/lib/clickhouse/coordination/log diff --git a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml index 948d9527718..e274b5184f1 100644 --- a/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml +++ b/tests/integration/test_keeper_s3_snapshot/configs/keeper_config3.xml @@ -5,6 +5,7 @@ minio minio123 + false 9181 3 /var/lib/clickhouse/coordination/log diff --git a/tests/integration/test_keeper_s3_snapshot/test.py b/tests/integration/test_keeper_s3_snapshot/test.py index 84ffc964621..b6c25305aef 100644 --- a/tests/integration/test_keeper_s3_snapshot/test.py +++ b/tests/integration/test_keeper_s3_snapshot/test.py @@ -2,6 +2,9 @@ import pytest from helpers.cluster import ClickHouseCluster from time import sleep from retry import retry +from multiprocessing.dummy import Pool +import helpers.keeper_utils as keeper_utils +from minio.deleteobjects import DeleteObject from kazoo.client import KazooClient @@ -75,7 +78,18 @@ def wait_node(node): raise Exception("Can't wait node", node.name, "to become ready") +def delete_keeper_snapshots_logs(nodex): + nodex.exec_in_container( + [ + "bash", + "-c", + "rm -rf /var/lib/clickhouse/coordination/log /var/lib/clickhouse/coordination/snapshots", + ] + ) + + def test_s3_upload(started_cluster): + node1_zk = get_fake_zk(node1.name) # we defined in configs snapshot_distance as 50 @@ -89,6 +103,11 @@ def test_s3_upload(started_cluster): for obj in list(cluster.minio_client.list_objects("snapshots")) ] + def delete_s3_snapshots(): + snapshots = 
cluster.minio_client.list_objects("snapshots") + for s in snapshots: + cluster.minio_client.remove_object("snapshots", s.object_name) + # Keeper sends snapshots asynchronously, hence we need to retry. @retry(AssertionError, tries=10, delay=2) def _check_snapshots(): @@ -125,3 +144,26 @@ def test_s3_upload(started_cluster): ) destroy_zk_client(node2_zk) + node2.stop_clickhouse() + delete_keeper_snapshots_logs(node2) + node3.stop_clickhouse() + delete_keeper_snapshots_logs(node3) + delete_keeper_snapshots_logs(node1) + p = Pool(3) + waiters = [] + + def start_clickhouse(node): + node.start_clickhouse() + + waiters.append(p.apply_async(start_clickhouse, args=(node1,))) + waiters.append(p.apply_async(start_clickhouse, args=(node2,))) + waiters.append(p.apply_async(start_clickhouse, args=(node3,))) + + delete_s3_snapshots() # for next iteration + + for waiter in waiters: + waiter.wait() + + keeper_utils.wait_until_connected(cluster, node1) + keeper_utils.wait_until_connected(cluster, node2) + keeper_utils.wait_until_connected(cluster, node3) diff --git a/tests/integration/test_memory_limit/test.py b/tests/integration/test_memory_limit/test.py index 6d6745711da..db68a38c1b1 100644 --- a/tests/integration/test_memory_limit/test.py +++ b/tests/integration/test_memory_limit/test.py @@ -13,7 +13,6 @@ node = cluster.add_instance( "configs/async_metrics_no.xml", ], mem_limit="4g", - env_variables={"MALLOC_CONF": "dirty_decay_ms:0"}, ) diff --git a/tests/integration/test_replicated_database/configs/settings.xml b/tests/integration/test_replicated_database/configs/settings.xml index c637fe8eead..41799c5bed2 100644 --- a/tests/integration/test_replicated_database/configs/settings.xml +++ b/tests/integration/test_replicated_database/configs/settings.xml @@ -5,6 +5,8 @@ 1 0 0 + 3 + 3 diff --git a/tests/integration/test_replicated_database/configs/settings2.xml b/tests/integration/test_replicated_database/configs/settings2.xml index dad5740a8ae..fb7f0c8d4d3 100644 --- a/tests/integration/test_replicated_database/configs/settings2.xml +++ b/tests/integration/test_replicated_database/configs/settings2.xml @@ -5,6 +5,8 @@ 1 0 0 + 3 + 3 0 diff --git a/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml b/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml index 16caee9ba20..a5e45ead44e 100644 --- a/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml +++ b/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml @@ -1,6 +1,8 @@ + 3 + 3 diff --git a/tests/integration/test_role/test.py b/tests/integration/test_role/test.py index 225cab975ff..b746af56083 100644 --- a/tests/integration/test_role/test.py +++ b/tests/integration/test_role/test.py @@ -629,5 +629,6 @@ def test_roles_cache(): check() instance.query("DROP USER " + ", ".join(users)) - instance.query("DROP ROLE " + ", ".join(roles)) + if roles: + instance.query("DROP ROLE " + ", ".join(roles)) instance.query("DROP TABLE tbl") diff --git a/tests/integration/test_storage_kerberized_kafka/test.py b/tests/integration/test_storage_kerberized_kafka/test.py index 24d10d7ff83..a00914543c6 100644 --- a/tests/integration/test_storage_kerberized_kafka/test.py +++ b/tests/integration/test_storage_kerberized_kafka/test.py @@ -8,6 +8,7 @@ import logging from helpers.cluster import ClickHouseCluster, is_arm from helpers.test_tools import TSV from helpers.client import QueryRuntimeException +from helpers.network import PartitionManager import 
json import subprocess @@ -138,7 +139,7 @@ def test_kafka_json_as_string_request_new_ticket_after_expiration(kafka_cluster) kafka_produce( kafka_cluster, - "kafka_json_as_string", + "kafka_json_as_string_after_expiration", [ '{"t": 123, "e": {"x": "woof"} }', "", @@ -152,9 +153,9 @@ def test_kafka_json_as_string_request_new_ticket_after_expiration(kafka_cluster) CREATE TABLE test.kafka (field String) ENGINE = Kafka SETTINGS kafka_broker_list = 'kerberized_kafka1:19092', - kafka_topic_list = 'kafka_json_as_string', + kafka_topic_list = 'kafka_json_as_string_after_expiration', kafka_commit_on_select = 1, - kafka_group_name = 'kafka_json_as_string', + kafka_group_name = 'kafka_json_as_string_after_expiration', kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; """ @@ -170,7 +171,7 @@ def test_kafka_json_as_string_request_new_ticket_after_expiration(kafka_cluster) """ assert TSV(result) == TSV(expected) assert instance.contains_in_log( - "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows" + "Parsing of message (topic: kafka_json_as_string_after_expiration, partition: 0, offset: 1) return no rows" ) @@ -204,27 +205,40 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster): ], ) - kafka_cluster.pause_container("kafka_kerberos") - time.sleep(45) # wait for ticket expiration + # temporary prevent CH - KDC communications + with PartitionManager() as pm: + other_node = "kafka_kerberos" + for node in kafka_cluster.instances.values(): + source = node.ip_address + destination = kafka_cluster.get_instance_ip(other_node) + logging.debug(f"partitioning source {source}, destination {destination}") + pm._add_rule( + { + "source": source, + "destination": destination, + "action": "REJECT", + "protocol": "all", + } + ) - instance.query( - """ - CREATE TABLE test.kafka_no_kdc (field String) - ENGINE = Kafka - SETTINGS kafka_broker_list = 'kerberized_kafka1:19092', - kafka_topic_list = 'kafka_json_as_string_no_kdc', - kafka_group_name = 'kafka_json_as_string_no_kdc', - kafka_commit_on_select = 1, - kafka_format = 'JSONAsString', - kafka_flush_interval_ms=1000; - """ - ) + time.sleep(45) # wait for ticket expiration - result = instance.query("SELECT * FROM test.kafka_no_kdc;") + instance.query( + """ + CREATE TABLE test.kafka_no_kdc (field String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kerberized_kafka1:19092', + kafka_topic_list = 'kafka_json_as_string_no_kdc', + kafka_group_name = 'kafka_json_as_string_no_kdc', + kafka_commit_on_select = 1, + kafka_format = 'JSONAsString', + kafka_flush_interval_ms=1000; + """ + ) + + result = instance.query("SELECT * FROM test.kafka_no_kdc;") expected = "" - kafka_cluster.unpause_container("kafka_kerberos") - assert TSV(result) == TSV(expected) assert instance.contains_in_log("StorageKafka (kafka_no_kdc): Nothing to commit") assert instance.contains_in_log("Ticket expired") @@ -234,7 +248,7 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster): def test_kafka_config_from_sql_named_collection(kafka_cluster): kafka_produce( kafka_cluster, - "kafka_json_as_string", + "kafka_json_as_string_named_collection", [ '{"t": 123, "e": {"x": "woof"} }', "", @@ -245,6 +259,7 @@ def test_kafka_config_from_sql_named_collection(kafka_cluster): instance.query( """ + DROP NAMED COLLECTION IF EXISTS kafka_config; CREATE NAMED COLLECTION kafka_config AS kafka.security_protocol = 'SASL_PLAINTEXT', kafka.sasl_mechanism = 'GSSAPI', @@ -255,9 +270,9 @@ def test_kafka_config_from_sql_named_collection(kafka_cluster): kafka.api_version_request 
= 'false', kafka_broker_list = 'kerberized_kafka1:19092', - kafka_topic_list = 'kafka_json_as_string', + kafka_topic_list = 'kafka_json_as_string_named_collection', kafka_commit_on_select = 1, - kafka_group_name = 'kafka_json_as_string', + kafka_group_name = 'kafka_json_as_string_named_collection', kafka_format = 'JSONAsString', kafka_flush_interval_ms=1000; """ @@ -279,7 +294,7 @@ def test_kafka_config_from_sql_named_collection(kafka_cluster): """ assert TSV(result) == TSV(expected) assert instance.contains_in_log( - "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows" + "Parsing of message (topic: kafka_json_as_string_named_collection, partition: 0, offset: 1) return no rows" ) diff --git a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh index 4916721764c..1efe529ac24 100755 --- a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh +++ b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh @@ -15,6 +15,6 @@ $CLICKHOUSE_CLIENT --query=" INSERT INTO users VALUES (1321770221388956068); "; -for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT -n | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo +for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo $CLICKHOUSE_CLIENT --query="DROP TABLE users;"; diff --git a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh index d57efaa1f0e..f0c0354ab33 100755 --- a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh +++ b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS numbers_100k; CREATE VIEW numbers_100k AS SELECT * FROM system.numbers LIMIT 100000; "; diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8_memleak.reference b/tests/queries/0_stateless/00170_lower_upper_utf8_memleak.reference new file mode 100644 index 00000000000..7f6160bace3 --- /dev/null +++ b/tests/queries/0_stateless/00170_lower_upper_utf8_memleak.reference @@ -0,0 +1 @@ +españa diff --git a/tests/queries/0_stateless/00170_lower_upper_utf8_memleak.sh b/tests/queries/0_stateless/00170_lower_upper_utf8_memleak.sh new file mode 100755 index 00000000000..8436659c6a8 --- /dev/null +++ b/tests/queries/0_stateless/00170_lower_upper_utf8_memleak.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# Tags: no-fasttest +# no-fasttest: upper/lowerUTF8 use ICU + +# Test for issue #69336 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL --query "SELECT lowerUTF8('ESPAÑA')" diff --git a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh index b59aae83e81..b6cc270994f 100755 --- a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh +++ b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh @@ -48,13 +48,13 @@ pack_unpack_compare "SELECT name, is_aggregate FROM system.functions" "name Stri echo # Check settings are passed correctly ${CLICKHOUSE_LOCAL} --max_rows_in_distinct=33 -q "SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'" -${CLICKHOUSE_LOCAL} -n -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'" +${CLICKHOUSE_LOCAL} -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'" ${CLICKHOUSE_LOCAL} --max_bytes_before_external_group_by=1 --max_block_size=10 -q "SELECT sum(ignore(*)) FROM (SELECT number, count() FROM numbers(1000) GROUP BY number)" echo # Check exta options, we expect zero exit code and no stderr output -(${CLICKHOUSE_LOCAL} --ignore-error -n --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC") +(${CLICKHOUSE_LOCAL} --ignore-error --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC") echo -${CLICKHOUSE_LOCAL} -n -q "CREATE TABLE sophisticated_default +${CLICKHOUSE_LOCAL} -q "CREATE TABLE sophisticated_default ( a UInt8 DEFAULT 3, b UInt8 ALIAS a + 5, diff --git a/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh index eaa50bce6b1..eed0d3bf5c6 100755 --- a/tests/queries/0_stateless/00505_secure.sh +++ b/tests/queries/0_stateless/00505_secure.sh @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT_SECURE -q "SELECT 4;" # TODO: can test only on unchanged port. 
Possible solutions: generate config or pass shard port via command line if [[ "$CLICKHOUSE_PORT_TCP_SECURE" = "$CLICKHOUSE_PORT_TCP_SECURE" ]]; then - cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -n -m + cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -m else tail -n 13 "$CURDIR"/00505_secure.reference fi diff --git a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql index 51e6a513608..329f6ad2248 100644 --- a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql +++ b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql @@ -6,6 +6,8 @@ DROP TABLE IF EXISTS without_deduplication; DROP TABLE IF EXISTS with_deduplication_mv; DROP TABLE IF EXISTS without_deduplication_mv; +SET database_replicated_allow_explicit_uuid=3; +SET database_replicated_allow_replicated_engine_arguments=3; CREATE TABLE with_deduplication(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication', 'r1') ORDER BY x; CREATE TABLE without_deduplication(x UInt32) diff --git a/tests/queries/0_stateless/00531_client_ignore_error.sh b/tests/queries/0_stateless/00531_client_ignore_error.sh index d3215e1beac..553cb9fa897 100755 --- a/tests/queries/0_stateless/00531_client_ignore_error.sh +++ b/tests/queries/0_stateless/00531_client_ignore_error.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error +echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error #$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'" diff --git a/tests/queries/0_stateless/00534_client_ignore_error.sh b/tests/queries/0_stateless/00534_client_ignore_error.sh index d3215e1beac..553cb9fa897 100755 --- a/tests/queries/0_stateless/00534_client_ignore_error.sh +++ b/tests/queries/0_stateless/00534_client_ignore_error.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null -echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error +echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null +echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error #$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'" diff --git a/tests/queries/0_stateless/00609_mv_index_in_in.sql b/tests/queries/0_stateless/00609_mv_index_in_in.sql index bd9f35350c1..848938780c2 100644 --- a/tests/queries/0_stateless/00609_mv_index_in_in.sql +++ b/tests/queries/0_stateless/00609_mv_index_in_in.sql @@ -1,4 +1,4 @@ --- Tags: no-ordinary-database +-- Tags: no-ordinary-database, no-parallel DROP TABLE IF EXISTS test_00609; DROP TABLE IF EXISTS test_mv_00609; @@ -6,6 +6,7 @@ DROP TABLE IF EXISTS test_mv_00609; create table test_00609 (a Int8) engine=Memory; insert into test_00609 values (1); +set database_replicated_allow_explicit_uuid=3; set allow_deprecated_syntax_for_merge_tree=1; create materialized view test_mv_00609 uuid '00000609-1000-4000-8000-000000000001' Engine=MergeTree(date, (a), 8192) populate as select a, toDate('2000-01-01') date from test_00609; diff --git a/tests/queries/0_stateless/00686_client_exit_code.sh b/tests/queries/0_stateless/00686_client_exit_code.sh index 9586ddbd0a3..e348f93f30d 100755 --- a/tests/queries/0_stateless/00686_client_exit_code.sh +++ b/tests/queries/0_stateless/00686_client_exit_code.sh @@ -8,5 +8,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib -echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} -n 2>/dev/null +echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} 2>/dev/null echo $? diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index ea8b9d02e49..fd002668696 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & -yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery & +yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT & +yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT & wait ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table" diff --git a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh index 5a4fd901f8d..285fd3945f9 100755 --- a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh +++ b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh @@ -12,7 +12,7 @@ settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_que # Test insert logging on each block and checkPacket() method -$CLICKHOUSE_CLIENT $settings -n -q " +$CLICKHOUSE_CLIENT $settings -q " DROP TABLE IF EXISTS merge_tree_table; CREATE TABLE merge_tree_table (id UInt64, date Date, uid UInt32) ENGINE = MergeTree(date, id, 8192);" diff --git a/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh index b62a639d8f4..9bc84dd1063 100755 --- a/tests/queries/0_stateless/00738_lock_for_inner_table.sh +++ b/tests/queries/0_stateless/00738_lock_for_inner_table.sh @@ -7,6 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3" + # there are some issues with Atomic database, let's generate it uniq # otherwise flaky check will not pass. 
uuid=$(${CLICKHOUSE_CLIENT} --query "SELECT reinterpretAsUUID(currentDatabase())") diff --git a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh index da4d3b05987..a29d2e5bc71 100755 --- a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh +++ b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS table" -seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT -n & -seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT -n 2>/dev/null & +seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT & +seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT 2>/dev/null & wait diff --git a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh index f0bc52ee356..cb774116356 100755 --- a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh +++ b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh @@ -27,7 +27,7 @@ function thread_drop_create() while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 100 ]; do it=$((it+1)) - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " drop table if exists view_00840; create view view_00840 as select count(*),database,table from system.columns group by database,table; " diff --git a/tests/queries/0_stateless/00900_long_parquet.sh b/tests/queries/0_stateless/00900_long_parquet.sh index 07d2f24e446..86a0d013078 100755 --- a/tests/queries/0_stateless/00900_long_parquet.sh +++ b/tests/queries/0_stateless/00900_long_parquet.sh @@ -8,11 +8,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS contributors; CREATE TABLE contributors (name String) ENGINE = Memory;" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.contributors ORDER BY name DESC FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO contributors FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" -- random results SELECT * FROM contributors LIMIT 10 FORMAT Null; DROP TABLE contributors; @@ -21,30 +21,30 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_numbers (number UInt64) ENGINE = Memory;" # less than default block size (65k) ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 10000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; TRUNCATE TABLE parquet_numbers;" # More than default block size ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 100000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; TRUNCATE TABLE parquet_numbers;" ${CLICKHOUSE_CLIENT} --max_block_size=2 --query="SELECT * FROM system.numbers LIMIT 3 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; TRUNCATE TABLE parquet_numbers;" ${CLICKHOUSE_CLIENT} --max_block_size=1 --query="SELECT * FROM system.numbers LIMIT 1000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10; DROP TABLE parquet_numbers; DROP TABLE IF EXISTS parquet_events; CREATE TABLE parquet_events (event String, value UInt64, description String) ENGINE = Memory;" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.events FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_events FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT event, description FROM parquet_events WHERE event IN ('ContextLock', 'Query') ORDER BY event; DROP TABLE parquet_events; @@ -78,7 +78,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8 FORMAT echo diff: diff "${CLICKHOUSE_TMP}"/parquet_all_types_1.dump "${CLICKHOUSE_TMP}"/parquet_all_types_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" TRUNCATE TABLE parquet_types2; INSERT INTO parquet_types3 values ( 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 'str01', 'fstr1', '2003-03-04', '2004-05-06', toDateTime64('2004-05-06 07:08:09.012', 9));" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types3 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet" @@ -88,7 +88,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 values ( 80, ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types4 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 FORMAT 
Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT 'dest:'; SELECT * FROM parquet_types2 ORDER BY int8; SELECT 'min:'; @@ -106,7 +106,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet" echo dest from null: -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_types6 ORDER BY int8; DROP TABLE parquet_types5; @@ -126,7 +126,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" INSERT INTO parquet_arrays VALUES (2, [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_arrays ORDER BY id; DROP TABLE parquet_arrays; @@ -135,7 +135,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_nullable_arrays (id UInt32, a1 Array(Nullable(UInt32)), a2 Array(Nullable(String)), a3 Array(Nullable(Decimal(4, 2)))) engine=Memory(); INSERT INTO parquet_nullable_arrays VALUES (1, [1, Null, 2], [Null, 'Some string', Null], [0.001, Null, 42.42]), (2, [Null], [Null], [Null]), (3, [], [], []);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nullable_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nullable_arrays FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_nullable_arrays ORDER BY id; DROP TABLE parquet_nullable_arrays; @@ -143,7 +143,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_nested_arrays (a1 Array(Array(Array(UInt32))), a2 Array(Array(Array(String))), a3 Array(Array(Nullable(UInt32))), a4 Array(Array(Nullable(String)))) engine=Memory(); INSERT INTO parquet_nested_arrays VALUES ([[[1,2,3], [1,2,3]], [[1,2,3]], [[], [1,2,3]]], [[['Some string', 'Some string'], []], [['Some string']], [[]]], [[Null, 1, 2], [Null], [1, 2], []], [['Some string', Null, 'Some string'], [Null], []]);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_nested_arrays; DROP TABLE parquet_nested_arrays; @@ -151,6 +151,6 @@ ${CLICKHOUSE_CLIENT} -n --query=" CREATE TABLE parquet_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory; INSERT INTO TABLE parquet_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123);" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_decimal FORMAT Arrow" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM parquet_decimal; DROP TABLE parquet_decimal;" diff --git a/tests/queries/0_stateless/00900_long_parquet_decimal.sh b/tests/queries/0_stateless/00900_long_parquet_decimal.sh index a819dcbcdc3..14e8fdcc038 100755 --- a/tests/queries/0_stateless/00900_long_parquet_decimal.sh +++ b/tests/queries/0_stateless/00900_long_parquet_decimal.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname 
"${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -26,7 +26,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump echo diff0: diff "${CLICKHOUSE_TMP}"/parquet_decimal0_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -61,7 +61,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump echo diff1: diff "${CLICKHOUSE_TMP}"/parquet_decimal1_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -75,7 +75,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump echo diff2: diff "${CLICKHOUSE_TMP}"/parquet_decimal2_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2; @@ -86,7 +86,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_1.parquet ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" 2> /dev/null echo nothing: -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM decimal2 ORDER BY a, b, c, d; TRUNCATE TABLE decimal2; @@ -94,7 +94,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_2.parquet ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" echo nulls: -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT * FROM decimal2 ORDER BY a, b, c, d; TRUNCATE TABLE decimal2; @@ -104,7 +104,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_3.parquet ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SELECT 'full orig:'; SELECT * FROM decimal ORDER BY a, b, c, d; SELECT 'full inserted:'; @@ -115,6 +115,6 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d;" > "${ echo diff3: diff "${CLICKHOUSE_TMP}"/parquet_decimal3_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal3_2.dump -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" DROP TABLE IF EXISTS decimal; DROP TABLE IF EXISTS decimal2;" diff --git 
a/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh index d310a2c3612..152d5a847b7 100755 --- a/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh +++ b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh @@ -13,5 +13,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # ${CURDIR}/00921_datetime64_compatibility.python python3 "${CURDIR}"/00921_datetime64_compatibility_long.python \ - | ${CLICKHOUSE_CLIENT} --ignore-error -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ + | ${CLICKHOUSE_CLIENT} --ignore-error -m --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \ | grep -v -e 'Received exception .*$' -e '^(query: ' | sed 's/^\(Code: [0-9]\+\).*$/\1/g' diff --git a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh index 686dd7f6df0..1d35daf9f2e 100755 --- a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh +++ b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions1;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions2;" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE indices_mutaions1 ( u64 UInt64, diff --git a/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh b/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh index 8243c6bde62..df330b82c80 100755 --- a/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh +++ b/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh @@ -22,7 +22,7 @@ function thread1() function thread2() { - while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done + while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done } function thread3() diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 02a739ece4a..d5c0248e2b3 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; @@ -31,7 +31,7 @@ function thread1() function thread2() { - while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done + while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done } function thread3() @@ -87,6 +87,6 @@ check_replication_consistency "alter_table" "count(), sum(a), sum(b), round(sum( $CLICKHOUSE_CLIENT -q "SELECT table, lost_part_count FROM system.replicas WHERE database=currentDatabase() AND lost_part_count!=0"; -$CLICKHOUSE_CLIENT -n -q "DROP TABLE 
alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & -$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & +$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & +$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') & wait diff --git a/tests/queries/0_stateless/01014_lazy_database_basic.sh b/tests/queries/0_stateless/01014_lazy_database_basic.sh index ea7603b2519..55d18a7c527 100755 --- a/tests/queries/0_stateless/01014_lazy_database_basic.sh +++ b/tests/queries/0_stateless/01014_lazy_database_basic.sh @@ -5,9 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n -q "DROP DATABASE IF EXISTS testlazy" +${CLICKHOUSE_CLIENT} -q "DROP DATABASE IF EXISTS testlazy" -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " CREATE DATABASE testlazy ENGINE = Lazy(1); CREATE TABLE testlazy.log (a UInt64, b UInt64) ENGINE = Log; CREATE TABLE testlazy.slog (a UInt64, b UInt64) ENGINE = StripeLog; @@ -30,7 +30,7 @@ ${CLICKHOUSE_CLIENT} -q " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " SELECT * FROM testlazy.log LIMIT 0; -- drop testlazy.log from cache RENAME TABLE testlazy.log TO testlazy.log2; SELECT database, name FROM system.tables WHERE database = 'testlazy'; @@ -44,7 +44,7 @@ ${CLICKHOUSE_CLIENT} -q " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " INSERT INTO testlazy.log2 VALUES (1, 1); INSERT INTO testlazy.slog VALUES (2, 2); INSERT INTO testlazy.tlog VALUES (3, 3); @@ -55,14 +55,14 @@ ${CLICKHOUSE_CLIENT} -n -q " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " SELECT * FROM testlazy.log2 LIMIT 0; -- drop testlazy.log2 from cache DROP TABLE testlazy.log2; " sleep 1.5 -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " SELECT * FROM testlazy.slog; SELECT * FROM testlazy.tlog; " diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh index e4b3a31b13f..ff2c0b8821e 100755 --- a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh +++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh @@ -83,7 +83,7 @@ export -f recreate_lazy_func4; export -f show_tables_func; -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DATABASE IF EXISTS $CURR_DATABASE; CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1); " diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh index 872b0a7c1a1..82221eb06e9 100755 --- a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh +++ b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP DATABASE IF EXISTS database_for_dict; DROP TABLE IF EXISTS table_for_dict1; DROP TABLE IF EXISTS table_for_dict2; @@ -44,7 +44,7 @@ function thread3() function thread4() { - while true; do $CLICKHOUSE_CLIENT -n -q " + while true; do 
$CLICKHOUSE_CLIENT -q " SELECT * FROM database_for_dict.dict1 FORMAT Null; SELECT * FROM database_for_dict.dict2 FORMAT Null; " ||: ; done @@ -52,7 +52,7 @@ function thread4() function thread5() { - while true; do $CLICKHOUSE_CLIENT -n -q " + while true; do $CLICKHOUSE_CLIENT -q " SELECT dictGetString('database_for_dict.dict1', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null; SELECT dictGetString('database_for_dict.dict2', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null; " ||: ; done @@ -117,7 +117,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 'Still alive'" $CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict1" $CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict2" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP DATABASE database_for_dict; DROP TABLE table_for_dict1; DROP TABLE table_for_dict2; diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh index 4bd21fcee02..eb12a76eb62 100755 --- a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh +++ b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --multiquery <&1 \ | grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Types .* are non-conforming as arguments for aggregate function avgWeighted' diff --git a/tests/queries/0_stateless/01053_ssd_dictionary.sh b/tests/queries/0_stateless/01053_ssd_dictionary.sh index fdd40940ab5..7dd3fa4657a 100755 --- a/tests/queries/0_stateless/01053_ssd_dictionary.sh +++ b/tests/queries/0_stateless/01053_ssd_dictionary.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -n --query=" +$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --query=" DROP DATABASE IF EXISTS 01053_db; CREATE DATABASE 01053_db; diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh index 39e65af039b..22f8e5269bd 100755 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_bad_alters"; -$CLICKHOUSE_CLIENT -n --query "CREATE TABLE table_for_bad_alters ( +$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_bad_alters ( key UInt64, value1 UInt8, value2 String diff --git a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh index 619b6e91d11..9101b9faa3d 100755 --- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh +++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh @@ -30,7 +30,7 @@ function drop_db() database=$($CLICKHOUSE_CLIENT -q "select name from system.databases where name like '${CLICKHOUSE_DATABASE}%' order by rand() limit 1") if [[ "$database" == "$CLICKHOUSE_DATABASE" ]]; then continue; fi if [ -z "$database" ]; then continue; fi - $CLICKHOUSE_CLIENT -n --query \ + $CLICKHOUSE_CLIENT --query \ "drop database if exists $database" 2>&1| grep -Fa "Exception: " sleep 0.$RANDOM done diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh index 5eebb558575..5fe85136d05 100755 --- a/tests/queries/0_stateless/01114_database_atomic.sh +++ b/tests/queries/0_stateless/01114_database_atomic.sh @@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -q "SELECT name, splitByChar('/', metadata_path)[-2] as uuid_path, ((splitByChar('/', metadata_path)[-3] as metadata) = substr(uuid_path, 1, 3)) OR metadata='metadata' FROM system.databases WHERE name LIKE '${CLICKHOUSE_DATABASE}_%'" | sed "s/$uuid_db_1/00001114-1000-4000-8000-000000000001/g" | sed "s/$uuid_db_2/00001114-1000-4000-8000-000000000002/g" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " CREATE TABLE ${DATABASE_1}.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); INSERT INTO ${DATABASE_1}.mt_tmp SELECT * FROM numbers(100); CREATE TABLE ${DATABASE_3}.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5); @@ -65,7 +65,7 @@ while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE que sleep 0.1 done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " RENAME TABLE ${DATABASE_1}.mt TO ${DATABASE_1}.mt_tmp; RENAME TABLE ${DATABASE_1}.mt_tmp TO ${DATABASE_2}.mt_tmp; EXCHANGE TABLES ${DATABASE_2}.mt AND ${DATABASE_2}.mt_tmp; @@ -79,7 +79,7 @@ uuid_mt1=$($CLICKHOUSE_CLIENT -q "SELECT uuid FROM system.tables WHERE database= $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_1}.mt" | sed "s/$uuid_mt1/00001114-0000-4000-8000-000000000001/g" $CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_2}.mt" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE ${DATABASE_1}.mt SETTINGS database_atomic_wait_for_drop_and_detach_synchronously=0; CREATE TABLE ${DATABASE_1}.mt (s 
String) ENGINE=Log(); INSERT INTO ${DATABASE_1}.mt SELECT 's' || toString(number) FROM numbers(5); diff --git a/tests/queries/0_stateless/01119_session_log.sh b/tests/queries/0_stateless/01119_session_log.sh index 2d17b545276..61bb7cf3ea8 100755 --- a/tests/queries/0_stateless/01119_session_log.sh +++ b/tests/queries/0_stateless/01119_session_log.sh @@ -14,7 +14,7 @@ and interface in ('HTTP', 'TCP', 'TCP_Interserver') and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes) and event_time >= now() - interval 5 minute" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " select * from remote('127.0.0.2', system, one, 'default', ''); select * from remote('127.0.0.2', system, one, 'default', 'wrong password'); -- { serverError AUTHENTICATION_FAILED } select * from remote('127.0.0.2', system, one, 'nonexistsnt_user_1119', ''); -- { serverError AUTHENTICATION_FAILED } diff --git a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql index a585ef1c324..c689542e4c3 100644 --- a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql +++ b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql @@ -7,6 +7,8 @@ DROP TABLE IF EXISTS rmt1; DROP TABLE IF EXISTS rmt2; DROP TABLE IF EXISTS rmt3; +SET database_replicated_allow_replicated_engine_arguments=1; + CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01148/{shard}/{database}/{table}', '{replica}') ORDER BY n; SHOW CREATE TABLE rmt; RENAME TABLE rmt TO rmt1; diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.sql b/tests/queries/0_stateless/01153_attach_mv_uuid.sql index 00cce8a1de4..0ef16449096 100644 --- a/tests/queries/0_stateless/01153_attach_mv_uuid.sql +++ b/tests/queries/0_stateless/01153_attach_mv_uuid.sql @@ -14,6 +14,8 @@ INSERT INTO src VALUES (3), (4); SELECT * FROM mv ORDER BY n; DROP TABLE mv SYNC; +SET database_replicated_allow_explicit_uuid=3; + SET show_table_uuid_in_table_create_query_if_not_nil=1; CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n; ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh index a255c1db30e..8afb0c18462 100755 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " CREATE TABLE table_for_rename_replicated ( date Date, diff --git a/tests/queries/0_stateless/01238_http_memory_tracking.sh b/tests/queries/0_stateless/01238_http_memory_tracking.sh index ce1310cf302..f88c8fb47c6 100755 --- a/tests/queries/0_stateless/01238_http_memory_tracking.sh +++ b/tests/queries/0_stateless/01238_http_memory_tracking.sh @@ -14,7 +14,7 @@ ${CLICKHOUSE_CLIENT} --format Null -q "CREATE USER $MISTER_USER" # This is needed to keep at least one running query for user for the time of test. 
# (1k http queries takes ~1 second, let's run for 5x more to avoid flaps) -${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)' & +${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null <<<'SELECT sleepEachRow(1) FROM numbers(5)' & # ignore "yes: standard output: Broken pipe" yes 'SELECT 1' 2>/dev/null | { diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh index 60a65b9a253..c1ec812875c 100755 --- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh +++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP DATABASE IF EXISTS 01280_db; CREATE DATABASE 01280_db; DROP TABLE IF EXISTS 01280_db.table_for_dict; @@ -39,9 +39,9 @@ $CLICKHOUSE_CLIENT -n --query=" LIFETIME(MIN 1000 MAX 2000) LAYOUT(COMPLEX_KEY_SSD_CACHE(FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d'));" -$CLICKHOUSE_CLIENT -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }" +$CLICKHOUSE_CLIENT -q "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }" -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" SELECT 'TEST_SMALL'; SELECT 'VALUE FROM RAM BUFFER'; SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple('1', toInt32(3))); @@ -63,9 +63,9 @@ $CLICKHOUSE_CLIENT -n --query=" SELECT dictGetInt32('01280_db.ssd_dict', 'b', tuple('10', toInt32(-20))); SELECT dictGetString('01280_db.ssd_dict', 'c', tuple('10', toInt32(-20)));" -$CLICKHOUSE_CLIENT -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }" +$CLICKHOUSE_CLIENT -q "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }" -$CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict; +$CLICKHOUSE_CLIENT --query="DROP DICTIONARY 01280_db.ssd_dict; DROP TABLE IF EXISTS 01280_db.keys_table; CREATE TABLE 01280_db.keys_table ( @@ -122,4 +122,4 @@ $CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict; DROP DICTIONARY IF EXISTS database_for_dict.ssd_dict; DROP TABLE IF EXISTS database_for_dict.keys_table;" -$CLICKHOUSE_CLIENT -n --query="DROP DATABASE IF EXISTS 01280_db;" +$CLICKHOUSE_CLIENT --query="DROP DATABASE IF EXISTS 01280_db;" diff --git a/tests/queries/0_stateless/01287_max_execution_speed.sql b/tests/queries/0_stateless/01287_max_execution_speed.sql index 35bc4e02d38..0d132999481 100644 --- a/tests/queries/0_stateless/01287_max_execution_speed.sql +++ b/tests/queries/0_stateless/01287_max_execution_speed.sql @@ -1,4 +1,4 @@ --- Tags: no-fasttest +-- Tags: no-fasttest, no-debug, no-tsan, no-msan, no-asan SET min_execution_speed = 100000000000, timeout_before_checking_execution_speed = 0; SELECT count() FROM system.numbers; -- { serverError TOO_SLOW } diff --git a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh index 21f46a34514..00619f13173 100755 --- a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh +++ 
b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh @@ -85,7 +85,7 @@ export -f recreate_lazy_func4; export -f test_func; -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DATABASE IF EXISTS $CURR_DATABASE; CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1); " diff --git a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh index 1d2d4516b9c..6ff6644f11e 100755 --- a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh +++ b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh @@ -10,7 +10,7 @@ set -e function thread() { while true; do - $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 SYNC; + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table_$1 SYNC; CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 | grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now| were removed by another replica|Removing leftovers from table|Another replica was suddenly created|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time|^\(query: ' done diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh index 1d5f5d54853..cc96a37a0ce 100755 --- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh @@ -17,7 +17,7 @@ function thread1() { local TIMELIMIT=$((SECONDS+$1)) while [ $SECONDS -lt "$TIMELIMIT" ]; do - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;" + $CLICKHOUSE_CLIENT --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;" done } diff --git a/tests/queries/0_stateless/01395_limit_more_cases.sh b/tests/queries/0_stateless/01395_limit_more_cases.sh index 9709bd74f26..6be8a91f0c7 100755 --- a/tests/queries/0_stateless/01395_limit_more_cases.sh +++ b/tests/queries/0_stateless/01395_limit_more_cases.sh @@ -20,4 +20,4 @@ for OFFSET in {0..15}; do FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT); " done -done | $CLICKHOUSE_CLIENT -n --max_block_size 5 +done | $CLICKHOUSE_CLIENT --max_block_size 5 diff --git a/tests/queries/0_stateless/01395_limit_more_cases_random.sh b/tests/queries/0_stateless/01395_limit_more_cases_random.sh index c2f6b060aab..bb942906e63 100755 --- a/tests/queries/0_stateless/01395_limit_more_cases_random.sh +++ b/tests/queries/0_stateless/01395_limit_more_cases_random.sh @@ -19,4 +19,4 @@ for _ in $(seq $ITERATIONS); do throwIf((c != 0 OR first != 0 OR last != 0) AND (c != last - first + 1)) FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT); " -done | $CLICKHOUSE_CLIENT -n --max_block_size $(($RANDOM % 20 + 1)) | uniq +done | $CLICKHOUSE_CLIENT --max_block_size $(($RANDOM % 20 + 1)) | uniq diff --git a/tests/queries/0_stateless/01412_cache_dictionary_race.sh b/tests/queries/0_stateless/01412_cache_dictionary_race.sh index 36295ca01ea..b0c73cf742f 100755 
--- a/tests/queries/0_stateless/01412_cache_dictionary_race.sh +++ b/tests/queries/0_stateless/01412_cache_dictionary_race.sh @@ -10,7 +10,7 @@ $CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db" $CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " CREATE DICTIONARY ordinary_db.dict1 ( @@ -35,7 +35,7 @@ function dict_get_thread() function drop_create_table_thread() { while true; do - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ordinary_db.table_for_dict_real ( + $CLICKHOUSE_CLIENT --query "CREATE TABLE ordinary_db.table_for_dict_real ( key_column UInt64, second_column UInt8, third_column String diff --git a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh index ec9c5134059..950afea9086 100755 --- a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh +++ b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh @@ -23,7 +23,7 @@ function f { function g { local TIMELIMIT=$((SECONDS+$1)) for _ in $(seq 1 100); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " INSERT INTO mem SELECT number FROM numbers(1000000); INSERT INTO mem SELECT number FROM numbers(1000000); INSERT INTO mem SELECT number FROM numbers(1000000); diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh index 22cd6fb8127..0e6ab287146 100755 --- a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh +++ b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh @@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) NUM_REPLICAS=6 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i SYNC; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum_many', 'r$i') ORDER BY x; " @@ -39,12 +39,12 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SYSTEM SYNC REPLICA r$i; SELECT count(), min(x), max(x), sum(x) FROM r$i; " done for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS r$i SYNC;" + $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS r$i SYNC;" done diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh index 1589f17c752..3f4210f9bb0 100755 --- a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh +++ b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh @@ -12,13 +12,13 @@ NUM_REPLICAS=2 NUM_INSERTS=5 for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS r$i; CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum', 'r$i') ORDER BY x; " done -$CLICKHOUSE_CLIENT -n -q "SYSTEM STOP REPLICATION QUEUES r2;" +$CLICKHOUSE_CLIENT -q "SYSTEM STOP REPLICATION QUEUES r2;" function thread { $CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --query "INSERT INTO r1 SELECT $1" @@ -28,12 +28,12 @@ for i in $(seq 1 $NUM_INSERTS); do thread $i & done -$CLICKHOUSE_CLIENT -n -q "SYSTEM START REPLICATION QUEUES r2;" +$CLICKHOUSE_CLIENT -q "SYSTEM START REPLICATION QUEUES r2;" wait for i in $(seq 1 
$NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n -q " + $CLICKHOUSE_CLIENT -q " SELECT count(), min(x), max(x), sum(x) FROM r$i; DROP TABLE IF EXISTS r$i; " diff --git a/tests/queries/0_stateless/01563_distributed_query_finish.sh b/tests/queries/0_stateless/01563_distributed_query_finish.sh index e3c5928f108..50d194b1f15 100755 --- a/tests/queries/0_stateless/01563_distributed_query_finish.sh +++ b/tests/queries/0_stateless/01563_distributed_query_finish.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm < toString(x), range(number % 128)), ' '), '') -FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8, max_rows_to_read=0; +FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8, max_rows_to_read=0, max_memory_usage='10Gi'; -- optimize table t final; diff --git a/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh b/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh index 5fc41890a18..dbfb3518a61 100755 --- a/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh +++ b/tests/queries/0_stateless/01732_race_condition_storage_join_long.sh @@ -11,14 +11,14 @@ set -o pipefail echo " DROP TABLE IF EXISTS storage_join_race; CREATE TABLE storage_join_race (x UInt64, y UInt64) Engine = Join(ALL, FULL, x); -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT function read_thread_big() { while true; do echo " SELECT * FROM ( SELECT number AS x FROM numbers(100000) ) AS t1 ALL FULL JOIN storage_join_race USING (x) FORMAT Null; - " | $CLICKHOUSE_CLIENT -n + " | $CLICKHOUSE_CLIENT done } @@ -27,7 +27,7 @@ function read_thread_small() while true; do echo " SELECT * FROM ( SELECT number AS x FROM numbers(10) ) AS t1 ALL FULL JOIN storage_join_race USING (x) FORMAT Null; - " | $CLICKHOUSE_CLIENT -n + " | $CLICKHOUSE_CLIENT done } @@ -36,7 +36,7 @@ function read_thread_select() while true; do echo " SELECT * FROM storage_join_race FORMAT Null; - " | $CLICKHOUSE_CLIENT -n + " | $CLICKHOUSE_CLIENT done } @@ -56,7 +56,7 @@ echo " INSERT INTO storage_join_race SELECT number AS x, sleepEachRow(0.1) + number AS y FROM numbers ($TIMEOUT * 10) SETTINGS function_sleep_max_microseconds_per_block = 100000000, max_block_size = 10; -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT wait diff --git a/tests/queries/0_stateless/01780_column_sparse_materialize.reference b/tests/queries/0_stateless/01780_column_sparse_materialize.reference new file mode 100644 index 00000000000..660cfabff33 --- /dev/null +++ b/tests/queries/0_stateless/01780_column_sparse_materialize.reference @@ -0,0 +1,50 @@ +-- { echoOn } + +SELECT dumpColumnStructure(id) FROM sparse_t; +UInt64, Sparse(size = 2, UInt64(size = 2), UInt64(size = 1)) +UInt64, Sparse(size = 2, UInt64(size = 2), UInt64(size = 1)) +SELECT dumpColumnStructure(materialize(id)) FROM sparse_t; +UInt64, UInt64(size = 2) +UInt64, UInt64(size = 2) +SELECT dumpColumnStructure(u) FROM sparse_t; +UInt64, Sparse(size = 2, UInt64(size = 1), UInt64(size = 0)) +UInt64, Sparse(size = 2, UInt64(size = 1), UInt64(size = 0)) +SELECT dumpColumnStructure(materialize(u)) FROM sparse_t; +UInt64, UInt64(size = 2) +UInt64, UInt64(size = 2) +SELECT dumpColumnStructure(s) FROM sparse_t; +String, Sparse(size = 2, String(size = 2), UInt64(size = 1)) +String, Sparse(size = 2, String(size = 2), UInt64(size = 1)) +SELECT dumpColumnStructure(materialize(s)) FROM sparse_t; +String, String(size = 2) +String, String(size = 2) +SELECT 
dumpColumnStructure(arr1) FROM sparse_t; +Array(String), Array(size = 2, UInt64(size = 2), String(size = 1)) +Array(String), Array(size = 2, UInt64(size = 2), String(size = 1)) +SELECT dumpColumnStructure(materialize(arr1)) FROM sparse_t; +Array(String), Array(size = 2, UInt64(size = 2), String(size = 1)) +Array(String), Array(size = 2, UInt64(size = 2), String(size = 1)) +SELECT dumpColumnStructure(arr2) FROM sparse_t; +Array(UInt64), Array(size = 2, UInt64(size = 2), UInt64(size = 1)) +Array(UInt64), Array(size = 2, UInt64(size = 2), UInt64(size = 1)) +SELECT dumpColumnStructure(materialize(arr2)) FROM sparse_t; +Array(UInt64), Array(size = 2, UInt64(size = 2), UInt64(size = 1)) +Array(UInt64), Array(size = 2, UInt64(size = 2), UInt64(size = 1)) +SELECT dumpColumnStructure(t) FROM sparse_t; +Tuple(a UInt64, s String), Tuple(size = 2, Sparse(size = 2, UInt64(size = 1), UInt64(size = 0)), Sparse(size = 2, String(size = 1), UInt64(size = 0))) +Tuple(a UInt64, s String), Tuple(size = 2, Sparse(size = 2, UInt64(size = 1), UInt64(size = 0)), Sparse(size = 2, String(size = 1), UInt64(size = 0))) +SELECT dumpColumnStructure(materialize(t)) FROM sparse_t; +Tuple(a UInt64, s String), Tuple(size = 2, UInt64(size = 2), String(size = 2)) +Tuple(a UInt64, s String), Tuple(size = 2, UInt64(size = 2), String(size = 2)) +SELECT dumpColumnStructure(t.a) FROM sparse_t; +UInt64, Sparse(size = 2, UInt64(size = 1), UInt64(size = 0)) +UInt64, Sparse(size = 2, UInt64(size = 1), UInt64(size = 0)) +SELECT dumpColumnStructure(materialize(t.a)) FROM sparse_t; +UInt64, UInt64(size = 2) +UInt64, UInt64(size = 2) +SELECT dumpColumnStructure(t.s) FROM sparse_t; +String, Sparse(size = 2, String(size = 1), UInt64(size = 0)) +String, Sparse(size = 2, String(size = 1), UInt64(size = 0)) +SELECT dumpColumnStructure(materialize(t.s)) FROM sparse_t; +String, String(size = 2) +String, String(size = 2) diff --git a/tests/queries/0_stateless/01780_column_sparse_materialize.sql b/tests/queries/0_stateless/01780_column_sparse_materialize.sql new file mode 100644 index 00000000000..a53ea140f0f --- /dev/null +++ b/tests/queries/0_stateless/01780_column_sparse_materialize.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS sparse_t; + +CREATE TABLE sparse_t ( + id UInt64, + u UInt64, + s String, + arr1 Array(String), + arr2 Array(UInt64), + t Tuple(a UInt64, s String)) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.1; + +INSERT INTO sparse_t SELECT + number, + if (number % 2 = 0, number, 0), + if (number % 2 = 0, toString(number), ''), + if (number % 2 = 0, [''], []), + if (number % 2 = 0, [0], []), + (if (number % 2 = 0, number, 0), '') +FROM numbers(2); + +-- { echoOn } + +SELECT dumpColumnStructure(id) FROM sparse_t; +SELECT dumpColumnStructure(materialize(id)) FROM sparse_t; + +SELECT dumpColumnStructure(u) FROM sparse_t; +SELECT dumpColumnStructure(materialize(u)) FROM sparse_t; + +SELECT dumpColumnStructure(s) FROM sparse_t; +SELECT dumpColumnStructure(materialize(s)) FROM sparse_t; + +SELECT dumpColumnStructure(arr1) FROM sparse_t; +SELECT dumpColumnStructure(materialize(arr1)) FROM sparse_t; + +SELECT dumpColumnStructure(arr2) FROM sparse_t; +SELECT dumpColumnStructure(materialize(arr2)) FROM sparse_t; + +SELECT dumpColumnStructure(t) FROM sparse_t; +SELECT dumpColumnStructure(materialize(t)) FROM sparse_t; + +SELECT dumpColumnStructure(t.a) FROM sparse_t; +SELECT dumpColumnStructure(materialize(t.a)) FROM sparse_t; + +SELECT dumpColumnStructure(t.s) FROM sparse_t; +SELECT 
dumpColumnStructure(materialize(t.s)) FROM sparse_t; + +-- { echoOff } + + +DROP TABLE IF EXISTS sparse_t +; diff --git a/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh index 4713b15bb5b..4cf487274d4 100755 --- a/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh +++ b/tests/queries/0_stateless/01802_test_postgresql_protocol_with_row_policy.sh @@ -17,7 +17,7 @@ INSERT INTO db01802.postgresql SELECT number FROM numbers(10); SELECT 'before row policy'; SELECT * FROM db01802.postgresql; -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT echo " @@ -28,7 +28,7 @@ CREATE ROW POLICY IF NOT EXISTS test_policy ON db01802.postgresql FOR SELECT USI SELECT ''; SELECT 'after row policy with no password'; -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} db01802 --user postgresql_user -c "SELECT * FROM postgresql;" @@ -40,7 +40,7 @@ GRANT SELECT(val) ON db01802.postgresql TO postgresql_user; CREATE ROW POLICY IF NOT EXISTS test_policy ON db01802.postgresql FOR SELECT USING val = 2 TO postgresql_user; SELECT 'after row policy with plaintext_password'; -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT psql "postgresql://postgresql_user:qwerty@localhost:${CLICKHOUSE_PORT_POSTGRESQL}/db01802" -c "SELECT * FROM postgresql;" diff --git a/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh b/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh index c38fc505fa8..4196598e461 100755 --- a/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh +++ b/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh @@ -17,10 +17,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # The number of threads removing data parts should be between 1 and 129. 
# Because max_parts_cleaning_thread_pool_size is 128 by default -$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -nm -q "create database ordinary_$CLICKHOUSE_DATABASE engine=Ordinary" +$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -m -q "create database ordinary_$CLICKHOUSE_DATABASE engine=Ordinary" # MergeTree -$CLICKHOUSE_CLIENT -nm -q """ +$CLICKHOUSE_CLIENT -m -q """ use ordinary_$CLICKHOUSE_DATABASE; drop table if exists data_01810; @@ -47,7 +47,7 @@ $CLICKHOUSE_CLIENT -nm -q """ """ # ReplicatedMergeTree -$CLICKHOUSE_CLIENT -nm -q """ +$CLICKHOUSE_CLIENT -m -q """ use ordinary_$CLICKHOUSE_DATABASE; drop table if exists rep_data_01810; @@ -76,4 +76,4 @@ $CLICKHOUSE_CLIENT -nm -q """ format Null; """ -$CLICKHOUSE_CLIENT -nm -q "drop database ordinary_$CLICKHOUSE_DATABASE" +$CLICKHOUSE_CLIENT -m -q "drop database ordinary_$CLICKHOUSE_DATABASE" diff --git a/tests/queries/0_stateless/01889_postgresql_protocol_null_fields.sh b/tests/queries/0_stateless/01889_postgresql_protocol_null_fields.sh index e9467d77451..c0f200ed20d 100755 --- a/tests/queries/0_stateless/01889_postgresql_protocol_null_fields.sh +++ b/tests/queries/0_stateless/01889_postgresql_protocol_null_fields.sh @@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo " DROP USER IF EXISTS postgresql_user; CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password; -" | $CLICKHOUSE_CLIENT -n +" | $CLICKHOUSE_CLIENT psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT NULL;" diff --git a/tests/queries/0_stateless/01900_kill_mutation_parallel_long.sh b/tests/queries/0_stateless/01900_kill_mutation_parallel_long.sh index 30dff6db0c4..81a674be3c4 100755 --- a/tests/queries/0_stateless/01900_kill_mutation_parallel_long.sh +++ b/tests/queries/0_stateless/01900_kill_mutation_parallel_long.sh @@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data_01900_1; drop table if exists data_01900_2; @@ -27,18 +27,18 @@ $CLICKHOUSE_CLIENT -nm -q " # so 100 mutations will be scheduled and killed later. for i in {1..100}; do echo "alter table data_01900_1 update s = 'foo_$i' where 1;" -done | $CLICKHOUSE_CLIENT -nm +done | $CLICKHOUSE_CLIENT -m # but these mutations should not be killed. 
( for i in {1..100}; do echo "alter table data_01900_2 update s = 'bar_$i' where 1;" - done | $CLICKHOUSE_CLIENT -nm --mutations_sync=1 + done | $CLICKHOUSE_CLIENT -m --mutations_sync=1 ) & -$CLICKHOUSE_CLIENT --format Null -nm -q "kill mutation where table = 'data_01900_1' and database = '$CLICKHOUSE_DATABASE';" +$CLICKHOUSE_CLIENT --format Null -m -q "kill mutation where table = 'data_01900_1' and database = '$CLICKHOUSE_DATABASE';" wait -$CLICKHOUSE_CLIENT -nm -q "select * from data_01900_2" +$CLICKHOUSE_CLIENT -m -q "select * from data_01900_2" $CLICKHOUSE_CLIENT -q "drop table data_01900_1" $CLICKHOUSE_CLIENT -q "drop table data_01900_2" diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index 0dfde4997a7..bc8b01fa9f5 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -18,7 +18,7 @@ done wait for i in $(seq 1 $NUM_REPLICAS); do - $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ttl_table$i( + $CLICKHOUSE_CLIENT --query "CREATE TABLE ttl_table$i( key DateTime ) ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/ttl_table', '$i') diff --git a/tests/queries/0_stateless/02354_vector_search_experimental_setting.reference b/tests/queries/0_stateless/02354_vector_search_experimental_setting.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_experimental_setting.sql b/tests/queries/0_stateless/02354_vector_search_experimental_setting.sql new file mode 100644 index 00000000000..cce838f8e02 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_experimental_setting.sql @@ -0,0 +1,32 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that CREATE TABLE and ADD INDEX respect setting 'allow_experimental_vector_similarity_index'. + +DROP TABLE IF EXISTS tab; + +-- Test CREATE TABLE + +SET allow_experimental_vector_similarity_index = 0; +CREATE TABLE tab (id UInt32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError SUPPORT_IS_DISABLED } + +SET allow_experimental_vector_similarity_index = 1; +CREATE TABLE tab (id UInt32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY tuple(); +DROP TABLE tab; + +-- Test ADD INDEX + +CREATE TABLE tab (id UInt32, vec Array(Float32)) ENGINE = MergeTree ORDER BY tuple(); + +SET allow_experimental_vector_similarity_index = 0; +ALTER TABLE tab ADD INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance'); -- { serverError SUPPORT_IS_DISABLED } + +SET allow_experimental_vector_similarity_index = 1; +ALTER TABLE tab ADD INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance'); + +-- Other index DDL must work regardless of the setting +SET allow_experimental_vector_similarity_index = 0; +ALTER TABLE tab MATERIALIZE INDEX idx; +-- ALTER TABLE tab CLEAR INDEX idx; -- <-- Should work but doesn't w/o enabled setting. Unexpected but not terrible. 
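
For context on the new 02354_vector_search_experimental_setting.sql test: allow_experimental_vector_similarity_index is an ordinary query-level setting, so outside a .sql test it can also be supplied directly on the clickhouse-client command line. A rough sketch under that assumption, with a made-up table name and assuming the build ships the vector_similarity index type:

# Hypothetical example, not taken from the test suite.
# Without the setting the DDL is expected to fail with SUPPORT_IS_DISABLED;
# passing it as a client option lets the CREATE TABLE go through.
clickhouse-client --allow_experimental_vector_similarity_index=1 --query "
    CREATE TABLE vec_demo
    (
        id UInt32,
        vec Array(Float32),
        INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')
    )
    ENGINE = MergeTree
    ORDER BY id"
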
+ALTER TABLE tab DROP INDEX idx; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index faff306ef60..e42f91d05dc 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -41,6 +41,21 @@ Special cases 6 [1,9.3] 0.005731362878640178 1 [2,3.2] 0.15200169244542905 7 [5.5,4.7] 0.3503476876550442 +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + Sorting (Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromMergeTree (default.tab) + Indexes: + PrimaryKey + Condition: true + Parts: 1/1 + Granules: 4/4 + Skip + Name: idx + Description: vector_similarity GRANULARITY 2 + Parts: 1/1 + Granules: 2/4 -- Setting "max_limit_for_ann_queries" Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 17939992165..8769e5c56bb 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -63,6 +63,13 @@ FROM tab ORDER BY cosineDistance(vec, reference_vec) LIMIT 3; +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + SELECT '-- Setting "max_limit_for_ann_queries"'; EXPLAIN indexes=1 WITH [0.0, 2.0] as reference_vec diff --git a/tests/queries/0_stateless/02378_part_log_profile_events.sh b/tests/queries/0_stateless/02378_part_log_profile_events.sh new file mode 100755 index 00000000000..8dd8b1eca91 --- /dev/null +++ b/tests/queries/0_stateless/02378_part_log_profile_events.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# Tags: no-shared-merge-tree +# Tag no-shared-merge-tree: depend on events with local disk + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query " + DROP TABLE IF EXISTS test; + + CREATE TABLE test (key UInt64, val UInt64) engine = MergeTree Order by key PARTITION BY key >= 128; + SET max_block_size = 64, max_insert_block_size = 64, min_insert_block_size_rows = 64; + INSERT INTO test SELECT number AS key, sipHash64(number) AS val FROM numbers(512); +" + +${CLICKHOUSE_CLIENT} --query " + SYSTEM FLUSH LOGS; + SELECT + if(count(DISTINCT query_id) == 1, 'Ok', 'Error: ' || toString(count(DISTINCT query_id))), + if(count() == 512 / 64, 'Ok', 'Error: ' || toString(count())), -- 512 rows inserted, 64 rows per block + if(SUM(ProfileEvents['MergeTreeDataWriterRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterRows']))), + if(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']))), + if(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']))), + if(SUM(ProfileEvents['MergeTreeDataWriterBlocks']) >= 8, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterBlocks']))) + FROM system.part_log + WHERE event_time > now() - INTERVAL 10 MINUTE + AND database == currentDatabase() AND table == 'test' + AND event_type == 'NewPart'; +" + +${CLICKHOUSE_CLIENT} --query "OPTIMIZE TABLE test FINAL;" + +${CLICKHOUSE_CLIENT} --query " + SYSTEM FLUSH LOGS; + SELECT + if(count() > 2, 'Ok', 'Error: ' || toString(count())), + if(SUM(ProfileEvents['MergedRows']) >= 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows']))) + FROM system.part_log + WHERE event_time > now() - INTERVAL 10 MINUTE + AND database == currentDatabase() AND table == 'test' + AND event_type == 'MergeParts'; +" + +${CLICKHOUSE_CLIENT} --query " + ALTER TABLE test UPDATE val = 0 WHERE key % 2 == 0 SETTINGS mutations_sync = 2 +" + +# The mutation query may return before the entry is added to the system.part_log table. +# Retry SYSTEM FLUSH LOGS until all entries are fully flushed. 
+for _ in {1..10}; do + ${CLICKHOUSE_CLIENT} --query "SYSTEM FLUSH LOGS" + res=$(${CLICKHOUSE_CLIENT} --query " + SELECT count() FROM system.part_log + WHERE event_time > now() - INTERVAL 10 MINUTE + AND database == currentDatabase() AND table == 'test' + AND event_type == 'MutatePart';" + ) + if [[ $res -eq 2 ]]; then + break + fi + + sleep 2.0 +done + +${CLICKHOUSE_CLIENT} --query " + SELECT + if(count() == 2, 'Ok', 'Error: ' || toString(count())), + if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))), + if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen']))) + FROM system.part_log + WHERE event_time > now() - INTERVAL 10 MINUTE + AND database == currentDatabase() AND table == 'test' + AND event_type == 'MutatePart'; +" + +${CLICKHOUSE_CLIENT} --query "DROP TABLE test" diff --git a/tests/queries/0_stateless/02378_part_log_profile_events.sql b/tests/queries/0_stateless/02378_part_log_profile_events.sql deleted file mode 100644 index eec76d6f50e..00000000000 --- a/tests/queries/0_stateless/02378_part_log_profile_events.sql +++ /dev/null @@ -1,50 +0,0 @@ -DROP TABLE IF EXISTS test; - -CREATE TABLE test (key UInt64, val UInt64) engine = MergeTree Order by key PARTITION BY key >= 128; - -SET max_block_size = 64, max_insert_block_size = 64, min_insert_block_size_rows = 64; - -INSERT INTO test SELECT number AS key, sipHash64(number) AS val FROM numbers(512); - -SYSTEM FLUSH LOGS; - -SELECT - if(count(DISTINCT query_id) == 1, 'Ok', 'Error: ' || toString(count(DISTINCT query_id))), - if(count() == 512 / 64, 'Ok', 'Error: ' || toString(count())), -- 512 rows inserted, 64 rows per block - if(SUM(ProfileEvents['MergeTreeDataWriterRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterRows']))), - if(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterUncompressedBytes']))), - if(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']) >= 1024, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterCompressedBytes']))), - if(SUM(ProfileEvents['MergeTreeDataWriterBlocks']) >= 8, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergeTreeDataWriterBlocks']))) -FROM system.part_log -WHERE event_time > now() - INTERVAL 10 MINUTE - AND database == currentDatabase() AND table == 'test' - AND event_type == 'NewPart' -; - -OPTIMIZE TABLE test FINAL; - -SYSTEM FLUSH LOGS; -SELECT - if(count() > 2, 'Ok', 'Error: ' || toString(count())), - if(SUM(ProfileEvents['MergedRows']) >= 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows']))) -FROM system.part_log -WHERE event_time > now() - INTERVAL 10 MINUTE - AND database == currentDatabase() AND table == 'test' - AND event_type == 'MergeParts' -; - -ALTER TABLE test UPDATE val = 0 WHERE key % 2 == 0 SETTINGS mutations_sync = 2; - -SYSTEM FLUSH LOGS; - -SELECT - if(count() == 2, 'Ok', 'Error: ' || toString(count())), - if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))), - if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen']))) -FROM system.part_log -WHERE event_time > now() - INTERVAL 10 MINUTE - AND database == currentDatabase() AND table == 'test' - AND event_type == 'MutatePart' -; - -DROP TABLE test; diff --git a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh 
b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh index c1aa24943c1..096a0fcc0b0 100755 --- a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh +++ b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh @@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" ${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 36 }" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference new file mode 100644 index 00000000000..874494fb061 --- /dev/null +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.reference @@ -0,0 +1,10 @@ +2 +3 +m1 +m2 +rmt1 +rmt2 +02858000-1000-4000-8000-000000000 +0 +CREATE TABLE default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/{shard}\', \'_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 +CREATE TABLE default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh new file mode 100755 index 00000000000..81a9cef02ff --- /dev/null +++ b/tests/queries/0_stateless/02858_explicit_uuid_and_zk_path.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +db=$CLICKHOUSE_DATABASE +if [[ $($CLICKHOUSE_CLIENT -q "SELECT engine = 'Replicated' FROM system.databases WHERE name='$CLICKHOUSE_DATABASE'") != 1 ]]; then + $CLICKHOUSE_CLIENT -q "CREATE DATABASE rdb_$CLICKHOUSE_DATABASE ENGINE=Replicated('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rdb', '1', '1')" + db="rdb_$CLICKHOUSE_DATABASE" +fi + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=0 -q "CREATE TABLE $db.m0 +UUID '02858000-1000-4000-8000-000000000000' (n int) ENGINE=Memory" 2>&1| grep -Fac "database_replicated_allow_explicit_uuid" + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1 +UUID '02858000-1000-4000-8000-000000000$(($RANDOM % 10))$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory" + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2 +UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory" + + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments" + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" + +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int) +ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" + + +$CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name" + +$CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 3) FROM system.tables WHERE database='$db' and name='m1'" +$CLICKHOUSE_CLIENT -q "SELECT toString(uuid) LIKE '02858000%' FROM system.tables WHERE database='$db' and name='m2'" + +$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" | sed "s/$db/default/g" +$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt2" | sed "s/$db/default/g" + +$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS rdb_$CLICKHOUSE_DATABASE" diff --git a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference new file mode 100644 index 00000000000..1843964377d --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference @@ -0,0 +1,5 @@ +Test create statistics: +CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, countmin, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, countmin, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, countmin, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 +Test materialize and drop statistics: +CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 +CREATE TABLE 
default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql new file mode 100644 index 00000000000..249e3c84a51 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql @@ -0,0 +1,35 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS tab SYNC; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET allow_suspicious_low_cardinality_types=1; +SET mutations_sync = 2; + + +SELECT 'Test create statistics:'; + +CREATE TABLE tab +( + a LowCardinality(Int64) STATISTICS(countmin, minmax, tdigest, uniq), + b LowCardinality(Nullable(String)) STATISTICS(countmin, uniq), + c LowCardinality(Nullable(Int64)) STATISTICS(countmin, minmax, tdigest, uniq), + d DateTime STATISTICS(countmin, minmax, tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk; + +INSERT INTO tab select number, number, number, toDateTime(number), generateUUIDv4() FROM system.numbers LIMIT 10000; +SHOW CREATE TABLE tab; + + +SELECT 'Test materialize and drop statistics:'; +ALTER TABLE tab DROP STATISTICS a, b, c, d; +ALTER TABLE tab ADD STATISTICS b TYPE countmin, uniq; +ALTER TABLE tab MATERIALIZE STATISTICS b; +SHOW CREATE TABLE tab; + +ALTER TABLE tab DROP STATISTICS b; +SHOW CREATE TABLE tab; + +DROP TABLE IF EXISTS tab SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql index 32b56a842b7..5b2c5cebc1d 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.sql +++ b/tests/queries/0_stateless/02864_statistics_ddl.sql @@ -1,5 +1,5 @@ -- Tags: no-fasttest --- no-fasttest: 'count_min' sketches need a 3rd party library +-- no-fasttest: 'countmin' sketches need a 3rd party library -- Tests that DDL statements which create / drop / materialize statistics @@ -7,6 +7,7 @@ SET mutations_sync = 1; DROP TABLE IF EXISTS tab; +SET allow_experimental_statistics = 0; -- Error case: Can't create statistics when allow_experimental_statistics = 0 CREATE TABLE tab (col Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } @@ -46,7 +47,7 @@ CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(tdigest)) Engine = MergeTre CREATE TABLE tab (col UUID STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col IPv6 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } --- uniq requires data_type.isValueRepresentedByInteger +-- uniq requires data_type.isValueRepresentedByInteger or (Fixed)String -- These types work: CREATE TABLE tab (col UInt8 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; CREATE TABLE tab (col UInt256 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; @@ -61,38 +62,62 @@ CREATE TABLE tab (col IPv4 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple CREATE TABLE tab (col Nullable(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); 
DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -- These types don't work: -CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col Array(Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } --- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String +-- countmin requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String -- These types work: -CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt8 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date 
STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; -- These types don't work: -CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- minmax requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE 
TABLE tab (col Enum('hello', 'world') STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } -- CREATE TABLE was easy, ALTER is more fun @@ -162,17 +187,24 @@ ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATI -- uniq -- Works: ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; -- Doesn't work: ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } --- count_min +-- countmin -- Works: -ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; -ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab ADD STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; -- Doesn't work: -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS } +-- minmax +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE minmax; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE minmax; -- { serverError ILLEGAL_STATISTICS } -- Any data type changes on columns with statistics are disallowed, for simplicity even if the new data type is compatible with all existing -- statistics objects (e.g. 
tdigest can be created on Float64 and UInt64) diff --git a/tests/queries/0_stateless/02864_statistics_predicates.reference b/tests/queries/0_stateless/02864_statistics_predicates.reference index ffbd7269e05..c2d74300fa9 100644 --- a/tests/queries/0_stateless/02864_statistics_predicates.reference +++ b/tests/queries/0_stateless/02864_statistics_predicates.reference @@ -3,10 +3,13 @@ u64 and = 10 10 10 +10 0 0 0 0 +0 +10 10 10 10 @@ -16,10 +19,13 @@ u64 and < 70 70 70 +70 80 80 80 80 +80 +70 70 70 70 @@ -29,6 +35,8 @@ f64 and = 10 10 10 +10 +0 0 0 0 @@ -37,6 +45,8 @@ f64 and = 10 10 10 +10 +0 0 0 0 @@ -46,6 +56,8 @@ f64 and < 70 70 70 +70 +80 80 80 80 @@ -54,6 +66,8 @@ f64 and < 70 70 70 +70 +80 80 80 80 @@ -63,6 +77,8 @@ dt and = 0 0 0 +0 +10 10 10 10 @@ -72,6 +88,8 @@ dt and < 10000 10000 10000 +10000 +70 70 70 70 @@ -89,6 +107,10 @@ b and = 5000 5000 5000 +5000 +5000 +5000 +0 0 0 0 @@ -96,3 +118,4 @@ b and = s and = 10 10 +10 diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql index 779116cf19a..d7afba12c1d 100644 --- a/tests/queries/0_stateless/02864_statistics_predicates.sql +++ b/tests/queries/0_stateless/02864_statistics_predicates.sql @@ -1,5 +1,5 @@ -- Tags: no-fasttest --- no-fasttest: 'count_min' sketches need a 3rd party library +-- no-fasttest: 'countmin' sketches need a 3rd party library -- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types. @@ -12,46 +12,56 @@ CREATE TABLE tab ( u64 UInt64, u64_tdigest UInt64 STATISTICS(tdigest), - u64_count_min UInt64 STATISTICS(count_min), + u64_minmax UInt64 STATISTICS(minmax), + u64_countmin UInt64 STATISTICS(countmin), u64_uniq UInt64 STATISTICS(uniq), f64 Float64, f64_tdigest Float64 STATISTICS(tdigest), - f64_count_min Float64 STATISTICS(count_min), + f64_minmax Float64 STATISTICS(minmax), + f64_countmin Float64 STATISTICS(countmin), f64_uniq Float64 STATISTICS(uniq), dt DateTime, dt_tdigest DateTime STATISTICS(tdigest), - dt_count_min DateTime STATISTICS(count_min), + dt_minmax DateTime STATISTICS(minmax), + dt_countmin DateTime STATISTICS(countmin), dt_uniq DateTime STATISTICS(uniq), b Bool, b_tdigest Bool STATISTICS(tdigest), - b_count_min Bool STATISTICS(count_min), + b_minmax Bool STATISTICS(minmax), + b_countmin Bool STATISTICS(countmin), b_uniq Bool STATISTICS(uniq), s String, -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest - s_count_min String STATISTICS(count_min) - -- s_uniq String STATISTICS(uniq), -- not supported by uniq + -- s_minmax String STATISTICS(minmax), -- not supported by minmax + s_countmin String STATISTICS(countmin), + s_uniq String STATISTICS(uniq) ) Engine = MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; INSERT INTO tab -- SELECT number % 10000, number % 1000, -(number % 100) FROM system.numbers LIMIT 10000; -SELECT number % 1000, +SELECT number % 1000, -- u64 number % 1000, number % 1000, number % 1000, number % 1000, + number % 1000, -- f64 number % 1000, number % 1000, number % 1000, number % 1000, + number % 1000, -- dt number % 1000, number % 1000, number % 1000, + number % 1000, + number % 2, -- b number % 2, number % 2, number % 2, number % 2, toString(number % 1000), + toString(number % 1000), toString(number % 1000) FROM system.numbers LIMIT 10000; @@ -61,44 +71,52 @@ SELECT 'u64 and ='; SELECT count(*) FROM tab WHERE u64 = 7; SELECT count(*) FROM tab WHERE u64_tdigest = 7; -SELECT count(*) FROM tab WHERE u64_count_min = 7; 
+SELECT count(*) FROM tab WHERE u64_minmax = 7; +SELECT count(*) FROM tab WHERE u64_countmin = 7; SELECT count(*) FROM tab WHERE u64_uniq = 7; SELECT count(*) FROM tab WHERE u64 = 7.7; SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; -SELECT count(*) FROM tab WHERE u64_count_min = 7.7; +SELECT count(*) FROM tab WHERE u64_minmax = 7.7; +SELECT count(*) FROM tab WHERE u64_countmin = 7.7; SELECT count(*) FROM tab WHERE u64_uniq = 7.7; SELECT count(*) FROM tab WHERE u64 = '7'; SELECT count(*) FROM tab WHERE u64_tdigest = '7'; -SELECT count(*) FROM tab WHERE u64_count_min = '7'; +SELECT count(*) FROM tab WHERE u64_minmax = '7'; +SELECT count(*) FROM tab WHERE u64_countmin = '7'; SELECT count(*) FROM tab WHERE u64_uniq = '7'; SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_minmax = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_countmin = '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } SELECT 'u64 and <'; SELECT count(*) FROM tab WHERE u64 < 7; SELECT count(*) FROM tab WHERE u64_tdigest < 7; -SELECT count(*) FROM tab WHERE u64_count_min < 7; +SELECT count(*) FROM tab WHERE u64_minmax < 7; +SELECT count(*) FROM tab WHERE u64_countmin < 7; SELECT count(*) FROM tab WHERE u64_uniq < 7; SELECT count(*) FROM tab WHERE u64 < 7.7; SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; -SELECT count(*) FROM tab WHERE u64_count_min < 7.7; +SELECT count(*) FROM tab WHERE u64_minmax < 7.7; +SELECT count(*) FROM tab WHERE u64_countmin < 7.7; SELECT count(*) FROM tab WHERE u64_uniq < 7.7; SELECT count(*) FROM tab WHERE u64 < '7'; SELECT count(*) FROM tab WHERE u64_tdigest < '7'; -SELECT count(*) FROM tab WHERE u64_count_min < '7'; +SELECT count(*) FROM tab WHERE u64_minmax < '7'; +SELECT count(*) FROM tab WHERE u64_countmin < '7'; SELECT count(*) FROM tab WHERE u64_uniq < '7'; SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } -SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_minmax < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_countmin < '7.7'; -- { serverError TYPE_MISMATCH } SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } -- f64 ---------------------------------------------------- @@ -107,44 +125,52 @@ SELECT 'f64 and ='; SELECT count(*) FROM tab WHERE f64 = 7; SELECT count(*) FROM tab WHERE f64_tdigest = 7; -SELECT count(*) FROM tab WHERE f64_count_min = 7; +SELECT count(*) FROM tab WHERE f64_minmax = 7; +SELECT count(*) FROM tab WHERE f64_countmin = 7; SELECT count(*) FROM tab WHERE f64_uniq = 7; SELECT count(*) FROM tab WHERE f64 = 7.7; SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; -SELECT count(*) FROM tab WHERE f64_count_min = 7.7; +SELECT count(*) FROM tab WHERE f64_minmax = 7.7; +SELECT count(*) FROM tab WHERE f64_countmin = 7.7; SELECT count(*) FROM tab WHERE f64_uniq = 7.7; SELECT count(*) FROM tab WHERE f64 = '7'; SELECT count(*) FROM tab WHERE f64_tdigest = '7'; -SELECT count(*) FROM tab WHERE f64_count_min = '7'; +SELECT count(*) FROM tab WHERE f64_minmax = '7'; +SELECT count(*) FROM tab WHERE f64_countmin = '7'; 
SELECT count(*) FROM tab WHERE f64_uniq = '7'; SELECT count(*) FROM tab WHERE f64 = '7.7'; SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; -SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; +SELECT count(*) FROM tab WHERE f64_minmax = '7.7'; +SELECT count(*) FROM tab WHERE f64_countmin = '7.7'; SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; SELECT 'f64 and <'; SELECT count(*) FROM tab WHERE f64 < 7; SELECT count(*) FROM tab WHERE f64_tdigest < 7; -SELECT count(*) FROM tab WHERE f64_count_min < 7; +SELECT count(*) FROM tab WHERE f64_minmax < 7; +SELECT count(*) FROM tab WHERE f64_countmin < 7; SELECT count(*) FROM tab WHERE f64_uniq < 7; SELECT count(*) FROM tab WHERE f64 < 7.7; SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; -SELECT count(*) FROM tab WHERE f64_count_min < 7.7; +SELECT count(*) FROM tab WHERE f64_minmax < 7.7; +SELECT count(*) FROM tab WHERE f64_countmin < 7.7; SELECT count(*) FROM tab WHERE f64_uniq < 7.7; SELECT count(*) FROM tab WHERE f64 < '7'; SELECT count(*) FROM tab WHERE f64_tdigest < '7'; -SELECT count(*) FROM tab WHERE f64_count_min < '7'; +SELECT count(*) FROM tab WHERE f64_minmax < '7'; +SELECT count(*) FROM tab WHERE f64_countmin < '7'; SELECT count(*) FROM tab WHERE f64_uniq < '7'; SELECT count(*) FROM tab WHERE f64 < '7.7'; SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; -SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; +SELECT count(*) FROM tab WHERE f64_minmax < '7.7'; +SELECT count(*) FROM tab WHERE f64_countmin < '7.7'; SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; -- dt ---------------------------------------------------- @@ -153,24 +179,28 @@ SELECT 'dt and ='; SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_minmax = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_countmin = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt = 7; SELECT count(*) FROM tab WHERE dt_tdigest = 7; -SELECT count(*) FROM tab WHERE dt_count_min = 7; +SELECT count(*) FROM tab WHERE dt_minmax = 7; +SELECT count(*) FROM tab WHERE dt_countmin = 7; SELECT count(*) FROM tab WHERE dt_uniq = 7; SELECT 'dt and <'; SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; -SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_minmax < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_countmin < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; SELECT count(*) FROM tab WHERE dt < 7; SELECT count(*) FROM tab WHERE dt_tdigest < 7; -SELECT count(*) FROM tab WHERE dt_count_min < 7; +SELECT count(*) FROM tab WHERE dt_minmax < 7; +SELECT count(*) FROM tab WHERE dt_countmin < 7; SELECT count(*) FROM tab WHERE dt_uniq < 7; -- b ---------------------------------------------------- @@ -179,22 +209,26 @@ SELECT 'b and ='; SELECT count(*) FROM tab WHERE b = true; SELECT count(*) FROM tab WHERE b_tdigest = true; -SELECT count(*) FROM tab WHERE b_count_min = true; +SELECT count(*) FROM tab WHERE b_minmax = true; +SELECT count(*) FROM tab WHERE b_countmin = true; SELECT count(*) FROM tab WHERE b_uniq = true; SELECT count(*) FROM tab WHERE b = 'true'; SELECT count(*) FROM tab WHERE b_tdigest = 'true'; -SELECT count(*) FROM tab WHERE b_count_min = 'true'; +SELECT 
count(*) FROM tab WHERE b_minmax = 'true'; +SELECT count(*) FROM tab WHERE b_countmin = 'true'; SELECT count(*) FROM tab WHERE b_uniq = 'true'; SELECT count(*) FROM tab WHERE b = 1; SELECT count(*) FROM tab WHERE b_tdigest = 1; -SELECT count(*) FROM tab WHERE b_count_min = 1; +SELECT count(*) FROM tab WHERE b_minmax = 1; +SELECT count(*) FROM tab WHERE b_countmin = 1; SELECT count(*) FROM tab WHERE b_uniq = 1; SELECT count(*) FROM tab WHERE b = 1.1; SELECT count(*) FROM tab WHERE b_tdigest = 1.1; -SELECT count(*) FROM tab WHERE b_count_min = 1.1; +SELECT count(*) FROM tab WHERE b_minmax = 1.1; +SELECT count(*) FROM tab WHERE b_countmin = 1.1; SELECT count(*) FROM tab WHERE b_uniq = 1.1; -- s ---------------------------------------------------- @@ -203,12 +237,14 @@ SELECT 's and ='; SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE } -- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported -SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE } --- SELECT count(*) FROM tab WHERE s_uniq = 7; -- not supported +-- SELECT count(*) FROM tab WHERE s_minmax = 7; -- not supported +SELECT count(*) FROM tab WHERE s_countmin = 7; -- { serverError NO_COMMON_TYPE } +SELECT count(*) FROM tab WHERE s_uniq = 7; -- { serverError NO_COMMON_TYPE } SELECT count(*) FROM tab WHERE s = '7'; -- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported -SELECT count(*) FROM tab WHERE s_count_min = '7'; --- SELECT count(*) FROM tab WHERE s_uniq = '7'; -- not supported +-- SELECT count(*) FROM tab WHERE s_minmax = '7'; -- not supported +SELECT count(*) FROM tab WHERE s_countmin = '7'; +SELECT count(*) FROM tab WHERE s_uniq = '7'; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh index b9603e75d2e..466f0d01a7f 100755 --- a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh +++ b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh @@ -5,6 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh +CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3 --database_replicated_allow_replicated_engine_arguments 3" ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test_exception_replicated SYNC" diff --git a/tests/queries/0_stateless/02916_to_start_of_interval_with_origin.reference b/tests/queries/0_stateless/02916_to_start_of_interval_with_origin.reference new file mode 100644 index 00000000000..f0afdf03963 --- /dev/null +++ b/tests/queries/0_stateless/02916_to_start_of_interval_with_origin.reference @@ -0,0 +1,59 @@ +-- Negative tests +Time and origin as Date +2023-02-01 +2023-08-01 +2023-10-08 +2023-10-08 +2023-10-09 +Time and origin as Date32 +2023-02-01 +2023-08-01 +2023-10-08 +2023-10-08 +2023-10-09 +Time and origin as DateTime +2023-02-01 09:08:07 +2023-08-01 09:08:07 +2023-10-08 09:08:07 +2023-10-08 09:08:07 +2023-10-09 09:08:07 +2023-10-09 10:10:07 +2023-10-09 10:11:07 +2023-10-09 10:11:12 +Time and origin as DateTime64(9) +2023-02-01 09:08:07.123456789 +2023-08-01 09:08:07.123456789 +2023-09-10 09:08:07.123456789 +2023-10-08 09:08:07.123456789 +2023-10-09 09:08:07.123456789 +2023-10-09 10:10:07.123456789 +2023-10-09 10:11:11.123456789 +2023-10-09 10:11:12.123456789 +2023-10-09 10:11:12.987 +2023-10-09 10:11:12.987654 +2023-10-09 10:11:12.987654321 +Time and origin as DateTime64(3) +2023-02-01 09:08:07.123 +2023-08-01 09:08:07.123 +2023-10-08 09:08:07.123 +2023-10-08 09:08:07.123 +2023-10-09 09:08:07.123 +2023-10-09 10:10:07.123 +2023-10-09 10:11:11.123 +2023-10-09 10:11:12.123 +2023-10-09 10:11:12.987 +2023-10-09 10:11:12.987000 +2023-10-09 10:11:12.987000000 +Non-const arguments +2023-03-01 16:55:00.00 +2023-02-01 16:55:00.00 +2023-03-01 16:55:00.00 +2023-02-01 16:55:00.00 +2023-03-01 16:55:00.00 +2023-03-01 16:55:00 +2023-02-01 16:55:00 +2023-03-01 16:55:00 +2023-02-01 16:55:00 +2023-03-01 16:55:00 +2023-01-02 15:44:30 +2023-02-01 16:44:30.00 diff --git a/tests/queries/0_stateless/02916_to_start_of_interval_with_origin.sql b/tests/queries/0_stateless/02916_to_start_of_interval_with_origin.sql new file mode 100644 index 00000000000..b03ccae31d9 --- /dev/null +++ b/tests/queries/0_stateless/02916_to_start_of_interval_with_origin.sql @@ -0,0 +1,95 @@ +set session_timezone = 'UTC'; + +SELECT '-- Negative tests'; + +-- time and origin arguments must have the same type +SELECT toStartOfInterval(toDate('2023-01-02 14:45:50'), toIntervalSecond(5), toDate32('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-01-02 14:45:50'), toIntervalMillisecond(12), toDateTime('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-01-02 14:45:50'), toIntervalHour(5), toDate('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalMinute(1), toDateTime64('2023-01-02 14:44:30', 2)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime64('2023-01-02 14:45:50', 2), toIntervalMinute(1), toDate('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- the origin must be before the time +SELECT toStartOfInterval(toDateTime('2023-01-02 14:42:50'), toIntervalMinute(1), toDateTime('2023-01-02 14:44:30')); -- { serverError BAD_ARGUMENTS } + +-- the origin must be constant +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalMinute(1), number % 2 == 0 ? 
toDateTime('2023-02-01 15:55:00') : toDateTime('2023-01-01 15:55:00')) from numbers(1); -- { serverError ILLEGAL_COLUMN } +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalHour(1), materialize(toDateTime('2023-01-02 14:44:30')), 'Europe/Amsterdam'); -- { serverError ILLEGAL_COLUMN } + +-- with 4 arguments, the 3rd one must not be a string or an integer +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), 'Europe/Amsterdam', 'Europe/Amsterdam'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), 5, 'Europe/Amsterdam'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- too many arguments +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), toDateTime('2020-01-02 14:44:30'), 'Europe/Amsterdam', 5); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 'Time and origin as Date'; +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalYear(1), toDate('2022-02-01')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalQuarter(1), toDate('2022-02-01')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMonth(1), toDate('2023-09-08')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalWeek(1), toDate('2023-10-01')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalDay(1), toDate('2023-10-08')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalHour(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMinute(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalSecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMillisecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMicrosecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalNanosecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Time and origin as Date32'; +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalYear(1), toDate32('2022-02-01')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalQuarter(1), toDate32('2022-02-01')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMonth(1), toDate32('2023-09-08')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalWeek(1), toDate32('2023-10-01')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalDay(1), toDate32('2023-10-08')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalHour(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMinute(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalSecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMillisecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMicrosecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalNanosecond(1), toDate32('2023-10-09')); -- { 
serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Time and origin as DateTime'; +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1), toDateTime('2022-02-01 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalQuarter(1), toDateTime('2022-02-01 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMonth(1), toDateTime('2023-09-08 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalWeek(1), toDateTime('2023-10-01 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalDay(1), toDateTime('2023-10-08 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalHour(1), toDateTime('2023-10-09 09:10:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMinute(1), toDateTime('2023-10-09 09:10:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalSecond(1), toDateTime('2023-10-09 09:10:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMillisecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMicrosecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalNanosecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Time and origin as DateTime64(9)'; +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalYear(1), toDateTime64('2022-02-01 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalQuarter(1), toDateTime64('2022-02-01 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMonth(1), toDateTime64('2023-09-10 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalWeek(1), toDateTime64('2023-10-01 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalDay(1), toDateTime64('2023-10-08 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalHour(1), toDateTime64('2023-10-09 09:10:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMinute(1), toDateTime64('2023-10-09 09:10:11.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalSecond(1), toDateTime64('2023-10-09 10:11:10.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMillisecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMicrosecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalNanosecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9)); + +SELECT 'Time and origin as DateTime64(3)'; +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalYear(1), toDateTime64('2022-02-01 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalQuarter(1), toDateTime64('2022-02-01 09:08:07.123', 3)); +SELECT 
toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMonth(1), toDateTime64('2023-09-08 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalWeek(1), toDateTime64('2023-10-01 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalDay(1), toDateTime64('2023-10-08 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalHour(1), toDateTime64('2023-10-09 09:10:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMinute(1), toDateTime64('2023-10-09 10:10:11.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalSecond(1), toDateTime64('2023-10-09 10:11:10.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMillisecond(1), toDateTime64('2023-10-09 10:11:12.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMicrosecond(1), toDateTime64('2023-10-09 10:11:12.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalNanosecond(1), toDateTime64('2023-10-09 10:11:12.123', 3)); + +SELECT 'Non-const arguments'; +SELECT toStartOfInterval(number % 2 == 0 ? toDateTime64('2023-03-01 15:55:00', 2) : toDateTime64('2023-02-01 15:55:00', 2), toIntervalMinute(1), toDateTime64('2023-01-01 13:55:00', 2), 'Europe/Amsterdam') from numbers(5); +SELECT toStartOfInterval(number % 2 == 0 ? toDateTime('2023-03-01 15:55:00') : toDateTime('2023-02-01 15:55:00'), toIntervalHour(1), toDateTime('2023-01-01 13:55:00'), 'Europe/Amsterdam') from numbers(5); +SELECT toStartOfInterval(materialize(toDateTime('2023-01-02 14:45:50')), toIntervalHour(1), toDateTime('2023-01-02 14:44:30'), 'Europe/Amsterdam'); +SELECT toStartOfInterval(materialize(toDateTime64('2023-02-01 15:45:50', 2)), toIntervalHour(1), toDateTime64('2023-01-02 14:44:30', 2), 'Europe/Amsterdam'); diff --git a/tests/queries/0_stateless/02933_sqid.reference b/tests/queries/0_stateless/02933_sqid.reference index a559bacb0ac..4597e2347e3 100644 --- a/tests/queries/0_stateless/02933_sqid.reference +++ b/tests/queries/0_stateless/02933_sqid.reference @@ -13,5 +13,6 @@ Td1EnWQo [1,2,3,4] XMbT -- invalid sqid [] +-- bug 69450 -- alias XMbT diff --git a/tests/queries/0_stateless/02933_sqid.sql b/tests/queries/0_stateless/02933_sqid.sql index 81d4b2bc35c..822fe33df51 100644 --- a/tests/queries/0_stateless/02933_sqid.sql +++ b/tests/queries/0_stateless/02933_sqid.sql @@ -25,5 +25,12 @@ SELECT sqidEncode(toNullable(materialize(1)), toLowCardinality(materialize(2))); SELECT '-- invalid sqid'; SELECT sqidDecode('invalid sqid'); +SELECT '-- bug 69450'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (id String) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab SELECT * FROM generateRandom() LIMIT 1000000; +SELECT sqidDecode(id) FROM tab FORMAT Null; +DROP TABLE tab; + SELECT '-- alias'; SELECT sqid(1, 2); diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference new file mode 100644 index 00000000000..cdbe7cfcb3e --- /dev/null +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference @@ -0,0 +1,15 @@ +1 +{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]} +{"data":[1, "a"]} +{"data":[2, "b"]} +{"data":[3, "c"]} +{"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":33}} +2 +{"meta": 
[{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]} +{"data":["a", "1"]} +{"data":["b", "1"]} +{"data":["c", "1"]} +{"totals": ["", "3"]} +{"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":30}} +3 +Value passed to 'throwIf' function is non-zero: diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh new file mode 100755 index 00000000000..b15dc7cfdb2 --- /dev/null +++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" + +$CLICKHOUSE_CLIENT -q "SELECT 1;" +# Check JSONCompactWithProgress Output +$CLICKHOUSE_CLIENT -q "CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value;" +$CLICKHOUSE_CLIENT -q "INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');" +$CLICKHOUSE_CLIENT -q "SELECT * FROM test_table FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' + +$CLICKHOUSE_CLIENT -q "SELECT 2;" +# Check Totals +$CLICKHOUSE_CLIENT -q "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g' + +$CLICKHOUSE_CLIENT -q "SELECT 3;" +# Check exceptions +${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "SELECT throwIf(number = 15), 1::Int64 as a, '\"' from numbers(100) format JSONCompactWithProgress settings output_format_json_quote_64bit_integers=1, max_block_size=10" | grep "exception" | cut -c42-88 + +$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;" diff --git a/tests/queries/0_stateless/03175_sparse_and_skip_index.reference b/tests/queries/0_stateless/03175_sparse_and_skip_index.reference new file mode 100644 index 00000000000..619e98a152a --- /dev/null +++ b/tests/queries/0_stateless/03175_sparse_and_skip_index.reference @@ -0,0 +1,4 @@ +key Sparse +value Sparse +1000 +1 diff --git a/tests/queries/0_stateless/03175_sparse_and_skip_index.sql b/tests/queries/0_stateless/03175_sparse_and_skip_index.sql new file mode 100644 index 00000000000..4de6d1ac6df --- /dev/null +++ b/tests/queries/0_stateless/03175_sparse_and_skip_index.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t_bloom_filter; +CREATE TABLE t_bloom_filter( + key UInt64, + value UInt64, + + INDEX key_bf key TYPE bloom_filter(0.01) GRANULARITY 2147483648, -- bloom filter on sorting key column + INDEX value_bf value TYPE bloom_filter(0.01) GRANULARITY 2147483648 -- bloom filter on no-sorting column +) ENGINE=MergeTree ORDER BY key +SETTINGS + -- settings to trigger sparse serialization and vertical merge + ratio_of_defaults_for_sparse_serialization = 0.0 + ,vertical_merge_algorithm_min_rows_to_activate = 1 + ,vertical_merge_algorithm_min_columns_to_activate = 1 + ,allow_vertical_merges_from_compact_to_wide_parts = 1 + ,min_bytes_for_wide_part=0 +; + +SYSTEM STOP MERGES t_bloom_filter; + +-- Create at least one part +INSERT INTO t_bloom_filter +SELECT + number % 100 as key, -- 100 unique keys + rand() % 100 as value -- 100 unique values +FROM numbers(50_000); + +-- And another part +INSERT INTO t_bloom_filter +SELECT + number % 100 as key, -- 100 unique keys + rand() % 100 as value -- 100 unique 
values +FROM numbers(50_000, 50_000); + +SYSTEM START MERGES t_bloom_filter; + +-- Merge everything into a single part +OPTIMIZE TABLE t_bloom_filter FINAL; + +-- Check sparse serialization +SELECT column, serialization_kind FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_bloom_filter' AND active ORDER BY column; + +SELECT COUNT() FROM t_bloom_filter WHERE key = 1; + +-- Check bloom filter non-zero size +SELECT COUNT() FROM system.parts WHERE database = currentDatabase() AND table = 't_bloom_filter' AND secondary_indices_uncompressed_bytes > 200 AND active; diff --git a/tests/queries/0_stateless/03205_overlay.sql b/tests/queries/0_stateless/03205_overlay.sql index 765b29f93ec..b692cc0c5ab 100644 --- a/tests/queries/0_stateless/03205_overlay.sql +++ b/tests/queries/0_stateless/03205_overlay.sql @@ -35,10 +35,10 @@ SELECT overlay('Spark SQL', materialize('ANSI '), materialize(7), materialize(0) SELECT overlay(materialize('Spark SQL'), materialize('ANSI '), materialize(7), materialize(0)), overlayUTF8(materialize('Spark SQL和CH'), materialize('ANSI '), materialize(7), materialize(0)); SELECT 'Test with special offset values'; -WITH number - 12 AS offset SELECT offset, overlay('Spark SQL', '__', offset), overlayUTF8('Spark SQL和CH', '之', offset) FROM numbers(26); +WITH number - 12 AS offset SELECT offset, overlay('Spark SQL', '__', offset), overlayUTF8('Spark SQL和CH', '之', offset) FROM numbers(26) ORDER BY number; SELECT 'Test with special length values'; -WITH number - 1 AS length SELECT length, overlay('Spark SQL', 'ANSI ', 7, length), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, length) FROM numbers(8); +WITH number - 1 AS length SELECT length, overlay('Spark SQL', 'ANSI ', 7, length), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, length) FROM numbers(8) ORDER BY number; SELECT 'Test with special input and replace values'; SELECT overlay('', '_', 6), overlayUTF8('', '_', 6); diff --git a/tests/queries/0_stateless/03221_incomplete_utf8_sequence.reference b/tests/queries/0_stateless/03221_incomplete_utf8_sequence.reference new file mode 100644 index 00000000000..4577427251d --- /dev/null +++ b/tests/queries/0_stateless/03221_incomplete_utf8_sequence.reference @@ -0,0 +1,16 @@ +{ + "meta": + [ + { + "name": "unhex('f0')", + "type": "String" + } + ], + + "data": + [ + ["�"] + ], + + "rows": 1 +} diff --git a/tests/queries/0_stateless/03221_incomplete_utf8_sequence.sql b/tests/queries/0_stateless/03221_incomplete_utf8_sequence.sql new file mode 100644 index 00000000000..ee4f25f3b4a --- /dev/null +++ b/tests/queries/0_stateless/03221_incomplete_utf8_sequence.sql @@ -0,0 +1,2 @@ +SET output_format_write_statistics = 0; +SELECT unhex('f0') FORMAT JSONCompact; diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql new file mode 100644 index 00000000000..f207581f482 --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql @@ -0,0 +1,10 @@ +SET allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY (c0); +CREATE TABLE t1 (c0 Array(Dynamic), c1 Int) ENGINE = MergeTree() ORDER BY (c0); +INSERT INTO t1 (c0, c1) VALUES ([18446717433683171873], 13623876564923702671), 
([-4], 6111684076076982207); +SELECT 1 FROM t0 FINAL JOIN t1 ON TRUE; +DROP TABLE t0; +DROP TABLE t1; + diff --git a/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference new file mode 100644 index 00000000000..8dbf92d6590 --- /dev/null +++ b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference @@ -0,0 +1 @@ +{'Hello':'2020-01-01 00:00:00'} diff --git a/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql new file mode 100644 index 00000000000..484a16bb22f --- /dev/null +++ b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql @@ -0,0 +1,7 @@ +drop table if exists test; +create table test (map Map(String, DateTime)) engine=Memory; +set date_time_input_format='best_effort'; +insert into test values (map('Hello', '01/01/2020')); +select * from test; +drop table test; + diff --git a/tests/queries/0_stateless/03232_pr_not_ready_set.reference b/tests/queries/0_stateless/03232_pr_not_ready_set.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03232_pr_not_ready_set.sql b/tests/queries/0_stateless/03232_pr_not_ready_set.sql new file mode 100644 index 00000000000..3b2d5d28cfb --- /dev/null +++ b/tests/queries/0_stateless/03232_pr_not_ready_set.sql @@ -0,0 +1,19 @@ +SYSTEM FLUSH LOGS; +SELECT + is_initial_query, + count() AS c, + replaceRegexpAll(query, '_data_(\\d+)_(\\d+)', '_data_') AS query +FROM system.query_log +WHERE (event_date >= yesterday()) AND (type = 'QueryFinish') AND (ignore(54, 0, ignore('QueryFinish', 11, toLowCardinality(toLowCardinality(11)), 11, 11, 11), 'QueryFinish', materialize(11), toUInt128(11)) IN ( + SELECT query_id + FROM system.query_log + WHERE (current_database = currentDatabase()) AND (event_date >= yesterday()) AND (type = 'QueryFinish') AND (query LIKE '-- Parallel inner query alone%') +)) +GROUP BY + is_initial_query, + query +ORDER BY + is_initial_query ASC, + c ASC, + query ASC +SETTINGS allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_min_number_of_rows_per_replica=10; diff --git a/tests/queries/0_stateless/03235_groupArray_string_consistency.reference b/tests/queries/0_stateless/03235_groupArray_string_consistency.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03235_groupArray_string_consistency.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03235_groupArray_string_consistency.sql b/tests/queries/0_stateless/03235_groupArray_string_consistency.sql new file mode 100644 index 00000000000..618ec6f839b --- /dev/null +++ b/tests/queries/0_stateless/03235_groupArray_string_consistency.sql @@ -0,0 +1,10 @@ +CREATE TABLE t (st FixedString(54)) ENGINE=MergeTree ORDER BY (); + +INSERT INTO t VALUES +('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTUVWXYZ'), +('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'), +('IIIIIIIIII\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'); + +WITH (SELECT groupConcat(',')(st) FROM t) AS a, + (SELECT groupConcat(',')(st :: 
String) FROM t) AS b +SELECT equals(a, b); diff --git a/tests/queries/1_stateful/00177_memory_bound_merging.sh b/tests/queries/1_stateful/00177_memory_bound_merging.sh index d5cd1a05cd8..564160bb251 100755 --- a/tests/queries/1_stateful/00177_memory_bound_merging.sh +++ b/tests/queries/1_stateful/00177_memory_bound_merging.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) check_replicas_read_in_order() { # NOTE: lack of "current_database = '$CLICKHOUSE_DATABASE'" filter is made on purpose - $CLICKHOUSE_CLIENT -nq " + $CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; SELECT COUNT() > 0 @@ -22,7 +22,7 @@ check_replicas_read_in_order() { # at some point we had a bug in this logic (see https://github.com/ClickHouse/ClickHouse/pull/45892#issue-1566140414) test1() { query_id="query_id_memory_bound_merging_$RANDOM$RANDOM" - $CLICKHOUSE_CLIENT --query_id="$query_id" -nq " + $CLICKHOUSE_CLIENT --query_id="$query_id" -q " SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; SELECT URL, EventDate, max(URL) @@ -39,7 +39,7 @@ test1() { # at some point we had a bug in this logic (see https://github.com/ClickHouse/ClickHouse/pull/45892#issue-1566140414) test2() { query_id="query_id_memory_bound_merging_$RANDOM$RANDOM" - $CLICKHOUSE_CLIENT --query_id="$query_id" -nq " + $CLICKHOUSE_CLIENT --query_id="$query_id" -q " SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; SELECT URL, EventDate, max(URL) @@ -53,7 +53,7 @@ test2() { } test3() { - $CLICKHOUSE_CLIENT -nq " + $CLICKHOUSE_CLIENT -q " SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; SET max_threads = 16, prefer_localhost_replica = 1, read_in_order_two_level_merge_threshold = 1000, query_plan_aggregation_in_order = 1, distributed_aggregation_memory_efficient = 1; diff --git a/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh b/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh index f9fea2c1dad..bf44f2d7ce7 100755 --- a/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh +++ b/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh @@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT -q "SELECT * FROM hits_s3_sampled WHERE URL LIKE '%google%' O query_id=02906_read_from_cache_$RANDOM $CLICKHOUSE_CLIENT --query_id ${query_id} -q "SELECT * FROM hits_s3_sampled WHERE URL LIKE '%google%' ORDER BY EventTime LIMIT 10 FORMAT Null SETTINGS filesystem_cache_reserve_space_wait_lock_timeout_milliseconds=2000" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; -- AsynchronousReaderIgnoredBytes = 0: no seek-avoiding happened diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index d10db5f0d3d..3467f21c812 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -24,6 +24,7 @@ Aggregatefunction AggregatingMergeTree AggregatorThreads AggregatorThreadsActive +AzureQueue Akka AlertManager Alexey @@ -120,6 +121,7 @@ CMPLNT CMake CMakeLists CODECS +CountMin COVID CPUFrequencyMHz CPUs @@ -151,6 +153,7 @@ ChannelID Cidr Ciphertext CityHash +CKibana Clangd ClickBench ClickCat @@ -247,6 +250,7 @@ DoubleDelta Doxygen Durre ECMA +ElasticSearch ETag Ecto EdgeAngle @@ -421,6 +425,7 @@ JSONCompactStringsEachRowWithNames JSONCompactStringsEachRowWithNamesAndTypes JSONDynamicPaths JSONDynamicPathsWithTypes +JSONCompactWithProgress 
JSONEachRow JSONEachRowWithProgress JSONExtract @@ -471,6 +476,7 @@ KeeperMap KeeperOutstandingRequests Kerberos Khanna +Kibana KittenHouse Klickhouse Kolmogorov @@ -975,6 +981,7 @@ ThreadPoolRemoteFSReaderThreads ThreadPoolRemoteFSReaderThreadsActive ThreadsActive ThreadsInOvercommitTracker +TimescaleDB's TimeSeries Timeunit TinyLog @@ -1367,6 +1374,7 @@ cipherList ciphertext cityHash cityhash +ckibana ckman clangd cli @@ -1915,6 +1923,7 @@ jsoncompactstrings jsoncompactstringseachrow jsoncompactstringseachrowwithnames jsoncompactstringseachrowwithnamesandtypes +jsoncompactwithprogress jsoneachrow jsoneachrowwithprogress jsonobjecteachrow @@ -2583,6 +2592,7 @@ sqlinsert sqlite sqrt src +dest srcReplicas sshkey stackoverflow diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 4fa387731ce..a621f5a7ddf 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,16 +1,22 @@ +v24.8.4.13-lts 2024-09-06 v24.8.3.59-lts 2024-09-03 v24.8.2.3-lts 2024-08-22 v24.8.1.2684-lts 2024-08-21 +v24.7.6.8-stable 2024-09-06 v24.7.5.37-stable 2024-09-03 v24.7.4.51-stable 2024-08-23 +v24.7.3.47-stable 2024-09-04 v24.7.3.42-stable 2024-08-08 v24.7.2.13-stable 2024-08-01 v24.7.1.2915-stable 2024-07-30 +v24.6.6.6-stable 2024-09-06 v24.6.5.30-stable 2024-09-03 v24.6.4.42-stable 2024-08-23 v24.6.3.95-stable 2024-08-06 +v24.6.3.38-stable 2024-09-04 v24.6.2.17-stable 2024-07-05 v24.6.1.4423-stable 2024-07-01 +v24.5.8.10-stable 2024-09-06 v24.5.7.31-stable 2024-09-03 v24.5.6.45-stable 2024-08-23 v24.5.5.78-stable 2024-08-05 @@ -22,6 +28,7 @@ v24.4.4.113-stable 2024-08-02 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 +v24.3.11.7-lts 2024-09-06 v24.3.10.33-lts 2024-09-03 v24.3.9.5-lts 2024-08-22 v24.3.8.13-lts 2024-08-20