diff --git a/contrib/icu-cmake/CMakeLists.txt b/contrib/icu-cmake/CMakeLists.txt
index adeaa7dcf33..afaa189701d 100644
--- a/contrib/icu-cmake/CMakeLists.txt
+++ b/contrib/icu-cmake/CMakeLists.txt
@@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
# These lists of sources were generated from build log of the original ICU build system (configure + make).
set(ICUUC_SOURCES
-"${ICU_SOURCE_DIR}/common/errorcode.cpp"
-"${ICU_SOURCE_DIR}/common/putil.cpp"
-"${ICU_SOURCE_DIR}/common/umath.cpp"
-"${ICU_SOURCE_DIR}/common/utypes.cpp"
-"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
-"${ICU_SOURCE_DIR}/common/umutex.cpp"
-"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
-"${ICU_SOURCE_DIR}/common/uinit.cpp"
-"${ICU_SOURCE_DIR}/common/uobject.cpp"
-"${ICU_SOURCE_DIR}/common/cmemory.cpp"
-"${ICU_SOURCE_DIR}/common/charstr.cpp"
-"${ICU_SOURCE_DIR}/common/cstr.cpp"
-"${ICU_SOURCE_DIR}/common/udata.cpp"
-"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
-"${ICU_SOURCE_DIR}/common/udatamem.cpp"
-"${ICU_SOURCE_DIR}/common/umapfile.cpp"
-"${ICU_SOURCE_DIR}/common/udataswp.cpp"
-"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
-"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
-"${ICU_SOURCE_DIR}/common/utrace.cpp"
-"${ICU_SOURCE_DIR}/common/uhash.cpp"
-"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
-"${ICU_SOURCE_DIR}/common/uenum.cpp"
-"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
-"${ICU_SOURCE_DIR}/common/uvector.cpp"
-"${ICU_SOURCE_DIR}/common/ustack.cpp"
-"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
-"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
-"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
-"${ICU_SOURCE_DIR}/common/resource.cpp"
-"${ICU_SOURCE_DIR}/common/uresbund.cpp"
-"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
-"${ICU_SOURCE_DIR}/common/uresdata.cpp"
-"${ICU_SOURCE_DIR}/common/resbund.cpp"
-"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
-"${ICU_SOURCE_DIR}/common/ucurr.cpp"
-"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
-"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
-"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
-"${ICU_SOURCE_DIR}/common/ucat.cpp"
-"${ICU_SOURCE_DIR}/common/locmap.cpp"
-"${ICU_SOURCE_DIR}/common/uloc.cpp"
-"${ICU_SOURCE_DIR}/common/locid.cpp"
-"${ICU_SOURCE_DIR}/common/locutil.cpp"
-"${ICU_SOURCE_DIR}/common/locavailable.cpp"
-"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
-"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
-"${ICU_SOURCE_DIR}/common/loclikely.cpp"
-"${ICU_SOURCE_DIR}/common/locresdata.cpp"
-"${ICU_SOURCE_DIR}/common/lsr.cpp"
-"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
-"${ICU_SOURCE_DIR}/common/locdistance.cpp"
-"${ICU_SOURCE_DIR}/common/localematcher.cpp"
-"${ICU_SOURCE_DIR}/common/bytestream.cpp"
-"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
-"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
-"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
-"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
-"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
-"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
-"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
-"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
-"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
-"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
-"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/appendable.cpp"
-"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
-"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
-"${ICU_SOURCE_DIR}/common/unistr.cpp"
-"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
-"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
-"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
-"${ICU_SOURCE_DIR}/common/ustring.cpp"
-"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
-"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
-"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
-"${ICU_SOURCE_DIR}/common/cstring.cpp"
-"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
-"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
-"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
-"${ICU_SOURCE_DIR}/common/utext.cpp"
-"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
-"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
-"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
-"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
-"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
-"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
-"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
-"${ICU_SOURCE_DIR}/common/normlzr.cpp"
-"${ICU_SOURCE_DIR}/common/unorm.cpp"
-"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
-"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
-"${ICU_SOURCE_DIR}/common/chariter.cpp"
-"${ICU_SOURCE_DIR}/common/schriter.cpp"
-"${ICU_SOURCE_DIR}/common/uchriter.cpp"
-"${ICU_SOURCE_DIR}/common/uiter.cpp"
-"${ICU_SOURCE_DIR}/common/patternprops.cpp"
-"${ICU_SOURCE_DIR}/common/uchar.cpp"
-"${ICU_SOURCE_DIR}/common/uprops.cpp"
-"${ICU_SOURCE_DIR}/common/ucase.cpp"
-"${ICU_SOURCE_DIR}/common/propname.cpp"
-"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
-"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
-"${ICU_SOURCE_DIR}/common/ubidi.cpp"
-"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
-"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
-"${ICU_SOURCE_DIR}/common/ushape.cpp"
-"${ICU_SOURCE_DIR}/common/uscript.cpp"
-"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
-"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
-"${ICU_SOURCE_DIR}/common/unames.cpp"
-"${ICU_SOURCE_DIR}/common/utrie.cpp"
-"${ICU_SOURCE_DIR}/common/utrie2.cpp"
-"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
-"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
-"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/bmpset.cpp"
-"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
-"${ICU_SOURCE_DIR}/common/uset_props.cpp"
-"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
-"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
-"${ICU_SOURCE_DIR}/common/uset.cpp"
-"${ICU_SOURCE_DIR}/common/uniset.cpp"
-"${ICU_SOURCE_DIR}/common/usetiter.cpp"
-"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
-"${ICU_SOURCE_DIR}/common/caniter.cpp"
-"${ICU_SOURCE_DIR}/common/unifilt.cpp"
-"${ICU_SOURCE_DIR}/common/unifunct.cpp"
-"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
-"${ICU_SOURCE_DIR}/common/brkiter.cpp"
-"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/brkeng.cpp"
+"${ICU_SOURCE_DIR}/common/brkiter.cpp"
+"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
+"${ICU_SOURCE_DIR}/common/bytestream.cpp"
+"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
+"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
+"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
+"${ICU_SOURCE_DIR}/common/caniter.cpp"
+"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
+"${ICU_SOURCE_DIR}/common/chariter.cpp"
+"${ICU_SOURCE_DIR}/common/charstr.cpp"
+"${ICU_SOURCE_DIR}/common/cmemory.cpp"
+"${ICU_SOURCE_DIR}/common/cstr.cpp"
+"${ICU_SOURCE_DIR}/common/cstring.cpp"
+"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/dictbe.cpp"
+"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
+"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
+"${ICU_SOURCE_DIR}/common/edits.cpp"
+"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
+"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
+"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
+"${ICU_SOURCE_DIR}/common/icudataver.cpp"
+"${ICU_SOURCE_DIR}/common/icuplug.cpp"
+"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
+"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
+"${ICU_SOURCE_DIR}/common/localematcher.cpp"
+"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
+"${ICU_SOURCE_DIR}/common/locavailable.cpp"
+"${ICU_SOURCE_DIR}/common/locbased.cpp"
+"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
+"${ICU_SOURCE_DIR}/common/locdistance.cpp"
+"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
+"${ICU_SOURCE_DIR}/common/locid.cpp"
+"${ICU_SOURCE_DIR}/common/loclikely.cpp"
+"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
+"${ICU_SOURCE_DIR}/common/locmap.cpp"
+"${ICU_SOURCE_DIR}/common/locresdata.cpp"
+"${ICU_SOURCE_DIR}/common/locutil.cpp"
+"${ICU_SOURCE_DIR}/common/lsr.cpp"
+"${ICU_SOURCE_DIR}/common/lstmbe.cpp"
+"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
+"${ICU_SOURCE_DIR}/common/mlbe.cpp"
+"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
+"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
+"${ICU_SOURCE_DIR}/common/normlzr.cpp"
+"${ICU_SOURCE_DIR}/common/parsepos.cpp"
+"${ICU_SOURCE_DIR}/common/patternprops.cpp"
+"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
+"${ICU_SOURCE_DIR}/common/propname.cpp"
+"${ICU_SOURCE_DIR}/common/propsvec.cpp"
+"${ICU_SOURCE_DIR}/common/punycode.cpp"
+"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/rbbi.cpp"
+"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/rbbidata.cpp"
"${ICU_SOURCE_DIR}/common/rbbinode.cpp"
"${ICU_SOURCE_DIR}/common/rbbirb.cpp"
@@ -178,166 +80,180 @@ set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
"${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
"${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
-"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
+"${ICU_SOURCE_DIR}/common/resbund.cpp"
+"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
+"${ICU_SOURCE_DIR}/common/resource.cpp"
+"${ICU_SOURCE_DIR}/common/restrace.cpp"
+"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
+"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/serv.cpp"
-"${ICU_SOURCE_DIR}/common/servnotf.cpp"
-"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servlk.cpp"
"${ICU_SOURCE_DIR}/common/servlkf.cpp"
+"${ICU_SOURCE_DIR}/common/servls.cpp"
+"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servrbf.cpp"
"${ICU_SOURCE_DIR}/common/servslkf.cpp"
-"${ICU_SOURCE_DIR}/common/uidna.cpp"
-"${ICU_SOURCE_DIR}/common/usprep.cpp"
-"${ICU_SOURCE_DIR}/common/uts46.cpp"
-"${ICU_SOURCE_DIR}/common/punycode.cpp"
-"${ICU_SOURCE_DIR}/common/util.cpp"
-"${ICU_SOURCE_DIR}/common/util_props.cpp"
-"${ICU_SOURCE_DIR}/common/parsepos.cpp"
-"${ICU_SOURCE_DIR}/common/locbased.cpp"
-"${ICU_SOURCE_DIR}/common/cwchar.cpp"
-"${ICU_SOURCE_DIR}/common/wintz.cpp"
-"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
-"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
-"${ICU_SOURCE_DIR}/common/propsvec.cpp"
-"${ICU_SOURCE_DIR}/common/ulist.cpp"
-"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
-"${ICU_SOURCE_DIR}/common/icudataver.cpp"
-"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/sharedobject.cpp"
"${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
-"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
-"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
-"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
-"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
-"${ICU_SOURCE_DIR}/common/restrace.cpp"
-"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
-"${ICU_SOURCE_DIR}/common/lstmbe.cpp")
+"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
+"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
+"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
+"${ICU_SOURCE_DIR}/common/ubidi.cpp"
+"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
+"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
+"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
+"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
+"${ICU_SOURCE_DIR}/common/ubrk.cpp"
+"${ICU_SOURCE_DIR}/common/ucase.cpp"
+"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
+"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
+"${ICU_SOURCE_DIR}/common/ucat.cpp"
+"${ICU_SOURCE_DIR}/common/uchar.cpp"
+"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
+"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
+"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
+"${ICU_SOURCE_DIR}/common/uchriter.cpp"
+"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
+"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
+"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
+"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
+"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
+"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
+"${ICU_SOURCE_DIR}/common/ucurr.cpp"
+"${ICU_SOURCE_DIR}/common/udata.cpp"
+"${ICU_SOURCE_DIR}/common/udatamem.cpp"
+"${ICU_SOURCE_DIR}/common/udataswp.cpp"
+"${ICU_SOURCE_DIR}/common/uenum.cpp"
+"${ICU_SOURCE_DIR}/common/uhash.cpp"
+"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
+"${ICU_SOURCE_DIR}/common/uidna.cpp"
+"${ICU_SOURCE_DIR}/common/uinit.cpp"
+"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
+"${ICU_SOURCE_DIR}/common/uiter.cpp"
+"${ICU_SOURCE_DIR}/common/ulist.cpp"
+"${ICU_SOURCE_DIR}/common/uloc.cpp"
+"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
+"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
+"${ICU_SOURCE_DIR}/common/ulocale.cpp"
+"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
+"${ICU_SOURCE_DIR}/common/umapfile.cpp"
+"${ICU_SOURCE_DIR}/common/umath.cpp"
+"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
+"${ICU_SOURCE_DIR}/common/umutex.cpp"
+"${ICU_SOURCE_DIR}/common/unames.cpp"
+"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
+"${ICU_SOURCE_DIR}/common/unifilt.cpp"
+"${ICU_SOURCE_DIR}/common/unifunct.cpp"
+"${ICU_SOURCE_DIR}/common/uniset.cpp"
+"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
+"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
+"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
+"${ICU_SOURCE_DIR}/common/unistr.cpp"
+"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
+"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
+"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
+"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
+"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
+"${ICU_SOURCE_DIR}/common/unorm.cpp"
+"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
+"${ICU_SOURCE_DIR}/common/uobject.cpp"
+"${ICU_SOURCE_DIR}/common/uprops.cpp"
+"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
+"${ICU_SOURCE_DIR}/common/uresbund.cpp"
+"${ICU_SOURCE_DIR}/common/uresdata.cpp"
+"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
+"${ICU_SOURCE_DIR}/common/uscript.cpp"
+"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
+"${ICU_SOURCE_DIR}/common/uset.cpp"
+"${ICU_SOURCE_DIR}/common/uset_props.cpp"
+"${ICU_SOURCE_DIR}/common/usetiter.cpp"
+"${ICU_SOURCE_DIR}/common/ushape.cpp"
+"${ICU_SOURCE_DIR}/common/usprep.cpp"
+"${ICU_SOURCE_DIR}/common/ustack.cpp"
+"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
+"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
+"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
+"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
+"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
+"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
+"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
+"${ICU_SOURCE_DIR}/common/ustring.cpp"
+"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
+"${ICU_SOURCE_DIR}/common/utext.cpp"
+"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
+"${ICU_SOURCE_DIR}/common/util.cpp"
+"${ICU_SOURCE_DIR}/common/util_props.cpp"
+"${ICU_SOURCE_DIR}/common/utrace.cpp"
+"${ICU_SOURCE_DIR}/common/utrie.cpp"
+"${ICU_SOURCE_DIR}/common/utrie2.cpp"
+"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
+"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
+"${ICU_SOURCE_DIR}/common/uts46.cpp"
+"${ICU_SOURCE_DIR}/common/utypes.cpp"
+"${ICU_SOURCE_DIR}/common/uvector.cpp"
+"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
+"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
+"${ICU_SOURCE_DIR}/common/wintz.cpp")
set(ICUI18N_SOURCES
-"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
-"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
-"${ICU_SOURCE_DIR}/i18n/format.cpp"
-"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
-"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/unum.cpp"
-"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
-"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
-"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
-"${ICU_SOURCE_DIR}/i18n/udat.cpp"
-"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
-"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
-"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
-"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
-"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
-"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
-"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
-"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
-"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
-"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
-"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
-"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
-"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
-"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
+"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
+"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/astro.cpp"
-"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
+"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
+"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
+"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
-"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
-"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
-"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
-"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
-"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
-"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
-"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
+"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
+"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/cecal.cpp"
-"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
-"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
-"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
+"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
+"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/coll.cpp"
-"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
-"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
-"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
-"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
-"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
-"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/collation.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
-"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
-"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
-"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
-"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
-"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
-"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
-"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
-"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
-"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
-"${ICU_SOURCE_DIR}/i18n/search.cpp"
-"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
-"${ICU_SOURCE_DIR}/i18n/translit.cpp"
-"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
-"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
-"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
+"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
+"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
+"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
-"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
-"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
-"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/quant.cpp"
-"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
-"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
-"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
-"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
-"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
-"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
-"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
-"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
-"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
-"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
-"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
-"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
-"${ICU_SOURCE_DIR}/i18n/measure.cpp"
-"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
@@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
"${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
-"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
-"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
-"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
-"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
-"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
-"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
-"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
-"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
-"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
-"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
-"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
-"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
+"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
+"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
+"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
+"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
+"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
+"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
+"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
+"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
+"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
+"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
-"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
-"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
-"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
-"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
-"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
-"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
-"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
-"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
-"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
-"${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
-"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
-"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
+"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
+"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
+"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
+"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
+"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
+"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
+"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
+"${ICU_SOURCE_DIR}/i18n/format.cpp"
+"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
+"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
+"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
+"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
"${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
-"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
-"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
-"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
-"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
-"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
-"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
-"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
-"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
+"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/gender.cpp"
-"${ICU_SOURCE_DIR}/i18n/region.cpp"
-"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
-"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
-"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
-"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
+"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
+"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
+"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
+"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
+"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
+"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
+"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
+"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
+"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
+"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit.cpp"
-"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
-"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
-"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
+"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
+"${ICU_SOURCE_DIR}/i18n/measure.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
+"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
+"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
+"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
+"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
+"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
+"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
+"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
@@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
"${ICU_SOURCE_DIR}/i18n/number_output.cpp"
"${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
@@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
-"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
-"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
-"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
-"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
-"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
-"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
-"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
-"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
-"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
-"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
-"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
-"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
-"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
-"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
-"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
-"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
-"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
+"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
+"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
+"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
+"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
+"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
+"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
+"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
+"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
+"${ICU_SOURCE_DIR}/i18n/quant.cpp"
+"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
+"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
+"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
+"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
+"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
+"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
+"${ICU_SOURCE_DIR}/i18n/region.cpp"
+"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
+"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
+"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
+"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
+"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
+"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
+"${ICU_SOURCE_DIR}/i18n/search.cpp"
+"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
+"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
+"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
+"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
+"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
+"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
+"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
+"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
+"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
+"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
+"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
+"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
+"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
+"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
+"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
+"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
+"${ICU_SOURCE_DIR}/i18n/translit.cpp"
+"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
+"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
+"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
+"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
+"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
+"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
+"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
+"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
+"${ICU_SOURCE_DIR}/i18n/udat.cpp"
+"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
+"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
+"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
+"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
+"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
+"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
+"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
+"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
+"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_data.cpp"
-"${ICU_SOURCE_DIR}/i18n/units_router.cpp")
+"${ICU_SOURCE_DIR}/i18n/units_router.cpp"
+"${ICU_SOURCE_DIR}/i18n/unum.cpp"
+"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
+"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
+"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
+"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
+"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
+"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
+"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
+"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
+"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
+"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
+"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
+"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
+"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
+"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
+"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
+"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
+"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
+"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
+"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
+"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
+"${ICU_SOURCE_DIR}/i18n/ztrans.cpp")
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md
index 74f1295ef96..0bbee5f86f3 100644
--- a/docs/en/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md
@@ -995,34 +995,42 @@ They can be used for prewhere optimization only if we enable `set allow_statisti
The minimum and maximum column value which allows to estimate the selectivity of range filters on numeric columns.
+ Syntax: `minmax`
+
- `TDigest`
[TDigest](https://github.com/tdunning/t-digest) sketches which allow to compute approximate percentiles (e.g. the 90th percentile) for numeric columns.
+ Syntax: `tdigest`
+
- `Uniq`
[HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation how many distinct values a column contains.
-- `count_min`
+ Syntax: `uniq`
- [Count-min](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
+- `CountMin`
+
+ [CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
+
+ Syntax: `countmin`
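+
+For illustration, a hypothetical table declaring these statistics types per column (assuming the experimental statistics feature is enabled via `allow_experimental_statistics`; table and column names are made up):
+
+```sql
+SET allow_experimental_statistics = 1;
+
+CREATE TABLE tab
+(
+    a Int64 STATISTICS(minmax, tdigest),
+    b String STATISTICS(countmin, uniq)
+)
+ENGINE = MergeTree
+ORDER BY a;
+```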
### Supported Data Types {#supported-data-types}
-| | (U)Int* | Float* | Decimal(*) | Date* | Boolean | Enum* | (Fixed)String |
-|-----------|---------|--------|------------|-------|---------|-------|------------------|
-| count_min | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| MinMax | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ |
-| TDigest | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ |
-| Uniq | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
+|-----------|----------------------------------------------------|-----------------------|
+| CountMin | ✔ | ✔ |
+| MinMax | ✔ | ✗ |
+| TDigest | ✔ | ✗ |
+| Uniq | ✔ | ✔ |
### Supported Operations {#supported-operations}
| | Equality filters (==) | Range filters (>, >=, <, <=) |
|-----------|-----------------------|------------------------------|
-| count_min | ✔ | ✗ |
+| CountMin | ✔ | ✗ |
| MinMax | ✗ | ✔ |
| TDigest | ✗ | ✔ |
| Uniq | ✔ | ✗ |
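+
+For instance, a hypothetical query combining both operation types on the `tab` table sketched earlier: the range filter on `a` can be estimated with `minmax`/`tdigest`, and the equality filter on `b` with `countmin`/`uniq`:
+
+```sql
+SELECT count() FROM tab WHERE a < 1000 AND b = 'value';
+```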
diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md
index 8892c6d8d3f..df96b8129f1 100644
--- a/docs/en/interfaces/formats.md
+++ b/docs/en/interfaces/formats.md
@@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
+| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
@@ -988,6 +989,59 @@ Example:
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
+## JSONCompactWithProgress {#jsoncompactwithprogress}
+
+In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.
+
+Each row is either a metadata object, a data object, a progress object, a statistics object, an exception object, or a totals object:
+
+1. **Metadata Object (`meta`)**
+ - Describes the structure of the data rows.
+ - Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
+ - Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
+ - Appears before any data objects.
+
+2. **Data Object (`data`)**
+ - Represents a row of query results.
+ - Fields: An array with values corresponding to the columns defined in the metadata.
+ - Example: `{"data":["1", "John Doe"]}`
+ - Appears after the metadata object, one per row.
+
+3. **Progress Information Object (`progress`)**
+ - Provides real-time progress feedback during query execution.
+ - Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
+ - Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
+ - May appear intermittently.
+
+4. **Statistics Object (`statistics`)**
+ - Summarizes query execution statistics.
+ - Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
+ - Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
+ - Appears at the end.
+
+5. **Exception Object (`exception`)**
+ - Represents an error that occurred during query execution.
+ - Fields: A single text field containing the error message.
+ - Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
+ - Appears when an error is encountered.
+
+6. **Totals Object (`totals`)**
+ - Provides the totals for each numeric column in the result set.
+ - Fields: An array with total values corresponding to the columns defined in the metadata.
+ - Example: `{"totals": ["", "3"]}`
+ - Appears at the end of the data rows, if applicable.
+
+Example:
+
+```json
+{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
+{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
+{"data":["1", "John Doe"]}
+{"data":["2", "Joe Doe"]}
+{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
+```
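+
+As an illustrative sketch, output like the above could come from a query such as the following (the `users` table is hypothetical):
+
+```sql
+SELECT id, name FROM users FORMAT JSONCompactWithProgress;
+```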
+
+
## JSONEachRow {#jsoneachrow}
In this format, ClickHouse outputs each row as a separated, newline-delimited JSON Object.
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 9fce83a0dc4..ccc8cf017ca 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -1463,26 +1463,29 @@ Examples:
## logger {#logger}
-Logging settings.
+The location and format of log messages.
Keys:
-- `level` – Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
-- `log` – The log file. Contains all the entries according to `level`.
-- `errorlog` – Error log file.
-- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
-- `count` – The number of archived log files that ClickHouse stores.
-- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
-- `console_log_level` – Logging level for console. Default to `level`.
-- `use_syslog` - Log to syslog as well.
-- `syslog_level` - Logging level for logging to syslog.
-- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
-- `formatting` – Specify log format to be printed in console log (currently only `json` supported).
+- `level` – Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`,
+ `debug`, `trace`, `test`.
+- `log` – The path to the log file.
+- `errorlog` – The path to the error log file.
+- `size` – Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
+- `count` – Rotation policy: The maximum number of historical log files ClickHouse keeps.
+- `stream_compress` – Compress log messages using LZ4. Set to `1` or `true` to enable.
+- `console` – Print log messages to the console instead of writing them to log files. Set to `1` or `true` to enable. Defaults to
+ `1` if ClickHouse does not run in daemon mode, `0` otherwise.
+- `console_log_level` – Log level for console output. Defaults to `level`.
+- `formatting` – Log format for console output. Currently, only `json` is supported.
+- `use_syslog` – Also forward log output to syslog.
+- `syslog_level` – Log level for logging to syslog.
-Both log and error log file names (only file names, not directories) support date and time format specifiers.
+**Log format specifiers**
-**Format specifiers**
-Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
+File names in `log` and `errorlog` paths support the format specifiers below for the resulting file name (the directory part does not support them).
+
+Column “Example” shows the output at `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
@@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin
```
-Writing to the console can be configured. Config example:
+To print log messages only in the console:
``` xml
<logger>
<level>information</level>
- <console>1</console>
+ <console>true</console>
+</logger>
```
+
+**Per-level Overrides**
+
+The log level of individual loggers can be overridden. For example, to mute all messages of the loggers "Backup" and "RBAC":
+
+```xml
+<levels>
+    <logger>
+        <name>Backup</name>
+        <level>none</level>
+    </logger>
+    <logger>
+        <name>RBAC</name>
+        <level>none</level>
+    </logger>
+</levels>
+```
### syslog
-Writing to the syslog is also supported. Config example:
+To write log messages additionally to syslog:
``` xml
<clickhouse>
    <logger>
        <use_syslog>1</use_syslog>
        <syslog>
            <address>syslog.remote:10514</address>
            <hostname>myhost.local</hostname>
            <facility>LOG_LOCAL6</facility>
            <format>syslog</format>
        </syslog>
    </logger>
</clickhouse>
```

@@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example:
-Keys for syslog:
+Keys for `<syslog>`:
-- use_syslog — Required setting if you want to write to the syslog.
-- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
-- hostname — Optional. The name of the host that logs are sent from.
-- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
- Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
-- format – Message format. Possible values: `bsd` and `syslog.`
+- `address` — The address of the syslog daemon in format `host\[:port\]`. If omitted, the local daemon is used.
+- `hostname` — The name of the host from which logs are sent. Optional.
+- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
+- `format` – Log message format. Possible values: `bsd` and `syslog`.
### Log formats
@@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log. Curren
"source_line": "192"
}
```
+
To enable JSON logging support, use the following snippet:
```xml
diff --git a/docs/en/sql-reference/statements/select/from.md b/docs/en/sql-reference/statements/select/from.md
index 7a6e2ab054c..f319e7b1357 100644
--- a/docs/en/sql-reference/statements/select/from.md
+++ b/docs/en/sql-reference/statements/select/from.md
@@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:
Subquery is another `SELECT` query that may be specified in parenthesis inside `FROM` clause.
-`FROM` clause can contain multiple data sources, separated by commas, which is equivalent of performing [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
+The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
+
+`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:
+
+```sql
+FROM table
+SELECT *
+```
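+
+which is equivalent to:
+
+```sql
+SELECT *
+FROM table
+```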
## FINAL Modifier
@@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu
### Example Usage
-**Using the `FINAL` keyword**
+Using the `FINAL` keyword
```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```
-**Using `FINAL` as a query-level setting**
+Using `FINAL` as a query-level setting
```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```
-**Using `FINAL` as a session-level setting**
+Using `FINAL` as a session-level setting
```sql
SET final = 1;
diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h
index cef23f766c7..35d6e599e38 100644
--- a/src/AggregateFunctions/AggregateFunctionUniq.h
+++ b/src/AggregateFunctions/AggregateFunctionUniq.h
@@ -459,6 +459,8 @@ public:
bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; }
+ constexpr static bool parallelizeMergeWithKey() { return true; }
+
void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const override
{
if constexpr (is_parallelize_merge_prepare_needed)
diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h
index ee227db6d9d..f8e7051d635 100644
--- a/src/AggregateFunctions/IAggregateFunction.h
+++ b/src/AggregateFunctions/IAggregateFunction.h
@@ -145,6 +145,8 @@ public:
virtual bool isParallelizeMergePrepareNeeded() const { return false; }
+ constexpr static bool parallelizeMergeWithKey() { return false; }
+
virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic<bool> & /*is_cancelled*/) const
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName());
@@ -169,7 +171,7 @@ public:
/// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function
/// then destroy states (on which src places points to).
- virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
+ virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const = 0;
/// Serializes state (to transmit it over the network, for example).
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT
@@ -499,11 +501,15 @@ public:
static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
}
- void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
+ void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const override
{
for (size_t i = 0; i < size; ++i)
{
- static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
+ if constexpr (Derived::parallelizeMergeWithKey())
+ static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena);
+ else
+ static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
+
static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
}
}
diff --git a/src/AggregateFunctions/UniqExactSet.h b/src/AggregateFunctions/UniqExactSet.h
index 2ae8c3a8386..25c6f7ac55f 100644
--- a/src/AggregateFunctions/UniqExactSet.h
+++ b/src/AggregateFunctions/UniqExactSet.h
@@ -101,6 +101,13 @@ public:
auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic<bool> * is_cancelled = nullptr)
{
+ /// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel.
+ if (other.size() > 40000)
+ {
+ if (isSingleLevel())
+ convertToTwoLevel();
+ }
+
if (isSingleLevel() && other.isTwoLevel())
convertToTwoLevel();
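
The merge path above backs the `uniqExact` aggregate state, so the early conversion to a two-level hash mainly pays off for queries that merge many large distinct-value states, e.g. (an illustrative query, not part of this diff):

```sql
SELECT uniqExact(number) FROM numbers_mt(10000000) SETTINGS max_threads = 8;
```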
diff --git a/src/Backups/RestorerFromBackup.cpp b/src/Backups/RestorerFromBackup.cpp
index 278af9d4eb3..5e6beec791d 100644
--- a/src/Backups/RestorerFromBackup.cpp
+++ b/src/Backups/RestorerFromBackup.cpp
@@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name)
table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database);
DatabasePtr database = table_info.database;
+ auto query_context = Context::createCopy(context);
+ query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
+ query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
+
/// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
/// database-specific things).
database->createTableRestoredFromBackup(
create_table_query,
- context,
+ query_context,
restore_coordination,
std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
}
diff --git a/src/Common/StackTrace.cpp b/src/Common/StackTrace.cpp
index bd01b639913..3dce34803b2 100644
--- a/src/Common/StackTrace.cpp
+++ b/src/Common/StackTrace.cpp
@@ -67,10 +67,18 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc
= info.si_addr == nullptr ? "NULL pointer"s : (shouldShowAddress(info.si_addr) ? fmt::format("{}", info.si_addr) : ""s);
const std::string_view access =
-#if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__)
- (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
+#if defined(__arm__)
+ "";
+#elif defined(__powerpc__)
+ "";
+#elif defined(OS_DARWIN)
+ "";
+#elif defined(OS_FREEBSD)
+ "";
+#elif !defined(__x86_64__)
+ "";
#else
- "";
+ (context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#endif
std::string_view message;
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 8fd81efa8b4..95879cc3243 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -710,7 +710,8 @@ class IColumn;
M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \
M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
- M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
+ M(UInt64, database_replicated_allow_replicated_engine_arguments, 0, "0 - Don't allow to explicitly specify ZooKeeper path and replica name for *MergeTree tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified path and use default one instead.", 0) \
+ M(UInt64, database_replicated_allow_explicit_uuid, 0, "0 - Don't allow to explicitly specify UUIDs for tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified UUID and generate a random one instead.", 0) \
M(Bool, database_replicated_allow_heavy_create, false, "Allow long-running DDL queries (CREATE AS SELECT and POPULATE) in Replicated database engine. Note that it can block DDL queue for a long time.", 0) \
M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \
M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \
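
To illustrate the new `database_replicated_allow_explicit_uuid` setting (database and table names below are hypothetical): with the default `0`, a statement like the following is rejected inside a `Replicated` database; with `1` it is allowed; with `2` the given UUID is ignored and a random one is generated:

```sql
CREATE TABLE replicated_db.t
UUID '123e4567-e89b-12d3-a456-426614174000'
(x Int32)
ENGINE = MergeTree
ORDER BY x;
```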
diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp
index 3d0ecc32f79..dae45ba937a 100644
--- a/src/Core/SettingsChangesHistory.cpp
+++ b/src/Core/SettingsChangesHistory.cpp
@@ -77,6 +77,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory::SettingsChanges>> settings_changes_history =
+ {"database_replicated_allow_replicated_engine_arguments", 1, 0, "Don't allow explicit ZooKeeper path and replica name by default"},
+ {"database_replicated_allow_explicit_uuid", 0, 0, "Don't allow explicit UUIDs by default"},
diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp
--- a/src/Databases/DatabaseReplicated.cpp
+++ b/src/Databases/DatabaseReplicated.cpp
@@ ... @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
 String replica_host_id;
- if (current_zookeeper->tryGet(replica_path, replica_host_id))
+ bool replica_exists_in_zk = current_zookeeper->tryGet(replica_path, replica_host_id);
+ if (replica_exists_in_zk)
{
if (replica_host_id == DROPPED_MARK && !is_create_query)
{
@@ -454,7 +455,7 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
String host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
String host_id_default = getHostID(getContext(), db_uuid, false);
- if (is_create_query || (replica_host_id != host_id && replica_host_id != host_id_default))
+ if (replica_host_id != host_id && replica_host_id != host_id_default)
{
throw Exception(
ErrorCodes::REPLICA_ALREADY_EXISTS,
@@ -484,13 +485,20 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
current_zookeeper->set(replica_path + "/replica_group", replica_group_name, -1);
createEmptyLogEntry(current_zookeeper);
}
+
+ /// Needed to mark all the queries
+ /// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful.
+ String max_log_ptr_at_creation_str;
+ if (current_zookeeper->tryGet(replica_path + "/max_log_ptr_at_creation", max_log_ptr_at_creation_str))
+ max_log_ptr_at_creation = parse<UInt32>(max_log_ptr_at_creation_str);
}
- else if (is_create_query)
+
+ if (is_create_query)
{
- /// Create new replica. Throws if replica with the same name already exists
+ /// Create replica nodes in ZooKeeper. If newly initialized nodes already exist, reuse them.
createReplicaNodesInZooKeeper(current_zookeeper);
}
- else
+ else if (!replica_exists_in_zk)
{
/// It's not CREATE query, but replica does not exist. Probably it was dropped.
/// Do not create anything, continue as readonly.
@@ -606,37 +614,84 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt
"already contains some data and it does not look like Replicated database path.", zookeeper_path);
/// Write host name to replica_path, it will protect from multiple replicas with the same name
- auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
+ const auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
+
+ const std::vector<String> check_paths = {
+ replica_path,
+ replica_path + "/replica_group",
+ replica_path + "/digest",
+ };
+ bool nodes_exist = true;
+ auto check_responses = current_zookeeper->tryGet(check_paths);
+ for (size_t i = 0; i < check_responses.size(); ++i)
+ {
+ const auto response = check_responses[i];
+
+ if (response.error == Coordination::Error::ZNONODE)
+ {
+ nodes_exist = false;
+ break;
+ }
+ else if (response.error != Coordination::Error::ZOK)
+ {
+ throw zkutil::KeeperException::fromPath(response.error, check_paths[i]);
+ }
+ }
+
+ if (nodes_exist)
+ {
+ const std::vector<String> expected_data = {
+ host_id,
+ replica_group_name,
+ "0",
+ };
+ for (size_t i = 0; i != expected_data.size(); ++i)
+ {
+ if (check_responses[i].data != expected_data[i])
+ {
+ throw Exception(
+ ErrorCodes::REPLICA_ALREADY_EXISTS,
+ "Replica node {} in ZooKeeper already exists and contains unexpected value: {}",
+ quoteString(check_paths[i]), quoteString(check_responses[i].data));
+ }
+ }
+
+ LOG_DEBUG(log, "Newly initialized replica nodes found in ZooKeeper, reusing them");
+ createEmptyLogEntry(current_zookeeper);
+ return;
+ }
for (int attempts = 10; attempts > 0; --attempts)
{
Coordination::Stat stat;
- String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat);
+ const String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat);
- Coordination::Requests ops;
- ops.emplace_back(zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent));
- ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent));
- ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent));
- ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent));
- /// In addition to creating the replica nodes, we record the max_log_ptr at the instant where
- /// we declared ourself as an existing replica. We'll need this during recoverLostReplica to
- /// notify other nodes that issued new queries while this node was recovering.
- ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version));
+ const Coordination::Requests ops = {
+ zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent),
+ zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent),
+ zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent),
+ zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent),
+
+ /// Previously this method was not idempotent, and max_log_ptr_at_creation was only kept in memory.
+ /// We store max_log_ptr_at_creation in ZooKeeper so that replica creation stays idempotent across retries.
+ zkutil::makeCreateRequest(replica_path + "/max_log_ptr_at_creation", max_log_ptr_str, zkutil::CreateMode::Persistent),
+ zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version),
+ };
+
+ Coordination::Responses ops_responses;
+ const auto code = current_zookeeper->tryMulti(ops, ops_responses);
- Coordination::Responses responses;
- const auto code = current_zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZOK)
{
max_log_ptr_at_creation = parse<UInt32>(max_log_ptr_str);
- break;
+ createEmptyLogEntry(current_zookeeper);
+ return;
}
- else if (code == Coordination::Error::ZNODEEXISTS || attempts == 1)
+
+ if (attempts == 1)
{
- /// If its our last attempt, or if the replica already exists, fail immediately.
- zkutil::KeeperMultiException::check(code, ops, responses);
+ zkutil::KeeperMultiException::check(code, ops, ops_responses);
}
}
- createEmptyLogEntry(current_zookeeper);
}
void DatabaseReplicated::beforeLoadingMetadata(ContextMutablePtr context_, LoadingStrictnessLevel mode)
@@ -852,18 +907,6 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
bool maybe_replica_macros = info.expanded_other;
bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros");
- if (!enable_functional_tests_helper)
- {
- if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments)
- LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
- else
- throw Exception(ErrorCodes::INCORRECT_QUERY,
- "It's not allowed to specify explicit zookeeper_path and replica_name "
- "for ReplicatedMergeTree arguments in Replicated database. If you really want to "
- "specify them explicitly, enable setting "
- "database_replicated_allow_replicated_engine_arguments.");
- }
-
if (maybe_shard_macros && maybe_replica_macros)
return;
@@ -876,7 +919,9 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
return;
}
- throw Exception(ErrorCodes::INCORRECT_QUERY,
+ /// If the setting is 2, the explicit arguments will be replaced with the default ones (see registerStorageMergeTree)
+ if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments != 2)
+ throw Exception(ErrorCodes::INCORRECT_QUERY,
"Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. "
"If you really want to specify it explicitly, then you should use some macros "
"to distinguish different shards and replicas");
@@ -1145,6 +1190,9 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
/// so we need to allow experimental features that can be used in a CREATE query
enableAllExperimentalSettings(query_context);
+ query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
+ query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
+
auto txn = std::make_shared<ZooKeeperMetadataTransaction>(current_zookeeper, zookeeper_path, false, "");
query_context->initZooKeeperMetadataTransaction(txn);
return query_context;
diff --git a/src/Formats/JSONUtils.cpp b/src/Formats/JSONUtils.cpp
index 9d898cd2470..123f2e4f608 100644
--- a/src/Formats/JSONUtils.cpp
+++ b/src/Formats/JSONUtils.cpp
@@ -483,6 +483,33 @@ namespace JSONUtils
writeArrayEnd(out, 1);
}
+
+ void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out)
+ {
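+ /// Emits, for example: "meta":[{"name":"id","type":"UInt64"},{"name":"s","type":"String"}]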
+ writeCompactArrayStart(out, 0, "meta");
+
+ for (size_t i = 0; i < names.size(); ++i)
+ {
+ writeCompactObjectStart(out);
+ writeTitle("name", out, 0, "");
+
+ /// The field names are pre-escaped to be put into JSON string literal.
+ writeChar('"', out);
+ writeString(names[i], out);
+ writeChar('"', out);
+
+ writeFieldCompactDelimiter(out);
+ writeTitle("type", out, 0, "");
+ writeJSONString(types[i]->getName(), out, settings);
+ writeCompactObjectEnd(out);
+
+ if (i + 1 < names.size())
+ writeFieldCompactDelimiter(out);
+ }
+
+ writeCompactArrayEnd(out);
+ }
+
void writeAdditionalInfo(
size_t rows,
size_t rows_before_limit,
@@ -530,6 +557,45 @@ namespace JSONUtils
}
}
+ void writeCompactAdditionalInfo(
+ size_t rows,
+ size_t rows_before_limit,
+ bool applied_limit,
+ const Stopwatch & watch,
+ const Progress & progress,
+ bool write_statistics,
+ WriteBuffer & out)
+ {
+ writeCompactObjectStart(out);
+ writeCompactObjectStart(out, 0, "statistics");
+ writeTitle("rows", out, 0, "");
+ writeIntText(rows, out);
+
+ if (applied_limit)
+ {
+ writeFieldCompactDelimiter(out);
+ writeTitle("rows_before_limit_at_least", out, 0, "");
+ writeIntText(rows_before_limit, out);
+ }
+
+ if (write_statistics)
+ {
+ writeFieldCompactDelimiter(out);
+ writeTitle("elapsed", out, 0, "");
+ writeText(watch.elapsedSeconds(), out);
+ writeFieldCompactDelimiter(out);
+
+ writeTitle("rows_read", out, 0, "");
+ writeText(progress.read_rows.load(), out);
+ writeFieldCompactDelimiter(out);
+
+ writeTitle("bytes_read", out, 0, "");
+ writeText(progress.read_bytes.load(), out);
+ }
+ writeCompactObjectEnd(out);
+ writeCompactObjectEnd(out);
+ }
+
void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent)
{
writeTitle("exception", out, indent, " ");
diff --git a/src/Formats/JSONUtils.h b/src/Formats/JSONUtils.h
index e2ac3467971..622703947b9 100644
--- a/src/Formats/JSONUtils.h
+++ b/src/Formats/JSONUtils.h
@@ -99,6 +99,7 @@ namespace JSONUtils
WriteBuffer & out);
void writeMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out);
+ void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out);
void writeAdditionalInfo(
size_t rows,
@@ -111,6 +112,15 @@ namespace JSONUtils
bool write_statistics,
WriteBuffer & out);
+ void writeCompactAdditionalInfo(
+ size_t rows,
+ size_t rows_before_limit,
+ bool applied_limit,
+ const Stopwatch & watch,
+ const Progress & progress,
+ bool write_statistics,
+ WriteBuffer & out);
+
void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent = 0);
void skipColon(ReadBuffer & in);
diff --git a/src/Formats/registerFormats.cpp b/src/Formats/registerFormats.cpp
index 57ca1bb49c8..770b747fafd 100644
--- a/src/Formats/registerFormats.cpp
+++ b/src/Formats/registerFormats.cpp
@@ -95,6 +95,7 @@ void registerOutputFormatMarkdown(FormatFactory & factory);
void registerOutputFormatPostgreSQLWire(FormatFactory & factory);
void registerOutputFormatPrometheus(FormatFactory & factory);
void registerOutputFormatSQLInsert(FormatFactory & factory);
+void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory);
/// Input only formats.
@@ -242,6 +243,7 @@ void registerFormats()
registerOutputFormatCapnProto(factory);
registerOutputFormatPrometheus(factory);
registerOutputFormatSQLInsert(factory);
+ registerOutputFormatJSONCompactWithProgress(factory);
registerInputFormatRegexp(factory);
registerInputFormatJSONAsString(factory);
diff --git a/src/Functions/array/array.cpp b/src/Functions/array/array.cpp
index dfe589fb74f..d2aedd57f99 100644
--- a/src/Functions/array/array.cpp
+++ b/src/Functions/array/array.cpp
@@ -1,11 +1,15 @@
-#include
-#include
+#include
+#include
+#include
+#include
+#include
+#include
#include
#include
-#include
-#include
-#include
+#include
+#include
#include
+#include
namespace DB
@@ -44,11 +48,13 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
- size_t num_elements = arguments.size();
+ const size_t num_elements = arguments.size();
if (num_elements == 0)
+ {
/// We should return constant empty array.
return result_type->createColumnConstWithDefaultValue(input_rows_count);
+ }
const DataTypePtr & elem_type = static_cast<const DataTypeArray &>(*result_type).getNestedType();
@@ -60,7 +66,6 @@ public:
Columns columns_holder(num_elements);
ColumnRawPtrs column_ptrs(num_elements);
-
for (size_t i = 0; i < num_elements; ++i)
{
const auto & arg = arguments[i];
@@ -77,35 +82,199 @@ public:
}
/// Create and fill the result array.
-
auto out = ColumnArray::create(elem_type->createColumn());
IColumn & out_data = out->getData();
IColumn::Offsets & out_offsets = out->getOffsets();
- out_data.reserve(input_rows_count * num_elements);
- out_offsets.resize(input_rows_count);
-
+ /// Fill out_offsets
+ out_offsets.resize_exact(input_rows_count);
IColumn::Offset current_offset = 0;
for (size_t i = 0; i < input_rows_count; ++i)
{
- for (size_t j = 0; j < num_elements; ++j)
- out_data.insertFrom(*column_ptrs[j], i);
-
current_offset += num_elements;
out_offsets[i] = current_offset;
}
+ /// Fill out_data
+ out_data.reserve(input_rows_count * num_elements);
+ if (num_elements == 1)
+ out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count);
+ else
+ execute(column_ptrs, out_data, input_rows_count);
return out;
}
private:
+ bool execute(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ return executeNumber<UInt8>(columns, out_data, input_rows_count) || executeNumber<UInt16>(columns, out_data, input_rows_count)
+ || executeNumber<UInt32>(columns, out_data, input_rows_count) || executeNumber<UInt64>(columns, out_data, input_rows_count)
+ || executeNumber<UInt128>(columns, out_data, input_rows_count) || executeNumber<UInt256>(columns, out_data, input_rows_count)
+ || executeNumber<Int8>(columns, out_data, input_rows_count) || executeNumber<Int16>(columns, out_data, input_rows_count)
+ || executeNumber<Int32>(columns, out_data, input_rows_count) || executeNumber<Int64>(columns, out_data, input_rows_count)
+ || executeNumber<Int128>(columns, out_data, input_rows_count) || executeNumber<Int256>(columns, out_data, input_rows_count)
+ || executeNumber<Float32>(columns, out_data, input_rows_count) || executeNumber<Float64>(columns, out_data, input_rows_count)
+ || executeNumber<Decimal32>(columns, out_data, input_rows_count)
+ || executeNumber<Decimal64>(columns, out_data, input_rows_count)
+ || executeNumber<Decimal128>(columns, out_data, input_rows_count)
+ || executeNumber<Decimal256>(columns, out_data, input_rows_count)
+ || executeNumber<DateTime64>(columns, out_data, input_rows_count) || executeString(columns, out_data, input_rows_count)
+ || executeNullable(columns, out_data, input_rows_count) || executeTuple(columns, out_data, input_rows_count)
+ || executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count);
+ }
+
+ template <typename T>
+ bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ using Container = typename ColumnVectorOrDecimal<T>::Container;
+ std::vector<const Container *> containers(columns.size(), nullptr);
+ for (size_t i = 0; i < columns.size(); ++i)
+ {
+ const ColumnVectorOrDecimal<T> * concrete_column = checkAndGetColumn<ColumnVectorOrDecimal<T>>(columns[i]);
+ if (!concrete_column)
+ return false;
+
+ containers[i] = &concrete_column->getData();
+ }
+
+ ColumnVectorOrDecimal<T> & concrete_out_data = assert_cast<ColumnVectorOrDecimal<T> &>(out_data);
+ Container & out_container = concrete_out_data.getData();
+ out_container.resize_exact(columns.size() * input_rows_count);
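+ /// The result is interleaved: row i of the output array occupies out_container[i * columns.size(), (i + 1) * columns.size()).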
+
+ for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
+ {
+ const size_t base = row_i * columns.size();
+ for (size_t col_i = 0; col_i < columns.size(); ++col_i)
+ out_container[base + col_i] = (*containers[col_i])[row_i];
+ }
+ return true;
+ }
+
+ bool executeString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ size_t total_bytes = 0;
+ std::vector<const ColumnString *> concrete_columns(columns.size(), nullptr);
+ for (size_t i = 0; i < columns.size(); ++i)
+ {
+ const ColumnString * concrete_column = checkAndGetColumn(columns[i]);
+ if (!concrete_column)
+ return false;
+
+ total_bytes += concrete_column->getChars().size();
+ concrete_columns[i] = concrete_column;
+ }
+
+ ColumnString & concrete_out_data = assert_cast(out_data);
+ auto & out_chars = concrete_out_data.getChars();
+ auto & out_offsets = concrete_out_data.getOffsets();
+ out_chars.resize_exact(total_bytes);
+ out_offsets.resize_exact(input_rows_count * columns.size());
+
+ size_t cur_out_offset = 0;
+ for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
+ {
+ const size_t base = row_i * columns.size();
+ for (size_t col_i = 0; col_i < columns.size(); ++col_i)
+ {
+ StringRef ref = concrete_columns[col_i]->getDataAt(row_i);
+ memcpySmallAllowReadWriteOverflow15(&out_chars[cur_out_offset], ref.data, ref.size);
+ out_chars[cur_out_offset + ref.size] = 0;
+
+ cur_out_offset += ref.size + 1;
+ out_offsets[base + col_i] = cur_out_offset;
+ }
+ }
+ return true;
+ }
+
+ bool executeFixedString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ std::vector<const ColumnFixedString *> concrete_columns(columns.size(), nullptr);
+ for (size_t i = 0; i < columns.size(); ++i)
+ {
+ const ColumnFixedString * concrete_column = checkAndGetColumn(columns[i]);
+ if (!concrete_column)
+ return false;
+
+ concrete_columns[i] = concrete_column;
+ }
+
+ ColumnFixedString & concrete_out_data = assert_cast(out_data);
+ auto & out_chars = concrete_out_data.getChars();
+
+ const size_t n = concrete_out_data.getN();
+ size_t total_bytes = n * columns.size() * input_rows_count;
+ out_chars.resize_exact(total_bytes);
+
+ size_t curr_out_offset = 0;
+ for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
+ {
+ for (size_t col_i = 0; col_i < columns.size(); ++col_i)
+ {
+ StringRef ref = concrete_columns[col_i]->getDataAt(row_i);
+ memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, n);
+ curr_out_offset += n;
+ }
+ }
+ return true;
+ }
+
+ bool executeNullable(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ ColumnRawPtrs null_maps(columns.size(), nullptr);
+ ColumnRawPtrs nested_columns(columns.size(), nullptr);
+ for (size_t i = 0; i < columns.size(); ++i)
+ {
+ const ColumnNullable * concrete_column = checkAndGetColumn(columns[i]);
+ if (!concrete_column)
+ return false;
+
+ null_maps[i] = &concrete_column->getNullMapColumn();
+ nested_columns[i] = &concrete_column->getNestedColumn();
+ }
+
+ ColumnNullable & concrete_out_data = assert_cast(out_data);
+ auto & out_null_map = concrete_out_data.getNullMapColumn();
+ auto & out_nested_column = concrete_out_data.getNestedColumn();
+ execute(null_maps, out_null_map, input_rows_count);
+ execute(nested_columns, out_nested_column, input_rows_count);
+ return true;
+ }
+
+ bool executeTuple(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ ColumnTuple * concrete_out_data = typeid_cast<ColumnTuple *>(&out_data);
+ if (!concrete_out_data)
+ return false;
+
+ const size_t tuple_size = concrete_out_data->tupleSize();
+ for (size_t i = 0; i < tuple_size; ++i)
+ {
+ ColumnRawPtrs elem_columns(columns.size(), nullptr);
+ for (size_t j = 0; j < columns.size(); ++j)
+ {
+ const ColumnTuple * concrete_column = assert_cast<const ColumnTuple *>(columns[j]);
+ elem_columns[j] = &concrete_column->getColumn(i);
+ }
+ execute(elem_columns, concrete_out_data->getColumn(i), input_rows_count);
+ }
+ return true;
+ }
+
+ bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
+ {
+ for (size_t i = 0; i < input_rows_count; ++i)
+ for (const auto * column : columns)
+ out_data.insertFrom(*column, i);
+ return true;
+ }
+
+
String getName() const override
{
return name;
}
- bool addField(DataTypePtr type_res, const Field & f, Array & arr) const;
-
bool use_variant_as_common_type = false;
};
diff --git a/src/Functions/map.cpp b/src/Functions/map.cpp
index 534f7c0d8cd..14672cfd568 100644
--- a/src/Functions/map.cpp
+++ b/src/Functions/map.cpp
@@ -2,6 +2,8 @@
#include
#include
#include
+#include <DataTypes/DataTypeArray.h>
+#include <Functions/FunctionFactory.h>
#include
#include
#include
@@ -13,7 +15,6 @@
#include
#include
#include
-#include
namespace DB
@@ -36,11 +37,18 @@ class FunctionMap : public IFunction
public:
static constexpr auto name = "map";
- explicit FunctionMap(bool use_variant_as_common_type_) : use_variant_as_common_type(use_variant_as_common_type_) {}
+ explicit FunctionMap(ContextPtr context_)
+ : context(context_)
+ , use_variant_as_common_type(
+ context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type)
+ , function_array(FunctionFactory::instance().get("array", context))
+ , function_map_from_arrays(FunctionFactory::instance().get("mapFromArrays", context))
+ {
+ }
static FunctionPtr create(ContextPtr context)
{
- return std::make_shared<FunctionMap>(context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type);
+ return std::make_shared<FunctionMap>(context);
}
String getName() const override
@@ -101,62 +109,38 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
size_t num_elements = arguments.size();
-
if (num_elements == 0)
return result_type->createColumnConstWithDefaultValue(input_rows_count);
+ ColumnsWithTypeAndName key_args;
+ ColumnsWithTypeAndName value_args;
+ for (size_t i = 0; i < num_elements; i += 2)
+ {
+ key_args.emplace_back(arguments[i]);
+ value_args.emplace_back(arguments[i+1]);
+ }
+
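+ /// E.g. map('k1', 1, 'k2', 2) is evaluated as mapFromArrays(array('k1', 'k2'), array(1, 2)).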
const auto & result_type_map = static_cast<const DataTypeMap &>(*result_type);
const DataTypePtr & key_type = result_type_map.getKeyType();
const DataTypePtr & value_type = result_type_map.getValueType();
+ const DataTypePtr & key_array_type = std::make_shared<DataTypeArray>(key_type);
+ const DataTypePtr & value_array_type = std::make_shared<DataTypeArray>(value_type);
- Columns columns_holder(num_elements);
- ColumnRawPtrs column_ptrs(num_elements);
+ /// key_array = array(args[0], args[2]...)
+ ColumnPtr key_array = function_array->build(key_args)->execute(key_args, key_array_type, input_rows_count);
+ /// value_array = array(args[1], args[3]...)
+ ColumnPtr value_array = function_array->build(value_args)->execute(value_args, value_array_type, input_rows_count);
- for (size_t i = 0; i < num_elements; ++i)
- {
- const auto & arg = arguments[i];
- const auto to_type = i % 2 == 0 ? key_type : value_type;
-
- ColumnPtr preprocessed_column = castColumn(arg, to_type);
- preprocessed_column = preprocessed_column->convertToFullColumnIfConst();
-
- columns_holder[i] = std::move(preprocessed_column);
- column_ptrs[i] = columns_holder[i].get();
- }
-
- /// Create and fill the result map.
-
- MutableColumnPtr keys_data = key_type->createColumn();
- MutableColumnPtr values_data = value_type->createColumn();
- MutableColumnPtr offsets = DataTypeNumber<UInt64>().createColumn();
-
- size_t total_elements = input_rows_count * num_elements / 2;
- keys_data->reserve(total_elements);
- values_data->reserve(total_elements);
- offsets->reserve(input_rows_count);
-
- IColumn::Offset current_offset = 0;
- for (size_t i = 0; i < input_rows_count; ++i)
- {
- for (size_t j = 0; j < num_elements; j += 2)
- {
- keys_data->insertFrom(*column_ptrs[j], i);
- values_data->insertFrom(*column_ptrs[j + 1], i);
- }
-
- current_offset += num_elements / 2;
- offsets->insert(current_offset);
- }
-
- auto nested_column = ColumnArray::create(
- ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}),
- std::move(offsets));
-
- return ColumnMap::create(nested_column);
+ /// result = mapFromArrays(key_array, value_array)
+ ColumnsWithTypeAndName map_args{{key_array, key_array_type, ""}, {value_array, value_array_type, ""}};
+ return function_map_from_arrays->build(map_args)->execute(map_args, result_type, input_rows_count);
}
private:
+ ContextPtr context;
bool use_variant_as_common_type = false;
+ FunctionOverloadResolverPtr function_array;
+ FunctionOverloadResolverPtr function_map_from_arrays;
};
/// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays or maps
@@ -173,6 +157,7 @@ public:
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
+ bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp
index d1aa8a0fff0..aff06c490c5 100644
--- a/src/Interpreters/Aggregator.cpp
+++ b/src/Interpreters/Aggregator.cpp
@@ -2371,7 +2371,7 @@ void NO_INLINE Aggregator::mergeDataNullKey(
template <typename Method, typename Table>
void NO_INLINE Aggregator::mergeDataImpl(
- Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch) const
+ Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const
{
if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization)
mergeDataNullKey(table_dst, table_src, arena);
@@ -2410,7 +2410,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
{
if (!is_aggregate_function_compiled[i])
aggregate_functions[i]->mergeAndDestroyBatch(
- dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
+ dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena);
}
return;
@@ -2420,7 +2420,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
for (size_t i = 0; i < params.aggregates_size; ++i)
{
aggregate_functions[i]->mergeAndDestroyBatch(
- dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
+ dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena);
}
}
@@ -2535,8 +2535,10 @@ void NO_INLINE Aggregator::mergeWithoutKeyDataImpl(
template <typename Method>
void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
- ManyAggregatedDataVariants & non_empty_data) const
+ ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const
{
+ ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads};
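+ /// The pool is passed down to mergeAndDestroyBatch so that merging the states of a single
+ /// aggregate function may be parallelized.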
+
AggregatedDataVariantsPtr & res = non_empty_data[0];
bool no_more_keys = false;
@@ -2557,13 +2559,13 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
- getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch);
+ getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch, thread_pool, is_cancelled);
}
else
#endif
{
mergeDataImpl<Method>(
- getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch);
+ getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch, thread_pool, is_cancelled);
}
}
else if (res->without_key)
@@ -2589,7 +2591,7 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
#define M(NAME) \
template void NO_INLINE Aggregator::mergeSingleLevelDataImpl<decltype(AggregatedDataVariants::NAME)::element_type>( \
- ManyAggregatedDataVariants & non_empty_data) const;
+ ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const;
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M
@@ -2597,6 +2599,8 @@ template <typename Method>
void NO_INLINE Aggregator::mergeBucketImpl(
ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic<bool> & is_cancelled) const
{
+ ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads};
+
/// We merge all aggregation results to the first.
AggregatedDataVariantsPtr & res = data[0];
@@ -2613,7 +2617,7 @@ void NO_INLINE Aggregator::mergeBucketImpl(
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
- getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch);
+ getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch, thread_pool, is_cancelled);
}
else
#endif
@@ -2623,7 +2627,9 @@ void NO_INLINE Aggregator::mergeBucketImpl(
getDataVariant<Method>(current).data.impls[bucket],
arena,
false,
- prefetch);
+ prefetch,
+ thread_pool,
+ is_cancelled);
}
}
}
diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h
index 2cb04fc7c51..4de0a640219 100644
--- a/src/Interpreters/Aggregator.h
+++ b/src/Interpreters/Aggregator.h
@@ -467,7 +467,7 @@ private:
/// Merge data from hash table `src` into `dst`.
template <typename Method, typename Table>
- void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch) const;
+ void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const;
/// Merge data from hash table `src` into `dst`, but only for keys that already exist in dst. In other cases, merge the data into `overflows`.
template <typename Method, typename Table>
@@ -490,7 +490,7 @@ private:
template <typename Method>
void mergeSingleLevelDataImpl(
- ManyAggregatedDataVariants & non_empty_data) const;
+ ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const;
template <bool return_single_block>
using ConvertToBlockRes = std::conditional_t<return_single_block, Block, BlocksList>;
diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
index d83c148ebad..c1f9b4637f8 100644
--- a/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/src/Interpreters/InterpreterCreateQuery.cpp
@@ -228,8 +228,8 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
metadata_path = metadata_path / "store" / DatabaseCatalog::getPathForUUID(create.uuid);
- if (!create.attach && fs::exists(metadata_path))
- throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists", metadata_path.string());
+ if (!create.attach && fs::exists(metadata_path) && !fs::is_empty(metadata_path))
+ throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists and is not empty", metadata_path.string());
}
else if (create.storage->engine->name == "MaterializeMySQL"
|| create.storage->engine->name == "MaterializedMySQL")
@@ -329,6 +329,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
writeChar('\n', statement_buf);
String statement = statement_buf.str();
+ /// Needed to make database creation retriable if it fails after the file is created
+ fs::remove(metadata_file_tmp_path);
+
/// Exclusive flag guarantees, that database is not created right now in another thread.
WriteBufferFromFile out(metadata_file_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL);
writeString(statement, out);
@@ -350,13 +353,6 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
DatabaseCatalog::instance().attachDatabase(database_name, database);
added = true;
- if (need_write_metadata)
- {
- /// Prevents from overwriting metadata of detached database
- renameNoReplace(metadata_file_tmp_path, metadata_file_path);
- renamed = true;
- }
-
if (!load_database_without_tables)
{
/// We use global context here, because storages lifetime is bigger than query context lifetime
@@ -368,6 +364,13 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
/// Only then prioritize, schedule and wait all the startup tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks);
}
+
+ if (need_write_metadata)
+ {
+ /// Prevents from overwriting metadata of detached database
+ renameNoReplace(metadata_file_tmp_path, metadata_file_path);
+ renamed = true;
+ }
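+ /// The metadata file is moved into place only after the tables are loaded successfully, so a failed
+ /// CREATE DATABASE leaves no permanent .sql file behind and the query can simply be retried.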
}
catch (...)
{
@@ -1226,6 +1229,27 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data
bool from_path = create.attach_from_path.has_value();
bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
+ if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !is_on_cluster && !create.attach)
+ {
+ if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0)
+ {
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "It's not allowed to explicitly specify UUIDs for tables in Replicated databases, "
+ "see database_replicated_allow_explicit_uuid");
+ }
+ else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 1)
+ {
+ LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "It's not recommended to explicitly specify UUIDs for tables in Replicated databases");
+ }
+ else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2)
+ {
+ UUID old_uuid = create.uuid;
+ create.uuid = UUIDHelpers::Nil;
+ create.generateRandomUUIDs();
+ LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) "
+ "to make sure it's unique", old_uuid, create.uuid);
+ }
+ }
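+ /// E.g. with database_replicated_allow_explicit_uuid = 2, CREATE TABLE ... UUID '...' succeeds,
+ /// but the table silently gets a freshly generated UUID instead of the specified one.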
+
if (is_replicated_database_internal && !internal)
{
if (create.uuid == UUIDHelpers::Nil)
diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp
index 7e1b4e2fb0e..737353095b8 100644
--- a/src/Interpreters/convertFieldToType.cpp
+++ b/src/Interpreters/convertFieldToType.cpp
@@ -164,7 +164,7 @@ Field convertDecimalType(const Field & from, const To & type)
}
-Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint)
+Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
if (from_type_hint && from_type_hint->equals(type))
{
@@ -359,7 +359,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
Array res(src_arr_size);
for (size_t i = 0; i < src_arr_size; ++i)
{
- res[i] = convertFieldToType(src_arr[i], element_type);
+ res[i] = convertFieldToType(src_arr[i], element_type, nullptr, format_settings);
if (res[i].isNull() && !canContainNull(element_type))
{
// See the comment for Tuples below.
@@ -387,7 +387,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
for (size_t i = 0; i < dst_tuple_size; ++i)
{
const auto & element_type = *(type_tuple->getElements()[i]);
- res[i] = convertFieldToType(src_tuple[i], element_type);
+ res[i] = convertFieldToType(src_tuple[i], element_type, nullptr, format_settings);
if (res[i].isNull() && !canContainNull(element_type))
{
/*
@@ -435,12 +435,12 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
Tuple updated_entry(2);
- updated_entry[0] = convertFieldToType(key, key_type);
+ updated_entry[0] = convertFieldToType(key, key_type, nullptr, format_settings);
if (updated_entry[0].isNull() && !canContainNull(key_type))
have_unconvertible_element = true;
- updated_entry[1] = convertFieldToType(value, value_type);
+ updated_entry[1] = convertFieldToType(value, value_type, nullptr, format_settings);
if (updated_entry[1].isNull() && !canContainNull(value_type))
have_unconvertible_element = true;
@@ -551,7 +551,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
ReadBufferFromString in_buffer(src.safeGet<String>());
try
{
- type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{});
+ type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, format_settings);
}
catch (Exception & e)
{
@@ -563,7 +563,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
}
Field parsed = (*col)[0];
- return convertFieldToType(parsed, type, from_type_hint);
+ return convertFieldToType(parsed, type, from_type_hint, format_settings);
}
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch in IN or VALUES section. Expected: {}. Got: {}",
@@ -573,7 +573,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
}
-Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
+Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
if (from_value.isNull())
return from_value;
@@ -582,7 +582,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
return from_value;
if (const auto * low_cardinality_type = typeid_cast<const DataTypeLowCardinality *>(&to_type))
- return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint);
+ return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint, format_settings);
else if (const auto * nullable_type = typeid_cast<const DataTypeNullable *>(&to_type))
{
const IDataType & nested_type = *nullable_type->getNestedType();
@@ -593,20 +593,20 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
if (from_type_hint && from_type_hint->equals(nested_type))
return from_value;
- return convertFieldToTypeImpl(from_value, nested_type, from_type_hint);
+ return convertFieldToTypeImpl(from_value, nested_type, from_type_hint, format_settings);
}
else
- return convertFieldToTypeImpl(from_value, to_type, from_type_hint);
+ return convertFieldToTypeImpl(from_value, to_type, from_type_hint, format_settings);
}
-Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
+Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
bool is_null = from_value.isNull();
if (is_null && !canContainNull(to_type))
throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName());
- Field converted = convertFieldToType(from_value, to_type, from_type_hint);
+ Field converted = convertFieldToType(from_value, to_type, from_type_hint, format_settings);
if (!is_null && converted.isNull())
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND,
@@ -626,9 +626,9 @@ static bool decimalEqualsFloat(Field field, Float64 float_value)
return decimal_to_float == float_value;
}
-std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type)
+std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings)
{
- Field result_value = convertFieldToType(from_value, to_type, &from_type);
+ Field result_value = convertFieldToType(from_value, to_type, &from_type, format_settings);
if (Field::isDecimal(from_value.getType()) && Field::isDecimal(result_value.getType()))
{
diff --git a/src/Interpreters/convertFieldToType.h b/src/Interpreters/convertFieldToType.h
index 4aa09f8619e..c3c6271a157 100644
--- a/src/Interpreters/convertFieldToType.h
+++ b/src/Interpreters/convertFieldToType.h
@@ -1,6 +1,7 @@
#pragma once
#include <Core/Field.h>
+#include <Formats/FormatSettings.h>
namespace DB
@@ -15,13 +16,13 @@ class IDataType;
* Checks for the compatibility of types, checks values fall in the range of valid values of the type, makes type conversion.
* If the value does not fall into the range - returns Null.
*/
-Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr);
+Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {});
/// Does the same, but throws ARGUMENT_OUT_OF_BOUND if value does not fall into the range.
-Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr);
+Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {});
/// Applies stricter rules than convertFieldToType, doesn't allow loss of precision converting to Decimal.
/// Returns `Field` if the conversion was successful and the result is equal to the original value, otherwise returns nullopt.
-std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type);
+std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings = {});
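+
+/// Note: format_settings controls how string Fields are parsed into the target type,
+/// e.g. date_time_input_format is respected when a string is converted to DateTime.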
}
diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp
new file mode 100644
index 00000000000..e90864ecdf3
--- /dev/null
+++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.cpp
@@ -0,0 +1,154 @@
+#include <IO/WriteHelpers.h>
+#include <Formats/JSONUtils.h>
+#include <Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h>
+
+#include <Formats/FormatFactory.h>
+
+
+namespace DB
+{
+
+JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat(
+ WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_)
+ : JSONRowOutputFormat(out_, header, settings_, yield_strings_)
+{
+}
+
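+/// A rough sketch of the emitted stream (one JSON object per line; the exact "progress" fields
+/// come from Progress::writeJSON):
+///   {"meta":[{"name":"id","type":"UInt64"},{"name":"s","type":"String"}]}
+///   {"progress":{"read_rows":"3","read_bytes":"24",...}}
+///   {"data":[1,"a"]}
+///   {"data":[2,"b"]}
+///   {"statistics":{"rows":2,"elapsed":0.001,"rows_read":3,"bytes_read":24}}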
+void JSONCompactWithProgressRowOutputFormat::writePrefix()
+{
+ JSONUtils::writeCompactObjectStart(*ostr);
+ JSONUtils::writeCompactMetadata(names, types, settings, *ostr);
+ JSONUtils::writeCompactObjectEnd(*ostr);
+ writeCString("\n", *ostr);
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num)
+{
+ JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr);
+ ++field_number;
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeFieldDelimiter()
+{
+ JSONUtils::writeFieldCompactDelimiter(*ostr);
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeRowStartDelimiter()
+{
+ if (has_progress)
+ writeProgress();
+ writeCString("{\"data\":", *ostr);
+ JSONUtils::writeCompactArrayStart(*ostr);
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeRowEndDelimiter()
+{
+ JSONUtils::writeCompactArrayEnd(*ostr);
+ writeCString("}\n", *ostr);
+ field_number = 0;
+ ++row_count;
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeRowBetweenDelimiter()
+{
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals()
+{
+ JSONUtils::writeCompactObjectStart(*ostr);
+ JSONUtils::writeCompactArrayStart(*ostr, 0, "totals");
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns, size_t row_num)
+{
+ JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr);
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeAfterTotals()
+{
+ JSONUtils::writeCompactArrayEnd(*ostr);
+ JSONUtils::writeCompactObjectEnd(*ostr);
+ writeCString("\n", *ostr);
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num)
+{
+ JSONUtils::writeCompactArrayStart(*ostr, 2, title);
+ JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr);
+ JSONUtils::writeCompactArrayEnd(*ostr);
+}
+
+void JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value)
+{
+ statistics.progress.incrementPiecewiseAtomically(value);
+ String progress_line;
+ WriteBufferFromString buf(progress_line);
+ writeCString("{\"progress\":", buf);
+ statistics.progress.writeJSON(buf);
+ writeCString("}\n", buf);
+ buf.finalize();
+ std::lock_guard lock(progress_lines_mutex);
+ progress_lines.emplace_back(std::move(progress_line));
+ has_progress = true;
+}
+
+
+void JSONCompactWithProgressRowOutputFormat::flush()
+{
+ if (has_progress)
+ writeProgress();
+ JSONRowOutputFormat::flush();
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeSuffix()
+{
+ if (has_progress)
+ writeProgress();
+}
+
+void JSONCompactWithProgressRowOutputFormat::writeProgress()
+{
+ std::lock_guard lock(progress_lines_mutex);
+ for (const auto & progress_line : progress_lines)
+ writeString(progress_line, *ostr);
+ progress_lines.clear();
+ has_progress = false;
+}
+
+void JSONCompactWithProgressRowOutputFormat::finalizeImpl()
+{
+ if (exception_message.empty())
+ {
+ JSONUtils::writeCompactAdditionalInfo(
+ row_count,
+ statistics.rows_before_limit,
+ statistics.applied_limit,
+ statistics.watch,
+ statistics.progress,
+ settings.write_statistics,
+ *ostr);
+ }
+ else
+ {
+ JSONUtils::writeCompactObjectStart(*ostr);
+ JSONUtils::writeException(exception_message, *ostr, settings, 0);
+ JSONUtils::writeCompactObjectEnd(*ostr);
+ }
+ writeCString("\n", *ostr);
+ ostr->next();
+}
+
+void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory)
+{
+ factory.registerOutputFormat(
+ "JSONCompactWithProgress",
+ [](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings)
+ { return std::make_shared<JSONCompactWithProgressRowOutputFormat>(buf, sample, format_settings, false); });
+
+ factory.registerOutputFormat(
+ "JSONCompactWithProgressStrings",
+ [](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings)
+ { return std::make_shared<JSONCompactWithProgressRowOutputFormat>(buf, sample, format_settings, true); });
+}
+
+}
diff --git a/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h
new file mode 100644
index 00000000000..1c21914d8cb
--- /dev/null
+++ b/src/Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h
@@ -0,0 +1,50 @@
+#pragma once
+
+#include <Core/Block.h>
+#include <IO/Progress.h>
+#include <Processors/Formats/Impl/JSONRowOutputFormat.h>
+#include <mutex>
+
+
+namespace DB
+{
+
+struct FormatSettings;
+
+class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat
+{
+public:
+ JSONCompactWithProgressRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_);
+
+ String getName() const override { return "JSONCompactWithProgressRowOutputFormat"; }
+
+ void onProgress(const Progress & value) override;
+ void flush() override;
+
+private:
+ void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
+ void writeFieldDelimiter() override;
+ void writeRowStartDelimiter() override;
+ void writeRowEndDelimiter() override;
+ void writeRowBetweenDelimiter() override;
+ bool supportTotals() const override { return true; }
+ bool supportExtremes() const override { return true; }
+ void writeBeforeTotals() override;
+ void writeAfterTotals() override;
+ void writeExtremesElement(const char * title, const Columns & columns, size_t row_num) override;
+ void writeTotals(const Columns & columns, size_t row_num) override;
+
+ void writeProgress();
+ void writePrefix() override;
+ void writeSuffix() override;
+ void finalizeImpl() override;
+
+
+ std::vector<String> progress_lines;
+ std::mutex progress_lines_mutex;
+ /// To avoid locking the mutex and checking progress_lines on every row,
+ /// we use an atomic flag indicating that progress_lines is non-empty.
+ std::atomic_bool has_progress = false;
+};
+
+}
diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
index 10d0e051665..16b88d0b8dc 100644
--- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
+++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp
@@ -542,7 +542,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
if (format_settings.null_as_default)
tryToReplaceNullFieldsInComplexTypesWithDefaultValues(expression_value, type);
- Field value = convertFieldToType(expression_value, type, value_raw.second.get());
+ Field value = convertFieldToType(expression_value, type, value_raw.second.get(), format_settings);
/// Check that we are indeed allowed to insert a NULL.
if (value.isNull() && !type.isNullable() && !type.isLowCardinalityNullable())
diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp
index 44d2703e973..aaa1f53b5ee 100644
--- a/src/Processors/QueryPlan/PartsSplitter.cpp
+++ b/src/Processors/QueryPlan/PartsSplitter.cpp
@@ -50,6 +50,9 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type)
case TypeIndex::Float64:
case TypeIndex::Nullable:
case TypeIndex::ObjectDeprecated:
+ case TypeIndex::Object:
+ case TypeIndex::Variant:
+ case TypeIndex::Dynamic:
return false;
case TypeIndex::Array:
{
@@ -76,16 +79,6 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type)
const auto & data_type_map = static_cast(data_type);
return isSafePrimaryDataKeyType(*data_type_map.getKeyType()) && isSafePrimaryDataKeyType(*data_type_map.getValueType());
}
- case TypeIndex::Variant:
- {
- const auto & data_type_variant = static_cast(data_type);
- const auto & data_type_variant_elements = data_type_variant.getVariants();
- for (const auto & data_type_variant_element : data_type_variant_elements)
- if (!isSafePrimaryDataKeyType(*data_type_variant_element))
- return false;
-
- return false;
- }
default:
{
break;
diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp
index c9ada32b839..2e21df0f387 100644
--- a/src/Processors/Transforms/AggregatingTransform.cpp
+++ b/src/Processors/Transforms/AggregatingTransform.cpp
@@ -486,7 +486,7 @@ private:
#define M(NAME) \
else if (first->type == AggregatedDataVariants::Type::NAME) \
- params->aggregator.mergeSingleLevelDataImpl<decltype(first->NAME)::element_type>(*data);
+ params->aggregator.mergeSingleLevelDataImpl<decltype(first->NAME)::element_type>(*data, shared_data->is_cancelled);
if (false) {} // NOLINT
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M
diff --git a/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h b/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h
index 5ef5e1db62e..212dc048868 100644
--- a/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h
+++ b/src/Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h
using ContextPtr = std::shared_ptr<const Context>;
/// Extracts a zookeeper path from a specified CREATE TABLE query.
/// The function checks the table engine and if it is Replicated*MergeTree then it takes the first argument and expands macros in it.
/// Returns std::nullopt if the specified CREATE query doesn't describe a Replicated table or its arguments can't be evaluated.
-std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & context);
+std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & local_context);
}
diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp
index 9a65d590453..18ed7df9b5d 100644
--- a/src/Storages/MergeTree/registerStorageMergeTree.cpp
+++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp
@@ -12,6 +12,7 @@
#include
#include
#include
+#include <Common/logger_useful.h>
#include
#include
@@ -189,7 +190,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
const String & engine_name,
ASTs & engine_args,
LoadingStrictnessLevel mode,
- const ContextPtr & context,
+ const ContextPtr & local_context,
String & zookeeper_path,
String & replica_name,
RenamingRestrictions & renaming_restrictions)
@@ -206,11 +207,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
{
/// Allow expressions in engine arguments.
/// In new syntax argument can be literal or identifier or array/tuple of identifiers.
- evaluateEngineArgs(engine_args, context);
+ evaluateEngineArgs(engine_args, local_context);
}
- bool is_on_cluster = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
- bool is_replicated_database = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY &&
+ bool is_on_cluster = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
+ bool is_replicated_database = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY &&
DatabaseCatalog::instance().getDatabase(table_id.database_name)->getEngineName() == "Replicated";
/// Allow implicit {uuid} macros only for zookeeper_path in ON CLUSTER queries
@@ -230,10 +231,10 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
/// We did unfold it in previous versions to make moving table from Atomic to Ordinary database work correctly,
/// but now it's not allowed (and it was the only reason to unfold {uuid} macro).
info.table_id.uuid = UUIDHelpers::Nil;
- zookeeper_path = context->getMacros()->expand(zookeeper_path, info);
+ zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info);
info.level = 0;
- replica_name = context->getMacros()->expand(replica_name, info);
+ replica_name = local_context->getMacros()->expand(replica_name, info);
}
ast_zk_path->value = zookeeper_path;
@@ -251,11 +252,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
}
if (!allow_uuid_macro)
info.table_id.uuid = UUIDHelpers::Nil;
- zookeeper_path = context->getMacros()->expand(zookeeper_path, info);
+ zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info);
info.level = 0;
info.table_id.uuid = UUIDHelpers::Nil;
- replica_name = context->getMacros()->expand(replica_name, info);
+ replica_name = local_context->getMacros()->expand(replica_name, info);
/// We do not allow renaming table with these macros in metadata, because zookeeper_path will be broken after RENAME TABLE.
/// NOTE: it may happen if table was created by older version of ClickHouse (< 20.10) and macros was not unfolded on table creation
@@ -272,9 +273,24 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
bool has_arguments = (arg_num + 2 <= arg_cnt);
bool has_valid_arguments = has_arguments && engine_args[arg_num]->as<ASTLiteral>() && engine_args[arg_num + 1]->as<ASTLiteral>();
+ const auto & server_settings = local_context->getServerSettings();
if (has_valid_arguments)
{
+ if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0)
+ {
+ throw Exception(ErrorCodes::BAD_ARGUMENTS,
+ "It's not allowed to specify explicit zookeeper_path and replica_name "
+ "for ReplicatedMergeTree arguments in Replicated database. If you really want to "
+ "specify them explicitly, enable setting "
+ "database_replicated_allow_replicated_engine_arguments.");
+ }
+ else if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1)
+ {
+ LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "It's not recommended to explicitly specify "
+ "zookeeper_path and replica_name in ReplicatedMergeTree arguments");
+ }
+
/// Get path and name from engine arguments
auto * ast_zk_path = engine_args[arg_num]->as<ASTLiteral>();
if (ast_zk_path && ast_zk_path->value.getType() == Field::Types::String)
@@ -288,6 +304,15 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must be a string literal{}", verbose_help_message);
+
+ if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2)
+ {
+ LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) "
+ "with default arguments", zookeeper_path, replica_name);
+ engine_args[arg_num]->as<ASTLiteral>()->value = zookeeper_path = server_settings.default_replica_path;
+ engine_args[arg_num + 1]->as<ASTLiteral>()->value = replica_name = server_settings.default_replica_name;
+ }
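+ /// E.g. ReplicatedMergeTree('/custom/path', 'r1') is rewritten to use default_replica_path and
+ /// default_replica_name (typically '/clickhouse/tables/{uuid}/{shard}' and '{replica}').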
+
expand_macro(ast_zk_path, ast_replica_name);
}
else if (is_extended_storage_def
@@ -297,7 +322,6 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
{
/// Try use default values if arguments are not specified.
/// Note: {uuid} macro works for ON CLUSTER queries when database engine is Atomic.
- const auto & server_settings = context->getServerSettings();
zookeeper_path = server_settings.default_replica_path;
/// TODO maybe use hostname if {replica} is not defined?
replica_name = server_settings.default_replica_name;
@@ -322,7 +346,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
}
/// Extracts a zookeeper path from a specified CREATE TABLE query.
-std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & context)
+std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & local_context)
{
if (!query.storage || !query.storage->engine)
return {};
@@ -346,7 +370,7 @@ std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreate
try
{
- extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, context,
+ extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, local_context,
zookeeper_path, replica_name, renaming_restrictions);
}
catch (Exception & e)
diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp
index 667a925d11e..f04e868ee5a 100644
--- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp
+++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp
@@ -207,18 +207,28 @@ struct DeltaLakeMetadataImpl
Poco::Dynamic::Var json = parser.parse(json_str);
Poco::JSON::Object::Ptr object = json.extract<Poco::JSON::Object::Ptr>();
+ if (!object)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to parse metadata file");
+
+#ifdef ABORT_ON_LOGICAL_ERROR
std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
object->stringify(oss);
LOG_TEST(log, "Metadata: {}", oss.str());
+#endif
if (object->has("metaData"))
{
const auto metadata_object = object->get("metaData").extract();
+ if (!metadata_object)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `metaData` field");
+
const auto schema_object = metadata_object->getValue("schemaString");
Poco::JSON::Parser p;
Poco::Dynamic::Var fields_json = parser.parse(schema_object);
const Poco::JSON::Object::Ptr & fields_object = fields_json.extract<Poco::JSON::Object::Ptr>();
+ if (!fields_object)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `fields` field");
auto current_schema = parseMetadata(fields_object);
if (file_schema.empty())
@@ -237,6 +247,9 @@ struct DeltaLakeMetadataImpl
if (object->has("add"))
{
auto add_object = object->get("add").extract();
+ if (!add_object)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `add` field");
+
auto path = add_object->getValue("path");
result.insert(fs::path(configuration->getPath()) / path);
@@ -247,6 +260,9 @@ struct DeltaLakeMetadataImpl
if (add_object->has("partitionValues"))
{
auto partition_values = add_object->get("partitionValues").extract<Poco::JSON::Object::Ptr>();
+ if (!partition_values)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `partitionValues` field");
+
if (partition_values->size())
{
auto & current_partition_columns = file_partition_columns[filename];
@@ -274,7 +290,11 @@ struct DeltaLakeMetadataImpl
}
else if (object->has("remove"))
{
- auto path = object->get("remove").extract<Poco::JSON::Object::Ptr>()->getValue<String>("path");
+ auto remove_object = object->get("remove").extract<Poco::JSON::Object::Ptr>();
+ if (!remove_object)
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `remove` field");
+
+ auto path = remove_object->getValue<String>("path");
result.erase(fs::path(configuration->getPath()) / path);
}
}
diff --git a/src/Storages/Statistics/Statistics.cpp b/src/Storages/Statistics/Statistics.cpp
index 795963bd55d..0557530515f 100644
--- a/src/Storages/Statistics/Statistics.cpp
+++ b/src/Storages/Statistics/Statistics.cpp
@@ -248,7 +248,7 @@ ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnDescription & co
{
auto it = creators.find(type);
if (it == creators.end())
- throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'count_min', 'minmax', 'tdigest' and 'uniq'", type);
+ throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'countmin', 'minmax', 'tdigest' and 'uniq'", type);
auto stat_ptr = (it->second)(desc, column_desc.type);
column_stat->stats[type] = stat_ptr;
}
diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp
index 1a2459c230d..f477181ec2d 100644
--- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp
+++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp
@@ -49,7 +49,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const
if (isStringOrFixedString(data_type))
return sketch.get_estimate(val.safeGet<String>());
- throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName());
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'countmin' does not support estimate data type of {}", data_type->getName());
}
void StatisticsCountMinSketch::update(const ColumnPtr & column)
@@ -88,7 +88,7 @@ void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*des
DataTypePtr inner_data_type = removeNullable(data_type);
inner_data_type = removeLowCardinalityAndNullable(inner_data_type);
if (!inner_data_type->isValueRepresentedByNumber() && !isStringOrFixedString(inner_data_type))
- throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName());
+ throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countmin' does not support type {}", data_type->getName());
}
StatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type)
diff --git a/src/Storages/StatisticsDescription.cpp b/src/Storages/StatisticsDescription.cpp
index 4e0d901d0c7..ac7fa8898de 100644
--- a/src/Storages/StatisticsDescription.cpp
+++ b/src/Storages/StatisticsDescription.cpp
@@ -48,11 +48,11 @@ static StatisticsType stringToStatisticsType(String type)
return StatisticsType::TDigest;
if (type == "uniq")
return StatisticsType::Uniq;
- if (type == "count_min")
+ if (type == "countmin")
return StatisticsType::CountMinSketch;
if (type == "minmax")
return StatisticsType::MinMax;
- throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'count_min', 'minmax', 'tdigest' and 'uniq'.", type);
+ throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type);
}
String SingleStatisticsDescription::getTypeName() const
@@ -64,11 +64,11 @@ String SingleStatisticsDescription::getTypeName() const
case StatisticsType::Uniq:
return "Uniq";
case StatisticsType::CountMinSketch:
- return "count_min";
+ return "countmin";
case StatisticsType::MinMax:
return "minmax";
default:
- throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'count_min', 'minmax', 'tdigest' and 'uniq'.", type);
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type);
}
}
diff --git a/tests/config/users.d/database_replicated.xml b/tests/config/users.d/database_replicated.xml
index c049c3559fc..1c2cf2ac22b 100644
--- a/tests/config/users.d/database_replicated.xml
+++ b/tests/config/users.d/database_replicated.xml
@@ -6,6 +6,7 @@
120
1
1
+ <database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py
index 891ee8f00f5..ec0bef23731 100644
--- a/tests/integration/test_disk_over_web_server/test.py
+++ b/tests/integration/test_disk_over_web_server/test.py
@@ -311,7 +311,8 @@ def test_replicated_database(cluster):
SETTINGS storage_policy = 'web';
""".format(
uuids[0]
- )
+ ),
+ settings={"database_replicated_allow_explicit_uuid": 3},
)
node2 = cluster.instances["node2"]
diff --git a/tests/integration/test_replicated_database/configs/settings.xml b/tests/integration/test_replicated_database/configs/settings.xml
index c637fe8eead..41799c5bed2 100644
--- a/tests/integration/test_replicated_database/configs/settings.xml
+++ b/tests/integration/test_replicated_database/configs/settings.xml
@@ -5,6 +5,8 @@
1
0
0
+ <database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
+ <database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
diff --git a/tests/integration/test_replicated_database/configs/settings2.xml b/tests/integration/test_replicated_database/configs/settings2.xml
index dad5740a8ae..fb7f0c8d4d3 100644
--- a/tests/integration/test_replicated_database/configs/settings2.xml
+++ b/tests/integration/test_replicated_database/configs/settings2.xml
@@ -5,6 +5,8 @@
1
0
0
+ <database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
+ <database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
0
diff --git a/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml b/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml
index 16caee9ba20..a5e45ead44e 100644
--- a/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml
+++ b/tests/integration/test_replicated_database_alter_modify_order_by/configs/settings.xml
@@ -1,6 +1,8 @@
+ <database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
+ <database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
diff --git a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh
index 4916721764c..1efe529ac24 100755
--- a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh
+++ b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh
@@ -15,6 +15,6 @@ $CLICKHOUSE_CLIENT --query="
INSERT INTO users VALUES (1321770221388956068);
";
-for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT -n | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo
+for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo
$CLICKHOUSE_CLIENT --query="DROP TABLE users;";
diff --git a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh
index d57efaa1f0e..f0c0354ab33 100755
--- a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh
+++ b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh
@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
DROP TABLE IF EXISTS numbers_100k;
CREATE VIEW numbers_100k AS SELECT * FROM system.numbers LIMIT 100000;
";
diff --git a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh
index b59aae83e81..b6cc270994f 100755
--- a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh
+++ b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app_long.sh
@@ -48,13 +48,13 @@ pack_unpack_compare "SELECT name, is_aggregate FROM system.functions" "name Stri
echo
# Check settings are passed correctly
${CLICKHOUSE_LOCAL} --max_rows_in_distinct=33 -q "SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'"
-${CLICKHOUSE_LOCAL} -n -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'"
+${CLICKHOUSE_LOCAL} -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'"
${CLICKHOUSE_LOCAL} --max_bytes_before_external_group_by=1 --max_block_size=10 -q "SELECT sum(ignore(*)) FROM (SELECT number, count() FROM numbers(1000) GROUP BY number)"
echo
# Check exta options, we expect zero exit code and no stderr output
-(${CLICKHOUSE_LOCAL} --ignore-error -n --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC")
+(${CLICKHOUSE_LOCAL} --ignore-error --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC")
echo
-${CLICKHOUSE_LOCAL} -n -q "CREATE TABLE sophisticated_default
+${CLICKHOUSE_LOCAL} -q "CREATE TABLE sophisticated_default
(
a UInt8 DEFAULT 3,
b UInt8 ALIAS a + 5,
diff --git a/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh
index eaa50bce6b1..eed0d3bf5c6 100755
--- a/tests/queries/0_stateless/00505_secure.sh
+++ b/tests/queries/0_stateless/00505_secure.sh
@@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT_SECURE -q "SELECT 4;"
# TODO: can test only on unchanged port. Possible solutions: generate config or pass shard port via command line
if [[ "$CLICKHOUSE_PORT_TCP_SECURE" = "$CLICKHOUSE_PORT_TCP_SECURE" ]]; then
- cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -n -m
+ cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -m
else
tail -n 13 "$CURDIR"/00505_secure.reference
fi
diff --git a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
index 51e6a513608..329f6ad2248 100644
--- a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
+++ b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql
@@ -6,6 +6,8 @@ DROP TABLE IF EXISTS without_deduplication;
DROP TABLE IF EXISTS with_deduplication_mv;
DROP TABLE IF EXISTS without_deduplication_mv;
+SET database_replicated_allow_explicit_uuid=3;
+SET database_replicated_allow_replicated_engine_arguments=3;
CREATE TABLE with_deduplication(x UInt32)
ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication', 'r1') ORDER BY x;
CREATE TABLE without_deduplication(x UInt32)
diff --git a/tests/queries/0_stateless/00531_client_ignore_error.sh b/tests/queries/0_stateless/00531_client_ignore_error.sh
index d3215e1beac..553cb9fa897 100755
--- a/tests/queries/0_stateless/00531_client_ignore_error.sh
+++ b/tests/queries/0_stateless/00531_client_ignore_error.sh
@@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
-echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
-echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error
+echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
+echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
+echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error
#$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"
diff --git a/tests/queries/0_stateless/00534_client_ignore_error.sh b/tests/queries/0_stateless/00534_client_ignore_error.sh
index d3215e1beac..553cb9fa897 100755
--- a/tests/queries/0_stateless/00534_client_ignore_error.sh
+++ b/tests/queries/0_stateless/00534_client_ignore_error.sh
@@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
-echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
-echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error
+echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
+echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
+echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error
#$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"
diff --git a/tests/queries/0_stateless/00609_mv_index_in_in.sql b/tests/queries/0_stateless/00609_mv_index_in_in.sql
index bd9f35350c1..848938780c2 100644
--- a/tests/queries/0_stateless/00609_mv_index_in_in.sql
+++ b/tests/queries/0_stateless/00609_mv_index_in_in.sql
@@ -1,4 +1,4 @@
--- Tags: no-ordinary-database
+-- Tags: no-ordinary-database, no-parallel
DROP TABLE IF EXISTS test_00609;
DROP TABLE IF EXISTS test_mv_00609;
@@ -6,6 +6,7 @@ DROP TABLE IF EXISTS test_mv_00609;
create table test_00609 (a Int8) engine=Memory;
insert into test_00609 values (1);
+set database_replicated_allow_explicit_uuid=3;
set allow_deprecated_syntax_for_merge_tree=1;
create materialized view test_mv_00609 uuid '00000609-1000-4000-8000-000000000001' Engine=MergeTree(date, (a), 8192) populate as select a, toDate('2000-01-01') date from test_00609;
diff --git a/tests/queries/0_stateless/00686_client_exit_code.sh b/tests/queries/0_stateless/00686_client_exit_code.sh
index 9586ddbd0a3..e348f93f30d 100755
--- a/tests/queries/0_stateless/00686_client_exit_code.sh
+++ b/tests/queries/0_stateless/00686_client_exit_code.sh
@@ -8,5 +8,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=./mergetree_mutations.lib
. "$CURDIR"/mergetree_mutations.lib
-echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} -n 2>/dev/null
+echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} 2>/dev/null
echo $?
diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
index ea8b9d02e49..fd002668696 100755
--- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
+++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh
@@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery &
-yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery &
+yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT &
+yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT &
wait
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table"
diff --git a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh
index 5a4fd901f8d..285fd3945f9 100755
--- a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh
+++ b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh
@@ -12,7 +12,7 @@ settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_que
# Test insert logging on each block and checkPacket() method
-$CLICKHOUSE_CLIENT $settings -n -q "
+$CLICKHOUSE_CLIENT $settings -q "
DROP TABLE IF EXISTS merge_tree_table;
CREATE TABLE merge_tree_table (id UInt64, date Date, uid UInt32) ENGINE = MergeTree(date, id, 8192);"
diff --git a/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh
index b62a639d8f4..9bc84dd1063 100755
--- a/tests/queries/0_stateless/00738_lock_for_inner_table.sh
+++ b/tests/queries/0_stateless/00738_lock_for_inner_table.sh
@@ -7,6 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
+CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3"
+
# there are some issues with Atomic database, let's generate it uniq
# otherwise flaky check will not pass.
uuid=$(${CLICKHOUSE_CLIENT} --query "SELECT reinterpretAsUUID(currentDatabase())")
diff --git a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
index da4d3b05987..a29d2e5bc71 100755
--- a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
+++ b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS table"
-seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT -n &
-seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT -n 2>/dev/null &
+seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT &
+seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT 2>/dev/null &
wait
diff --git a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
index f0bc52ee356..cb774116356 100755
--- a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
+++ b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
@@ -27,7 +27,7 @@ function thread_drop_create()
while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 100 ];
do
it=$((it+1))
- $CLICKHOUSE_CLIENT -nm -q "
+ $CLICKHOUSE_CLIENT -m -q "
drop table if exists view_00840;
create view view_00840 as select count(*),database,table from system.columns group by database,table;
"
diff --git a/tests/queries/0_stateless/00900_long_parquet.sh b/tests/queries/0_stateless/00900_long_parquet.sh
index 07d2f24e446..86a0d013078 100755
--- a/tests/queries/0_stateless/00900_long_parquet.sh
+++ b/tests/queries/0_stateless/00900_long_parquet.sh
@@ -8,11 +8,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS contributors;
CREATE TABLE contributors (name String) ENGINE = Memory;"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.contributors ORDER BY name DESC FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO contributors FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
-- random results
SELECT * FROM contributors LIMIT 10 FORMAT Null;
DROP TABLE contributors;
@@ -21,30 +21,30 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_numbers (number UInt64) ENGINE = Memory;"
# less than default block size (65k)
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 10000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
TRUNCATE TABLE parquet_numbers;"
# More than default block size
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 100000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
TRUNCATE TABLE parquet_numbers;"
${CLICKHOUSE_CLIENT} --max_block_size=2 --query="SELECT * FROM system.numbers LIMIT 3 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
TRUNCATE TABLE parquet_numbers;"
${CLICKHOUSE_CLIENT} --max_block_size=1 --query="SELECT * FROM system.numbers LIMIT 1000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
DROP TABLE parquet_numbers;
DROP TABLE IF EXISTS parquet_events;
CREATE TABLE parquet_events (event String, value UInt64, description String) ENGINE = Memory;"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.events FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_events FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT event, description FROM parquet_events WHERE event IN ('ContextLock', 'Query') ORDER BY event;
DROP TABLE parquet_events;
@@ -78,7 +78,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8 FORMAT
echo diff:
diff "${CLICKHOUSE_TMP}"/parquet_all_types_1.dump "${CLICKHOUSE_TMP}"/parquet_all_types_2.dump
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
TRUNCATE TABLE parquet_types2;
INSERT INTO parquet_types3 values ( 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 'str01', 'fstr1', '2003-03-04', '2004-05-06', toDateTime64('2004-05-06 07:08:09.012', 9));"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types3 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"
@@ -88,7 +88,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 values ( 80,
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types4 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT 'dest:';
SELECT * FROM parquet_types2 ORDER BY int8;
SELECT 'min:';
@@ -106,7 +106,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet"
echo dest from null:
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_types6 ORDER BY int8;
DROP TABLE parquet_types5;
@@ -126,7 +126,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
INSERT INTO parquet_arrays VALUES (2, [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_arrays ORDER BY id;
DROP TABLE parquet_arrays;
@@ -135,7 +135,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_nullable_arrays (id UInt32, a1 Array(Nullable(UInt32)), a2 Array(Nullable(String)), a3 Array(Nullable(Decimal(4, 2)))) engine=Memory();
INSERT INTO parquet_nullable_arrays VALUES (1, [1, Null, 2], [Null, 'Some string', Null], [0.001, Null, 42.42]), (2, [Null], [Null], [Null]), (3, [], [], []);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nullable_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nullable_arrays FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_nullable_arrays ORDER BY id;
DROP TABLE parquet_nullable_arrays;
@@ -143,7 +143,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_nested_arrays (a1 Array(Array(Array(UInt32))), a2 Array(Array(Array(String))), a3 Array(Array(Nullable(UInt32))), a4 Array(Array(Nullable(String)))) engine=Memory();
INSERT INTO parquet_nested_arrays VALUES ([[[1,2,3], [1,2,3]], [[1,2,3]], [[], [1,2,3]]], [[['Some string', 'Some string'], []], [['Some string']], [[]]], [[Null, 1, 2], [Null], [1, 2], []], [['Some string', Null, 'Some string'], [Null], []]);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_nested_arrays;
DROP TABLE parquet_nested_arrays;
@@ -151,6 +151,6 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory;
INSERT INTO TABLE parquet_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_decimal FORMAT Arrow"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_decimal;
DROP TABLE parquet_decimal;"
diff --git a/tests/queries/0_stateless/00900_long_parquet_decimal.sh b/tests/queries/0_stateless/00900_long_parquet_decimal.sh
index a819dcbcdc3..14e8fdcc038 100755
--- a/tests/queries/0_stateless/00900_long_parquet_decimal.sh
+++ b/tests/queries/0_stateless/00900_long_parquet_decimal.sh
@@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@@ -26,7 +26,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump
echo diff0:
diff "${CLICKHOUSE_TMP}"/parquet_decimal0_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@@ -61,7 +61,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump
echo diff1:
diff "${CLICKHOUSE_TMP}"/parquet_decimal1_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@@ -75,7 +75,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump
echo diff2:
diff "${CLICKHOUSE_TMP}"/parquet_decimal2_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@@ -86,7 +86,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_1.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" 2> /dev/null
echo nothing:
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM decimal2 ORDER BY a, b, c, d;
TRUNCATE TABLE decimal2;
@@ -94,7 +94,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_2.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet"
echo nulls:
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT * FROM decimal2 ORDER BY a, b, c, d;
TRUNCATE TABLE decimal2;
@@ -104,7 +104,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_3.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet"
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
SELECT 'full orig:';
SELECT * FROM decimal ORDER BY a, b, c, d;
SELECT 'full inserted:';
@@ -115,6 +115,6 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d;" > "${
echo diff3:
diff "${CLICKHOUSE_TMP}"/parquet_decimal3_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal3_2.dump
-${CLICKHOUSE_CLIENT} -n --query="
+${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;"
diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh
index d310a2c3612..152d5a847b7 100755
--- a/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh
+++ b/tests/queries/0_stateless/00921_datetime64_compatibility_long.sh
@@ -13,5 +13,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# ${CURDIR}/00921_datetime64_compatibility.python
python3 "${CURDIR}"/00921_datetime64_compatibility_long.python \
- | ${CLICKHOUSE_CLIENT} --ignore-error -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \
+ | ${CLICKHOUSE_CLIENT} --ignore-error -m --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \
| grep -v -e 'Received exception .*$' -e '^(query: ' | sed 's/^\(Code: [0-9]\+\).*$/\1/g'
diff --git a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh
index 686dd7f6df0..1d35daf9f2e 100755
--- a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh
+++ b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper_long.sh
@@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions1;"
$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions2;"
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
CREATE TABLE indices_mutaions1
(
u64 UInt64,
diff --git a/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh b/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh
index 8243c6bde62..df330b82c80 100755
--- a/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh
+++ b/tests/queries/0_stateless/00991_system_parts_race_condition_long.sh
@@ -22,7 +22,7 @@ function thread1()
function thread2()
{
- while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done
+ while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done
}
function thread3()
diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh
index 02a739ece4a..d5c0248e2b3 100755
--- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh
+++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS alter_table0;
DROP TABLE IF EXISTS alter_table1;
@@ -31,7 +31,7 @@ function thread1()
function thread2()
{
- while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done
+ while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done
}
function thread3()
@@ -87,6 +87,6 @@ check_replication_consistency "alter_table" "count(), sum(a), sum(b), round(sum(
$CLICKHOUSE_CLIENT -q "SELECT table, lost_part_count FROM system.replicas WHERE database=currentDatabase() AND lost_part_count!=0";
-$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
-$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
+$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
+$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
wait
diff --git a/tests/queries/0_stateless/01014_lazy_database_basic.sh b/tests/queries/0_stateless/01014_lazy_database_basic.sh
index ea7603b2519..55d18a7c527 100755
--- a/tests/queries/0_stateless/01014_lazy_database_basic.sh
+++ b/tests/queries/0_stateless/01014_lazy_database_basic.sh
@@ -5,9 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-${CLICKHOUSE_CLIENT} -n -q "DROP DATABASE IF EXISTS testlazy"
+${CLICKHOUSE_CLIENT} -q "DROP DATABASE IF EXISTS testlazy"
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
CREATE DATABASE testlazy ENGINE = Lazy(1);
CREATE TABLE testlazy.log (a UInt64, b UInt64) ENGINE = Log;
CREATE TABLE testlazy.slog (a UInt64, b UInt64) ENGINE = StripeLog;
@@ -30,7 +30,7 @@ ${CLICKHOUSE_CLIENT} -q "
sleep 1.5
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
SELECT * FROM testlazy.log LIMIT 0; -- drop testlazy.log from cache
RENAME TABLE testlazy.log TO testlazy.log2;
SELECT database, name FROM system.tables WHERE database = 'testlazy';
@@ -44,7 +44,7 @@ ${CLICKHOUSE_CLIENT} -q "
sleep 1.5
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
INSERT INTO testlazy.log2 VALUES (1, 1);
INSERT INTO testlazy.slog VALUES (2, 2);
INSERT INTO testlazy.tlog VALUES (3, 3);
@@ -55,14 +55,14 @@ ${CLICKHOUSE_CLIENT} -n -q "
sleep 1.5
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
SELECT * FROM testlazy.log2 LIMIT 0; -- drop testlazy.log2 from cache
DROP TABLE testlazy.log2;
"
sleep 1.5
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
SELECT * FROM testlazy.slog;
SELECT * FROM testlazy.tlog;
"
diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
index e4b3a31b13f..ff2c0b8821e 100755
--- a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
+++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
@@ -83,7 +83,7 @@ export -f recreate_lazy_func4;
export -f show_tables_func;
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
DROP DATABASE IF EXISTS $CURR_DATABASE;
CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1);
"
diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
index 872b0a7c1a1..82221eb06e9 100755
--- a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
+++ b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
DROP DATABASE IF EXISTS database_for_dict;
DROP TABLE IF EXISTS table_for_dict1;
DROP TABLE IF EXISTS table_for_dict2;
@@ -44,7 +44,7 @@ function thread3()
function thread4()
{
- while true; do $CLICKHOUSE_CLIENT -n -q "
+ while true; do $CLICKHOUSE_CLIENT -q "
SELECT * FROM database_for_dict.dict1 FORMAT Null;
SELECT * FROM database_for_dict.dict2 FORMAT Null;
" ||: ; done
@@ -52,7 +52,7 @@ function thread4()
function thread5()
{
- while true; do $CLICKHOUSE_CLIENT -n -q "
+ while true; do $CLICKHOUSE_CLIENT -q "
SELECT dictGetString('database_for_dict.dict1', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null;
SELECT dictGetString('database_for_dict.dict2', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null;
" ||: ; done
@@ -117,7 +117,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"
$CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict1"
$CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict2"
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
DROP DATABASE database_for_dict;
DROP TABLE table_for_dict1;
DROP TABLE table_for_dict2;
diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh
index 4bd21fcee02..eb12a76eb62 100755
--- a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh
+++ b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT --multiquery <&1 \
| grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Types .* are non-conforming as arguments for aggregate function avgWeighted'
diff --git a/tests/queries/0_stateless/01053_ssd_dictionary.sh b/tests/queries/0_stateless/01053_ssd_dictionary.sh
index fdd40940ab5..7dd3fa4657a 100755
--- a/tests/queries/0_stateless/01053_ssd_dictionary.sh
+++ b/tests/queries/0_stateless/01053_ssd_dictionary.sh
@@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -n --query="
+$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --query="
DROP DATABASE IF EXISTS 01053_db;
CREATE DATABASE 01053_db;
diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh
index 39e65af039b..22f8e5269bd 100755
--- a/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh
+++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper_long.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_bad_alters";
-$CLICKHOUSE_CLIENT -n --query "CREATE TABLE table_for_bad_alters (
+$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_bad_alters (
key UInt64,
value1 UInt8,
value2 String
diff --git a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
index 619b6e91d11..9101b9faa3d 100755
--- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
+++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
@@ -30,7 +30,7 @@ function drop_db()
database=$($CLICKHOUSE_CLIENT -q "select name from system.databases where name like '${CLICKHOUSE_DATABASE}%' order by rand() limit 1")
if [[ "$database" == "$CLICKHOUSE_DATABASE" ]]; then continue; fi
if [ -z "$database" ]; then continue; fi
- $CLICKHOUSE_CLIENT -n --query \
+ $CLICKHOUSE_CLIENT --query \
"drop database if exists $database" 2>&1| grep -Fa "Exception: "
sleep 0.$RANDOM
done
diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh
index 5eebb558575..5fe85136d05 100755
--- a/tests/queries/0_stateless/01114_database_atomic.sh
+++ b/tests/queries/0_stateless/01114_database_atomic.sh
@@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -q "SELECT name,
splitByChar('/', metadata_path)[-2] as uuid_path, ((splitByChar('/', metadata_path)[-3] as metadata) = substr(uuid_path, 1, 3)) OR metadata='metadata'
FROM system.databases WHERE name LIKE '${CLICKHOUSE_DATABASE}_%'" | sed "s/$uuid_db_1/00001114-1000-4000-8000-000000000001/g" | sed "s/$uuid_db_2/00001114-1000-4000-8000-000000000002/g"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
CREATE TABLE ${DATABASE_1}.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple();
INSERT INTO ${DATABASE_1}.mt_tmp SELECT * FROM numbers(100);
CREATE TABLE ${DATABASE_3}.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5);
@@ -65,7 +65,7 @@ while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE que
sleep 0.1
done
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
RENAME TABLE ${DATABASE_1}.mt TO ${DATABASE_1}.mt_tmp;
RENAME TABLE ${DATABASE_1}.mt_tmp TO ${DATABASE_2}.mt_tmp;
EXCHANGE TABLES ${DATABASE_2}.mt AND ${DATABASE_2}.mt_tmp;
@@ -79,7 +79,7 @@ uuid_mt1=$($CLICKHOUSE_CLIENT -q "SELECT uuid FROM system.tables WHERE database=
$CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_1}.mt" | sed "s/$uuid_mt1/00001114-0000-4000-8000-000000000001/g"
$CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_2}.mt" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
DROP TABLE ${DATABASE_1}.mt SETTINGS database_atomic_wait_for_drop_and_detach_synchronously=0;
CREATE TABLE ${DATABASE_1}.mt (s String) ENGINE=Log();
INSERT INTO ${DATABASE_1}.mt SELECT 's' || toString(number) FROM numbers(5);
diff --git a/tests/queries/0_stateless/01119_session_log.sh b/tests/queries/0_stateless/01119_session_log.sh
index 2d17b545276..61bb7cf3ea8 100755
--- a/tests/queries/0_stateless/01119_session_log.sh
+++ b/tests/queries/0_stateless/01119_session_log.sh
@@ -14,7 +14,7 @@ and interface in ('HTTP', 'TCP', 'TCP_Interserver')
and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes)
and event_time >= now() - interval 5 minute"
-$CLICKHOUSE_CLIENT -nm -q "
+$CLICKHOUSE_CLIENT -m -q "
select * from remote('127.0.0.2', system, one, 'default', '');
select * from remote('127.0.0.2', system, one, 'default', 'wrong password'); -- { serverError AUTHENTICATION_FAILED }
select * from remote('127.0.0.2', system, one, 'nonexistsnt_user_1119', ''); -- { serverError AUTHENTICATION_FAILED }
diff --git a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql
index a585ef1c324..c689542e4c3 100644
--- a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql
+++ b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql
@@ -7,6 +7,8 @@ DROP TABLE IF EXISTS rmt1;
DROP TABLE IF EXISTS rmt2;
DROP TABLE IF EXISTS rmt3;
+SET database_replicated_allow_replicated_engine_arguments=1;
+
CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01148/{shard}/{database}/{table}', '{replica}') ORDER BY n;
SHOW CREATE TABLE rmt;
RENAME TABLE rmt TO rmt1;
diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.sql b/tests/queries/0_stateless/01153_attach_mv_uuid.sql
index 00cce8a1de4..0ef16449096 100644
--- a/tests/queries/0_stateless/01153_attach_mv_uuid.sql
+++ b/tests/queries/0_stateless/01153_attach_mv_uuid.sql
@@ -14,6 +14,8 @@ INSERT INTO src VALUES (3), (4);
SELECT * FROM mv ORDER BY n;
DROP TABLE mv SYNC;
+SET database_replicated_allow_explicit_uuid=3;
+
SET show_table_uuid_in_table_create_query_if_not_nil=1;
CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n;
ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;
diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh
index a255c1db30e..8afb0c18462 100755
--- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh
+++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper_long.sh
@@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated"
-$CLICKHOUSE_CLIENT -n --query "
+$CLICKHOUSE_CLIENT --query "
CREATE TABLE table_for_rename_replicated
(
date Date,
diff --git a/tests/queries/0_stateless/01238_http_memory_tracking.sh b/tests/queries/0_stateless/01238_http_memory_tracking.sh
index ce1310cf302..f88c8fb47c6 100755
--- a/tests/queries/0_stateless/01238_http_memory_tracking.sh
+++ b/tests/queries/0_stateless/01238_http_memory_tracking.sh
@@ -14,7 +14,7 @@ ${CLICKHOUSE_CLIENT} --format Null -q "CREATE USER $MISTER_USER"
# This is needed to keep at least one running query for user for the time of test.
# (1k http queries takes ~1 second, let's run for 5x more to avoid flaps)
-${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)' &
+${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null <<<'SELECT sleepEachRow(1) FROM numbers(5)' &
# ignore "yes: standard output: Broken pipe"
yes 'SELECT 1' 2>/dev/null | {
diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
index 60a65b9a253..c1ec812875c 100755
--- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
+++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
@@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
DROP DATABASE IF EXISTS 01280_db;
CREATE DATABASE 01280_db;
DROP TABLE IF EXISTS 01280_db.table_for_dict;
@@ -39,9 +39,9 @@ $CLICKHOUSE_CLIENT -n --query="
LIFETIME(MIN 1000 MAX 2000)
LAYOUT(COMPLEX_KEY_SSD_CACHE(FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d'));"
-$CLICKHOUSE_CLIENT -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }"
+$CLICKHOUSE_CLIENT -q "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }"
-$CLICKHOUSE_CLIENT -n --query="
+$CLICKHOUSE_CLIENT --query="
SELECT 'TEST_SMALL';
SELECT 'VALUE FROM RAM BUFFER';
SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple('1', toInt32(3)));
@@ -63,9 +63,9 @@ $CLICKHOUSE_CLIENT -n --query="
SELECT dictGetInt32('01280_db.ssd_dict', 'b', tuple('10', toInt32(-20)));
SELECT dictGetString('01280_db.ssd_dict', 'c', tuple('10', toInt32(-20)));"
-$CLICKHOUSE_CLIENT -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }"
+$CLICKHOUSE_CLIENT -q "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }"
-$CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict;
+$CLICKHOUSE_CLIENT --query="DROP DICTIONARY 01280_db.ssd_dict;
DROP TABLE IF EXISTS 01280_db.keys_table;
CREATE TABLE 01280_db.keys_table
(
@@ -122,4 +122,4 @@ $CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict;
DROP DICTIONARY IF EXISTS database_for_dict.ssd_dict;
DROP TABLE IF EXISTS database_for_dict.keys_table;"
-$CLICKHOUSE_CLIENT -n --query="DROP DATABASE IF EXISTS 01280_db;"
+$CLICKHOUSE_CLIENT --query="DROP DATABASE IF EXISTS 01280_db;"
diff --git a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh
index 21f46a34514..00619f13173 100755
--- a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh
+++ b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long.sh
@@ -85,7 +85,7 @@ export -f recreate_lazy_func4;
export -f test_func;
-${CLICKHOUSE_CLIENT} -n -q "
+${CLICKHOUSE_CLIENT} -q "
DROP DATABASE IF EXISTS $CURR_DATABASE;
CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1);
"
diff --git a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh
index 1d2d4516b9c..6ff6644f11e 100755
--- a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh
+++ b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh
@@ -10,7 +10,7 @@ set -e
function thread()
{
while true; do
- $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 SYNC;
+ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table_$1 SYNC;
CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 |
grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now| were removed by another replica|Removing leftovers from table|Another replica was suddenly created|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time|^\(query: '
done
diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh
index 1d5f5d54853..cc96a37a0ce 100755
--- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh
+++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh
@@ -17,7 +17,7 @@ function thread1()
{
local TIMELIMIT=$((SECONDS+$1))
while [ $SECONDS -lt "$TIMELIMIT" ]; do
- $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"
+ $CLICKHOUSE_CLIENT --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"
done
}
diff --git a/tests/queries/0_stateless/01395_limit_more_cases.sh b/tests/queries/0_stateless/01395_limit_more_cases.sh
index 9709bd74f26..6be8a91f0c7 100755
--- a/tests/queries/0_stateless/01395_limit_more_cases.sh
+++ b/tests/queries/0_stateless/01395_limit_more_cases.sh
@@ -20,4 +20,4 @@ for OFFSET in {0..15}; do
FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT);
"
done
-done | $CLICKHOUSE_CLIENT -n --max_block_size 5
+done | $CLICKHOUSE_CLIENT --max_block_size 5
diff --git a/tests/queries/0_stateless/01395_limit_more_cases_random.sh b/tests/queries/0_stateless/01395_limit_more_cases_random.sh
index c2f6b060aab..bb942906e63 100755
--- a/tests/queries/0_stateless/01395_limit_more_cases_random.sh
+++ b/tests/queries/0_stateless/01395_limit_more_cases_random.sh
@@ -19,4 +19,4 @@ for _ in $(seq $ITERATIONS); do
throwIf((c != 0 OR first != 0 OR last != 0) AND (c != last - first + 1))
FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT);
"
-done | $CLICKHOUSE_CLIENT -n --max_block_size $(($RANDOM % 20 + 1)) | uniq
+done | $CLICKHOUSE_CLIENT --max_block_size $(($RANDOM % 20 + 1)) | uniq
diff --git a/tests/queries/0_stateless/01412_cache_dictionary_race.sh b/tests/queries/0_stateless/01412_cache_dictionary_race.sh
index 36295ca01ea..b0c73cf742f 100755
--- a/tests/queries/0_stateless/01412_cache_dictionary_race.sh
+++ b/tests/queries/0_stateless/01412_cache_dictionary_race.sh
@@ -10,7 +10,7 @@ $CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db"
$CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db"
-$CLICKHOUSE_CLIENT -n -q "
+$CLICKHOUSE_CLIENT -q "
CREATE DICTIONARY ordinary_db.dict1
(
@@ -35,7 +35,7 @@ function dict_get_thread()
function drop_create_table_thread()
{
while true; do
- $CLICKHOUSE_CLIENT -n --query "CREATE TABLE ordinary_db.table_for_dict_real (
+ $CLICKHOUSE_CLIENT --query "CREATE TABLE ordinary_db.table_for_dict_real (
key_column UInt64,
second_column UInt8,
third_column String
diff --git a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh
index ec9c5134059..950afea9086 100755
--- a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh
+++ b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh
@@ -23,7 +23,7 @@ function f {
function g {
local TIMELIMIT=$((SECONDS+$1))
for _ in $(seq 1 100); do
- $CLICKHOUSE_CLIENT -n -q "
+ $CLICKHOUSE_CLIENT -q "
INSERT INTO mem SELECT number FROM numbers(1000000);
INSERT INTO mem SELECT number FROM numbers(1000000);
INSERT INTO mem SELECT number FROM numbers(1000000);
diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh
index 22cd6fb8127..0e6ab287146 100755
--- a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh
+++ b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts_long.sh
@@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
NUM_REPLICAS=6
for i in $(seq 1 $NUM_REPLICAS); do
- $CLICKHOUSE_CLIENT -n -q "
+ $CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS r$i SYNC;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum_many', 'r$i') ORDER BY x;
"
@@ -39,12 +39,12 @@ done
wait
for i in $(seq 1 $NUM_REPLICAS); do
- $CLICKHOUSE_CLIENT -n -q "
+ $CLICKHOUSE_CLIENT -q "
SYSTEM SYNC REPLICA r$i;
SELECT count(), min(x), max(x), sum(x) FROM r$i;
"
done
for i in $(seq 1 $NUM_REPLICAS); do
- $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS r$i SYNC;"
+ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS r$i SYNC;"
done
diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh
index 1589f17c752..3f4210f9bb0 100755
--- a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh
+++ b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts_long.sh
@@ -12,13 +12,13 @@ NUM_REPLICAS=2
NUM_INSERTS=5
for i in $(seq 1 $NUM_REPLICAS); do
- $CLICKHOUSE_CLIENT -n -q "
+ $CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS r$i;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum', 'r$i') ORDER BY x;
"
done
-$CLICKHOUSE_CLIENT -n -q "SYSTEM STOP REPLICATION QUEUES r2;"
+$CLICKHOUSE_CLIENT -q "SYSTEM STOP REPLICATION QUEUES r2;"
function thread {
$CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --query "INSERT INTO r1 SELECT $1"
@@ -28,12 +28,12 @@ for i in $(seq 1 $NUM_INSERTS); do
thread $i &
done
-$CLICKHOUSE_CLIENT -n -q "SYSTEM START REPLICATION QUEUES r2;"
+$CLICKHOUSE_CLIENT -q "SYSTEM START REPLICATION QUEUES r2;"
wait
for i in $(seq 1 $NUM_REPLICAS); do
- $CLICKHOUSE_CLIENT -n -q "
+ $CLICKHOUSE_CLIENT -q "
SELECT count(), min(x), max(x), sum(x) FROM r$i;
DROP TABLE IF EXISTS r$i;
"
diff --git a/tests/queries/0_stateless/01563_distributed_query_finish.sh b/tests/queries/0_stateless/01563_distributed_query_finish.sh
index e3c5928f108..50d194b1f15 100755
--- a/tests/queries/0_stateless/01563_distributed_query_finish.sh
+++ b/tests/queries/0_stateless/01563_distributed_query_finish.sh
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
-$CLICKHOUSE_CLIENT -nm <&1| grep -Fac "database_replicated_allow_explicit_uuid"
+
+$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1
+UUID '02858000-1000-4000-8000-000000000$(($RANDOM % 10))$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory"
+
+$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2
+UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory"
+
+
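+# Only database_replicated_allow_replicated_engine_arguments=0 is expected to reject the
+# explicit engine arguments (grep counts the error message); values 1 and 2 let the CREATE succeed.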
+$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int)
+ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments"
+
+$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int)
+ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n"
+
+$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int)
+ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n"
+
+
+$CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name"
+
+$CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 3) FROM system.tables WHERE database='$db' and name='m1'"
+$CLICKHOUSE_CLIENT -q "SELECT toString(uuid) LIKE '02858000%' FROM system.tables WHERE database='$db' and name='m2'"
+
+$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" | sed "s/$db/default/g"
+$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt2" | sed "s/$db/default/g"
+
+$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS rdb_$CLICKHOUSE_DATABASE"
diff --git a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference
index 4ca2c5e5f9b..1843964377d 100644
--- a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference
+++ b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.reference
@@ -1,5 +1,5 @@
Test create statistics:
-CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, count_min, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, count_min),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, count_min, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, count_min, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
+CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, countmin, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, countmin, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, countmin, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
Test materialize and drop statistics:
-CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, count_min),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
+CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
diff --git a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql
index 90a57c99624..249e3c84a51 100644
--- a/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql
+++ b/tests/queries/0_stateless/02864_statistics_create_materialize_drop.sql
@@ -12,10 +12,10 @@ SELECT 'Test create statistics:';
CREATE TABLE tab
(
- a LowCardinality(Int64) STATISTICS(count_min, minmax, tdigest, uniq),
- b LowCardinality(Nullable(String)) STATISTICS(count_min, uniq),
- c LowCardinality(Nullable(Int64)) STATISTICS(count_min, minmax, tdigest, uniq),
- d DateTime STATISTICS(count_min, minmax, tdigest, uniq),
+ a LowCardinality(Int64) STATISTICS(countmin, minmax, tdigest, uniq),
+ b LowCardinality(Nullable(String)) STATISTICS(countmin, uniq),
+ c LowCardinality(Nullable(Int64)) STATISTICS(countmin, minmax, tdigest, uniq),
+ d DateTime STATISTICS(countmin, minmax, tdigest, uniq),
pk String,
) Engine = MergeTree() ORDER BY pk;
@@ -25,7 +25,7 @@ SHOW CREATE TABLE tab;
SELECT 'Test materialize and drop statistics:';
ALTER TABLE tab DROP STATISTICS a, b, c, d;
-ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq;
+ALTER TABLE tab ADD STATISTICS b TYPE countmin, uniq;
ALTER TABLE tab MATERIALIZE STATISTICS b;
SHOW CREATE TABLE tab;
diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql
index bcaaa9e7b61..5b2c5cebc1d 100644
--- a/tests/queries/0_stateless/02864_statistics_ddl.sql
+++ b/tests/queries/0_stateless/02864_statistics_ddl.sql
@@ -1,5 +1,5 @@
-- Tags: no-fasttest
--- no-fasttest: 'count_min' sketches need a 3rd party library
+-- no-fasttest: 'countmin' sketches need a 3rd party library
-- Tests that DDL statements which create / drop / materialize statistics
@@ -71,29 +71,29 @@ CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree()
CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
--- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String
+-- countmin requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String
-- These types work:
-CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col UInt8 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col UInt256 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col Float32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col Decimal32(3) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col Date STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col Date32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col DateTime STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col DateTime64 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col IPv4 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col Nullable(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col String STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
+CREATE TABLE tab (col FixedString(1) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-- These types don't work:
-CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
+CREATE TABLE tab (col Array(Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
+CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
+CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
+CREATE TABLE tab (col UUID STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
+CREATE TABLE tab (col IPv6 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-- minmax requires data_type.isValueRepresentedByInteger
-- These types work:
@@ -187,17 +187,17 @@ ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATI
-- uniq
-- Works:
ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64;
-ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64;
+ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64;
-- Doesn't work:
ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS }
ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS }
--- count_min
+-- countmin
-- Works:
-ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64;
-ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64;
+ALTER TABLE tab ADD STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64;
+ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64;
-- Doesn't work:
-ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS }
-ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS }
+ALTER TABLE tab ADD STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS }
+ALTER TABLE tab MODIFY STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS }
-- minmax
-- Works:
ALTER TABLE tab ADD STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64;
diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql
index 473a7bc95ad..d7afba12c1d 100644
--- a/tests/queries/0_stateless/02864_statistics_predicates.sql
+++ b/tests/queries/0_stateless/02864_statistics_predicates.sql
@@ -1,5 +1,5 @@
-- Tags: no-fasttest
--- no-fasttest: 'count_min' sketches need a 3rd party library
+-- no-fasttest: 'countmin' sketches need a 3rd party library
-- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types.
@@ -13,27 +13,27 @@ CREATE TABLE tab
u64 UInt64,
u64_tdigest UInt64 STATISTICS(tdigest),
u64_minmax UInt64 STATISTICS(minmax),
- u64_count_min UInt64 STATISTICS(count_min),
+ u64_countmin UInt64 STATISTICS(countmin),
u64_uniq UInt64 STATISTICS(uniq),
f64 Float64,
f64_tdigest Float64 STATISTICS(tdigest),
f64_minmax Float64 STATISTICS(minmax),
- f64_count_min Float64 STATISTICS(count_min),
+ f64_countmin Float64 STATISTICS(countmin),
f64_uniq Float64 STATISTICS(uniq),
dt DateTime,
dt_tdigest DateTime STATISTICS(tdigest),
dt_minmax DateTime STATISTICS(minmax),
- dt_count_min DateTime STATISTICS(count_min),
+ dt_countmin DateTime STATISTICS(countmin),
dt_uniq DateTime STATISTICS(uniq),
b Bool,
b_tdigest Bool STATISTICS(tdigest),
b_minmax Bool STATISTICS(minmax),
- b_count_min Bool STATISTICS(count_min),
+ b_countmin Bool STATISTICS(countmin),
b_uniq Bool STATISTICS(uniq),
s String,
-- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest
-- s_minmax String STATISTICS(minmax), -- not supported by minmax
- s_count_min String STATISTICS(count_min),
+ s_countmin String STATISTICS(countmin),
s_uniq String STATISTICS(uniq)
) Engine = MergeTree() ORDER BY tuple()
SETTINGS min_bytes_for_wide_part = 0;
@@ -72,25 +72,25 @@ SELECT 'u64 and =';
SELECT count(*) FROM tab WHERE u64 = 7;
SELECT count(*) FROM tab WHERE u64_tdigest = 7;
SELECT count(*) FROM tab WHERE u64_minmax = 7;
-SELECT count(*) FROM tab WHERE u64_count_min = 7;
+SELECT count(*) FROM tab WHERE u64_countmin = 7;
SELECT count(*) FROM tab WHERE u64_uniq = 7;
SELECT count(*) FROM tab WHERE u64 = 7.7;
SELECT count(*) FROM tab WHERE u64_tdigest = 7.7;
SELECT count(*) FROM tab WHERE u64_minmax = 7.7;
-SELECT count(*) FROM tab WHERE u64_count_min = 7.7;
+SELECT count(*) FROM tab WHERE u64_countmin = 7.7;
SELECT count(*) FROM tab WHERE u64_uniq = 7.7;
SELECT count(*) FROM tab WHERE u64 = '7';
SELECT count(*) FROM tab WHERE u64_tdigest = '7';
SELECT count(*) FROM tab WHERE u64_minmax = '7';
-SELECT count(*) FROM tab WHERE u64_count_min = '7';
+SELECT count(*) FROM tab WHERE u64_countmin = '7';
SELECT count(*) FROM tab WHERE u64_uniq = '7';
SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_minmax = '7.7'; -- { serverError TYPE_MISMATCH }
-SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH }
+SELECT count(*) FROM tab WHERE u64_countmin = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT 'u64 and <';
@@ -98,25 +98,25 @@ SELECT 'u64 and <';
SELECT count(*) FROM tab WHERE u64 < 7;
SELECT count(*) FROM tab WHERE u64_tdigest < 7;
SELECT count(*) FROM tab WHERE u64_minmax < 7;
-SELECT count(*) FROM tab WHERE u64_count_min < 7;
+SELECT count(*) FROM tab WHERE u64_countmin < 7;
SELECT count(*) FROM tab WHERE u64_uniq < 7;
SELECT count(*) FROM tab WHERE u64 < 7.7;
SELECT count(*) FROM tab WHERE u64_tdigest < 7.7;
SELECT count(*) FROM tab WHERE u64_minmax < 7.7;
-SELECT count(*) FROM tab WHERE u64_count_min < 7.7;
+SELECT count(*) FROM tab WHERE u64_countmin < 7.7;
SELECT count(*) FROM tab WHERE u64_uniq < 7.7;
SELECT count(*) FROM tab WHERE u64 < '7';
SELECT count(*) FROM tab WHERE u64_tdigest < '7';
SELECT count(*) FROM tab WHERE u64_minmax < '7';
-SELECT count(*) FROM tab WHERE u64_count_min < '7';
+SELECT count(*) FROM tab WHERE u64_countmin < '7';
SELECT count(*) FROM tab WHERE u64_uniq < '7';
SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_minmax < '7.7'; -- { serverError TYPE_MISMATCH }
-SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH }
+SELECT count(*) FROM tab WHERE u64_countmin < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH }
-- f64 ----------------------------------------------------
@@ -126,25 +126,25 @@ SELECT 'f64 and =';
SELECT count(*) FROM tab WHERE f64 = 7;
SELECT count(*) FROM tab WHERE f64_tdigest = 7;
SELECT count(*) FROM tab WHERE f64_minmax = 7;
-SELECT count(*) FROM tab WHERE f64_count_min = 7;
+SELECT count(*) FROM tab WHERE f64_countmin = 7;
SELECT count(*) FROM tab WHERE f64_uniq = 7;
SELECT count(*) FROM tab WHERE f64 = 7.7;
SELECT count(*) FROM tab WHERE f64_tdigest = 7.7;
SELECT count(*) FROM tab WHERE f64_minmax = 7.7;
-SELECT count(*) FROM tab WHERE f64_count_min = 7.7;
+SELECT count(*) FROM tab WHERE f64_countmin = 7.7;
SELECT count(*) FROM tab WHERE f64_uniq = 7.7;
SELECT count(*) FROM tab WHERE f64 = '7';
SELECT count(*) FROM tab WHERE f64_tdigest = '7';
SELECT count(*) FROM tab WHERE f64_minmax = '7';
-SELECT count(*) FROM tab WHERE f64_count_min = '7';
+SELECT count(*) FROM tab WHERE f64_countmin = '7';
SELECT count(*) FROM tab WHERE f64_uniq = '7';
SELECT count(*) FROM tab WHERE f64 = '7.7';
SELECT count(*) FROM tab WHERE f64_tdigest = '7.7';
SELECT count(*) FROM tab WHERE f64_minmax = '7.7';
-SELECT count(*) FROM tab WHERE f64_count_min = '7.7';
+SELECT count(*) FROM tab WHERE f64_countmin = '7.7';
SELECT count(*) FROM tab WHERE f64_uniq = '7.7';
SELECT 'f64 and <';
@@ -152,25 +152,25 @@ SELECT 'f64 and <';
SELECT count(*) FROM tab WHERE f64 < 7;
SELECT count(*) FROM tab WHERE f64_tdigest < 7;
SELECT count(*) FROM tab WHERE f64_minmax < 7;
-SELECT count(*) FROM tab WHERE f64_count_min < 7;
+SELECT count(*) FROM tab WHERE f64_countmin < 7;
SELECT count(*) FROM tab WHERE f64_uniq < 7;
SELECT count(*) FROM tab WHERE f64 < 7.7;
SELECT count(*) FROM tab WHERE f64_tdigest < 7.7;
SELECT count(*) FROM tab WHERE f64_minmax < 7.7;
-SELECT count(*) FROM tab WHERE f64_count_min < 7.7;
+SELECT count(*) FROM tab WHERE f64_countmin < 7.7;
SELECT count(*) FROM tab WHERE f64_uniq < 7.7;
SELECT count(*) FROM tab WHERE f64 < '7';
SELECT count(*) FROM tab WHERE f64_tdigest < '7';
SELECT count(*) FROM tab WHERE f64_minmax < '7';
-SELECT count(*) FROM tab WHERE f64_count_min < '7';
+SELECT count(*) FROM tab WHERE f64_countmin < '7';
SELECT count(*) FROM tab WHERE f64_uniq < '7';
SELECT count(*) FROM tab WHERE f64 < '7.7';
SELECT count(*) FROM tab WHERE f64_tdigest < '7.7';
SELECT count(*) FROM tab WHERE f64_minmax < '7.7';
-SELECT count(*) FROM tab WHERE f64_count_min < '7.7';
+SELECT count(*) FROM tab WHERE f64_countmin < '7.7';
SELECT count(*) FROM tab WHERE f64_uniq < '7.7';
-- dt ----------------------------------------------------
@@ -180,13 +180,13 @@ SELECT 'dt and =';
SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_minmax = '2024-08-08 11:12:13';
-SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13';
+SELECT count(*) FROM tab WHERE dt_countmin = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt = 7;
SELECT count(*) FROM tab WHERE dt_tdigest = 7;
SELECT count(*) FROM tab WHERE dt_minmax = 7;
-SELECT count(*) FROM tab WHERE dt_count_min = 7;
+SELECT count(*) FROM tab WHERE dt_countmin = 7;
SELECT count(*) FROM tab WHERE dt_uniq = 7;
SELECT 'dt and <';
@@ -194,13 +194,13 @@ SELECT 'dt and <';
SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_minmax < '2024-08-08 11:12:13';
-SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13';
+SELECT count(*) FROM tab WHERE dt_countmin < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt < 7;
SELECT count(*) FROM tab WHERE dt_tdigest < 7;
SELECT count(*) FROM tab WHERE dt_minmax < 7;
-SELECT count(*) FROM tab WHERE dt_count_min < 7;
+SELECT count(*) FROM tab WHERE dt_countmin < 7;
SELECT count(*) FROM tab WHERE dt_uniq < 7;
-- b ----------------------------------------------------
@@ -210,25 +210,25 @@ SELECT 'b and =';
SELECT count(*) FROM tab WHERE b = true;
SELECT count(*) FROM tab WHERE b_tdigest = true;
SELECT count(*) FROM tab WHERE b_minmax = true;
-SELECT count(*) FROM tab WHERE b_count_min = true;
+SELECT count(*) FROM tab WHERE b_countmin = true;
SELECT count(*) FROM tab WHERE b_uniq = true;
SELECT count(*) FROM tab WHERE b = 'true';
SELECT count(*) FROM tab WHERE b_tdigest = 'true';
SELECT count(*) FROM tab WHERE b_minmax = 'true';
-SELECT count(*) FROM tab WHERE b_count_min = 'true';
+SELECT count(*) FROM tab WHERE b_countmin = 'true';
SELECT count(*) FROM tab WHERE b_uniq = 'true';
SELECT count(*) FROM tab WHERE b = 1;
SELECT count(*) FROM tab WHERE b_tdigest = 1;
SELECT count(*) FROM tab WHERE b_minmax = 1;
-SELECT count(*) FROM tab WHERE b_count_min = 1;
+SELECT count(*) FROM tab WHERE b_countmin = 1;
SELECT count(*) FROM tab WHERE b_uniq = 1;
SELECT count(*) FROM tab WHERE b = 1.1;
SELECT count(*) FROM tab WHERE b_tdigest = 1.1;
SELECT count(*) FROM tab WHERE b_minmax = 1.1;
-SELECT count(*) FROM tab WHERE b_count_min = 1.1;
+SELECT count(*) FROM tab WHERE b_countmin = 1.1;
SELECT count(*) FROM tab WHERE b_uniq = 1.1;
-- s ----------------------------------------------------
@@ -238,13 +238,13 @@ SELECT 's and =';
SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE }
-- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported
-- SELECT count(*) FROM tab WHERE s_minmax = 7; -- not supported
-SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE }
+SELECT count(*) FROM tab WHERE s_countmin = 7; -- { serverError NO_COMMON_TYPE }
SELECT count(*) FROM tab WHERE s_uniq = 7; -- { serverError NO_COMMON_TYPE }
SELECT count(*) FROM tab WHERE s = '7';
-- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported
-- SELECT count(*) FROM tab WHERE s_minmax = '7'; -- not supported
-SELECT count(*) FROM tab WHERE s_count_min = '7';
+SELECT count(*) FROM tab WHERE s_countmin = '7';
SELECT count(*) FROM tab WHERE s_uniq = '7';
DROP TABLE tab;
diff --git a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh
index b9603e75d2e..466f0d01a7f 100755
--- a/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh
+++ b/tests/queries/0_stateless/02888_replicated_merge_tree_creation.sh
@@ -5,6 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
+CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3 --database_replicated_allow_replicated_engine_arguments 3"
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS test_exception_replicated SYNC"
diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.reference b/tests/queries/0_stateless/03174_json_compact_with_progress.reference
new file mode 100644
index 00000000000..cdbe7cfcb3e
--- /dev/null
+++ b/tests/queries/0_stateless/03174_json_compact_with_progress.reference
@@ -0,0 +1,15 @@
+1
+{"meta": [{"name":"value", "type":"UInt8"}, {"name":"name", "type":"String"}]}
+{"data":[1, "a"]}
+{"data":[2, "b"]}
+{"data":[3, "c"]}
+{"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":33}}
+2
+{"meta": [{"name":"name", "type":"String"}, {"name":"c", "type":"UInt64"}]}
+{"data":["a", "1"]}
+{"data":["b", "1"]}
+{"data":["c", "1"]}
+{"totals": ["", "3"]}
+{"statistics": {"rows":3, "elapsed":ELAPSED, "rows_read":3, "bytes_read":30}}
+3
+Value passed to 'throwIf' function is non-zero:
diff --git a/tests/queries/0_stateless/03174_json_compact_with_progress.sh b/tests/queries/0_stateless/03174_json_compact_with_progress.sh
new file mode 100755
index 00000000000..b15dc7cfdb2
--- /dev/null
+++ b/tests/queries/0_stateless/03174_json_compact_with_progress.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;"
+
+$CLICKHOUSE_CLIENT -q "SELECT 1;"
+# Check JSONCompactWithProgress Output
+$CLICKHOUSE_CLIENT -q "CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value;"
+$CLICKHOUSE_CLIENT -q "INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c');"
+$CLICKHOUSE_CLIENT -q "SELECT * FROM test_table FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g'
+
+$CLICKHOUSE_CLIENT -q "SELECT 2;"
+# Check Totals
+$CLICKHOUSE_CLIENT -q "SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactWithProgress settings max_block_size=2;" | grep -v --text "progress" | sed -E 's/"elapsed":[0-9]+\.[0-9]+/"elapsed":ELAPSED/g'
+
+$CLICKHOUSE_CLIENT -q "SELECT 3;"
+# Check exceptions
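+# cut -c42-88 skips the {"exception": "Code: ..."} prefix and keeps only the stable
+# "Value passed to 'throwIf' function is non-zero:" part of the message.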
+${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "SELECT throwIf(number = 15), 1::Int64 as a, '\"' from numbers(100) format JSONCompactWithProgress settings output_format_json_quote_64bit_integers=1, max_block_size=10" | grep "exception" | cut -c42-88
+
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table;"
diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql
new file mode 100644
index 00000000000..f207581f482
--- /dev/null
+++ b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql
@@ -0,0 +1,10 @@
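+-- The reference file is empty: the FINAL + JOIN query over a table with Array(Dynamic)
+-- in its sorting key only has to complete without throwing.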
+SET allow_experimental_dynamic_type = 1;
+DROP TABLE IF EXISTS t0;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY (c0);
+CREATE TABLE t1 (c0 Array(Dynamic), c1 Int) ENGINE = MergeTree() ORDER BY (c0);
+INSERT INTO t1 (c0, c1) VALUES ([18446717433683171873], 13623876564923702671), ([-4], 6111684076076982207);
+SELECT 1 FROM t0 FINAL JOIN t1 ON TRUE;
+DROP TABLE t0;
+DROP TABLE t1;
+
diff --git a/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference
new file mode 100644
index 00000000000..8dbf92d6590
--- /dev/null
+++ b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.reference
@@ -0,0 +1 @@
+{'Hello':'2020-01-01 00:00:00'}
diff --git a/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql
new file mode 100644
index 00000000000..484a16bb22f
--- /dev/null
+++ b/tests/queries/0_stateless/03231_values_respect_format_settings_in_fields_conversion.sql
@@ -0,0 +1,7 @@
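+-- Conversion of fields in the Values format must respect format settings:
+-- with date_time_input_format='best_effort', '01/01/2020' parses as a DateTime.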
+drop table if exists test;
+create table test (map Map(String, DateTime)) engine=Memory;
+set date_time_input_format='best_effort';
+insert into test values (map('Hello', '01/01/2020'));
+select * from test;
+drop table test;
+
diff --git a/tests/queries/1_stateful/00177_memory_bound_merging.sh b/tests/queries/1_stateful/00177_memory_bound_merging.sh
index 1110ab9a61d..3744d89f354 100755
--- a/tests/queries/1_stateful/00177_memory_bound_merging.sh
+++ b/tests/queries/1_stateful/00177_memory_bound_merging.sh
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
check_replicas_read_in_order() {
# NOTE: lack of "current_database = '$CLICKHOUSE_DATABASE'" filter is made on purpose
- $CLICKHOUSE_CLIENT -nq "
+ $CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS;
SELECT COUNT() > 0
@@ -22,7 +22,7 @@ check_replicas_read_in_order() {
# at some point we had a bug in this logic (see https://github.com/ClickHouse/ClickHouse/pull/45892#issue-1566140414)
test1() {
query_id="query_id_memory_bound_merging_$RANDOM$RANDOM"
- $CLICKHOUSE_CLIENT --query_id="$query_id" -nq "
+ $CLICKHOUSE_CLIENT --query_id="$query_id" -q "
SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost';
SELECT URL, EventDate, max(URL)
@@ -39,7 +39,7 @@ test1() {
# at some point we had a bug in this logic (see https://github.com/ClickHouse/ClickHouse/pull/45892#issue-1566140414)
test2() {
query_id="query_id_memory_bound_merging_$RANDOM$RANDOM"
- $CLICKHOUSE_CLIENT --query_id="$query_id" -nq "
+ $CLICKHOUSE_CLIENT --query_id="$query_id" -q "
SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost';
SELECT URL, EventDate, max(URL)
@@ -53,7 +53,7 @@ test2() {
}
test3() {
- $CLICKHOUSE_CLIENT -nq "
+ $CLICKHOUSE_CLIENT -q "
SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost';
SET max_threads = 16, read_in_order_two_level_merge_threshold = 1000, query_plan_aggregation_in_order = 1, distributed_aggregation_memory_efficient = 1;
diff --git a/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh b/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh
index f9fea2c1dad..bf44f2d7ce7 100755
--- a/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh
+++ b/tests/queries/1_stateful/00180_no_seek_avoiding_when_reading_from_cache.sh
@@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT -q "SELECT * FROM hits_s3_sampled WHERE URL LIKE '%google%' O
query_id=02906_read_from_cache_$RANDOM
$CLICKHOUSE_CLIENT --query_id ${query_id} -q "SELECT * FROM hits_s3_sampled WHERE URL LIKE '%google%' ORDER BY EventTime LIMIT 10 FORMAT Null SETTINGS filesystem_cache_reserve_space_wait_lock_timeout_milliseconds=2000"
-$CLICKHOUSE_CLIENT -nq "
+$CLICKHOUSE_CLIENT -q "
SYSTEM FLUSH LOGS;
-- AsynchronousReaderIgnoredBytes = 0: no seek-avoiding happened
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index 0e08a5f8540..111047a285f 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -120,6 +120,7 @@ CMPLNT
CMake
CMakeLists
CODECS
+CountMin
COVID
CPUFrequencyMHz
CPUs
@@ -421,6 +422,7 @@ JSONCompactStringsEachRowWithNames
JSONCompactStringsEachRowWithNamesAndTypes
+JSONCompactWithProgress
JSONDynamicPaths
JSONDynamicPathsWithTypes
JSONEachRow
JSONEachRowWithProgress
JSONExtract
@@ -1916,6 +1918,7 @@ jsoncompactstrings
jsoncompactstringseachrow
jsoncompactstringseachrowwithnames
jsoncompactstringseachrowwithnamesandtypes
+jsoncompactwithprogress
jsoneachrow
jsoneachrowwithprogress
jsonobjecteachrow