Merge branch 'master' into refactor_statistics

# Conflicts:
#	src/Storages/StatisticsDescription.cpp
JackyWoo 2024-09-10 09:44:18 +08:00
commit 6f8165b2c5
112 changed files with 2009 additions and 839 deletions

View File

@ -15,162 +15,64 @@ set(ICUDATA_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/icudata/")
# These lists of sources were generated from build log of the original ICU build system (configure + make).
set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/umath.cpp"
"${ICU_SOURCE_DIR}/common/utypes.cpp"
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
"${ICU_SOURCE_DIR}/common/umutex.cpp"
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
"${ICU_SOURCE_DIR}/common/uinit.cpp"
"${ICU_SOURCE_DIR}/common/uobject.cpp"
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
"${ICU_SOURCE_DIR}/common/charstr.cpp"
"${ICU_SOURCE_DIR}/common/cstr.cpp"
"${ICU_SOURCE_DIR}/common/udata.cpp"
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
"${ICU_SOURCE_DIR}/common/utrace.cpp"
"${ICU_SOURCE_DIR}/common/uhash.cpp"
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
"${ICU_SOURCE_DIR}/common/uenum.cpp"
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
"${ICU_SOURCE_DIR}/common/uvector.cpp"
"${ICU_SOURCE_DIR}/common/ustack.cpp"
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
"${ICU_SOURCE_DIR}/common/resource.cpp"
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
"${ICU_SOURCE_DIR}/common/resbund.cpp"
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
"${ICU_SOURCE_DIR}/common/ucat.cpp"
"${ICU_SOURCE_DIR}/common/locmap.cpp"
"${ICU_SOURCE_DIR}/common/uloc.cpp"
"${ICU_SOURCE_DIR}/common/locid.cpp"
"${ICU_SOURCE_DIR}/common/locutil.cpp"
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
"${ICU_SOURCE_DIR}/common/lsr.cpp"
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/appendable.cpp"
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
"${ICU_SOURCE_DIR}/common/ustring.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/cstring.cpp"
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
"${ICU_SOURCE_DIR}/common/utext.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
"${ICU_SOURCE_DIR}/common/unorm.cpp"
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/chariter.cpp"
"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
"${ICU_SOURCE_DIR}/common/uiter.cpp"
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
"${ICU_SOURCE_DIR}/common/uchar.cpp"
"${ICU_SOURCE_DIR}/common/uprops.cpp"
"${ICU_SOURCE_DIR}/common/ucase.cpp"
"${ICU_SOURCE_DIR}/common/propname.cpp"
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
"${ICU_SOURCE_DIR}/common/ushape.cpp"
"${ICU_SOURCE_DIR}/common/uscript.cpp"
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
"${ICU_SOURCE_DIR}/common/unames.cpp"
"${ICU_SOURCE_DIR}/common/utrie.cpp"
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/bmpset.cpp"
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
"${ICU_SOURCE_DIR}/common/uset.cpp"
"${ICU_SOURCE_DIR}/common/uniset.cpp"
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
"${ICU_SOURCE_DIR}/common/caniter.cpp"
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/brkeng.cpp"
"${ICU_SOURCE_DIR}/common/brkiter.cpp"
"${ICU_SOURCE_DIR}/common/bytesinkutil.cpp"
"${ICU_SOURCE_DIR}/common/bytestream.cpp"
"${ICU_SOURCE_DIR}/common/bytestrie.cpp"
"${ICU_SOURCE_DIR}/common/bytestriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/bytestrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/caniter.cpp"
"${ICU_SOURCE_DIR}/common/characterproperties.cpp"
"${ICU_SOURCE_DIR}/common/chariter.cpp"
"${ICU_SOURCE_DIR}/common/charstr.cpp"
"${ICU_SOURCE_DIR}/common/cmemory.cpp"
"${ICU_SOURCE_DIR}/common/cstr.cpp"
"${ICU_SOURCE_DIR}/common/cstring.cpp"
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/dictbe.cpp"
"${ICU_SOURCE_DIR}/common/dictionarydata.cpp"
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
"${ICU_SOURCE_DIR}/common/edits.cpp"
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
"${ICU_SOURCE_DIR}/common/errorcode.cpp"
"${ICU_SOURCE_DIR}/common/filteredbrk.cpp"
"${ICU_SOURCE_DIR}/common/filterednormalizer2.cpp"
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/loadednormalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/localebuilder.cpp"
"${ICU_SOURCE_DIR}/common/localematcher.cpp"
"${ICU_SOURCE_DIR}/common/localeprioritylist.cpp"
"${ICU_SOURCE_DIR}/common/locavailable.cpp"
"${ICU_SOURCE_DIR}/common/locbased.cpp"
"${ICU_SOURCE_DIR}/common/locdispnames.cpp"
"${ICU_SOURCE_DIR}/common/locdistance.cpp"
"${ICU_SOURCE_DIR}/common/locdspnm.cpp"
"${ICU_SOURCE_DIR}/common/locid.cpp"
"${ICU_SOURCE_DIR}/common/loclikely.cpp"
"${ICU_SOURCE_DIR}/common/loclikelysubtags.cpp"
"${ICU_SOURCE_DIR}/common/locmap.cpp"
"${ICU_SOURCE_DIR}/common/locresdata.cpp"
"${ICU_SOURCE_DIR}/common/locutil.cpp"
"${ICU_SOURCE_DIR}/common/lsr.cpp"
"${ICU_SOURCE_DIR}/common/lstmbe.cpp"
"${ICU_SOURCE_DIR}/common/messagepattern.cpp"
"${ICU_SOURCE_DIR}/common/mlbe.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2.cpp"
"${ICU_SOURCE_DIR}/common/normalizer2impl.cpp"
"${ICU_SOURCE_DIR}/common/normlzr.cpp"
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
"${ICU_SOURCE_DIR}/common/patternprops.cpp"
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/propname.cpp"
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
"${ICU_SOURCE_DIR}/common/punycode.cpp"
"${ICU_SOURCE_DIR}/common/putil.cpp"
"${ICU_SOURCE_DIR}/common/rbbi.cpp"
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/rbbidata.cpp"
"${ICU_SOURCE_DIR}/common/rbbinode.cpp"
"${ICU_SOURCE_DIR}/common/rbbirb.cpp"
@ -178,166 +80,180 @@ set(ICUUC_SOURCES
"${ICU_SOURCE_DIR}/common/rbbisetb.cpp"
"${ICU_SOURCE_DIR}/common/rbbistbl.cpp"
"${ICU_SOURCE_DIR}/common/rbbitblb.cpp"
"${ICU_SOURCE_DIR}/common/rbbi_cache.cpp"
"${ICU_SOURCE_DIR}/common/resbund.cpp"
"${ICU_SOURCE_DIR}/common/resbund_cnv.cpp"
"${ICU_SOURCE_DIR}/common/resource.cpp"
"${ICU_SOURCE_DIR}/common/restrace.cpp"
"${ICU_SOURCE_DIR}/common/ruleiter.cpp"
"${ICU_SOURCE_DIR}/common/schriter.cpp"
"${ICU_SOURCE_DIR}/common/serv.cpp"
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servlk.cpp"
"${ICU_SOURCE_DIR}/common/servlkf.cpp"
"${ICU_SOURCE_DIR}/common/servls.cpp"
"${ICU_SOURCE_DIR}/common/servnotf.cpp"
"${ICU_SOURCE_DIR}/common/servrbf.cpp"
"${ICU_SOURCE_DIR}/common/servslkf.cpp"
"${ICU_SOURCE_DIR}/common/uidna.cpp"
"${ICU_SOURCE_DIR}/common/usprep.cpp"
"${ICU_SOURCE_DIR}/common/uts46.cpp"
"${ICU_SOURCE_DIR}/common/punycode.cpp"
"${ICU_SOURCE_DIR}/common/util.cpp"
"${ICU_SOURCE_DIR}/common/util_props.cpp"
"${ICU_SOURCE_DIR}/common/parsepos.cpp"
"${ICU_SOURCE_DIR}/common/locbased.cpp"
"${ICU_SOURCE_DIR}/common/cwchar.cpp"
"${ICU_SOURCE_DIR}/common/wintz.cpp"
"${ICU_SOURCE_DIR}/common/dtintrv.cpp"
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
"${ICU_SOURCE_DIR}/common/propsvec.cpp"
"${ICU_SOURCE_DIR}/common/ulist.cpp"
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
"${ICU_SOURCE_DIR}/common/icudataver.cpp"
"${ICU_SOURCE_DIR}/common/icuplug.cpp"
"${ICU_SOURCE_DIR}/common/sharedobject.cpp"
"${ICU_SOURCE_DIR}/common/simpleformatter.cpp"
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
"${ICU_SOURCE_DIR}/common/pluralmap.cpp"
"${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
"${ICU_SOURCE_DIR}/common/restrace.cpp"
"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
"${ICU_SOURCE_DIR}/common/lstmbe.cpp")
"${ICU_SOURCE_DIR}/common/stringpiece.cpp"
"${ICU_SOURCE_DIR}/common/stringtriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/uarrsort.cpp"
"${ICU_SOURCE_DIR}/common/ubidi.cpp"
"${ICU_SOURCE_DIR}/common/ubidi_props.cpp"
"${ICU_SOURCE_DIR}/common/ubidiln.cpp"
"${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
"${ICU_SOURCE_DIR}/common/ubidiwrt.cpp"
"${ICU_SOURCE_DIR}/common/ubrk.cpp"
"${ICU_SOURCE_DIR}/common/ucase.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap.cpp"
"${ICU_SOURCE_DIR}/common/ucasemap_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ucat.cpp"
"${ICU_SOURCE_DIR}/common/uchar.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrie.cpp"
"${ICU_SOURCE_DIR}/common/ucharstriebuilder.cpp"
"${ICU_SOURCE_DIR}/common/ucharstrieiterator.cpp"
"${ICU_SOURCE_DIR}/common/uchriter.cpp"
"${ICU_SOURCE_DIR}/common/ucln_cmn.cpp"
"${ICU_SOURCE_DIR}/common/ucmndata.cpp"
"${ICU_SOURCE_DIR}/common/ucnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv2022.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_bld.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ct.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_err.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_ext.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_io.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_lmb.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_set.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u16.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u32.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u7.cpp"
"${ICU_SOURCE_DIR}/common/ucnv_u8.cpp"
"${ICU_SOURCE_DIR}/common/ucnvbocu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvdisp.cpp"
"${ICU_SOURCE_DIR}/common/ucnvhz.cpp"
"${ICU_SOURCE_DIR}/common/ucnvisci.cpp"
"${ICU_SOURCE_DIR}/common/ucnvlat1.cpp"
"${ICU_SOURCE_DIR}/common/ucnvmbcs.cpp"
"${ICU_SOURCE_DIR}/common/ucnvscsu.cpp"
"${ICU_SOURCE_DIR}/common/ucnvsel.cpp"
"${ICU_SOURCE_DIR}/common/ucol_swp.cpp"
"${ICU_SOURCE_DIR}/common/ucptrie.cpp"
"${ICU_SOURCE_DIR}/common/ucurr.cpp"
"${ICU_SOURCE_DIR}/common/udata.cpp"
"${ICU_SOURCE_DIR}/common/udatamem.cpp"
"${ICU_SOURCE_DIR}/common/udataswp.cpp"
"${ICU_SOURCE_DIR}/common/uenum.cpp"
"${ICU_SOURCE_DIR}/common/uhash.cpp"
"${ICU_SOURCE_DIR}/common/uhash_us.cpp"
"${ICU_SOURCE_DIR}/common/uidna.cpp"
"${ICU_SOURCE_DIR}/common/uinit.cpp"
"${ICU_SOURCE_DIR}/common/uinvchar.cpp"
"${ICU_SOURCE_DIR}/common/uiter.cpp"
"${ICU_SOURCE_DIR}/common/ulist.cpp"
"${ICU_SOURCE_DIR}/common/uloc.cpp"
"${ICU_SOURCE_DIR}/common/uloc_keytype.cpp"
"${ICU_SOURCE_DIR}/common/uloc_tag.cpp"
"${ICU_SOURCE_DIR}/common/ulocale.cpp"
"${ICU_SOURCE_DIR}/common/ulocbuilder.cpp"
"${ICU_SOURCE_DIR}/common/umapfile.cpp"
"${ICU_SOURCE_DIR}/common/umath.cpp"
"${ICU_SOURCE_DIR}/common/umutablecptrie.cpp"
"${ICU_SOURCE_DIR}/common/umutex.cpp"
"${ICU_SOURCE_DIR}/common/unames.cpp"
"${ICU_SOURCE_DIR}/common/unifiedcache.cpp"
"${ICU_SOURCE_DIR}/common/unifilt.cpp"
"${ICU_SOURCE_DIR}/common/unifunct.cpp"
"${ICU_SOURCE_DIR}/common/uniset.cpp"
"${ICU_SOURCE_DIR}/common/uniset_closure.cpp"
"${ICU_SOURCE_DIR}/common/uniset_props.cpp"
"${ICU_SOURCE_DIR}/common/unisetspan.cpp"
"${ICU_SOURCE_DIR}/common/unistr.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case.cpp"
"${ICU_SOURCE_DIR}/common/unistr_case_locale.cpp"
"${ICU_SOURCE_DIR}/common/unistr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/unistr_props.cpp"
"${ICU_SOURCE_DIR}/common/unistr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/unorm.cpp"
"${ICU_SOURCE_DIR}/common/unormcmp.cpp"
"${ICU_SOURCE_DIR}/common/uobject.cpp"
"${ICU_SOURCE_DIR}/common/uprops.cpp"
"${ICU_SOURCE_DIR}/common/ures_cnv.cpp"
"${ICU_SOURCE_DIR}/common/uresbund.cpp"
"${ICU_SOURCE_DIR}/common/uresdata.cpp"
"${ICU_SOURCE_DIR}/common/usc_impl.cpp"
"${ICU_SOURCE_DIR}/common/uscript.cpp"
"${ICU_SOURCE_DIR}/common/uscript_props.cpp"
"${ICU_SOURCE_DIR}/common/uset.cpp"
"${ICU_SOURCE_DIR}/common/uset_props.cpp"
"${ICU_SOURCE_DIR}/common/usetiter.cpp"
"${ICU_SOURCE_DIR}/common/ushape.cpp"
"${ICU_SOURCE_DIR}/common/usprep.cpp"
"${ICU_SOURCE_DIR}/common/ustack.cpp"
"${ICU_SOURCE_DIR}/common/ustr_cnv.cpp"
"${ICU_SOURCE_DIR}/common/ustr_titlecase_brkiter.cpp"
"${ICU_SOURCE_DIR}/common/ustr_wcs.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase.cpp"
"${ICU_SOURCE_DIR}/common/ustrcase_locale.cpp"
"${ICU_SOURCE_DIR}/common/ustrenum.cpp"
"${ICU_SOURCE_DIR}/common/ustrfmt.cpp"
"${ICU_SOURCE_DIR}/common/ustring.cpp"
"${ICU_SOURCE_DIR}/common/ustrtrns.cpp"
"${ICU_SOURCE_DIR}/common/utext.cpp"
"${ICU_SOURCE_DIR}/common/utf_impl.cpp"
"${ICU_SOURCE_DIR}/common/util.cpp"
"${ICU_SOURCE_DIR}/common/util_props.cpp"
"${ICU_SOURCE_DIR}/common/utrace.cpp"
"${ICU_SOURCE_DIR}/common/utrie.cpp"
"${ICU_SOURCE_DIR}/common/utrie2.cpp"
"${ICU_SOURCE_DIR}/common/utrie2_builder.cpp"
"${ICU_SOURCE_DIR}/common/utrie_swap.cpp"
"${ICU_SOURCE_DIR}/common/uts46.cpp"
"${ICU_SOURCE_DIR}/common/utypes.cpp"
"${ICU_SOURCE_DIR}/common/uvector.cpp"
"${ICU_SOURCE_DIR}/common/uvectr32.cpp"
"${ICU_SOURCE_DIR}/common/uvectr64.cpp"
"${ICU_SOURCE_DIR}/common/wintz.cpp")
set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
"${ICU_SOURCE_DIR}/i18n/format.cpp"
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/astro.cpp"
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/buddhcal.cpp"
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
"${ICU_SOURCE_DIR}/i18n/calendar.cpp"
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/cecal.cpp"
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
"${ICU_SOURCE_DIR}/i18n/chnsecal.cpp"
"${ICU_SOURCE_DIR}/i18n/choicfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/coleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/coll.cpp"
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
"${ICU_SOURCE_DIR}/i18n/bocsu.cpp"
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/collation.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdata.cpp"
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatareader.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatawriter.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfcd.cpp"
"${ICU_SOURCE_DIR}/i18n/collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
"${ICU_SOURCE_DIR}/i18n/collationcompare.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatin.cpp"
"${ICU_SOURCE_DIR}/i18n/collationkeys.cpp"
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/collationroot.cpp"
"${ICU_SOURCE_DIR}/i18n/collationrootelements.cpp"
"${ICU_SOURCE_DIR}/i18n/collationdatabuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/collationruleparser.cpp"
"${ICU_SOURCE_DIR}/i18n/collationbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/collationfastlatinbuilder.cpp"
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
"${ICU_SOURCE_DIR}/i18n/search.cpp"
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsets.cpp"
"${ICU_SOURCE_DIR}/i18n/collationsettings.cpp"
"${ICU_SOURCE_DIR}/i18n/collationtailoring.cpp"
"${ICU_SOURCE_DIR}/i18n/collationweights.cpp"
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/coptccal.cpp"
"${ICU_SOURCE_DIR}/i18n/cpdtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/casetrn.cpp"
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
"${ICU_SOURCE_DIR}/i18n/anytrans.cpp"
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
"${ICU_SOURCE_DIR}/i18n/brktrans.cpp"
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/csdetect.cpp"
"${ICU_SOURCE_DIR}/i18n/csmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/csr2022.cpp"
@ -346,60 +262,80 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/csrsbcs.cpp"
"${ICU_SOURCE_DIR}/i18n/csrucode.cpp"
"${ICU_SOURCE_DIR}/i18n/csrutf8.cpp"
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/basictz.cpp"
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/curramt.cpp"
"${ICU_SOURCE_DIR}/i18n/currfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
"${ICU_SOURCE_DIR}/i18n/currunit.cpp"
"${ICU_SOURCE_DIR}/i18n/dangical.cpp"
"${ICU_SOURCE_DIR}/i18n/datefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
"${ICU_SOURCE_DIR}/i18n/dcfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
"${ICU_SOURCE_DIR}/i18n/decimfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/displayoptions.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
"${ICU_SOURCE_DIR}/i18n/dtfmtsym.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/dtitvinf.cpp"
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/currpinf.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
"${ICU_SOURCE_DIR}/i18n/dtptngen.cpp"
"${ICU_SOURCE_DIR}/i18n/dtrule.cpp"
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
"${ICU_SOURCE_DIR}/i18n/esctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/ethpccal.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable.cpp"
"${ICU_SOURCE_DIR}/i18n/fmtable_cnv.cpp"
"${ICU_SOURCE_DIR}/i18n/format.cpp"
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/fphdlimp.cpp"
"${ICU_SOURCE_DIR}/i18n/fpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/decNumber.cpp"
"${ICU_SOURCE_DIR}/i18n/decContext.cpp"
"${ICU_SOURCE_DIR}/i18n/alphaindex.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/compactdecimalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/funcrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/gender.cpp"
"${ICU_SOURCE_DIR}/i18n/region.cpp"
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/gregocal.cpp"
"${ICU_SOURCE_DIR}/i18n/gregoimp.cpp"
"${ICU_SOURCE_DIR}/i18n/hebrwcal.cpp"
"${ICU_SOURCE_DIR}/i18n/indiancal.cpp"
"${ICU_SOURCE_DIR}/i18n/inputext.cpp"
"${ICU_SOURCE_DIR}/i18n/islamcal.cpp"
"${ICU_SOURCE_DIR}/i18n/iso8601cal.cpp"
"${ICU_SOURCE_DIR}/i18n/japancal.cpp"
"${ICU_SOURCE_DIR}/i18n/listformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/measfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit.cpp"
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/measure.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_arguments.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_checker.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_data_model.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_errors.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_evaluation.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_formattable.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_formatter.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_function_registry.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_parser.cpp"
"${ICU_SOURCE_DIR}/i18n/messageformat2_serializer.cpp"
"${ICU_SOURCE_DIR}/i18n/msgfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/name2uni.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrs.cpp"
"${ICU_SOURCE_DIR}/i18n/nfrule.cpp"
"${ICU_SOURCE_DIR}/i18n/nfsubs.cpp"
"${ICU_SOURCE_DIR}/i18n/nortrans.cpp"
"${ICU_SOURCE_DIR}/i18n/nultrans.cpp"
"${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
"${ICU_SOURCE_DIR}/i18n/number_decimfmtprops.cpp"
"${ICU_SOURCE_DIR}/i18n/number_fluent.cpp"
@ -407,7 +343,9 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_grouping.cpp"
"${ICU_SOURCE_DIR}/i18n/number_integerwidth.cpp"
"${ICU_SOURCE_DIR}/i18n/number_longnames.cpp"
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_modifiers.cpp"
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_notation.cpp"
"${ICU_SOURCE_DIR}/i18n/number_output.cpp"
"${ICU_SOURCE_DIR}/i18n/number_padding.cpp"
@ -415,46 +353,125 @@ set(ICUI18N_SOURCES
"${ICU_SOURCE_DIR}/i18n/number_patternstring.cpp"
"${ICU_SOURCE_DIR}/i18n/number_rounding.cpp"
"${ICU_SOURCE_DIR}/i18n/number_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
"${ICU_SOURCE_DIR}/i18n/number_asformat.cpp"
"${ICU_SOURCE_DIR}/i18n/number_mapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_multiplier.cpp"
"${ICU_SOURCE_DIR}/i18n/number_currencysymbols.cpp"
"${ICU_SOURCE_DIR}/i18n/number_simple.cpp"
"${ICU_SOURCE_DIR}/i18n/number_skeletons.cpp"
"${ICU_SOURCE_DIR}/i18n/number_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-string-to-double.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-double-to-string.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-bignum.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-cached-powers.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-fast-dtoa.cpp"
"${ICU_SOURCE_DIR}/i18n/double-conversion-strtod.cpp"
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/erarules.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
"${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
"${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
"${ICU_SOURCE_DIR}/i18n/number_utils.cpp"
"${ICU_SOURCE_DIR}/i18n/numfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_affixes.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_compositions.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_currency.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_decimal.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_parsednumber.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_scientific.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_symbols.cpp"
"${ICU_SOURCE_DIR}/i18n/numparse_validators.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_fluent.cpp"
"${ICU_SOURCE_DIR}/i18n/numrange_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/numsys.cpp"
"${ICU_SOURCE_DIR}/i18n/olsontz.cpp"
"${ICU_SOURCE_DIR}/i18n/persncal.cpp"
"${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
"${ICU_SOURCE_DIR}/i18n/plurfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/plurrule.cpp"
"${ICU_SOURCE_DIR}/i18n/quant.cpp"
"${ICU_SOURCE_DIR}/i18n/quantityformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/rbnf.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_data.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_pars.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_rule.cpp"
"${ICU_SOURCE_DIR}/i18n/rbt_set.cpp"
"${ICU_SOURCE_DIR}/i18n/rbtz.cpp"
"${ICU_SOURCE_DIR}/i18n/regexcmp.cpp"
"${ICU_SOURCE_DIR}/i18n/regeximp.cpp"
"${ICU_SOURCE_DIR}/i18n/regexst.cpp"
"${ICU_SOURCE_DIR}/i18n/regextxt.cpp"
"${ICU_SOURCE_DIR}/i18n/region.cpp"
"${ICU_SOURCE_DIR}/i18n/reldatefmt.cpp"
"${ICU_SOURCE_DIR}/i18n/reldtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/rematch.cpp"
"${ICU_SOURCE_DIR}/i18n/remtrans.cpp"
"${ICU_SOURCE_DIR}/i18n/repattrn.cpp"
"${ICU_SOURCE_DIR}/i18n/rulebasedcollator.cpp"
"${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/scriptset.cpp"
"${ICU_SOURCE_DIR}/i18n/search.cpp"
"${ICU_SOURCE_DIR}/i18n/selfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/simpletz.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/smpdtfst.cpp"
"${ICU_SOURCE_DIR}/i18n/sortkey.cpp"
"${ICU_SOURCE_DIR}/i18n/standardplural.cpp"
"${ICU_SOURCE_DIR}/i18n/string_segment.cpp"
"${ICU_SOURCE_DIR}/i18n/strmatch.cpp"
"${ICU_SOURCE_DIR}/i18n/strrepl.cpp"
"${ICU_SOURCE_DIR}/i18n/stsearch.cpp"
"${ICU_SOURCE_DIR}/i18n/taiwncal.cpp"
"${ICU_SOURCE_DIR}/i18n/timezone.cpp"
"${ICU_SOURCE_DIR}/i18n/titletrn.cpp"
"${ICU_SOURCE_DIR}/i18n/tmunit.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutamt.cpp"
"${ICU_SOURCE_DIR}/i18n/tmutfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/tolowtrn.cpp"
"${ICU_SOURCE_DIR}/i18n/toupptrn.cpp"
"${ICU_SOURCE_DIR}/i18n/translit.cpp"
"${ICU_SOURCE_DIR}/i18n/transreg.cpp"
"${ICU_SOURCE_DIR}/i18n/tridpars.cpp"
"${ICU_SOURCE_DIR}/i18n/tzfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/tzgnames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames.cpp"
"${ICU_SOURCE_DIR}/i18n/tznames_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/tzrule.cpp"
"${ICU_SOURCE_DIR}/i18n/tztrans.cpp"
"${ICU_SOURCE_DIR}/i18n/ucal.cpp"
"${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_res.cpp"
"${ICU_SOURCE_DIR}/i18n/ucol_sit.cpp"
"${ICU_SOURCE_DIR}/i18n/ucoleitr.cpp"
"${ICU_SOURCE_DIR}/i18n/ucsdet.cpp"
"${ICU_SOURCE_DIR}/i18n/udat.cpp"
"${ICU_SOURCE_DIR}/i18n/udateintervalformat.cpp"
"${ICU_SOURCE_DIR}/i18n/udatpg.cpp"
"${ICU_SOURCE_DIR}/i18n/ufieldpositer.cpp"
"${ICU_SOURCE_DIR}/i18n/uitercollationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/ulistformatter.cpp"
"${ICU_SOURCE_DIR}/i18n/ulocdata.cpp"
"${ICU_SOURCE_DIR}/i18n/umsg.cpp"
"${ICU_SOURCE_DIR}/i18n/unesctrn.cpp"
"${ICU_SOURCE_DIR}/i18n/uni2name.cpp"
"${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
"${ICU_SOURCE_DIR}/i18n/units_data.cpp"
"${ICU_SOURCE_DIR}/i18n/units_router.cpp")
"${ICU_SOURCE_DIR}/i18n/units_router.cpp"
"${ICU_SOURCE_DIR}/i18n/unum.cpp"
"${ICU_SOURCE_DIR}/i18n/unumsys.cpp"
"${ICU_SOURCE_DIR}/i18n/upluralrules.cpp"
"${ICU_SOURCE_DIR}/i18n/uregex.cpp"
"${ICU_SOURCE_DIR}/i18n/uregexc.cpp"
"${ICU_SOURCE_DIR}/i18n/uregion.cpp"
"${ICU_SOURCE_DIR}/i18n/usearch.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_build.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_conf.cpp"
"${ICU_SOURCE_DIR}/i18n/uspoof_impl.cpp"
"${ICU_SOURCE_DIR}/i18n/utf16collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utf8collationiterator.cpp"
"${ICU_SOURCE_DIR}/i18n/utmscale.cpp"
"${ICU_SOURCE_DIR}/i18n/utrans.cpp"
"${ICU_SOURCE_DIR}/i18n/vtzone.cpp"
"${ICU_SOURCE_DIR}/i18n/vzone.cpp"
"${ICU_SOURCE_DIR}/i18n/windtfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/winnmfmt.cpp"
"${ICU_SOURCE_DIR}/i18n/wintzimpl.cpp"
"${ICU_SOURCE_DIR}/i18n/zonemeta.cpp"
"${ICU_SOURCE_DIR}/i18n/zrule.cpp"
"${ICU_SOURCE_DIR}/i18n/ztrans.cpp")
file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
enable_language(ASM)

View File

@ -995,34 +995,42 @@ They can be used for prewhere optimization only if we enable `set allow_statisti
The minimum and maximum column values, which allow estimating the selectivity of range filters on numeric columns.
Syntax: `minmax`
- `TDigest`
[TDigest](https://github.com/tdunning/t-digest) sketches which allow computing approximate percentiles (e.g. the 90th percentile) for numeric columns.
Syntax: `tdigest`
- `Uniq`
[HyperLogLog](https://en.wikipedia.org/wiki/HyperLogLog) sketches which provide an estimation of how many distinct values a column contains.
Syntax: `uniq`
- `CountMin`
[CountMin](https://en.wikipedia.org/wiki/Count%E2%80%93min_sketch) sketches which provide an approximate count of the frequency of each value in a column.
Syntax: `countmin`
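As an illustration, here is a minimal sketch of declaring statistics with the syntax names above. It assumes the experimental statistics feature is enabled via `allow_experimental_statistics`, uses a hypothetical table `tab`, and assumes the usual `ALTER TABLE ... ADD/MATERIALIZE STATISTICS` forms for existing columns:
```sql
SET allow_experimental_statistics = 1;

CREATE TABLE tab
(
    a Int64 STATISTICS(minmax, tdigest),
    b String STATISTICS(uniq, countmin)
)
ENGINE = MergeTree
ORDER BY a;

-- Add a statistic to an existing column and build it for already existing parts.
ALTER TABLE tab ADD STATISTICS a TYPE uniq;
ALTER TABLE tab MATERIALIZE STATISTICS a;
```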
### Supported Data Types {#supported-data-types}
| | (U)Int* | Float* | Decimal(*) | Date* | Boolean | Enum* | (Fixed)String |
|-----------|---------|--------|------------|-------|---------|-------|------------------|
| count_min | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| MinMax | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ |
| TDigest | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✗ |
| Uniq | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| | (U)Int*, Float*, Decimal(*), Date*, Boolean, Enum* | String or FixedString |
|-----------|----------------------------------------------------|-----------------------|
| CountMin | ✔ | ✔ |
| MinMax | ✔ | ✗ |
| TDigest | ✔ | ✗ |
| Uniq | ✔ | ✔ |
### Supported Operations {#supported-operations}
| | Equality filters (==) | Range filters (>, >=, <, <=) |
|-----------|-----------------------|------------------------------|
| count_min | ✔ | ✗ |
| CountMin | ✔ | ✗ |
| MinMax | ✗ | ✔ |
| TDigest | ✗ | ✔ |
| Uniq | ✔ | ✗ |
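Putting the two tables together, here is a hedged sketch of the kinds of filters each statistic type targets, reusing the hypothetical `tab` table from the sketch above. The optimization only applies when `allow_statistics_optimize` is enabled, as noted at the top of this section:
```sql
SET allow_statistics_optimize = 1;

-- Equality filter on a String column: CountMin and Uniq are the applicable statistics.
SELECT count() FROM tab WHERE b = 'value';

-- Range filter on a numeric column: MinMax and TDigest are the applicable statistics.
SELECT count() FROM tab WHERE a > 100 AND a <= 1000;
```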

View File

@ -39,6 +39,7 @@ The supported formats are:
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
| [JSONCompactWithProgress](#jsoncompactwithprogress) | ✗ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
@ -988,6 +989,59 @@ Example:
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
## JSONCompactWithProgress {#jsoncompactwithprogress}
In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.
Each row is one of the following object types:
1. **Metadata Object (`meta`)**
- Describes the structure of the data rows.
- Fields: `name` (column name), `type` (data type, e.g., `UInt32`, `String`, etc.).
- Example: `{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}`
- Appears before any data objects.
2. **Data Object (`data`)**
- Represents a row of query results.
- Fields: An array with values corresponding to the columns defined in the metadata.
- Example: `{"data":["1", "John Doe"]}`
- Appears after the metadata object, one per row.
3. **Progress Information Object (`progress`)**
- Provides real-time progress feedback during query execution.
- Fields: `read_rows`, `read_bytes`, `written_rows`, `written_bytes`, `total_rows_to_read`, `result_rows`, `result_bytes`, `elapsed_ns`.
- Example: `{"progress":{"read_rows":"8","read_bytes":"168"}}`
- May appear intermittently.
4. **Statistics Object (`statistics`)**
- Summarizes query execution statistics.
- Fields: `rows`, `rows_before_limit_at_least`, `elapsed`, `rows_read`, `bytes_read`.
- Example: `{"statistics": {"rows":2, "elapsed":0.001995, "rows_read":8}}`
- Appears at the end.
5. **Exception Object (`exception`)**
- Represents an error that occurred during query execution.
- Fields: A single text field containing the error message.
- Example: `{"exception": "Code: 395. DB::Exception: Value passed to 'throwIf' function is non-zero..."}`
- Appears when an error is encountered.
6. **Totals Object (`totals`)**
- Provides the totals for each numeric column in the result set.
- Fields: An array with total values corresponding to the columns defined in the metadata.
- Example: `{"totals": ["", "3"]}`
- Appears at the end of the data rows, if applicable.
Example:
```json
{"meta": [{"name":"id", "type":"UInt32"}, {"name":"name", "type":"String"}]}
{"progress":{"read_rows":"8","read_bytes":"168","written_rows":"0","written_bytes":"0","total_rows_to_read":"2","result_rows":"0","result_bytes":"0","elapsed_ns":"0"}}
{"data":["1", "John Doe"]}
{"data":["2", "Joe Doe"]}
{"statistics": {"rows":2, "rows_before_limit_at_least":8, "elapsed":0.001995, "rows_read":8, "bytes_read":168}}
```
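For reference, output in this format can be requested with the usual `FORMAT` clause; the table and columns below are hypothetical:
```sql
SELECT id, name
FROM users
ORDER BY id
FORMAT JSONCompactWithProgress;
```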
## JSONEachRow {#jsoneachrow}
In this format, ClickHouse outputs each row as a separate, newline-delimited JSON object.

View File

@ -1463,26 +1463,29 @@ Examples:
## logger {#logger}
Logging settings.
The location and format of log messages.
Keys:
- `level` Logging level. Acceptable values: `trace`, `debug`, `information`, `warning`, `error`.
- `log` The log file. Contains all the entries according to `level`.
- `errorlog` Error log file.
- `size` Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` The number of archived log files that ClickHouse stores.
- `console` Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
- `console_log_level` Logging level for console. Defaults to `level`.
- `use_syslog` - Log to syslog as well.
- `syslog_level` - Logging level for logging to syslog.
- `stream_compress` Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
- `formatting` Specify log format to be printed in console log (currently only `json` supported).
- `level` Log level. Acceptable values: `none` (turn logging off), `fatal`, `critical`, `error`, `warning`, `notice`, `information`,
`debug`, `trace`, `test`
- `log` The path to the log file.
- `errorlog` The path to the error log file.
- `size` Rotation policy: Maximum size of the log files in bytes. Once the log file size exceeds this threshold, it is renamed and archived, and a new log file is created.
- `count` Rotation policy: The maximum number of historical log files that ClickHouse keeps.
- `stream_compress` Compress log messages using LZ4. Set to `1` or `true` to enable.
- `console` Do not write log messages to the log files; print them to the console instead. Set to `1` or `true` to enable. Default is `1` if ClickHouse does not run in daemon mode, `0` otherwise.
- `console_log_level` Log level for console output. Defaults to `level`.
- `formatting` Log format for console output. Currently, only `json` is supported.
- `use_syslog` - Also forward log output to syslog.
- `syslog_level` - Log level for logging to syslog.
Both log and error log file names (only file names, not directories) support date and time format specifiers.
**Log format specifiers**
**Format specifiers**
Using the following format specifiers, you can define a pattern for the resulting file name. “Example” column shows possible results for `2023-07-06 18:32:07`.
File names in `log` and `errorlog` paths support the format specifiers below for the resulting file name (the directory part of the path does not support them).
The “Example” column shows the result for `2023-07-06 18:32:07`.
| Specifier | Description | Example |
|-------------|---------------------------------------------------------------------------------------------------------------------|--------------------------|
@ -1537,18 +1540,37 @@ Using the following format specifiers, you can define a pattern for the resultin
</logger>
```
Writing to the console can be configured. Config example:
To print log messages only to the console:
``` xml
<logger>
<level>information</level>
<console>1</console>
<console>true</console>
</logger>
```
**Per-level Overrides**
The log level of individual loggers can be overridden. For example, to mute all messages from the loggers "Backup" and "RBAC":
```xml
<logger>
<levels>
<logger>
<name>Backup</name>
<level>none</level>
</logger>
<logger>
<name>RBAC</name>
<level>none</level>
</logger>
</levels>
</logger>
```
### syslog
Writing to the syslog is also supported. Config example:
To write log messages additionally to syslog:
``` xml
<logger>
@ -1562,14 +1584,12 @@ Writing to the syslog is also supported. Config example:
</logger>
```
Keys for syslog:
Keys for `<syslog>`:
- use_syslog — Required setting if you want to write to the syslog.
- address — The host\[:port\] of syslogd. If omitted, the local daemon is used.
- hostname — Optional. The name of the host that logs are sent from.
- facility — [The syslog facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility) in uppercase letters with the “LOG_” prefix: (`LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, and so on).
Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- format Message format. Possible values: `bsd` and `syslog.`
- `address` — The address of syslog in format `host\[:port\]`. If omitted, the local daemon is used.
- `hostname` — The name of the host from which logs are sent. Optional.
- `facility` — The syslog [facility keyword](https://en.wikipedia.org/wiki/Syslog#Facility). Must be specified in uppercase with a “LOG_” prefix, e.g. `LOG_USER`, `LOG_DAEMON`, `LOG_LOCAL3`, etc. Default value: `LOG_USER` if `address` is specified, `LOG_DAEMON` otherwise.
- `format` Log message format. Possible values: `bsd` and `syslog`.
### Log formats
@ -1588,6 +1608,7 @@ You can specify the log format that will be outputted in the console log. Curren
"source_line": "192"
}
```
To enable JSON logging support, use the following snippet:
```xml

View File

@ -1617,8 +1617,19 @@ The calculation is performed relative to specific points in time:
If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.
**Syntax**
```sql
toStartOfInterval(value, INTERVAL x unit[, time_zone])
toStartOfInterval(value, INTERVAL x unit[, origin[, time_zone]])
```
The second overload emulates TimescaleDB's `time_bucket()` function and PostgreSQL's `date_bin()` function, e.g.
```sql
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30'));
```
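A hedged sketch of what the example above should return: with a one-minute interval anchored at the origin `14:35:30`, buckets start at the half-minute, so `14:45:00` falls into the bucket beginning at `14:44:30`.
```sql
SELECT toStartOfInterval(toDateTime('2023-01-01 14:45:00'), INTERVAL 1 MINUTE, toDateTime('2023-01-01 14:35:30')) AS bucket;
-- Expected (date_bin-style semantics): 2023-01-01 14:44:30
```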
**See Also**
- [date_trunc](#date_trunc)
## toTime
@ -3884,19 +3895,29 @@ Result:
└───────────────────────────────────────────────────────────────────────┘
```
## timeSlots(StartTime, Duration,\[, Size\])
## timeSlots
For a time interval starting at StartTime and continuing for Duration seconds, it returns an array of moments in time, consisting of points from this interval rounded down to the Size in seconds. Size is an optional parameter set to 1800 (30 minutes) by default.
This is necessary, for example, when searching for pageviews in the corresponding session.
Accepts DateTime and DateTime64 as StartTime argument. For DateTime, Duration and Size arguments must be `UInt32`. For DateTime64 they must be `Decimal64`.
Returns an array of DateTime/DateTime64 (return type matches the type of StartTime). For DateTime64, the return value's scale can differ from the scale of StartTime --- the highest scale among all given arguments is taken.
Example:
**Syntax**
```sql
timeSlots(StartTime, Duration[, Size])
```
**Example**
```sql
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```
Result:
``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │

View File

@ -15,7 +15,14 @@ The `FROM` clause specifies the source to read data from:
A subquery is another `SELECT` query that may be specified in parentheses inside the `FROM` clause.
The `FROM` clause can contain multiple data sources, separated by commas, which is equivalent to performing a [CROSS JOIN](../../../sql-reference/statements/select/join.md) on them.
`FROM` can optionally appear before a `SELECT` clause. This is a ClickHouse-specific extension of standard SQL which makes `SELECT` statements easier to read. Example:
```sql
FROM table
SELECT *
```
## FINAL Modifier
@ -45,19 +52,19 @@ As an alternative to using `FINAL`, it is sometimes possible to use different qu
### Example Usage
**Using the `FINAL` keyword**
Using the `FINAL` keyword
```sql
SELECT x, y FROM mytable FINAL WHERE x > 1;
```
**Using `FINAL` as a query-level setting**
Using `FINAL` as a query-level setting
```sql
SELECT x, y FROM mytable WHERE x > 1 SETTINGS final = 1;
```
**Using `FINAL` as a session-level setting**
Using `FINAL` as a session-level setting
```sql
SET final = 1;

View File

@ -459,6 +459,8 @@ public:
bool isParallelizeMergePrepareNeeded() const override { return is_parallelize_merge_prepare_needed; }
constexpr static bool parallelizeMergeWithKey() { return true; }
void parallelizeMergePrepare(AggregateDataPtrs & places, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const override
{
if constexpr (is_parallelize_merge_prepare_needed)

View File

@ -145,6 +145,8 @@ public:
virtual bool isParallelizeMergePrepareNeeded() const { return false; }
constexpr static bool parallelizeMergeWithKey() { return false; }
virtual void parallelizeMergePrepare(AggregateDataPtrs & /*places*/, ThreadPool & /*thread_pool*/, std::atomic<bool> & /*is_cancelled*/) const
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "parallelizeMergePrepare() with thread pool parameter isn't implemented for {} ", getName());
@ -169,7 +171,7 @@ public:
/// Merges states (on which src places points to) with other states (on which dst places points to) of current aggregation function
/// then destroy states (on which src places points to).
virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, Arena * arena) const = 0;
virtual void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * src_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const = 0;
/// Serializes state (to transmit it over the network, for example).
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0; /// NOLINT
@ -499,11 +501,15 @@ public:
static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
}
void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, Arena * arena) const override
void mergeAndDestroyBatch(AggregateDataPtr * dst_places, AggregateDataPtr * rhs_places, size_t size, size_t offset, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled, Arena * arena) const override
{
for (size_t i = 0; i < size; ++i)
{
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
if constexpr (Derived::parallelizeMergeWithKey())
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, thread_pool, is_cancelled, arena);
else
static_cast<const Derived *>(this)->merge(dst_places[i] + offset, rhs_places[i] + offset, arena);
static_cast<const Derived *>(this)->destroy(rhs_places[i] + offset);
}
}

View File

@ -101,6 +101,13 @@ public:
auto merge(const UniqExactSet & other, ThreadPool * thread_pool = nullptr, std::atomic<bool> * is_cancelled = nullptr)
{
/// If the size is large, we may convert the singleLevelHash to twoLevelHash and merge in parallel.
if (other.size() > 40000)
{
if (isSingleLevel())
convertToTwoLevel();
}
if (isSingleLevel() && other.isTwoLevel())
convertToTwoLevel();

View File

@ -913,11 +913,15 @@ void RestorerFromBackup::createTable(const QualifiedTableName & table_name)
table_info.database = DatabaseCatalog::instance().getDatabase(table_name.database);
DatabasePtr database = table_info.database;
auto query_context = Context::createCopy(context);
query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
/// Execute CREATE TABLE query (we call IDatabase::createTableRestoredFromBackup() to allow the database to do some
/// database-specific things).
database->createTableRestoredFromBackup(
create_table_query,
context,
query_context,
restore_coordination,
std::chrono::duration_cast<std::chrono::milliseconds>(create_table_timeout).count());
}

View File

@ -67,10 +67,18 @@ std::string SigsegvErrorString(const siginfo_t & info, [[maybe_unused]] const uc
= info.si_addr == nullptr ? "NULL pointer"s : (shouldShowAddress(info.si_addr) ? fmt::format("{}", info.si_addr) : ""s);
const std::string_view access =
#if defined(__x86_64__) && !defined(OS_FREEBSD) && !defined(OS_DARWIN) && !defined(__arm__) && !defined(__powerpc__)
(context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#if defined(__arm__)
"<not available on ARM>";
#elif defined(__powerpc__)
"<not available on PowerPC>";
#elif defined(OS_DARWIN)
"<not available on Darwin>";
#elif defined(OS_FREEBSD)
"<not available on FreeBSD>";
#elif !defined(__x86_64__)
"<not available>";
#else
"";
(context.uc_mcontext.gregs[REG_ERR] & 0x02) ? "write" : "read";
#endif
std::string_view message;

View File

@ -710,7 +710,8 @@ class IColumn;
M(UInt64, max_distributed_depth, 5, "Maximum distributed query depth", 0) \
M(Bool, database_replicated_always_detach_permanently, false, "Execute DETACH TABLE as DETACH TABLE PERMANENTLY if database engine is Replicated", 0) \
M(Bool, database_replicated_allow_only_replicated_engine, false, "Allow to create only Replicated tables in database with engine Replicated", 0) \
M(Bool, database_replicated_allow_replicated_engine_arguments, true, "Allow to create only Replicated tables in database with engine Replicated with explicit arguments", 0) \
M(UInt64, database_replicated_allow_replicated_engine_arguments, 0, "0 - Don't allow to explicitly specify ZooKeeper path and replica name for *MergeTree tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified path and use default one instead.", 0) \
M(UInt64, database_replicated_allow_explicit_uuid, 0, "0 - Don't allow to explicitly specify UUIDs for tables in Replicated databases. 1 - Allow. 2 - Allow, but ignore the specified UUID and generate a random one instead.", 0) \
M(Bool, database_replicated_allow_heavy_create, false, "Allow long-running DDL queries (CREATE AS SELECT and POPULATE) in Replicated database engine. Note that it can block DDL queue for a long time.", 0) \
M(Bool, cloud_mode, false, "Only available in ClickHouse Cloud", 0) \
M(UInt64, cloud_mode_engine, 1, "Only available in ClickHouse Cloud", 0) \
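As an editorial illustration of the three-valued settings described above (not part of the diff; the database and table names are hypothetical):
```sql
-- database_replicated_allow_replicated_engine_arguments:
--   0 (default): reject explicit ZooKeeper path / replica name for *MergeTree tables in Replicated databases
--   1: allow them as before
--   2: accept them syntactically, but ignore the given path and use the default one
SET database_replicated_allow_replicated_engine_arguments = 2;

CREATE TABLE replicated_db.t (x UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/t', '{replica}')
ORDER BY x;
-- Accepted, but the explicit path is ignored and the default path is used instead.

-- database_replicated_allow_explicit_uuid (0/1/2) works analogously for explicitly specified table UUIDs.
```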

View File

@ -76,7 +76,9 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"create_if_not_exists", false, false, "New setting."},
{"allow_materialized_view_with_bad_select", true, true, "Support (but not enable yet) stricter validation in CREATE MATERIALIZED VIEW"},
{"output_format_always_quote_identifiers", false, false, "New setting."},
{"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."}
{"output_format_identifier_quoting_style", "Backticks", "Backticks", "New setting."},
{"database_replicated_allow_replicated_engine_arguments", 1, 0, "Don't allow explicit arguments by default"},
{"database_replicated_allow_explicit_uuid", 0, 0, "Added a new setting to disallow explicitly specifying table UUID"},
}
},
{"24.8",

View File

@ -441,7 +441,8 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
bool is_create_query = mode == LoadingStrictnessLevel::CREATE;
String replica_host_id;
if (current_zookeeper->tryGet(replica_path, replica_host_id))
bool replica_exists_in_zk = current_zookeeper->tryGet(replica_path, replica_host_id);
if (replica_exists_in_zk)
{
if (replica_host_id == DROPPED_MARK && !is_create_query)
{
@ -454,7 +455,7 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
String host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
String host_id_default = getHostID(getContext(), db_uuid, false);
if (is_create_query || (replica_host_id != host_id && replica_host_id != host_id_default))
if (replica_host_id != host_id && replica_host_id != host_id_default)
{
throw Exception(
ErrorCodes::REPLICA_ALREADY_EXISTS,
@ -484,13 +485,20 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
current_zookeeper->set(replica_path + "/replica_group", replica_group_name, -1);
createEmptyLogEntry(current_zookeeper);
}
/// Needed to mark all the queries
/// in the range (max log ptr at replica ZooKeeper nodes creation, max log ptr after replica recovery] as successful.
String max_log_ptr_at_creation_str;
if (current_zookeeper->tryGet(replica_path + "/max_log_ptr_at_creation", max_log_ptr_at_creation_str))
max_log_ptr_at_creation = parse<UInt32>(max_log_ptr_at_creation_str);
}
else if (is_create_query)
if (is_create_query)
{
/// Create new replica. Throws if replica with the same name already exists
/// Create replica nodes in ZooKeeper. If newly initialized nodes already exist, reuse them.
createReplicaNodesInZooKeeper(current_zookeeper);
}
else
else if (!replica_exists_in_zk)
{
/// It's not CREATE query, but replica does not exist. Probably it was dropped.
/// Do not create anything, continue as readonly.
@ -606,37 +614,84 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt
"already contains some data and it does not look like Replicated database path.", zookeeper_path);
/// Write host name to replica_path, it will protect from multiple replicas with the same name
auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
const auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
const std::vector<String> check_paths = {
replica_path,
replica_path + "/replica_group",
replica_path + "/digest",
};
bool nodes_exist = true;
auto check_responses = current_zookeeper->tryGet(check_paths);
for (size_t i = 0; i < check_responses.size(); ++i)
{
const auto response = check_responses[i];
if (response.error == Coordination::Error::ZNONODE)
{
nodes_exist = false;
break;
}
else if (response.error != Coordination::Error::ZOK)
{
throw zkutil::KeeperException::fromPath(response.error, check_paths[i]);
}
}
if (nodes_exist)
{
const std::vector<String> expected_data = {
host_id,
replica_group_name,
"0",
};
for (size_t i = 0; i != expected_data.size(); ++i)
{
if (check_responses[i].data != expected_data[i])
{
throw Exception(
ErrorCodes::REPLICA_ALREADY_EXISTS,
"Replica node {} in ZooKeeper already exists and contains unexpected value: {}",
quoteString(check_paths[i]), quoteString(check_responses[i].data));
}
}
LOG_DEBUG(log, "Newly initialized replica nodes found in ZooKeeper, reusing them");
createEmptyLogEntry(current_zookeeper);
return;
}
for (int attempts = 10; attempts > 0; --attempts)
{
Coordination::Stat stat;
String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat);
const String max_log_ptr_str = current_zookeeper->get(zookeeper_path + "/max_log_ptr", &stat);
Coordination::Requests ops;
ops.emplace_back(zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent));
ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent));
/// In addition to creating the replica nodes, we record the max_log_ptr at the instant where
/// we declared ourself as an existing replica. We'll need this during recoverLostReplica to
/// notify other nodes that issued new queries while this node was recovering.
ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version));
const Coordination::Requests ops = {
zkutil::makeCreateRequest(replica_path, host_id, zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest(replica_path + "/log_ptr", "0", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest(replica_path + "/digest", "0", zkutil::CreateMode::Persistent),
zkutil::makeCreateRequest(replica_path + "/replica_group", replica_group_name, zkutil::CreateMode::Persistent),
/// Previously, this method was not idempotent and max_log_ptr_at_creation was only stored in memory.
/// We need to store max_log_ptr_at_creation in ZooKeeper to make this method idempotent during replica creation.
zkutil::makeCreateRequest(replica_path + "/max_log_ptr_at_creation", max_log_ptr_str, zkutil::CreateMode::Persistent),
zkutil::makeCheckRequest(zookeeper_path + "/max_log_ptr", stat.version),
};
Coordination::Responses ops_responses;
const auto code = current_zookeeper->tryMulti(ops, ops_responses);
Coordination::Responses responses;
const auto code = current_zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZOK)
{
max_log_ptr_at_creation = parse<UInt32>(max_log_ptr_str);
break;
createEmptyLogEntry(current_zookeeper);
return;
}
else if (code == Coordination::Error::ZNODEEXISTS || attempts == 1)
if (attempts == 1)
{
/// If it's our last attempt, or if the replica already exists, fail immediately.
zkutil::KeeperMultiException::check(code, ops, responses);
zkutil::KeeperMultiException::check(code, ops, ops_responses);
}
}
createEmptyLogEntry(current_zookeeper);
}
void DatabaseReplicated::beforeLoadingMetadata(ContextMutablePtr context_, LoadingStrictnessLevel mode)
@ -852,18 +907,6 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
bool maybe_replica_macros = info.expanded_other;
bool enable_functional_tests_helper = getContext()->getConfigRef().has("_functional_tests_helper_database_replicated_replace_args_macros");
if (!enable_functional_tests_helper)
{
if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments)
LOG_WARNING(log, "It's not recommended to explicitly specify zookeeper_path and replica_name in ReplicatedMergeTree arguments");
else
throw Exception(ErrorCodes::INCORRECT_QUERY,
"It's not allowed to specify explicit zookeeper_path and replica_name "
"for ReplicatedMergeTree arguments in Replicated database. If you really want to "
"specify them explicitly, enable setting "
"database_replicated_allow_replicated_engine_arguments.");
}
if (maybe_shard_macros && maybe_replica_macros)
return;
@ -876,7 +919,9 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora
return;
}
throw Exception(ErrorCodes::INCORRECT_QUERY,
/// We will replace it with default arguments if the setting is 2
if (query_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments != 2)
throw Exception(ErrorCodes::INCORRECT_QUERY,
"Explicit zookeeper_path and replica_name are specified in ReplicatedMergeTree arguments. "
"If you really want to specify it explicitly, then you should use some macros "
"to distinguish different shards and replicas");
@ -1145,6 +1190,9 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
/// so we need to allow experimental features that can be used in a CREATE query
enableAllExperimentalSettings(query_context);
query_context->setSetting("database_replicated_allow_explicit_uuid", 3);
query_context->setSetting("database_replicated_allow_replicated_engine_arguments", 3);
auto txn = std::make_shared<ZooKeeperMetadataTransaction>(current_zookeeper, zookeeper_path, false, "");
query_context->initZooKeeperMetadataTransaction(txn);
return query_context;

View File

@ -483,6 +483,33 @@ namespace JSONUtils
writeArrayEnd(out, 1);
}
void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out)
{
writeCompactArrayStart(out, 0, "meta");
for (size_t i = 0; i < names.size(); ++i)
{
writeCompactObjectStart(out);
writeTitle("name", out, 0, "");
/// The field names are pre-escaped to be put into a JSON string literal.
writeChar('"', out);
writeString(names[i], out);
writeChar('"', out);
writeFieldCompactDelimiter(out);
writeTitle("type", out, 0, "");
writeJSONString(types[i]->getName(), out, settings);
writeCompactObjectEnd(out);
if (i + 1 < names.size())
writeFieldCompactDelimiter(out);
}
writeCompactArrayEnd(out);
}
void writeAdditionalInfo(
size_t rows,
size_t rows_before_limit,
@ -530,6 +557,45 @@ namespace JSONUtils
}
}
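/// Writes the trailing "statistics" object used by the compact JSON output (presumably the new JSONCompactWithProgress format registered in this commit).
/// A sketch of its shape with illustrative values: {"statistics": {"rows": 100, "rows_before_limit_at_least": 100, "elapsed": 0.004, "rows_read": 100, "bytes_read": 800}}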
void writeCompactAdditionalInfo(
size_t rows,
size_t rows_before_limit,
bool applied_limit,
const Stopwatch & watch,
const Progress & progress,
bool write_statistics,
WriteBuffer & out)
{
writeCompactObjectStart(out);
writeCompactObjectStart(out, 0, "statistics");
writeTitle("rows", out, 0, "");
writeIntText(rows, out);
if (applied_limit)
{
writeFieldCompactDelimiter(out);
writeTitle("rows_before_limit_at_least", out, 0, "");
writeIntText(rows_before_limit, out);
}
if (write_statistics)
{
writeFieldCompactDelimiter(out);
writeTitle("elapsed", out, 0, "");
writeText(watch.elapsedSeconds(), out);
writeFieldCompactDelimiter(out);
writeTitle("rows_read", out, 0, "");
writeText(progress.read_rows.load(), out);
writeFieldCompactDelimiter(out);
writeTitle("bytes_read", out, 0, "");
writeText(progress.read_bytes.load(), out);
}
writeCompactObjectEnd(out);
writeCompactObjectEnd(out);
}
void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent)
{
writeTitle("exception", out, indent, " ");

View File

@ -99,6 +99,7 @@ namespace JSONUtils
WriteBuffer & out);
void writeMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out);
void writeCompactMetadata(const Names & names, const DataTypes & types, const FormatSettings & settings, WriteBuffer & out);
void writeAdditionalInfo(
size_t rows,
@ -111,6 +112,15 @@ namespace JSONUtils
bool write_statistics,
WriteBuffer & out);
void writeCompactAdditionalInfo(
size_t rows,
size_t rows_before_limit,
bool applied_limit,
const Stopwatch & watch,
const Progress & progress,
bool write_statistics,
WriteBuffer & out);
void writeException(const String & exception_message, WriteBuffer & out, const FormatSettings & settings, size_t indent = 0);
void skipColon(ReadBuffer & in);

View File

@ -95,6 +95,7 @@ void registerOutputFormatMarkdown(FormatFactory & factory);
void registerOutputFormatPostgreSQLWire(FormatFactory & factory);
void registerOutputFormatPrometheus(FormatFactory & factory);
void registerOutputFormatSQLInsert(FormatFactory & factory);
void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory);
/// Input only formats.
@ -242,6 +243,7 @@ void registerFormats()
registerOutputFormatCapnProto(factory);
registerOutputFormatPrometheus(factory);
registerOutputFormatSQLInsert(factory);
registerOutputFormatJSONCompactWithProgress(factory);
registerInputFormatRegexp(factory);
registerInputFormatJSONAsString(factory);

View File

@ -492,7 +492,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Nanosecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 nanoseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000000000)
{
@ -527,7 +527,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Microsecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 microseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000000)
{
@ -570,7 +570,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Millisecond>
{
throwDateTimeIsNotSupported(TO_START_OF_INTERVAL_NAME);
}
static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 milliseconds, const DateLUTImpl &, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
if (scale_multiplier < 1000)
{
@ -613,7 +613,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Second>
{
return time_zone.toStartOfSecondInterval(t, seconds);
}
static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 seconds, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfSecondInterval(t / scale_multiplier, seconds);
}
@ -634,7 +634,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Minute>
{
return time_zone.toStartOfMinuteInterval(t, minutes);
}
static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 minutes, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfMinuteInterval(t / scale_multiplier, minutes);
}
@ -655,7 +655,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Hour>
{
return time_zone.toStartOfHourInterval(t, hours);
}
static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 hours, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfHourInterval(t / scale_multiplier, hours);
}
@ -676,7 +676,7 @@ struct ToStartOfInterval<IntervalKind::Kind::Day>
{
return static_cast<UInt32>(time_zone.toStartOfDayInterval(time_zone.toDayNum(t), days));
}
static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 days, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 /*origin*/ = 0)
{
return time_zone.toStartOfDayInterval(time_zone.toDayNum(t / scale_multiplier), days);
}
@ -697,9 +697,13 @@ struct ToStartOfInterval<IntervalKind::Kind::Week>
{
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t), weeks);
}
static UInt16 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 weeks, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks);
if (origin == 0)
return time_zone.toStartOfWeekInterval(time_zone.toDayNum(t / scale_multiplier), weeks);
else
return ToStartOfInterval<IntervalKind::Kind::Day>::execute(t, weeks * 7, time_zone, scale_multiplier, origin);
}
};
@ -718,9 +722,23 @@ struct ToStartOfInterval<IntervalKind::Kind::Month>
{
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t), months);
}
static UInt16 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 months, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(t / scale_multiplier), months);
const Int64 scaled_time = t / scale_multiplier;
if (origin == 0)
return time_zone.toStartOfMonthInterval(time_zone.toDayNum(scaled_time), months);
else
{
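/// Worked example (illustrative dates): origin = 2023-01-15, time = scaled_time + scaled_origin = 2023-04-10, months = 2.
/// days = 10 - 15 = -5, months_to_add = 4 - 1 = 3 and is decremented to 2 because days < 0, years = 0,
/// so month_multiplier = (2 / 2) * 2 = 2 and the result is the offset of 2023-03-15 (origin plus 2 months) from the origin.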
const Int64 scaled_origin = origin / scale_multiplier;
const Int64 days = time_zone.toDayOfMonth(scaled_time + scaled_origin) - time_zone.toDayOfMonth(scaled_origin);
Int64 months_to_add = time_zone.toMonth(scaled_time + scaled_origin) - time_zone.toMonth(scaled_origin);
const Int64 years = time_zone.toYear(scaled_time + scaled_origin) - time_zone.toYear(scaled_origin);
months_to_add = days < 0 ? months_to_add - 1 : months_to_add;
months_to_add += years * 12;
Int64 month_multiplier = (months_to_add / months) * months;
return (time_zone.addMonths(time_zone.toDate(scaled_origin), month_multiplier) - time_zone.toDate(scaled_origin));
}
}
};
@ -739,9 +757,12 @@ struct ToStartOfInterval<IntervalKind::Kind::Quarter>
{
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t), quarters);
}
static UInt16 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 quarters, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters);
if (origin == 0)
return time_zone.toStartOfQuarterInterval(time_zone.toDayNum(t / scale_multiplier), quarters);
else
return ToStartOfInterval<IntervalKind::Kind::Month>::execute(t, quarters * 3, time_zone, scale_multiplier, origin);
}
};
@ -760,9 +781,12 @@ struct ToStartOfInterval<IntervalKind::Kind::Year>
{
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t), years);
}
static UInt16 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier)
static Int64 execute(Int64 t, Int64 years, const DateLUTImpl & time_zone, Int64 scale_multiplier, Int64 origin = 0)
{
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years);
if (origin == 0)
return time_zone.toStartOfYearInterval(time_zone.toDayNum(t / scale_multiplier), years);
else
return ToStartOfInterval<IntervalKind::Kind::Month>::execute(t, years * 12, time_zone, scale_multiplier, origin);
}
};

View File

@ -1,11 +1,15 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <Columns/ColumnArray.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnString.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/getLeastSupertype.h>
#include <Columns/ColumnArray.h>
#include <Core/Settings.h>
#include <Interpreters/castColumn.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Interpreters/Context.h>
#include <Interpreters/castColumn.h>
namespace DB
@ -44,11 +48,13 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
size_t num_elements = arguments.size();
const size_t num_elements = arguments.size();
if (num_elements == 0)
{
/// We should return constant empty array.
return result_type->createColumnConstWithDefaultValue(input_rows_count);
}
const DataTypePtr & elem_type = static_cast<const DataTypeArray &>(*result_type).getNestedType();
@ -60,7 +66,6 @@ public:
Columns columns_holder(num_elements);
ColumnRawPtrs column_ptrs(num_elements);
for (size_t i = 0; i < num_elements; ++i)
{
const auto & arg = arguments[i];
@ -77,35 +82,199 @@ public:
}
/// Create and fill the result array.
auto out = ColumnArray::create(elem_type->createColumn());
IColumn & out_data = out->getData();
IColumn::Offsets & out_offsets = out->getOffsets();
out_data.reserve(input_rows_count * num_elements);
out_offsets.resize(input_rows_count);
/// Fill out_offsets
out_offsets.resize_exact(input_rows_count);
IColumn::Offset current_offset = 0;
for (size_t i = 0; i < input_rows_count; ++i)
{
for (size_t j = 0; j < num_elements; ++j)
out_data.insertFrom(*column_ptrs[j], i);
current_offset += num_elements;
out_offsets[i] = current_offset;
}
/// Fill out_data
out_data.reserve(input_rows_count * num_elements);
if (num_elements == 1)
out_data.insertRangeFrom(*column_ptrs[0], 0, input_rows_count);
else
execute(column_ptrs, out_data, input_rows_count);
return out;
}
private:
bool execute(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
return executeNumber<UInt8>(columns, out_data, input_rows_count) || executeNumber<UInt16>(columns, out_data, input_rows_count)
|| executeNumber<UInt32>(columns, out_data, input_rows_count) || executeNumber<UInt64>(columns, out_data, input_rows_count)
|| executeNumber<UInt128>(columns, out_data, input_rows_count) || executeNumber<UInt256>(columns, out_data, input_rows_count)
|| executeNumber<Int8>(columns, out_data, input_rows_count) || executeNumber<Int16>(columns, out_data, input_rows_count)
|| executeNumber<Int32>(columns, out_data, input_rows_count) || executeNumber<Int64>(columns, out_data, input_rows_count)
|| executeNumber<Int128>(columns, out_data, input_rows_count) || executeNumber<Int256>(columns, out_data, input_rows_count)
|| executeNumber<Float32>(columns, out_data, input_rows_count) || executeNumber<Float64>(columns, out_data, input_rows_count)
|| executeNumber<Decimal32>(columns, out_data, input_rows_count)
|| executeNumber<Decimal64>(columns, out_data, input_rows_count)
|| executeNumber<Decimal128>(columns, out_data, input_rows_count)
|| executeNumber<Decimal256>(columns, out_data, input_rows_count)
|| executeNumber<DateTime64>(columns, out_data, input_rows_count) || executeString(columns, out_data, input_rows_count)
|| executeNullable(columns, out_data, input_rows_count) || executeTuple(columns, out_data, input_rows_count)
|| executeFixedString(columns, out_data, input_rows_count) || executeGeneric(columns, out_data, input_rows_count);
}
template <typename T>
bool executeNumber(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
using Container = ColumnVectorOrDecimal<T>::Container;
std::vector<const Container *> containers(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnVectorOrDecimal<T> * concrete_column = checkAndGetColumn<ColumnVectorOrDecimal<T>>(columns[i]);
if (!concrete_column)
return false;
containers[i] = &concrete_column->getData();
}
ColumnVectorOrDecimal<T> & concrete_out_data = assert_cast<ColumnVectorOrDecimal<T> &>(out_data);
Container & out_container = concrete_out_data.getData();
out_container.resize_exact(columns.size() * input_rows_count);
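/// Interleave the inputs row-major: the result for row r is [columns[0][r], columns[1][r], ..., columns[N-1][r]].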
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
const size_t base = row_i * columns.size();
for (size_t col_i = 0; col_i < columns.size(); ++col_i)
out_container[base + col_i] = (*containers[col_i])[row_i];
}
return true;
}
bool executeString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
size_t total_bytes = 0;
std::vector<const ColumnString *> concrete_columns(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnString * concrete_column = checkAndGetColumn<ColumnString>(columns[i]);
if (!concrete_column)
return false;
total_bytes += concrete_column->getChars().size();
concrete_columns[i] = concrete_column;
}
ColumnString & concrete_out_data = assert_cast<ColumnString &>(out_data);
auto & out_chars = concrete_out_data.getChars();
auto & out_offsets = concrete_out_data.getOffsets();
out_chars.resize_exact(total_bytes);
out_offsets.resize_exact(input_rows_count * columns.size());
size_t cur_out_offset = 0;
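/// ColumnString keeps every value zero-terminated, hence the extra byte written after each copied string (already accounted for in total_bytes).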
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
const size_t base = row_i * columns.size();
for (size_t col_i = 0; col_i < columns.size(); ++col_i)
{
StringRef ref = concrete_columns[col_i]->getDataAt(row_i);
memcpySmallAllowReadWriteOverflow15(&out_chars[cur_out_offset], ref.data, ref.size);
out_chars[cur_out_offset + ref.size] = 0;
cur_out_offset += ref.size + 1;
out_offsets[base + col_i] = cur_out_offset;
}
}
return true;
}
bool executeFixedString(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
std::vector<const ColumnFixedString *> concrete_columns(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnFixedString * concrete_column = checkAndGetColumn<ColumnFixedString>(columns[i]);
if (!concrete_column)
return false;
concrete_columns[i] = concrete_column;
}
ColumnFixedString & concrete_out_data = assert_cast<ColumnFixedString &>(out_data);
auto & out_chars = concrete_out_data.getChars();
const size_t n = concrete_out_data.getN();
size_t total_bytes = n * columns.size() * input_rows_count;
out_chars.resize_exact(total_bytes);
size_t curr_out_offset = 0;
for (size_t row_i = 0; row_i < input_rows_count; ++row_i)
{
for (size_t col_i = 0; col_i < columns.size(); ++col_i)
{
StringRef ref = concrete_columns[col_i]->getDataAt(row_i);
memcpySmallAllowReadWriteOverflow15(&out_chars[curr_out_offset], ref.data, n);
curr_out_offset += n;
}
}
return true;
}
bool executeNullable(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
ColumnRawPtrs null_maps(columns.size(), nullptr);
ColumnRawPtrs nested_columns(columns.size(), nullptr);
for (size_t i = 0; i < columns.size(); ++i)
{
const ColumnNullable * concrete_column = checkAndGetColumn<ColumnNullable>(columns[i]);
if (!concrete_column)
return false;
null_maps[i] = &concrete_column->getNullMapColumn();
nested_columns[i] = &concrete_column->getNestedColumn();
}
ColumnNullable & concrete_out_data = assert_cast<ColumnNullable &>(out_data);
auto & out_null_map = concrete_out_data.getNullMapColumn();
auto & out_nested_column = concrete_out_data.getNestedColumn();
execute(null_maps, out_null_map, input_rows_count);
execute(nested_columns, out_nested_column, input_rows_count);
return true;
}
bool executeTuple(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
ColumnTuple * concrete_out_data = typeid_cast<ColumnTuple *>(&out_data);
if (!concrete_out_data)
return false;
const size_t tuple_size = concrete_out_data->tupleSize();
for (size_t i = 0; i < tuple_size; ++i)
{
ColumnRawPtrs elem_columns(columns.size(), nullptr);
for (size_t j = 0; j < columns.size(); ++j)
{
const ColumnTuple * concrete_column = assert_cast<const ColumnTuple *>(columns[j]);
elem_columns[j] = &concrete_column->getColumn(i);
}
execute(elem_columns, concrete_out_data->getColumn(i), input_rows_count);
}
return true;
}
bool executeGeneric(const ColumnRawPtrs & columns, IColumn & out_data, size_t input_rows_count) const
{
for (size_t i = 0; i < input_rows_count; ++i)
for (const auto * column : columns)
out_data.insertFrom(*column, i);
return true;
}
String getName() const override
{
return name;
}
bool addField(DataTypePtr type_res, const Field & f, Array & arr) const;
bool use_variant_as_common_type = false;
};

View File

@ -2,6 +2,8 @@
#include <Columns/ColumnMap.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnsCommon.h>
#include <Columns/ColumnsNumber.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeTuple.h>
@ -13,7 +15,6 @@
#include <Interpreters/Context.h>
#include <Interpreters/castColumn.h>
#include <Common/HashTable/HashSet.h>
#include <Core/Settings.h>
namespace DB
@ -36,11 +37,18 @@ class FunctionMap : public IFunction
public:
static constexpr auto name = "map";
explicit FunctionMap(bool use_variant_as_common_type_) : use_variant_as_common_type(use_variant_as_common_type_) {}
explicit FunctionMap(ContextPtr context_)
: context(context_)
, use_variant_as_common_type(
context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type)
, function_array(FunctionFactory::instance().get("array", context))
, function_map_from_arrays(FunctionFactory::instance().get("mapFromArrays", context))
{
}
static FunctionPtr create(ContextPtr context)
{
return std::make_shared<FunctionMap>(context->getSettingsRef().allow_experimental_variant_type && context->getSettingsRef().use_variant_as_common_type);
return std::make_shared<FunctionMap>(context);
}
String getName() const override
@ -101,62 +109,38 @@ public:
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
{
size_t num_elements = arguments.size();
if (num_elements == 0)
return result_type->createColumnConstWithDefaultValue(input_rows_count);
ColumnsWithTypeAndName key_args;
ColumnsWithTypeAndName value_args;
for (size_t i = 0; i < num_elements; i += 2)
{
key_args.emplace_back(arguments[i]);
value_args.emplace_back(arguments[i+1]);
}
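/// E.g. map('a', 1, 'b', 2) is now evaluated as mapFromArrays(array('a', 'b'), array(1, 2)).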
const auto & result_type_map = static_cast<const DataTypeMap &>(*result_type);
const DataTypePtr & key_type = result_type_map.getKeyType();
const DataTypePtr & value_type = result_type_map.getValueType();
const DataTypePtr & key_array_type = std::make_shared<DataTypeArray>(key_type);
const DataTypePtr & value_array_type = std::make_shared<DataTypeArray>(value_type);
Columns columns_holder(num_elements);
ColumnRawPtrs column_ptrs(num_elements);
/// key_array = array(args[0], args[2]...)
ColumnPtr key_array = function_array->build(key_args)->execute(key_args, key_array_type, input_rows_count);
/// value_array = array(args[1], args[3]...)
ColumnPtr value_array = function_array->build(value_args)->execute(value_args, value_array_type, input_rows_count);
for (size_t i = 0; i < num_elements; ++i)
{
const auto & arg = arguments[i];
const auto to_type = i % 2 == 0 ? key_type : value_type;
ColumnPtr preprocessed_column = castColumn(arg, to_type);
preprocessed_column = preprocessed_column->convertToFullColumnIfConst();
columns_holder[i] = std::move(preprocessed_column);
column_ptrs[i] = columns_holder[i].get();
}
/// Create and fill the result map.
MutableColumnPtr keys_data = key_type->createColumn();
MutableColumnPtr values_data = value_type->createColumn();
MutableColumnPtr offsets = DataTypeNumber<IColumn::Offset>().createColumn();
size_t total_elements = input_rows_count * num_elements / 2;
keys_data->reserve(total_elements);
values_data->reserve(total_elements);
offsets->reserve(input_rows_count);
IColumn::Offset current_offset = 0;
for (size_t i = 0; i < input_rows_count; ++i)
{
for (size_t j = 0; j < num_elements; j += 2)
{
keys_data->insertFrom(*column_ptrs[j], i);
values_data->insertFrom(*column_ptrs[j + 1], i);
}
current_offset += num_elements / 2;
offsets->insert(current_offset);
}
auto nested_column = ColumnArray::create(
ColumnTuple::create(Columns{std::move(keys_data), std::move(values_data)}),
std::move(offsets));
return ColumnMap::create(nested_column);
/// result = mapFromArrays(key_array, value_array)
ColumnsWithTypeAndName map_args{{key_array, key_array_type, ""}, {value_array, value_array_type, ""}};
return function_map_from_arrays->build(map_args)->execute(map_args, result_type, input_rows_count);
}
private:
ContextPtr context;
bool use_variant_as_common_type = false;
FunctionOverloadResolverPtr function_array;
FunctionOverloadResolverPtr function_map_from_arrays;
};
/// mapFromArrays(keys, values) is a function that allows you to make key-value pair from a pair of arrays or maps
@ -173,6 +157,7 @@ public:
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForNulls() const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{

View File

@ -10,21 +10,31 @@
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <IO/WriteHelpers.h>
#include <algorithm>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
class FunctionToStartOfInterval : public IFunction
{
private:
enum class Overload
{
Default, /// toStartOfInterval(time, interval) or toStartOfInterval(time, interval, timezone)
Origin /// toStartOfInterval(time, interval, origin) or toStartOfInterval(time, interval, origin, timezone)
};
mutable Overload overload;
public:
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionToStartOfInterval>(); }
@ -34,7 +44,7 @@ public:
size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2}; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1, 2, 3}; }
bool hasInformationAboutMonotonicity() const override { return true; }
Monotonicity getMonotonicityForRange(const IDataType &, const Field &, const Field &) const override { return { .is_monotonic = true, .is_always_monotonic = true }; }
@ -72,6 +82,9 @@ public:
"Illegal type {} of 2nd argument of function {}, expected a time interval",
type_arg2->getName(), getName());
overload = Overload::Default;
/// Determine result type for default overload (no origin)
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
@ -97,13 +110,49 @@ public:
auto check_third_argument = [&]
{
const DataTypePtr & type_arg3 = arguments[2].type;
if (!isString(type_arg3))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of 3rd argument of function {}, expected a constant timezone string",
if (isString(type_arg3))
{
if (value_is_date && result_type == ResultType::Date)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64",
getName(), interval_type->getKind().toString());
}
else if (isDateOrDate32OrDateTimeOrDateTime64(type_arg3))
{
overload = Overload::Origin;
const DataTypePtr & type_arg1 = arguments[0].type;
if (isDate(type_arg1) && isDate(type_arg3))
result_type = ResultType::Date;
else if (isDate32(type_arg1) && isDate32(type_arg3))
result_type = ResultType::Date32;
else if (isDateTime(type_arg1) && isDateTime(type_arg3))
result_type = ResultType::DateTime;
else if (isDateTime64(type_arg1) && isDateTime64(type_arg3))
result_type = ResultType::DateTime64;
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same type", getName());
}
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. "
"This argument is optional and must be a constant String with timezone name or a Date/Date32/DateTime/DateTime64 with a constant origin",
type_arg3->getName(), getName());
if (value_is_date && result_type == ResultType::Date) /// weird why this is && instead of || but too afraid to change it
};
auto check_fourth_argument = [&]
{
if (overload != Overload::Origin) /// sanity check
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 3rd argument of function {}. "
"The third argument must a Date/Date32/DateTime/DateTime64 with a constant origin",
arguments[2].type->getName(), getName());
const DataTypePtr & type_arg4 = arguments[3].type;
if (!isString(type_arg4))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of 4th argument of function {}. "
"This argument is optional and must be a constant String with timezone name",
type_arg4->getName(), getName());
if (value_is_date && result_type == ResultType::Date)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"The timezone argument of function {} with interval type {} is allowed only when the 1st argument has type DateTime or DateTimt64",
"A timezone argument of function {} with interval type {} is allowed only when the 1st argument has the type DateTime or DateTime64",
getName(), interval_type->getKind().toString());
};
@ -118,10 +167,17 @@ public:
check_second_argument();
check_third_argument();
}
else if (arguments.size() == 4)
{
check_first_argument();
check_second_argument();
check_third_argument();
check_fourth_argument();
}
else
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 2 or 3",
"Number of arguments for function {} doesn't match: passed {}, must be 2, 3 or 4",
getName(), arguments.size());
}
@ -132,10 +188,19 @@ public:
case ResultType::Date32:
return std::make_shared<DataTypeDate32>();
case ResultType::DateTime:
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
{
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
return std::make_shared<DataTypeDateTime>(extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false));
}
case ResultType::DateTime64:
{
UInt32 scale = 0;
if (isDateTime64(arguments[0].type) && overload == Overload::Origin)
{
scale = assert_cast<const DataTypeDateTime64 &>(*arguments[0].type.get()).getScale();
if (assert_cast<const DataTypeDateTime64 &>(*arguments[2].type.get()).getScale() != scale)
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Datetime argument and origin argument for function {} must have the same scale", getName());
}
if (interval_type->getKind() == IntervalKind::Kind::Nanosecond)
scale = 9;
else if (interval_type->getKind() == IntervalKind::Kind::Microsecond)
@ -143,69 +208,103 @@ public:
else if (interval_type->getKind() == IntervalKind::Kind::Millisecond)
scale = 3;
return std::make_shared<DataTypeDateTime64>(scale, extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, false));
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
return std::make_shared<DataTypeDateTime64>(scale, extractTimeZoneNameFromFunctionArguments(arguments, time_zone_arg_num, 0, false));
}
}
std::unreachable();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t /* input_rows_count */) const override
{
const auto & time_column = arguments[0];
const auto & interval_column = arguments[1];
const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, 2, 0);
auto result_column = dispatchForTimeColumn(time_column, interval_column, result_type, time_zone, input_rows_count);
ColumnWithTypeAndName origin_column;
if (overload == Overload::Origin)
origin_column = arguments[2];
const size_t time_zone_arg_num = (overload == Overload::Default) ? 2 : 3;
const auto & time_zone = extractTimeZoneFromFunctionArguments(arguments, time_zone_arg_num, 0);
ColumnPtr result_column;
if (isDate(result_type))
result_column = dispatchForTimeColumn<DataTypeDate>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDate32(result_type))
result_column = dispatchForTimeColumn<DataTypeDate32>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDateTime(result_type))
result_column = dispatchForTimeColumn<DataTypeDateTime>(time_column, interval_column, origin_column, result_type, time_zone);
else if (isDateTime64(result_type))
result_column = dispatchForTimeColumn<DataTypeDateTime64>(time_column, interval_column, origin_column, result_type, time_zone);
return result_column;
}
private:
template <typename ReturnType>
ColumnPtr dispatchForTimeColumn(
const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone,
size_t input_rows_count) const
const ColumnWithTypeAndName & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone) const
{
const auto & time_column_type = *time_column.type.get();
const auto & time_column_col = *time_column.column.get();
if (isDateTime64(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime64>(&time_column_col);
auto scale = assert_cast<const DataTypeDateTime64 &>(time_column_type).getScale();
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDateTime64 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count, scale);
}
else if (isDateTime(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDateTime &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
}
else if (isDate(time_column_type))
if (isDate(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
return dispatchForIntervalColumn<ReturnType, DataTypeDate, ColumnDate>(assert_cast<const DataTypeDate &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDate32(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDate32>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, result_type, time_zone, input_rows_count);
return dispatchForIntervalColumn<ReturnType, DataTypeDate32, ColumnDate32>(assert_cast<const DataTypeDate32 &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDateTime(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime>(&time_column_col);
if (time_column_vec)
return dispatchForIntervalColumn<ReturnType, DataTypeDateTime, ColumnDateTime>(assert_cast<const DataTypeDateTime &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone);
}
else if (isDateTime64(time_column_type))
{
const auto * time_column_vec = checkAndGetColumn<ColumnDateTime64>(&time_column_col);
auto scale = assert_cast<const DataTypeDateTime64 &>(time_column_type).getScale();
if (time_column_vec)
return dispatchForIntervalColumn<ReturnType, DataTypeDateTime64, ColumnDateTime64>(assert_cast<const DataTypeDateTime64 &>(time_column_type), *time_column_vec, interval_column, origin_column, result_type, time_zone, scale);
}
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal column for 1st argument of function {}, expected a Date, Date32, DateTime or DateTime64", getName());
}
template <typename TimeDataType, typename TimeColumnType>
template <typename ReturnType, typename TimeDataType, typename TimeColumnType>
ColumnPtr dispatchForIntervalColumn(
const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale = 1) const
const TimeDataType & time_data_type, const TimeColumnType & time_column, const ColumnWithTypeAndName & interval_column, const ColumnWithTypeAndName & origin_column,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale = 1) const
{
const auto * interval_type = checkAndGetDataType<DataTypeInterval>(interval_column.type.get());
if (!interval_type)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a time interval", getName());
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
case IntervalKind::Kind::Microsecond:
case IntervalKind::Kind::Millisecond:
if (isDateOrDate32(time_data_type) || isDateTime(time_data_type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type {}", isDate(time_data_type) ? "Date" : "DateTime");
break;
case IntervalKind::Kind::Second:
case IntervalKind::Kind::Minute:
case IntervalKind::Kind::Hour:
if (isDateOrDate32(time_data_type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal interval kind for argument data type Date");
break;
default:
break;
}
const auto * interval_column_const_int64 = checkAndGetColumnConst<ColumnInt64>(interval_column.column.get());
if (!interval_column_const_int64)
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column for 2nd argument of function {}, must be a const time interval", getName());
@ -217,51 +316,102 @@ private:
switch (interval_type->getKind()) // NOLINT(bugprone-switch-missing-default-case)
{
case IntervalKind::Kind::Nanosecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Nanosecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Nanosecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Microsecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Microsecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Microsecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Millisecond:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime64, IntervalKind::Kind::Millisecond>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Millisecond>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Second:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Second>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Second>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Minute:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Minute>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Minute>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Hour:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Hour>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Hour>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Day:
return execute<TimeDataType, TimeColumnType, DataTypeDateTime, IntervalKind::Kind::Day>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Day>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Week:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Week>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Week>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Month:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Month>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Month>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Quarter:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Quarter>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Quarter>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
case IntervalKind::Kind::Year:
return execute<TimeDataType, TimeColumnType, DataTypeDate, IntervalKind::Kind::Year>(time_data_type, time_column, num_units, result_type, time_zone, input_rows_count, scale);
return execute<ReturnType, TimeDataType, TimeColumnType, IntervalKind::Kind::Year>(time_data_type, time_column, num_units, origin_column, result_type, time_zone, scale);
}
std::unreachable();
}
template <typename TimeDataType, typename TimeColumnType, typename ResultDataType, IntervalKind::Kind unit>
ColumnPtr execute(
const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units,
const DataTypePtr & result_type, const DateLUTImpl & time_zone, size_t input_rows_count, UInt16 scale) const
template <typename ResultDataType, typename TimeDataType, typename TimeColumnType, IntervalKind::Kind unit>
ColumnPtr execute(const TimeDataType &, const TimeColumnType & time_column_type, Int64 num_units, const ColumnWithTypeAndName & origin_column, const DataTypePtr & result_type, const DateLUTImpl & time_zone, UInt16 scale) const
{
using ResultColumnType = typename ResultDataType::ColumnType;
using ResultFieldType = typename ResultDataType::FieldType;
const auto & time_data = time_column_type.getData();
size_t size = time_data.size();
auto result_col = result_type->createColumn();
auto * col_to = assert_cast<ResultColumnType *>(result_col.get());
auto & result_data = col_to->getData();
result_data.resize(input_rows_count);
result_data.resize(size);
Int64 scale_multiplier = DecimalUtils::scaleMultiplier<DateTime64>(scale);
for (size_t i = 0; i != input_rows_count; ++i)
result_data[i] = static_cast<ResultFieldType>(ToStartOfInterval<unit>::execute(time_data[i], num_units, time_zone, scale_multiplier));
if (origin_column.column) // Overload: Origin
{
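/// General idea of the Origin overload: compute the interval start of (time - origin) and then shift the result back by adding the origin,
/// taking care of the scale difference between the result type and the origin type along the way.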
const bool is_small_interval = (unit == IntervalKind::Kind::Nanosecond || unit == IntervalKind::Kind::Microsecond || unit == IntervalKind::Kind::Millisecond);
const bool is_result_date = isDateOrDate32(result_type);
Int64 result_scale = scale_multiplier;
Int64 origin_scale = 1;
if (isDateTime64(result_type)) /// We only have an origin scale if the arguments are DateTime64.
origin_scale = assert_cast<const DataTypeDateTime64 &>(*origin_column.type).getScaleMultiplier();
else if (!is_small_interval) /// With a large interval and non-DateTime64 arguments, the result should have no scale.
result_scale = 1;
if (is_small_interval)
result_scale = assert_cast<const DataTypeDateTime64 &>(*result_type).getScaleMultiplier();
/// If the result and the origin have different scales, we need to compensate for the difference
/// to get the right precision in the result. With large intervals there is no scale difference.
Int64 scale_diff = is_small_interval ? std::max(result_scale / origin_scale, origin_scale / result_scale) : 1;
static constexpr Int64 SECONDS_PER_DAY = 86'400;
UInt64 origin = origin_column.column->get64(0);
for (size_t i = 0; i != size; ++i)
{
UInt64 time_arg = time_data[i];
if (origin > static_cast<size_t>(time_arg))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The origin must be before the end date / date with time");
if (is_result_date) /// All internal calculations of ToStartOfInterval<...> expect arguments to be seconds or milli-, micro-, nanoseconds.
{
time_arg *= SECONDS_PER_DAY;
origin *= SECONDS_PER_DAY;
}
Int64 offset = ToStartOfInterval<unit>::execute(time_arg - origin, num_units, time_zone, result_scale, origin);
/// If the arguments are DateTime64 and the interval is large, we should apply the scale to the offset.
offset *= (!is_small_interval) ? result_scale : 1;
if (is_result_date) /// Convert back to date after calculations.
{
offset /= SECONDS_PER_DAY;
origin /= SECONDS_PER_DAY;
}
result_data[i] = 0;
result_data[i] += (result_scale < origin_scale) ? (origin + offset) / scale_diff : (origin + offset) * scale_diff;
}
}
else // Overload: Default
{
for (size_t i = 0; i != size; ++i)
result_data[i] = static_cast<typename ResultDataType::FieldType>(ToStartOfInterval<unit>::execute(time_data[i], num_units, time_zone, scale_multiplier));
}
return result_col;
}
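For intuition, here is a minimal sketch (not part of the diff) of what the Origin overload boils down to for plain second-based intervals, ignoring time zones, DateTime64 scales and the calendar-aware units; the function name and the bare int64_t timestamps are illustrative only:
#include <cstdint>
#include <stdexcept>
/// Start of the interval_seconds-wide bucket that contains t, with buckets anchored at `origin` instead of the epoch.
/// Mirrors the Origin branch above for coarse units: offset = ToStartOfInterval<unit>::execute(t - origin, ...), result = origin + offset.
int64_t startOfIntervalWithOrigin(int64_t t, int64_t origin, int64_t interval_seconds)
{
    if (origin > t)
        throw std::invalid_argument("The origin must not be after the time argument"); /// the diff throws BAD_ARGUMENTS here
    int64_t elapsed = t - origin;
    int64_t whole_intervals = elapsed / interval_seconds; /// how many full intervals fit between origin and t
    return origin + whole_intervals * interval_seconds;   /// start of the bucket containing t
}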

View File

@ -2371,7 +2371,7 @@ void NO_INLINE Aggregator::mergeDataNullKey(
template <typename Method, typename Table>
void NO_INLINE Aggregator::mergeDataImpl(
Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch) const
Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions [[maybe_unused]], bool prefetch, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const
{
if constexpr (Method::low_cardinality_optimization || Method::one_key_nullable_optimization)
mergeDataNullKey<Method, Table>(table_dst, table_src, arena);
@ -2410,7 +2410,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
{
if (!is_aggregate_function_compiled[i])
aggregate_functions[i]->mergeAndDestroyBatch(
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena);
}
return;
@ -2420,7 +2420,7 @@ void NO_INLINE Aggregator::mergeDataImpl(
for (size_t i = 0; i < params.aggregates_size; ++i)
{
aggregate_functions[i]->mergeAndDestroyBatch(
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], arena);
dst_places.data(), src_places.data(), dst_places.size(), offsets_of_aggregate_states[i], thread_pool, is_cancelled, arena);
}
}
@ -2535,8 +2535,10 @@ void NO_INLINE Aggregator::mergeWithoutKeyDataImpl(
template <typename Method>
void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
ManyAggregatedDataVariants & non_empty_data) const
ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const
{
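/// Presumably lets mergeAndDestroyBatch parallelize merging of heavy aggregate function states; the pool is passed down through mergeDataImpl.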
ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads};
AggregatedDataVariantsPtr & res = non_empty_data[0];
bool no_more_keys = false;
@ -2557,13 +2559,13 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch);
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, true, prefetch, thread_pool, is_cancelled);
}
else
#endif
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch);
getDataVariant<Method>(*res).data, getDataVariant<Method>(current).data, res->aggregates_pool, false, prefetch, thread_pool, is_cancelled);
}
}
else if (res->without_key)
@ -2589,7 +2591,7 @@ void NO_INLINE Aggregator::mergeSingleLevelDataImpl(
#define M(NAME) \
template void NO_INLINE Aggregator::mergeSingleLevelDataImpl<decltype(AggregatedDataVariants::NAME)::element_type>( \
ManyAggregatedDataVariants & non_empty_data) const;
ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const;
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M
@ -2597,6 +2599,8 @@ template <typename Method>
void NO_INLINE Aggregator::mergeBucketImpl(
ManyAggregatedDataVariants & data, Int32 bucket, Arena * arena, std::atomic<bool> & is_cancelled) const
{
ThreadPool thread_pool{CurrentMetrics::AggregatorThreads, CurrentMetrics::AggregatorThreadsActive, CurrentMetrics::AggregatorThreadsScheduled, params.max_threads};
/// We merge all aggregation results to the first.
AggregatedDataVariantsPtr & res = data[0];
@ -2613,7 +2617,7 @@ void NO_INLINE Aggregator::mergeBucketImpl(
if (compiled_aggregate_functions_holder)
{
mergeDataImpl<Method>(
getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch);
getDataVariant<Method>(*res).data.impls[bucket], getDataVariant<Method>(current).data.impls[bucket], arena, true, prefetch, thread_pool, is_cancelled);
}
else
#endif
@ -2623,7 +2627,9 @@ void NO_INLINE Aggregator::mergeBucketImpl(
getDataVariant<Method>(current).data.impls[bucket],
arena,
false,
prefetch);
prefetch,
thread_pool,
is_cancelled);
}
}
}

View File

@ -467,7 +467,7 @@ private:
/// Merge data from hash table `src` into `dst`.
template <typename Method, typename Table>
void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch) const;
void mergeDataImpl(Table & table_dst, Table & table_src, Arena * arena, bool use_compiled_functions, bool prefetch, ThreadPool & thread_pool, std::atomic<bool> & is_cancelled) const;
/// Merge data from hash table `src` into `dst`, but only for keys that already exist in dst. In other cases, merge the data into `overflows`.
template <typename Method, typename Table>
@ -490,7 +490,7 @@ private:
template <typename Method>
void mergeSingleLevelDataImpl(
ManyAggregatedDataVariants & non_empty_data) const;
ManyAggregatedDataVariants & non_empty_data, std::atomic<bool> & is_cancelled) const;
template <bool return_single_block>
using ConvertToBlockRes = std::conditional_t<return_single_block, Block, BlocksList>;

View File

@ -228,8 +228,8 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
metadata_path = metadata_path / "store" / DatabaseCatalog::getPathForUUID(create.uuid);
if (!create.attach && fs::exists(metadata_path))
throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists", metadata_path.string());
if (!create.attach && fs::exists(metadata_path) && !fs::is_empty(metadata_path))
throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Metadata directory {} already exists and is not empty", metadata_path.string());
}
else if (create.storage->engine->name == "MaterializeMySQL"
|| create.storage->engine->name == "MaterializedMySQL")
@ -329,6 +329,9 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
writeChar('\n', statement_buf);
String statement = statement_buf.str();
/// Needed to make database creation retriable if it fails after the file is created
fs::remove(metadata_file_tmp_path);
/// Exclusive flag guarantees, that database is not created right now in another thread.
WriteBufferFromFile out(metadata_file_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL);
writeString(statement, out);
@ -350,13 +353,6 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
DatabaseCatalog::instance().attachDatabase(database_name, database);
added = true;
if (need_write_metadata)
{
/// Prevents from overwriting metadata of detached database
renameNoReplace(metadata_file_tmp_path, metadata_file_path);
renamed = true;
}
if (!load_database_without_tables)
{
/// We use global context here, because storages lifetime is bigger than query context lifetime
@ -368,6 +364,13 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
/// Only then prioritize, schedule and wait all the startup tasks
waitLoad(currentPoolOr(TablesLoaderForegroundPoolId), startup_tasks);
}
if (need_write_metadata)
{
/// Prevents from overwriting metadata of detached database
renameNoReplace(metadata_file_tmp_path, metadata_file_path);
renamed = true;
}
}
catch (...)
{
@ -1226,6 +1229,27 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data
bool from_path = create.attach_from_path.has_value();
bool is_on_cluster = getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
if (database->getEngineName() == "Replicated" && create.uuid != UUIDHelpers::Nil && !is_replicated_database_internal && !is_on_cluster && !create.attach)
{
if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 0)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "It's not allowed to explicitly specify UUIDs for tables in Replicated databases, "
"see database_replicated_allow_explicit_uuid");
}
else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 1)
{
LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "It's not recommended to explicitly specify UUIDs for tables in Replicated databases");
}
else if (getContext()->getSettingsRef().database_replicated_allow_explicit_uuid == 2)
{
UUID old_uuid = create.uuid;
create.uuid = UUIDHelpers::Nil;
create.generateRandomUUIDs();
LOG_WARNING(&Poco::Logger::get("InterpreterCreateQuery"), "Replaced a user-provided UUID ({}) with a random one ({}) "
"to make sure it's unique", old_uuid, create.uuid);
}
}
if (is_replicated_database_internal && !internal)
{
if (create.uuid == UUIDHelpers::Nil)
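For illustration, the new three-level behaviour of database_replicated_allow_explicit_uuid could be exercised roughly as below. This is a hedged sketch: the database name rdb and the UUID literal are made up for the example, and it assumes rdb is a Replicated database so the branch above applies.
SET database_replicated_allow_explicit_uuid = 0;
CREATE TABLE rdb.t0 UUID '123e4567-e89b-12d3-a456-426614174000' (x UInt64)
ENGINE = MergeTree ORDER BY x; -- rejected with BAD_ARGUMENTS
SET database_replicated_allow_explicit_uuid = 2;
CREATE TABLE rdb.t1 UUID '123e4567-e89b-12d3-a456-426614174000' (x UInt64)
ENGINE = MergeTree ORDER BY x; -- accepted, but the user UUID is replaced with a random one and a warning is logged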

View File

@ -164,7 +164,7 @@ Field convertDecimalType(const Field & from, const To & type)
}
Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint)
Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
if (from_type_hint && from_type_hint->equals(type))
{
@ -359,7 +359,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
Array res(src_arr_size);
for (size_t i = 0; i < src_arr_size; ++i)
{
res[i] = convertFieldToType(src_arr[i], element_type);
res[i] = convertFieldToType(src_arr[i], element_type, nullptr, format_settings);
if (res[i].isNull() && !canContainNull(element_type))
{
// See the comment for Tuples below.
@ -387,7 +387,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
for (size_t i = 0; i < dst_tuple_size; ++i)
{
const auto & element_type = *(type_tuple->getElements()[i]);
res[i] = convertFieldToType(src_tuple[i], element_type);
res[i] = convertFieldToType(src_tuple[i], element_type, nullptr, format_settings);
if (res[i].isNull() && !canContainNull(element_type))
{
/*
@ -435,12 +435,12 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
Tuple updated_entry(2);
updated_entry[0] = convertFieldToType(key, key_type);
updated_entry[0] = convertFieldToType(key, key_type, nullptr, format_settings);
if (updated_entry[0].isNull() && !canContainNull(key_type))
have_unconvertible_element = true;
updated_entry[1] = convertFieldToType(value, value_type);
updated_entry[1] = convertFieldToType(value, value_type, nullptr, format_settings);
if (updated_entry[1].isNull() && !canContainNull(value_type))
have_unconvertible_element = true;
@ -551,7 +551,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
ReadBufferFromString in_buffer(src.safeGet<String>());
try
{
type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{});
type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, format_settings);
}
catch (Exception & e)
{
@ -563,7 +563,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
}
Field parsed = (*col)[0];
return convertFieldToType(parsed, type, from_type_hint);
return convertFieldToType(parsed, type, from_type_hint, format_settings);
}
throw Exception(ErrorCodes::TYPE_MISMATCH, "Type mismatch in IN or VALUES section. Expected: {}. Got: {}",
@ -573,7 +573,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
}
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
if (from_value.isNull())
return from_value;
@ -582,7 +582,7 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
return from_value;
if (const auto * low_cardinality_type = typeid_cast<const DataTypeLowCardinality *>(&to_type))
return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint);
return convertFieldToType(from_value, *low_cardinality_type->getDictionaryType(), from_type_hint, format_settings);
else if (const auto * nullable_type = typeid_cast<const DataTypeNullable *>(&to_type))
{
const IDataType & nested_type = *nullable_type->getNestedType();
@ -593,20 +593,20 @@ Field convertFieldToType(const Field & from_value, const IDataType & to_type, co
if (from_type_hint && from_type_hint->equals(nested_type))
return from_value;
return convertFieldToTypeImpl(from_value, nested_type, from_type_hint);
return convertFieldToTypeImpl(from_value, nested_type, from_type_hint, format_settings);
}
else
return convertFieldToTypeImpl(from_value, to_type, from_type_hint);
return convertFieldToTypeImpl(from_value, to_type, from_type_hint, format_settings);
}
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint)
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint, const FormatSettings & format_settings)
{
bool is_null = from_value.isNull();
if (is_null && !canContainNull(to_type))
throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert NULL to {}", to_type.getName());
Field converted = convertFieldToType(from_value, to_type, from_type_hint);
Field converted = convertFieldToType(from_value, to_type, from_type_hint, format_settings);
if (!is_null && converted.isNull())
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND,
@ -626,9 +626,9 @@ static bool decimalEqualsFloat(Field field, Float64 float_value)
return decimal_to_float == float_value;
}
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type)
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings)
{
Field result_value = convertFieldToType(from_value, to_type, &from_type);
Field result_value = convertFieldToType(from_value, to_type, &from_type, format_settings);
if (Field::isDecimal(from_value.getType()) && Field::isDecimal(result_value.getType()))
{

View File

@ -1,6 +1,7 @@
#pragma once
#include <Core/Field.h>
#include <Formats/FormatSettings.h>
namespace DB
@ -15,13 +16,13 @@ class IDataType;
* Checks for the compatibility of types, checks values fall in the range of valid values of the type, makes type conversion.
* If the value does not fall into the range - returns Null.
*/
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr);
Field convertFieldToType(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {});
/// Does the same, but throws ARGUMENT_OUT_OF_BOUND if value does not fall into the range.
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr);
Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_type, const IDataType * from_type_hint = nullptr, const FormatSettings & format_settings = {});
/// Applies stricter rules than convertFieldToType, doesn't allow loss of precision converting to Decimal.
/// Returns `Field` if the conversion was successful and the result is equal to the original value, otherwise returns nullopt.
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type);
std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type, const FormatSettings & format_settings = {});
}

View File

@ -0,0 +1,154 @@
#include <Formats/FormatFactory.h>
#include <Formats/JSONUtils.h>
#include <Processors/Formats/Impl/JSONCompactWithProgressRowOutputFormat.h>
#include <IO/WriteHelpers.h>
namespace DB
{
JSONCompactWithProgressRowOutputFormat::JSONCompactWithProgressRowOutputFormat(
WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_)
: JSONRowOutputFormat(out_, header, settings_, yield_strings_)
{
}
void JSONCompactWithProgressRowOutputFormat::writePrefix()
{
JSONUtils::writeCompactObjectStart(*ostr);
JSONUtils::writeCompactMetadata(names, types, settings, *ostr);
JSONUtils::writeCompactObjectEnd(*ostr);
writeCString("\n", *ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num)
{
JSONUtils::writeFieldFromColumn(column, serialization, row_num, yield_strings, settings, *ostr);
++field_number;
}
void JSONCompactWithProgressRowOutputFormat::writeFieldDelimiter()
{
JSONUtils::writeFieldCompactDelimiter(*ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeRowStartDelimiter()
{
if (has_progress)
writeProgress();
writeCString("{\"data\":", *ostr);
JSONUtils::writeCompactArrayStart(*ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeRowEndDelimiter()
{
JSONUtils::writeCompactArrayEnd(*ostr);
writeCString("}\n", *ostr);
field_number = 0;
++row_count;
}
void JSONCompactWithProgressRowOutputFormat::writeRowBetweenDelimiter()
{
}
void JSONCompactWithProgressRowOutputFormat::writeBeforeTotals()
{
JSONUtils::writeCompactObjectStart(*ostr);
JSONUtils::writeCompactArrayStart(*ostr, 0, "totals");
}
void JSONCompactWithProgressRowOutputFormat::writeTotals(const Columns & columns, size_t row_num)
{
JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeAfterTotals()
{
JSONUtils::writeCompactArrayEnd(*ostr);
JSONUtils::writeCompactObjectEnd(*ostr);
writeCString("\n", *ostr);
}
void JSONCompactWithProgressRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num)
{
JSONUtils::writeCompactArrayStart(*ostr, 2, title);
JSONUtils::writeCompactColumns(columns, serializations, row_num, yield_strings, settings, *ostr);
JSONUtils::writeCompactArrayEnd(*ostr);
}
void JSONCompactWithProgressRowOutputFormat::onProgress(const Progress & value)
{
statistics.progress.incrementPiecewiseAtomically(value);
String progress_line;
WriteBufferFromString buf(progress_line);
writeCString("{\"progress\":", buf);
statistics.progress.writeJSON(buf);
writeCString("}\n", buf);
buf.finalize();
std::lock_guard lock(progress_lines_mutex);
progress_lines.emplace_back(std::move(progress_line));
has_progress = true;
}
void JSONCompactWithProgressRowOutputFormat::flush()
{
if (has_progress)
writeProgress();
JSONRowOutputFormat::flush();
}
void JSONCompactWithProgressRowOutputFormat::writeSuffix()
{
if (has_progress)
writeProgress();
}
void JSONCompactWithProgressRowOutputFormat::writeProgress()
{
std::lock_guard lock(progress_lines_mutex);
for (const auto & progress_line : progress_lines)
writeString(progress_line, *ostr);
progress_lines.clear();
has_progress = false;
}
void JSONCompactWithProgressRowOutputFormat::finalizeImpl()
{
if (exception_message.empty())
{
JSONUtils::writeCompactAdditionalInfo(
row_count,
statistics.rows_before_limit,
statistics.applied_limit,
statistics.watch,
statistics.progress,
settings.write_statistics,
*ostr);
}
else
{
JSONUtils::writeCompactObjectStart(*ostr);
JSONUtils::writeException(exception_message, *ostr, settings, 0);
JSONUtils::writeCompactObjectEnd(*ostr);
}
writeCString("\n", *ostr);
ostr->next();
}
void registerOutputFormatJSONCompactWithProgress(FormatFactory & factory)
{
factory.registerOutputFormat(
"JSONCompactWithProgress",
[](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings)
{ return std::make_shared<JSONCompactWithProgressRowOutputFormat>(buf, sample, format_settings, false); });
factory.registerOutputFormat(
"JSONCompactWithProgressStrings",
[](WriteBuffer & buf, const Block & sample, const FormatSettings & format_settings)
{ return std::make_shared<JSONCompactWithProgressRowOutputFormat>(buf, sample, format_settings, true); });
}
}

View File

@ -0,0 +1,50 @@
#pragma once
#include <Core/Block.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteBufferValidUTF8.h>
#include <Processors/Formats/Impl/JSONRowOutputFormat.h>
namespace DB
{
struct FormatSettings;
class JSONCompactWithProgressRowOutputFormat final : public JSONRowOutputFormat
{
public:
JSONCompactWithProgressRowOutputFormat(WriteBuffer & out_, const Block & header, const FormatSettings & settings_, bool yield_strings_);
String getName() const override { return "JSONCompactWithProgressRowOutputFormat"; }
void onProgress(const Progress & value) override;
void flush() override;
private:
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
void writeFieldDelimiter() override;
void writeRowStartDelimiter() override;
void writeRowEndDelimiter() override;
void writeRowBetweenDelimiter() override;
bool supportTotals() const override { return true; }
bool supportExtremes() const override { return true; }
void writeBeforeTotals() override;
void writeAfterTotals() override;
void writeExtremesElement(const char * title, const Columns & columns, size_t row_num) override;
void writeTotals(const Columns & columns, size_t row_num) override;
void writeProgress();
void writePrefix() override;
void writeSuffix() override;
void finalizeImpl() override;
std::vector<String> progress_lines;
std::mutex progress_lines_mutex;
/// To avoid locking the mutex and checking progress_lines on every row,
/// we keep an atomic flag indicating that progress_lines is not empty.
std::atomic_bool has_progress = false;
};
}
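A minimal usage sketch for the newly registered format; the query is arbitrary, and the description of the output is abbreviated and inferred from the writer methods above rather than from a fixed specification.
SELECT number, number * 2 AS doubled
FROM system.numbers
LIMIT 3
FORMAT JSONCompactWithProgress;
-- prints a metadata object, {"progress":{...}} lines as progress arrives,
-- one {"data":[...]} object per row, and a trailing object with row counts
-- and statistics (or an exception object on error)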

View File

@ -542,7 +542,7 @@ bool ValuesBlockInputFormat::parseExpression(IColumn & column, size_t column_idx
if (format_settings.null_as_default)
tryToReplaceNullFieldsInComplexTypesWithDefaultValues(expression_value, type);
Field value = convertFieldToType(expression_value, type, value_raw.second.get());
Field value = convertFieldToType(expression_value, type, value_raw.second.get(), format_settings);
/// Check that we are indeed allowed to insert a NULL.
if (value.isNull() && !type.isNullable() && !type.isLowCardinalityNullable())
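Since the format settings are now passed through to convertFieldToType, literal parsing inside VALUES can pick them up. A hedged illustration of the kind of query this affects; the table and the specific setting are assumptions for the example, not a statement of exactly which settings are honoured:
CREATE TABLE dates (d DateTime) ENGINE = Memory;
SET date_time_input_format = 'best_effort';
-- the string literal goes through convertFieldToType with the session's FormatSettings
INSERT INTO dates VALUES ('2024/09/10 09:44:18');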

View File

@ -50,6 +50,9 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type)
case TypeIndex::Float64:
case TypeIndex::Nullable:
case TypeIndex::ObjectDeprecated:
case TypeIndex::Object:
case TypeIndex::Variant:
case TypeIndex::Dynamic:
return false;
case TypeIndex::Array:
{
@ -76,16 +79,6 @@ bool isSafePrimaryDataKeyType(const IDataType & data_type)
const auto & data_type_map = static_cast<const DataTypeMap &>(data_type);
return isSafePrimaryDataKeyType(*data_type_map.getKeyType()) && isSafePrimaryDataKeyType(*data_type_map.getValueType());
}
case TypeIndex::Variant:
{
const auto & data_type_variant = static_cast<const DataTypeVariant &>(data_type);
const auto & data_type_variant_elements = data_type_variant.getVariants();
for (const auto & data_type_variant_element : data_type_variant_elements)
if (!isSafePrimaryDataKeyType(*data_type_variant_element))
return false;
return false;
}
default:
{
break;

View File

@ -486,7 +486,7 @@ private:
#define M(NAME) \
else if (first->type == AggregatedDataVariants::Type::NAME) \
params->aggregator.mergeSingleLevelDataImpl<decltype(first->NAME)::element_type>(*data);
params->aggregator.mergeSingleLevelDataImpl<decltype(first->NAME)::element_type>(*data, shared_data->is_cancelled);
if (false) {} // NOLINT
APPLY_FOR_VARIANTS_SINGLE_LEVEL(M)
#undef M

View File

@ -14,6 +14,6 @@ using ContextPtr = std::shared_ptr<const Context>;
/// Extracts a zookeeper path from a specified CREATE TABLE query.
/// The function checks the table engine and if it is Replicated*MergeTree then it takes the first argument and expands macros in it.
/// Returns std::nullopt if the specified CREATE query doesn't describe a Replicated table or its arguments can't be evaluated.
std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & context);
std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & create_query, const ContextPtr & local_context);
}

View File

@ -12,6 +12,7 @@
#include <Common/Macros.h>
#include <Common/OptimizedRegularExpression.h>
#include <Common/typeid_cast.h>
#include <Common/logger_useful.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
@ -189,7 +190,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
const String & engine_name,
ASTs & engine_args,
LoadingStrictnessLevel mode,
const ContextPtr & context,
const ContextPtr & local_context,
String & zookeeper_path,
String & replica_name,
RenamingRestrictions & renaming_restrictions)
@ -206,11 +207,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
{
/// Allow expressions in engine arguments.
/// In new syntax argument can be literal or identifier or array/tuple of identifiers.
evaluateEngineArgs(engine_args, context);
evaluateEngineArgs(engine_args, local_context);
}
bool is_on_cluster = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
bool is_replicated_database = context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY &&
bool is_on_cluster = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY;
bool is_replicated_database = local_context->getClientInfo().query_kind == ClientInfo::QueryKind::SECONDARY_QUERY &&
DatabaseCatalog::instance().getDatabase(table_id.database_name)->getEngineName() == "Replicated";
/// Allow implicit {uuid} macros only for zookeeper_path in ON CLUSTER queries
@ -230,10 +231,10 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
/// We did unfold it in previous versions to make moving table from Atomic to Ordinary database work correctly,
/// but now it's not allowed (and it was the only reason to unfold {uuid} macro).
info.table_id.uuid = UUIDHelpers::Nil;
zookeeper_path = context->getMacros()->expand(zookeeper_path, info);
zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info);
info.level = 0;
replica_name = context->getMacros()->expand(replica_name, info);
replica_name = local_context->getMacros()->expand(replica_name, info);
}
ast_zk_path->value = zookeeper_path;
@ -251,11 +252,11 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
}
if (!allow_uuid_macro)
info.table_id.uuid = UUIDHelpers::Nil;
zookeeper_path = context->getMacros()->expand(zookeeper_path, info);
zookeeper_path = local_context->getMacros()->expand(zookeeper_path, info);
info.level = 0;
info.table_id.uuid = UUIDHelpers::Nil;
replica_name = context->getMacros()->expand(replica_name, info);
replica_name = local_context->getMacros()->expand(replica_name, info);
/// We do not allow renaming table with these macros in metadata, because zookeeper_path will be broken after RENAME TABLE.
/// NOTE: it may happen if table was created by older version of ClickHouse (< 20.10) and macros was not unfolded on table creation
@ -272,9 +273,24 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
bool has_arguments = (arg_num + 2 <= arg_cnt);
bool has_valid_arguments = has_arguments && engine_args[arg_num]->as<ASTLiteral>() && engine_args[arg_num + 1]->as<ASTLiteral>();
const auto & server_settings = local_context->getServerSettings();
if (has_valid_arguments)
{
if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 0)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"It's not allowed to specify explicit zookeeper_path and replica_name "
"for ReplicatedMergeTree arguments in Replicated database. If you really want to "
"specify them explicitly, enable setting "
"database_replicated_allow_replicated_engine_arguments.");
}
else if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 1)
{
LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "It's not recommended to explicitly specify "
"zookeeper_path and replica_name in ReplicatedMergeTree arguments");
}
/// Get path and name from engine arguments
auto * ast_zk_path = engine_args[arg_num]->as<ASTLiteral>();
if (ast_zk_path && ast_zk_path->value.getType() == Field::Types::String)
@ -288,6 +304,15 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Replica name must be a string literal{}", verbose_help_message);
if (is_replicated_database && local_context->getSettingsRef().database_replicated_allow_replicated_engine_arguments == 2)
{
LOG_WARNING(&Poco::Logger::get("registerStorageMergeTree"), "Replacing user-provided ZooKeeper path and replica name ({}, {}) "
"with default arguments", zookeeper_path, replica_name);
engine_args[arg_num]->as<ASTLiteral>()->value = zookeeper_path = server_settings.default_replica_path;
engine_args[arg_num + 1]->as<ASTLiteral>()->value = replica_name = server_settings.default_replica_name;
}
expand_macro(ast_zk_path, ast_replica_name);
}
else if (is_extended_storage_def
@ -297,7 +322,6 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
{
/// Try use default values if arguments are not specified.
/// Note: {uuid} macro works for ON CLUSTER queries when database engine is Atomic.
const auto & server_settings = context->getServerSettings();
zookeeper_path = server_settings.default_replica_path;
/// TODO maybe use hostname if {replica} is not defined?
replica_name = server_settings.default_replica_name;
@ -322,7 +346,7 @@ static void extractZooKeeperPathAndReplicaNameFromEngineArgs(
}
/// Extracts a zookeeper path from a specified CREATE TABLE query.
std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & context)
std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreateQuery & query, const ContextPtr & local_context)
{
if (!query.storage || !query.storage->engine)
return {};
@ -346,7 +370,7 @@ std::optional<String> extractZooKeeperPathFromReplicatedTableDef(const ASTCreate
try
{
extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, context,
extractZooKeeperPathAndReplicaNameFromEngineArgs(query, table_id, engine_name, engine_args, mode, local_context,
zookeeper_path, replica_name, renaming_restrictions);
}
catch (Exception & e)
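The same 0/1/2 ladder applies to explicit ReplicatedMergeTree arguments inside a Replicated database. A hedged sketch, assuming a Replicated database named rdb; the ZooKeeper path below is illustrative only:
SET database_replicated_allow_replicated_engine_arguments = 0;
CREATE TABLE rdb.rmt (n UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/{table}', '{replica}')
ORDER BY n; -- rejected with BAD_ARGUMENTS
SET database_replicated_allow_replicated_engine_arguments = 2;
CREATE TABLE rdb.rmt (n UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/{table}', '{replica}')
ORDER BY n; -- warns and rewrites the arguments to default_replica_path / default_replica_name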

View File

@ -207,18 +207,28 @@ struct DeltaLakeMetadataImpl
Poco::Dynamic::Var json = parser.parse(json_str);
Poco::JSON::Object::Ptr object = json.extract<Poco::JSON::Object::Ptr>();
if (!object)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to parse metadata file");
#ifdef ABORT_ON_LOGICAL_ERROR
std::ostringstream oss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
object->stringify(oss);
LOG_TEST(log, "Metadata: {}", oss.str());
#endif
if (object->has("metaData"))
{
const auto metadata_object = object->get("metaData").extract<Poco::JSON::Object::Ptr>();
if (!metadata_object)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `metaData` field");
const auto schema_object = metadata_object->getValue<String>("schemaString");
Poco::JSON::Parser p;
Poco::Dynamic::Var fields_json = parser.parse(schema_object);
const Poco::JSON::Object::Ptr & fields_object = fields_json.extract<Poco::JSON::Object::Ptr>();
if (!fields_object)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `fields` field");
auto current_schema = parseMetadata(fields_object);
if (file_schema.empty())
@ -237,6 +247,9 @@ struct DeltaLakeMetadataImpl
if (object->has("add"))
{
auto add_object = object->get("add").extract<Poco::JSON::Object::Ptr>();
if (!add_object)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `add` field");
auto path = add_object->getValue<String>("path");
result.insert(fs::path(configuration->getPath()) / path);
@ -247,6 +260,9 @@ struct DeltaLakeMetadataImpl
if (add_object->has("partitionValues"))
{
auto partition_values = add_object->get("partitionValues").extract<Poco::JSON::Object::Ptr>();
if (!partition_values)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `partitionValues` field");
if (partition_values->size())
{
auto & current_partition_columns = file_partition_columns[filename];
@ -274,7 +290,11 @@ struct DeltaLakeMetadataImpl
}
else if (object->has("remove"))
{
auto path = object->get("remove").extract<Poco::JSON::Object::Ptr>()->getValue<String>("path");
auto remove_object = object->get("remove").extract<Poco::JSON::Object::Ptr>();
if (!remove_object)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `remove` field");
auto path = remove_object->getValue<String>("path");
result.erase(fs::path(configuration->getPath()) / path);
}
}

View File

@ -284,7 +284,7 @@ ColumnStatisticsPtr MergeTreeStatisticsFactory::get(const ColumnDescription & co
{
auto it = creators.find(type);
if (it == creators.end())
throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'count_min', 'minmax', 'tdigest' and 'uniq'", type);
throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistic type '{}'. Available types: 'countmin', 'minmax', 'tdigest' and 'uniq'", type);
auto stat_ptr = (it->second)(desc, column_desc.type);
column_stat->stats[type] = stat_ptr;
}

View File

@ -49,7 +49,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const
if (isStringOrFixedString(data_type))
return sketch.get_estimate(val.safeGet<String>());
throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName());
throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'countmin' does not support estimate data type of {}", data_type->getName());
}
void StatisticsCountMinSketch::update(const ColumnPtr & column)
@ -96,7 +96,7 @@ void countMinSketchStatisticsValidator(const SingleStatisticsDescription & /*des
DataTypePtr inner_data_type = removeNullable(data_type);
inner_data_type = removeLowCardinalityAndNullable(inner_data_type);
if (!inner_data_type->isValueRepresentedByNumber() && !isStringOrFixedString(inner_data_type))
throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'count_min' does not support type {}", data_type->getName());
throw Exception(ErrorCodes::ILLEGAL_STATISTICS, "Statistics of type 'countmin' does not support type {}", data_type->getName());
}
SingleStatisticsPtr countMinSketchStatisticsCreator(const SingleStatisticsDescription & description, const DataTypePtr & data_type)

View File

@ -48,11 +48,11 @@ static SingleStatisticsType stringToStatisticsType(String type)
return SingleStatisticsType::TDigest;
if (type == "uniq")
return SingleStatisticsType::Uniq;
if (type == "count_min")
if (type == "countmin")
return SingleStatisticsType::CountMinSketch;
if (type == "minmax")
return SingleStatisticsType::MinMax;
throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'count_min', 'minmax', 'tdigest' and 'uniq'.", type);
throw Exception(ErrorCodes::INCORRECT_QUERY, "Unknown statistics type: {}. Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type);
}
String SingleStatisticsDescription::getTypeName() const
@ -64,11 +64,11 @@ String SingleStatisticsDescription::getTypeName() const
case SingleStatisticsType::Uniq:
return "Uniq";
case SingleStatisticsType::CountMinSketch:
return "count_min";
return "countmin";
case SingleStatisticsType::MinMax:
return "minmax";
default:
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'count_min', 'minmax', 'tdigest' and 'uniq'.", type);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown statistics type: {}. Supported statistics types are 'countmin', 'minmax', 'tdigest' and 'uniq'.", type);
}
}
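With the type renamed from count_min to countmin, a column-level statistics declaration would look roughly like this. It is a sketch and assumes the experimental gate allow_experimental_statistics is the one that enables the feature, as in current ClickHouse:
SET allow_experimental_statistics = 1;
CREATE TABLE hits
(
    user_id UInt64 STATISTICS(countmin, uniq),
    url String STATISTICS(countmin)
)
ENGINE = MergeTree ORDER BY user_id;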

View File

@ -6,6 +6,7 @@
<distributed_ddl_task_timeout>120</distributed_ddl_task_timeout>
<database_replicated_always_detach_permanently>1</database_replicated_always_detach_permanently>
<database_replicated_enforce_synchronous_settings>1</database_replicated_enforce_synchronous_settings>
<database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
</default>
</profiles>
</clickhouse>

View File

@ -311,7 +311,8 @@ def test_replicated_database(cluster):
SETTINGS storage_policy = 'web';
""".format(
uuids[0]
)
),
settings={"database_replicated_allow_explicit_uuid": 3},
)
node2 = cluster.instances["node2"]

View File

@ -5,6 +5,8 @@
<allow_experimental_alter_materialized_view_structure>1</allow_experimental_alter_materialized_view_structure>
<allow_experimental_object_type>0</allow_experimental_object_type>
<allow_suspicious_codecs>0</allow_suspicious_codecs>
<database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
<database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
</default>
</profiles>
<users>

View File

@ -5,6 +5,8 @@
<allow_experimental_alter_materialized_view_structure>1</allow_experimental_alter_materialized_view_structure>
<allow_experimental_object_type>0</allow_experimental_object_type>
<allow_suspicious_codecs>0</allow_suspicious_codecs>
<database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
<database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
<throw_on_unsupported_query_inside_transaction>0</throw_on_unsupported_query_inside_transaction>
</default>

View File

@ -1,6 +1,8 @@
<clickhouse>
<profiles>
<default>
<database_replicated_allow_replicated_engine_arguments>3</database_replicated_allow_replicated_engine_arguments>
<database_replicated_allow_explicit_uuid>3</database_replicated_allow_explicit_uuid>
</default>
</profiles>
<users>

View File

@ -15,6 +15,6 @@ $CLICKHOUSE_CLIENT --query="
INSERT INTO users VALUES (1321770221388956068);
";
for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT -n | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo
for _ in {1..10}; do seq 1 10 | sed "s/.*/SELECT count() FROM (SELECT * FROM remote('127.0.0.{2,3}', ${CLICKHOUSE_DATABASE}, users) WHERE UserID IN (SELECT arrayJoin([1468013291393583084, 1321770221388956068])));/" | $CLICKHOUSE_CLIENT | grep -vE '^4$' && echo 'Fail!' && break; echo -n '.'; done; echo
$CLICKHOUSE_CLIENT --query="DROP TABLE users;";

View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
DROP TABLE IF EXISTS numbers_100k;
CREATE VIEW numbers_100k AS SELECT * FROM system.numbers LIMIT 100000;
";

View File

@ -48,13 +48,13 @@ pack_unpack_compare "SELECT name, is_aggregate FROM system.functions" "name Stri
echo
# Check settings are passed correctly
${CLICKHOUSE_LOCAL} --max_rows_in_distinct=33 -q "SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'"
${CLICKHOUSE_LOCAL} -n -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'"
${CLICKHOUSE_LOCAL} -q "SET max_rows_in_distinct=33; SELECT name, value FROM system.settings WHERE name = 'max_rows_in_distinct'"
${CLICKHOUSE_LOCAL} --max_bytes_before_external_group_by=1 --max_block_size=10 -q "SELECT sum(ignore(*)) FROM (SELECT number, count() FROM numbers(1000) GROUP BY number)"
echo
# Check extra options, we expect zero exit code and no stderr output
(${CLICKHOUSE_LOCAL} --ignore-error -n --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC")
(${CLICKHOUSE_LOCAL} --ignore-error --echo -q "SELECT nothing_to_do();SELECT 42;" 2>/dev/null || echo "Wrong RC")
echo
${CLICKHOUSE_LOCAL} -n -q "CREATE TABLE sophisticated_default
${CLICKHOUSE_LOCAL} -q "CREATE TABLE sophisticated_default
(
a UInt8 DEFAULT 3,
b UInt8 ALIAS a + 5,

View File

@ -23,7 +23,7 @@ $CLICKHOUSE_CLIENT_SECURE -q "SELECT 4;"
# TODO: can test only on unchanged port. Possible solutions: generate config or pass shard port via command line
if [[ "$CLICKHOUSE_PORT_TCP_SECURE" = "$CLICKHOUSE_PORT_TCP_SECURE" ]]; then
cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -n -m
cat "$CURDIR"/00505_distributed_secure.data | $CLICKHOUSE_CLIENT_SECURE -m
else
tail -n 13 "$CURDIR"/00505_secure.reference
fi

View File

@ -6,6 +6,8 @@ DROP TABLE IF EXISTS without_deduplication;
DROP TABLE IF EXISTS with_deduplication_mv;
DROP TABLE IF EXISTS without_deduplication_mv;
SET database_replicated_allow_explicit_uuid=3;
SET database_replicated_allow_replicated_engine_arguments=3;
CREATE TABLE with_deduplication(x UInt32)
ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication', 'r1') ORDER BY x;
CREATE TABLE without_deduplication(x UInt32)

View File

@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error
echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error
#$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"

View File

@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
echo "SELECT CAST();" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null
echo "SELECT 5;" | $CLICKHOUSE_CLIENT -n --ignore-error
echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
echo "SELECT CAST();" | $CLICKHOUSE_CLIENT --ignore-error 2>/dev/null
echo "SELECT 5;" | $CLICKHOUSE_CLIENT --ignore-error
#$CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"

View File

@ -1,4 +1,4 @@
-- Tags: no-ordinary-database
-- Tags: no-ordinary-database, no-parallel
DROP TABLE IF EXISTS test_00609;
DROP TABLE IF EXISTS test_mv_00609;
@ -6,6 +6,7 @@ DROP TABLE IF EXISTS test_mv_00609;
create table test_00609 (a Int8) engine=Memory;
insert into test_00609 values (1);
set database_replicated_allow_explicit_uuid=3;
set allow_deprecated_syntax_for_merge_tree=1;
create materialized view test_mv_00609 uuid '00000609-1000-4000-8000-000000000001' Engine=MergeTree(date, (a), 8192) populate as select a, toDate('2000-01-01') date from test_00609;

View File

@ -8,5 +8,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=./mergetree_mutations.lib
. "$CURDIR"/mergetree_mutations.lib
echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} -n 2>/dev/null
echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} 2>/dev/null
echo $?

View File

@ -5,8 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery &
yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT --multiquery &
yes 'CREATE TABLE IF NOT EXISTS table (x UInt8) ENGINE = MergeTree ORDER BY tuple();' | head -n 1000 | $CLICKHOUSE_CLIENT &
yes 'DROP TABLE IF EXISTS table;' | head -n 1000 | $CLICKHOUSE_CLIENT &
wait
${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS table"

View File

@ -12,7 +12,7 @@ settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_que
# Test insert logging on each block and checkPacket() method
$CLICKHOUSE_CLIENT $settings -n -q "
$CLICKHOUSE_CLIENT $settings -q "
DROP TABLE IF EXISTS merge_tree_table;
CREATE TABLE merge_tree_table (id UInt64, date Date, uid UInt32) ENGINE = MergeTree(date, id, 8192);"

View File

@ -7,6 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
CLICKHOUSE_CLIENT="${CLICKHOUSE_CLIENT} --database_replicated_allow_explicit_uuid 3"
# there are some issues with Atomic database, let's generate it uniq
# otherwise flaky check will not pass.
uuid=$(${CLICKHOUSE_CLIENT} --query "SELECT reinterpretAsUUID(currentDatabase())")

View File

@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS table"
seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT -n &
seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT -n 2>/dev/null &
seq 1 100 | sed -r -e "s/.+/CREATE TABLE table (x UInt8) ENGINE = MergeTree ORDER BY x; DROP TABLE table;/" | $CLICKHOUSE_CLIENT &
seq 1 100 | sed -r -e "s/.+/SELECT * FROM system.tables WHERE database = '${CLICKHOUSE_DATABASE}' LIMIT 1000000, 1;/" | $CLICKHOUSE_CLIENT 2>/dev/null &
wait

View File

@ -27,7 +27,7 @@ function thread_drop_create()
while [ $SECONDS -lt "$TIMELIMIT" ] && [ $it -lt 100 ];
do
it=$((it+1))
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists view_00840;
create view view_00840 as select count(*),database,table from system.columns group by database,table;
"

View File

@ -8,11 +8,11 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS contributors;
CREATE TABLE contributors (name String) ENGINE = Memory;"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.contributors ORDER BY name DESC FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO contributors FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
-- random results
SELECT * FROM contributors LIMIT 10 FORMAT Null;
DROP TABLE contributors;
@ -21,30 +21,30 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_numbers (number UInt64) ENGINE = Memory;"
# less than default block size (65k)
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 10000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
TRUNCATE TABLE parquet_numbers;"
# More than default block size
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.numbers LIMIT 100000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
TRUNCATE TABLE parquet_numbers;"
${CLICKHOUSE_CLIENT} --max_block_size=2 --query="SELECT * FROM system.numbers LIMIT 3 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
TRUNCATE TABLE parquet_numbers;"
${CLICKHOUSE_CLIENT} --max_block_size=1 --query="SELECT * FROM system.numbers LIMIT 1000 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_numbers FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_numbers ORDER BY number DESC LIMIT 10;
DROP TABLE parquet_numbers;
DROP TABLE IF EXISTS parquet_events;
CREATE TABLE parquet_events (event String, value UInt64, description String) ENGINE = Memory;"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM system.events FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_events FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT event, description FROM parquet_events WHERE event IN ('ContextLock', 'Query') ORDER BY event;
DROP TABLE parquet_events;
@ -78,7 +78,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types2 ORDER BY int8 FORMAT
echo diff:
diff "${CLICKHOUSE_TMP}"/parquet_all_types_1.dump "${CLICKHOUSE_TMP}"/parquet_all_types_2.dump
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
TRUNCATE TABLE parquet_types2;
INSERT INTO parquet_types3 values ( 79, 81, 82, 83, 84, 85, 86, 87, 88, 89, 'str01', 'fstr1', '2003-03-04', '2004-05-06', toDateTime64('2004-05-06 07:08:09.012', 9));"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types3 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"
@ -88,7 +88,7 @@ ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 values ( 80,
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types4 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types2 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types4 FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT 'dest:';
SELECT * FROM parquet_types2 ORDER BY int8;
SELECT 'min:';
@ -106,7 +106,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types5 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_types1 ORDER BY int8 FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_types6 FORMAT Parquet"
echo dest from null:
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_types6 ORDER BY int8;
DROP TABLE parquet_types5;
@ -126,7 +126,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
INSERT INTO parquet_arrays VALUES (2, [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_arrays FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_arrays ORDER BY id;
DROP TABLE parquet_arrays;
@ -135,7 +135,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_nullable_arrays (id UInt32, a1 Array(Nullable(UInt32)), a2 Array(Nullable(String)), a3 Array(Nullable(Decimal(4, 2)))) engine=Memory();
INSERT INTO parquet_nullable_arrays VALUES (1, [1, Null, 2], [Null, 'Some string', Null], [0.001, Null, 42.42]), (2, [Null], [Null], [Null]), (3, [], [], []);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nullable_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nullable_arrays FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_nullable_arrays ORDER BY id;
DROP TABLE parquet_nullable_arrays;
@ -143,7 +143,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_nested_arrays (a1 Array(Array(Array(UInt32))), a2 Array(Array(Array(String))), a3 Array(Array(Nullable(UInt32))), a4 Array(Array(Nullable(String)))) engine=Memory();
INSERT INTO parquet_nested_arrays VALUES ([[[1,2,3], [1,2,3]], [[1,2,3]], [[], [1,2,3]]], [[['Some string', 'Some string'], []], [['Some string']], [[]]], [[Null, 1, 2], [Null], [1, 2], []], [['Some string', Null, 'Some string'], [Null], []]);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_nested_arrays FORMAT Parquet" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_nested_arrays FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_nested_arrays;
DROP TABLE parquet_nested_arrays;
@ -151,6 +151,6 @@ ${CLICKHOUSE_CLIENT} -n --query="
CREATE TABLE parquet_decimal (d1 Decimal32(4), d2 Decimal64(8), d3 Decimal128(16), d4 Decimal256(32)) ENGINE = Memory;
INSERT INTO TABLE parquet_decimal VALUES (0.123, 0.123123123, 0.123123123123, 0.123123123123123123);"
${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_decimal FORMAT Arrow" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO parquet_decimal FORMAT Arrow"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM parquet_decimal;
DROP TABLE parquet_decimal;"

View File

@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@ -26,7 +26,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump
echo diff0:
diff "${CLICKHOUSE_TMP}"/parquet_decimal0_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal0_2.dump
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@ -61,7 +61,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump
echo diff1:
diff "${CLICKHOUSE_TMP}"/parquet_decimal1_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal1_2.dump
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@ -75,7 +75,7 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d, e, f, g, h, i, j;" > "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump
echo diff2:
diff "${CLICKHOUSE_TMP}"/parquet_decimal2_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal2_2.dump
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;
@ -86,7 +86,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_1.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet" 2> /dev/null
echo nothing:
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM decimal2 ORDER BY a, b, c, d;
TRUNCATE TABLE decimal2;
@ -94,7 +94,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_2.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet"
echo nulls:
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT * FROM decimal2 ORDER BY a, b, c, d;
TRUNCATE TABLE decimal2;
@ -104,7 +104,7 @@ ${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" > "${CLICKHOUSE_TMP}"/parquet_decimal3_3.parquet
${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal ORDER BY a, b, c, d FORMAT Parquet;" | ${CLICKHOUSE_CLIENT} --query="INSERT INTO decimal2 FORMAT Parquet"
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
SELECT 'full orig:';
SELECT * FROM decimal ORDER BY a, b, c, d;
SELECT 'full inserted:';
@ -115,6 +115,6 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM decimal2 ORDER BY a, b, c, d;" > "${
echo diff3:
diff "${CLICKHOUSE_TMP}"/parquet_decimal3_1.dump "${CLICKHOUSE_TMP}"/parquet_decimal3_2.dump
${CLICKHOUSE_CLIENT} -n --query="
${CLICKHOUSE_CLIENT} --query="
DROP TABLE IF EXISTS decimal;
DROP TABLE IF EXISTS decimal2;"

View File

@ -13,5 +13,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# ${CURDIR}/00921_datetime64_compatibility.python
python3 "${CURDIR}"/00921_datetime64_compatibility_long.python \
| ${CLICKHOUSE_CLIENT} --ignore-error -nm --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \
| ${CLICKHOUSE_CLIENT} --ignore-error -m --calculate_text_stack_trace 0 --log-level 'error' 2>&1 \
| grep -v -e 'Received exception .*$' -e '^(query: ' | sed 's/^\(Code: [0-9]\+\).*$/\1/g'

View File

@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions1;"
$CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions2;"
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
CREATE TABLE indices_mutaions1
(
u64 UInt64,

View File

@ -22,7 +22,7 @@ function thread1()
function thread2()
{
while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done
while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table ADD COLUMN h String '0'; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done
}
function thread3()

View File

@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS alter_table0;
DROP TABLE IF EXISTS alter_table1;
@ -31,7 +31,7 @@ function thread1()
function thread2()
{
while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done
while true; do $CLICKHOUSE_CLIENT --query "ALTER TABLE alter_table0 ADD COLUMN h String DEFAULT '0'; ALTER TABLE alter_table0 MODIFY COLUMN h UInt64; ALTER TABLE alter_table0 DROP COLUMN h;"; done
}
function thread3()
@ -87,6 +87,6 @@ check_replication_consistency "alter_table" "count(), sum(a), sum(b), round(sum(
$CLICKHOUSE_CLIENT -q "SELECT table, lost_part_count FROM system.replicas WHERE database=currentDatabase() AND lost_part_count!=0";
$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
$CLICKHOUSE_CLIENT -n -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table0;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
$CLICKHOUSE_CLIENT -q "DROP TABLE alter_table1;" 2> >(grep -F -v 'is already started to be removing by another replica right now') &
wait

View File

@ -5,9 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -n -q "DROP DATABASE IF EXISTS testlazy"
${CLICKHOUSE_CLIENT} -q "DROP DATABASE IF EXISTS testlazy"
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
CREATE DATABASE testlazy ENGINE = Lazy(1);
CREATE TABLE testlazy.log (a UInt64, b UInt64) ENGINE = Log;
CREATE TABLE testlazy.slog (a UInt64, b UInt64) ENGINE = StripeLog;
@ -30,7 +30,7 @@ ${CLICKHOUSE_CLIENT} -q "
sleep 1.5
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
SELECT * FROM testlazy.log LIMIT 0; -- drop testlazy.log from cache
RENAME TABLE testlazy.log TO testlazy.log2;
SELECT database, name FROM system.tables WHERE database = 'testlazy';
@ -44,7 +44,7 @@ ${CLICKHOUSE_CLIENT} -q "
sleep 1.5
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
INSERT INTO testlazy.log2 VALUES (1, 1);
INSERT INTO testlazy.slog VALUES (2, 2);
INSERT INTO testlazy.tlog VALUES (3, 3);
@ -55,14 +55,14 @@ ${CLICKHOUSE_CLIENT} -n -q "
sleep 1.5
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
SELECT * FROM testlazy.log2 LIMIT 0; -- drop testlazy.log2 from cache
DROP TABLE testlazy.log2;
"
sleep 1.5
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
SELECT * FROM testlazy.slog;
SELECT * FROM testlazy.tlog;
"

View File

@ -83,7 +83,7 @@ export -f recreate_lazy_func4;
export -f show_tables_func;
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
DROP DATABASE IF EXISTS $CURR_DATABASE;
CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1);
"

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
set -e
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP DATABASE IF EXISTS database_for_dict;
DROP TABLE IF EXISTS table_for_dict1;
DROP TABLE IF EXISTS table_for_dict2;
@ -44,7 +44,7 @@ function thread3()
function thread4()
{
while true; do $CLICKHOUSE_CLIENT -n -q "
while true; do $CLICKHOUSE_CLIENT -q "
SELECT * FROM database_for_dict.dict1 FORMAT Null;
SELECT * FROM database_for_dict.dict2 FORMAT Null;
" ||: ; done
@ -52,7 +52,7 @@ function thread4()
function thread5()
{
while true; do $CLICKHOUSE_CLIENT -n -q "
while true; do $CLICKHOUSE_CLIENT -q "
SELECT dictGetString('database_for_dict.dict1', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null;
SELECT dictGetString('database_for_dict.dict2', 'value_column', toUInt64(number)) from numbers(1000) FROM FORMAT Null;
" ||: ; done
@ -117,7 +117,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 'Still alive'"
$CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict1"
$CLICKHOUSE_CLIENT -q "ATTACH DICTIONARY IF NOT EXISTS database_for_dict.dict2"
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP DATABASE database_for_dict;
DROP TABLE table_for_dict1;
DROP TABLE table_for_dict2;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT <<EOF
DROP TABLE IF EXISTS src;
DROP TABLE IF EXISTS mv;

View File

@ -28,7 +28,7 @@ exttypes=("Int128" "Int256" "UInt256")
echo "SELECT avgWeighted(to${left}(1), to${right}(2));"
done
done
) | $CLICKHOUSE_CLIENT_BINARY -nm
) | $CLICKHOUSE_CLIENT_BINARY -m
${CLICKHOUSE_CLIENT} --server_logs_file=/dev/null --query="SELECT avgWeighted(['string'], toFloat64(0))" 2>&1 \
| grep -c 'Code: 43. DB::Exception: .* DB::Exception:.* Types .* are non-conforming as arguments for aggregate function avgWeighted'

View File

@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -n --query="
$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --query="
DROP DATABASE IF EXISTS 01053_db;
CREATE DATABASE 01053_db;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_bad_alters";
$CLICKHOUSE_CLIENT -n --query "CREATE TABLE table_for_bad_alters (
$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_bad_alters (
key UInt64,
value1 UInt8,
value2 String

View File

@ -30,7 +30,7 @@ function drop_db()
database=$($CLICKHOUSE_CLIENT -q "select name from system.databases where name like '${CLICKHOUSE_DATABASE}%' order by rand() limit 1")
if [[ "$database" == "$CLICKHOUSE_DATABASE" ]]; then continue; fi
if [ -z "$database" ]; then continue; fi
$CLICKHOUSE_CLIENT -n --query \
$CLICKHOUSE_CLIENT --query \
"drop database if exists $database" 2>&1| grep -Fa "Exception: "
sleep 0.$RANDOM
done

View File

@ -31,7 +31,7 @@ $CLICKHOUSE_CLIENT -q "SELECT name,
splitByChar('/', metadata_path)[-2] as uuid_path, ((splitByChar('/', metadata_path)[-3] as metadata) = substr(uuid_path, 1, 3)) OR metadata='metadata'
FROM system.databases WHERE name LIKE '${CLICKHOUSE_DATABASE}_%'" | sed "s/$uuid_db_1/00001114-1000-4000-8000-000000000001/g" | sed "s/$uuid_db_2/00001114-1000-4000-8000-000000000002/g"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
CREATE TABLE ${DATABASE_1}.mt_tmp (n UInt64) ENGINE=MergeTree() ORDER BY tuple();
INSERT INTO ${DATABASE_1}.mt_tmp SELECT * FROM numbers(100);
CREATE TABLE ${DATABASE_3}.mt (n UInt64) ENGINE=MergeTree() ORDER BY tuple() PARTITION BY (n % 5);
@ -65,7 +65,7 @@ while [[ $($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE que
sleep 0.1
done
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
RENAME TABLE ${DATABASE_1}.mt TO ${DATABASE_1}.mt_tmp;
RENAME TABLE ${DATABASE_1}.mt_tmp TO ${DATABASE_2}.mt_tmp;
EXCHANGE TABLES ${DATABASE_2}.mt AND ${DATABASE_2}.mt_tmp;
@ -79,7 +79,7 @@ uuid_mt1=$($CLICKHOUSE_CLIENT -q "SELECT uuid FROM system.tables WHERE database=
$CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_1}.mt" | sed "s/$uuid_mt1/00001114-0000-4000-8000-000000000001/g"
$CLICKHOUSE_CLIENT --show_table_uuid_in_table_create_query_if_not_nil=1 -q "SHOW CREATE TABLE ${DATABASE_2}.mt" | sed "s/$explicit_uuid/00001114-0000-4000-8000-000000000002/g"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
DROP TABLE ${DATABASE_1}.mt SETTINGS database_atomic_wait_for_drop_and_detach_synchronously=0;
CREATE TABLE ${DATABASE_1}.mt (s String) ENGINE=Log();
INSERT INTO ${DATABASE_1}.mt SELECT 's' || toString(number) FROM numbers(5);

View File

@ -14,7 +14,7 @@ and interface in ('HTTP', 'TCP', 'TCP_Interserver')
and (user != 'default' or (a=1 and b=1)) -- FIXME: we should not write uninitialized address and port (but we do sometimes)
and event_time >= now() - interval 5 minute"
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
select * from remote('127.0.0.2', system, one, 'default', '');
select * from remote('127.0.0.2', system, one, 'default', 'wrong password'); -- { serverError AUTHENTICATION_FAILED }
select * from remote('127.0.0.2', system, one, 'nonexistsnt_user_1119', ''); -- { serverError AUTHENTICATION_FAILED }

View File

@ -7,6 +7,8 @@ DROP TABLE IF EXISTS rmt1;
DROP TABLE IF EXISTS rmt2;
DROP TABLE IF EXISTS rmt3;
SET database_replicated_allow_replicated_engine_arguments=1;
CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01148/{shard}/{database}/{table}', '{replica}') ORDER BY n;
SHOW CREATE TABLE rmt;
RENAME TABLE rmt TO rmt1;

View File

@ -14,6 +14,8 @@ INSERT INTO src VALUES (3), (4);
SELECT * FROM mv ORDER BY n;
DROP TABLE mv SYNC;
SET database_replicated_allow_explicit_uuid=3;
SET show_table_uuid_in_table_create_query_if_not_nil=1;
CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n;
ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src;

View File

@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated"
$CLICKHOUSE_CLIENT -n --query "
$CLICKHOUSE_CLIENT --query "
CREATE TABLE table_for_rename_replicated
(
date Date,

View File

@ -14,7 +14,7 @@ ${CLICKHOUSE_CLIENT} --format Null -q "CREATE USER $MISTER_USER"
# This is needed to keep at least one running query for user for the time of test.
# (1k http queries takes ~1 second, let's run for 5x more to avoid flaps)
${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null -n <<<'SELECT sleepEachRow(1) FROM numbers(5)' &
${CLICKHOUSE_CLIENT} --user ${MISTER_USER} --function_sleep_max_microseconds_per_block 5000000 --format Null <<<'SELECT sleepEachRow(1) FROM numbers(5)' &
# ignore "yes: standard output: Broken pipe"
yes 'SELECT 1' 2>/dev/null | {

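The hunk above keeps one query running for the whole test via sleepEachRow; a minimal sketch of that pattern, assuming $CLICKHOUSE_CLIENT from the sourced shell_config.sh (the probe query is illustrative):

${CLICKHOUSE_CLIENT} --function_sleep_max_microseconds_per_block 5000000 --format Null \
    <<< 'SELECT sleepEachRow(1) FROM numbers(5)' &   # background query stays alive for ~5 seconds
sleep 1
# While it sleeps, the query is visible as a running process.
${CLICKHOUSE_CLIENT} -q "SELECT count() >= 1 FROM system.processes WHERE query LIKE '%sleepEachRow%'"
wait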
View File

@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
DROP DATABASE IF EXISTS 01280_db;
CREATE DATABASE 01280_db;
DROP TABLE IF EXISTS 01280_db.table_for_dict;
@ -39,9 +39,9 @@ $CLICKHOUSE_CLIENT -n --query="
LIFETIME(MIN 1000 MAX 2000)
LAYOUT(COMPLEX_KEY_SSD_CACHE(FILE_SIZE 8192 PATH '$USER_FILES_PATH/0d'));"
$CLICKHOUSE_CLIENT -nq "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }"
$CLICKHOUSE_CLIENT -q "SELECT dictHas('01280_db.ssd_dict', 'a', tuple('1')); -- { serverError 43 }"
$CLICKHOUSE_CLIENT -n --query="
$CLICKHOUSE_CLIENT --query="
SELECT 'TEST_SMALL';
SELECT 'VALUE FROM RAM BUFFER';
SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple('1', toInt32(3)));
@ -63,9 +63,9 @@ $CLICKHOUSE_CLIENT -n --query="
SELECT dictGetInt32('01280_db.ssd_dict', 'b', tuple('10', toInt32(-20)));
SELECT dictGetString('01280_db.ssd_dict', 'c', tuple('10', toInt32(-20)));"
$CLICKHOUSE_CLIENT -nq "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }"
$CLICKHOUSE_CLIENT -q "SELECT dictGetUInt64('01280_db.ssd_dict', 'a', tuple(toInt32(3))); -- { serverError 53 }"
$CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict;
$CLICKHOUSE_CLIENT --query="DROP DICTIONARY 01280_db.ssd_dict;
DROP TABLE IF EXISTS 01280_db.keys_table;
CREATE TABLE 01280_db.keys_table
(
@ -122,4 +122,4 @@ $CLICKHOUSE_CLIENT -n --query="DROP DICTIONARY 01280_db.ssd_dict;
DROP DICTIONARY IF EXISTS database_for_dict.ssd_dict;
DROP TABLE IF EXISTS database_for_dict.keys_table;"
$CLICKHOUSE_CLIENT -n --query="DROP DATABASE IF EXISTS 01280_db;"
$CLICKHOUSE_CLIENT --query="DROP DATABASE IF EXISTS 01280_db;"

View File

@ -85,7 +85,7 @@ export -f recreate_lazy_func4;
export -f test_func;
${CLICKHOUSE_CLIENT} -n -q "
${CLICKHOUSE_CLIENT} -q "
DROP DATABASE IF EXISTS $CURR_DATABASE;
CREATE DATABASE $CURR_DATABASE ENGINE = Lazy(1);
"

View File

@ -10,7 +10,7 @@ set -e
function thread()
{
while true; do
$CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS test_table_$1 SYNC;
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS test_table_$1 SYNC;
CREATE TABLE test_table_$1 (a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$1') ORDER BY tuple();" 2>&1 |
grep -vP '(^$)|(^Received exception from server)|(^\d+\. )|because the last replica of the table was dropped right now|is already started to be removing by another replica right now| were removed by another replica|Removing leftovers from table|Another replica was suddenly created|was created by another server at the same moment|was suddenly removed|some other replicas were created at the same time|^\(query: '
done

View File

@ -17,7 +17,7 @@ function thread1()
{
local TIMELIMIT=$((SECONDS+$1))
while [ $SECONDS -lt "$TIMELIMIT" ]; do
$CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"
$CLICKHOUSE_CLIENT --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"
done
}

View File

@ -20,4 +20,4 @@ for OFFSET in {0..15}; do
FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT);
"
done
done | $CLICKHOUSE_CLIENT -n --max_block_size 5
done | $CLICKHOUSE_CLIENT --max_block_size 5

View File

@ -19,4 +19,4 @@ for _ in $(seq $ITERATIONS); do
throwIf((c != 0 OR first != 0 OR last != 0) AND (c != last - first + 1))
FROM (SELECT * FROM numbers($SIZE) LIMIT $OFFSET, $LIMIT);
"
done | $CLICKHOUSE_CLIENT -n --max_block_size $(($RANDOM % 20 + 1)) | uniq
done | $CLICKHOUSE_CLIENT --max_block_size $(($RANDOM % 20 + 1)) | uniq

View File

@ -10,7 +10,7 @@ $CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS ordinary_db"
$CLICKHOUSE_CLIENT --query "CREATE DATABASE ordinary_db"
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
CREATE DICTIONARY ordinary_db.dict1
(
@ -35,7 +35,7 @@ function dict_get_thread()
function drop_create_table_thread()
{
while true; do
$CLICKHOUSE_CLIENT -n --query "CREATE TABLE ordinary_db.table_for_dict_real (
$CLICKHOUSE_CLIENT --query "CREATE TABLE ordinary_db.table_for_dict_real (
key_column UInt64,
second_column UInt8,
third_column String

View File

@ -23,7 +23,7 @@ function f {
function g {
local TIMELIMIT=$((SECONDS+$1))
for _ in $(seq 1 100); do
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
INSERT INTO mem SELECT number FROM numbers(1000000);
INSERT INTO mem SELECT number FROM numbers(1000000);
INSERT INTO mem SELECT number FROM numbers(1000000);

View File

@ -11,7 +11,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
NUM_REPLICAS=6
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS r$i SYNC;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum_many', 'r$i') ORDER BY x;
"
@ -39,12 +39,12 @@ done
wait
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
SYSTEM SYNC REPLICA r$i;
SELECT count(), min(x), max(x), sum(x) FROM r$i;
"
done
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS r$i SYNC;"
$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS r$i SYNC;"
done

View File

@ -12,13 +12,13 @@ NUM_REPLICAS=2
NUM_INSERTS=5
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
DROP TABLE IF EXISTS r$i;
CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_quorum', 'r$i') ORDER BY x;
"
done
$CLICKHOUSE_CLIENT -n -q "SYSTEM STOP REPLICATION QUEUES r2;"
$CLICKHOUSE_CLIENT -q "SYSTEM STOP REPLICATION QUEUES r2;"
function thread {
$CLICKHOUSE_CLIENT --insert_quorum 2 --insert_quorum_parallel 1 --query "INSERT INTO r1 SELECT $1"
@ -28,12 +28,12 @@ for i in $(seq 1 $NUM_INSERTS); do
thread $i &
done
$CLICKHOUSE_CLIENT -n -q "SYSTEM START REPLICATION QUEUES r2;"
$CLICKHOUSE_CLIENT -q "SYSTEM START REPLICATION QUEUES r2;"
wait
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n -q "
$CLICKHOUSE_CLIENT -q "
SELECT count(), min(x), max(x), sum(x) FROM r$i;
DROP TABLE IF EXISTS r$i;
"

View File

@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm <<EOL
$CLICKHOUSE_CLIENT -m <<EOL
drop table if exists dist_01247;
drop table if exists data_01247;
@ -29,7 +29,7 @@ for ((i = 0; i < 100; ++i)); do
"--optimize_distributed_group_by_sharding_key=1"
"--prefer_localhost_replica=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --format CSV -nm -q "select count(), * from dist_01247 group by number order by number limit 1 format Null"
$CLICKHOUSE_CLIENT "${opts[@]}" --format CSV -m -q "select count(), * from dist_01247 group by number order by number limit 1 format Null"
# expect zero new network errors
network_errors_after=$($CLICKHOUSE_CLIENT -q "SELECT value FROM system.errors WHERE name = 'NETWORK_ERROR'")
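The zero-new-network-errors check above is a generic before/after counter comparison on system.errors; a small sketch of it, assuming $CLICKHOUSE_CLIENT from shell_config.sh (the workload in the middle is a placeholder):

network_errors_before=$($CLICKHOUSE_CLIENT -q "SELECT value FROM system.errors WHERE name = 'NETWORK_ERROR'")
$CLICKHOUSE_CLIENT -q "SELECT 1 FORMAT Null"   # workload under test goes here
network_errors_after=$($CLICKHOUSE_CLIENT -q "SELECT value FROM system.errors WHERE name = 'NETWORK_ERROR'")
# Both values are empty if the counter was never incremented; equality means no new errors appeared.
[ "$network_errors_before" = "$network_errors_after" ] && echo "no new network errors"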

View File

@ -11,14 +11,14 @@ set -o pipefail
echo "
DROP TABLE IF EXISTS storage_join_race;
CREATE TABLE storage_join_race (x UInt64, y UInt64) Engine = Join(ALL, FULL, x);
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
function read_thread_big()
{
while true; do
echo "
SELECT * FROM ( SELECT number AS x FROM numbers(100000) ) AS t1 ALL FULL JOIN storage_join_race USING (x) FORMAT Null;
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
done
}
@ -27,7 +27,7 @@ function read_thread_small()
while true; do
echo "
SELECT * FROM ( SELECT number AS x FROM numbers(10) ) AS t1 ALL FULL JOIN storage_join_race USING (x) FORMAT Null;
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
done
}
@ -36,7 +36,7 @@ function read_thread_select()
while true; do
echo "
SELECT * FROM storage_join_race FORMAT Null;
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
done
}
@ -56,7 +56,7 @@ echo "
INSERT INTO storage_join_race
SELECT number AS x, sleepEachRow(0.1) + number AS y FROM numbers ($TIMEOUT * 10)
SETTINGS function_sleep_max_microseconds_per_block = 100000000, max_block_size = 10;
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
wait

View File

@ -17,7 +17,7 @@ INSERT INTO db01802.postgresql SELECT number FROM numbers(10);
SELECT 'before row policy';
SELECT * FROM db01802.postgresql;
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
echo "
@ -28,7 +28,7 @@ CREATE ROW POLICY IF NOT EXISTS test_policy ON db01802.postgresql FOR SELECT USI
SELECT '';
SELECT 'after row policy with no password';
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} db01802 --user postgresql_user -c "SELECT * FROM postgresql;"
@ -40,7 +40,7 @@ GRANT SELECT(val) ON db01802.postgresql TO postgresql_user;
CREATE ROW POLICY IF NOT EXISTS test_policy ON db01802.postgresql FOR SELECT USING val = 2 TO postgresql_user;
SELECT 'after row policy with plaintext_password';
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
psql "postgresql://postgresql_user:qwerty@localhost:${CLICKHOUSE_PORT_POSTGRESQL}/db01802" -c "SELECT * FROM postgresql;"

View File

@ -17,10 +17,10 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# The number of threads removing data parts should be between 1 and 129.
# Because max_parts_cleaning_thread_pool_size is 128 by default
$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -nm -q "create database ordinary_$CLICKHOUSE_DATABASE engine=Ordinary"
$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -m -q "create database ordinary_$CLICKHOUSE_DATABASE engine=Ordinary"
# MergeTree
$CLICKHOUSE_CLIENT -nm -q """
$CLICKHOUSE_CLIENT -m -q """
use ordinary_$CLICKHOUSE_DATABASE;
drop table if exists data_01810;
@ -47,7 +47,7 @@ $CLICKHOUSE_CLIENT -nm -q """
"""
# ReplicatedMergeTree
$CLICKHOUSE_CLIENT -nm -q """
$CLICKHOUSE_CLIENT -m -q """
use ordinary_$CLICKHOUSE_DATABASE;
drop table if exists rep_data_01810;
@ -76,4 +76,4 @@ $CLICKHOUSE_CLIENT -nm -q """
format Null;
"""
$CLICKHOUSE_CLIENT -nm -q "drop database ordinary_$CLICKHOUSE_DATABASE"
$CLICKHOUSE_CLIENT -m -q "drop database ordinary_$CLICKHOUSE_DATABASE"
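The comment above ties the expected thread count to max_parts_cleaning_thread_pool_size (128 by default, plus the query's own thread, hence 1 to 129); a rough sketch of the pattern, assuming $CLICKHOUSE_CLIENT from shell_config.sh — table name and partition layout are illustrative, not copied from the test:

$CLICKHOUSE_CLIENT -m -q "
    drop table if exists parts_cleanup_demo;
    -- one part per partition, so DROP TABLE has many parts to remove
    create table parts_cleanup_demo (key Int) engine = MergeTree() order by key partition by key % 100;
    insert into parts_cleanup_demo select * from numbers(100);
    drop table parts_cleanup_demo settings log_queries = 1;
    system flush logs;
    -- thread_ids of the DROP query in query_log shows how many threads took part in the removal
    select length(thread_ids) between 1 and 129
    from system.query_log
    where current_database = currentDatabase() and query like 'drop table parts_cleanup_demo%' and type = 'QueryFinish'
    order by event_time desc limit 1;
"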

View File

@ -9,6 +9,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
echo "
DROP USER IF EXISTS postgresql_user;
CREATE USER postgresql_user HOST IP '127.0.0.1' IDENTIFIED WITH no_password;
" | $CLICKHOUSE_CLIENT -n
" | $CLICKHOUSE_CLIENT
psql --host localhost --port ${CLICKHOUSE_PORT_POSTGRESQL} ${CLICKHOUSE_DATABASE} --user postgresql_user -c "SELECT NULL;"

View File

@ -12,7 +12,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -nm -q "
$CLICKHOUSE_CLIENT -m -q "
drop table if exists data_01900_1;
drop table if exists data_01900_2;
@ -27,18 +27,18 @@ $CLICKHOUSE_CLIENT -nm -q "
# so 100 mutations will be scheduled and killed later.
for i in {1..100}; do
echo "alter table data_01900_1 update s = 'foo_$i' where 1;"
done | $CLICKHOUSE_CLIENT -nm
done | $CLICKHOUSE_CLIENT -m
# but these mutations should not be killed.
(
for i in {1..100}; do
echo "alter table data_01900_2 update s = 'bar_$i' where 1;"
done | $CLICKHOUSE_CLIENT -nm --mutations_sync=1
done | $CLICKHOUSE_CLIENT -m --mutations_sync=1
) &
$CLICKHOUSE_CLIENT --format Null -nm -q "kill mutation where table = 'data_01900_1' and database = '$CLICKHOUSE_DATABASE';"
$CLICKHOUSE_CLIENT --format Null -m -q "kill mutation where table = 'data_01900_1' and database = '$CLICKHOUSE_DATABASE';"
wait
$CLICKHOUSE_CLIENT -nm -q "select * from data_01900_2"
$CLICKHOUSE_CLIENT -m -q "select * from data_01900_2"
$CLICKHOUSE_CLIENT -q "drop table data_01900_1"
$CLICKHOUSE_CLIENT -q "drop table data_01900_2"
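The hunks above schedule many mutations, kill the pending ones on one table, and let the other table's mutations finish; a minimal sketch of the kill side, assuming $CLICKHOUSE_CLIENT from shell_config.sh (table name is illustrative):

$CLICKHOUSE_CLIENT -m -q "
    drop table if exists kill_mutation_demo;
    create table kill_mutation_demo (key Int, s String) engine = MergeTree() order by key;
    insert into kill_mutation_demo select number, 'foo' from numbers(10);
    -- schedule a mutation without waiting for it (mutations_sync = 0 is the default)
    alter table kill_mutation_demo update s = 'bar' where 1;
"
# Cancel whatever is still pending for the table, then confirm nothing unfinished remains.
$CLICKHOUSE_CLIENT --format Null -q "kill mutation where database = currentDatabase() and table = 'kill_mutation_demo'"
$CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'kill_mutation_demo' and not is_done"
$CLICKHOUSE_CLIENT -q "drop table kill_mutation_demo"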

View File

@ -18,7 +18,7 @@ done
wait
for i in $(seq 1 $NUM_REPLICAS); do
$CLICKHOUSE_CLIENT -n --query "CREATE TABLE ttl_table$i(
$CLICKHOUSE_CLIENT --query "CREATE TABLE ttl_table$i(
key DateTime
)
ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/ttl_table', '$i')

View File

@ -15,7 +15,7 @@ ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO us
${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}"
${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }"
${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 36 }"
${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db"
${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}"

View File

@ -0,0 +1,10 @@
2
3
m1
m2
rmt1
rmt2
02858000-1000-4000-8000-000000000
0
CREATE TABLE default.rmt1\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/test/02858_explicit_uuid_and_zk_path_default/rmt/{shard}\', \'_{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192
CREATE TABLE default.rmt2\n(\n `n` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')\nORDER BY n\nSETTINGS index_granularity = 8192

View File

@ -0,0 +1,44 @@
#!/usr/bin/env bash
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
db=$CLICKHOUSE_DATABASE
if [[ $($CLICKHOUSE_CLIENT -q "SELECT engine = 'Replicated' FROM system.databases WHERE name='$CLICKHOUSE_DATABASE'") != 1 ]]; then
$CLICKHOUSE_CLIENT -q "CREATE DATABASE rdb_$CLICKHOUSE_DATABASE ENGINE=Replicated('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rdb', '1', '1')"
db="rdb_$CLICKHOUSE_DATABASE"
fi
$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=0 -q "CREATE TABLE $db.m0
UUID '02858000-1000-4000-8000-000000000000' (n int) ENGINE=Memory" 2>&1| grep -Fac "database_replicated_allow_explicit_uuid"
$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=1 -q "CREATE TABLE $db.m1
UUID '02858000-1000-4000-8000-000000000$(($RANDOM % 10))$(($RANDOM % 10))$(($RANDOM % 10))' (n int) ENGINE=Memory"
$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_explicit_uuid=2 -q "CREATE TABLE $db.m2
UUID '02858000-1000-4000-8000-000000000002' (n int) ENGINE=Memory"
$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=0 -q "CREATE TABLE $db.rmt0 (n int)
ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n" 2>&1| grep -Fac "database_replicated_allow_replicated_engine_arguments"
$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=1 -q "CREATE TABLE $db.rmt1 (n int)
ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n"
$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none --database_replicated_allow_replicated_engine_arguments=2 -q "CREATE TABLE $db.rmt2 (n int)
ENGINE=ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/rmt/{shard}', '_{replica}') ORDER BY n"
$CLICKHOUSE_CLIENT -q "SELECT name FROM system.tables WHERE database='$db' ORDER BY name"
$CLICKHOUSE_CLIENT -q "SELECT substring(toString(uuid) as s, 1, length(s) - 3) FROM system.tables WHERE database='$db' and name='m1'"
$CLICKHOUSE_CLIENT -q "SELECT toString(uuid) LIKE '02858000%' FROM system.tables WHERE database='$db' and name='m2'"
$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt1" | sed "s/$db/default/g"
$CLICKHOUSE_CLIENT -q "SHOW CREATE $db.rmt2" | sed "s/$db/default/g"
$CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS rdb_$CLICKHOUSE_DATABASE"

View File

@ -1,5 +1,5 @@
Test create statistics:
CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, count_min, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, count_min),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, count_min, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, count_min, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64) STATISTICS(tdigest, uniq, countmin, minmax),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)) STATISTICS(tdigest, uniq, countmin, minmax),\n `d` DateTime STATISTICS(tdigest, uniq, countmin, minmax),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
Test materialize and drop statistics:
CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, count_min),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)) STATISTICS(uniq, countmin),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192
CREATE TABLE default.tab\n(\n `a` LowCardinality(Int64),\n `b` LowCardinality(Nullable(String)),\n `c` LowCardinality(Nullable(Int64)),\n `d` DateTime,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS index_granularity = 8192

View File

@ -12,10 +12,10 @@ SELECT 'Test create statistics:';
CREATE TABLE tab
(
a LowCardinality(Int64) STATISTICS(count_min, minmax, tdigest, uniq),
b LowCardinality(Nullable(String)) STATISTICS(count_min, uniq),
c LowCardinality(Nullable(Int64)) STATISTICS(count_min, minmax, tdigest, uniq),
d DateTime STATISTICS(count_min, minmax, tdigest, uniq),
a LowCardinality(Int64) STATISTICS(countmin, minmax, tdigest, uniq),
b LowCardinality(Nullable(String)) STATISTICS(countmin, uniq),
c LowCardinality(Nullable(Int64)) STATISTICS(countmin, minmax, tdigest, uniq),
d DateTime STATISTICS(countmin, minmax, tdigest, uniq),
pk String,
) Engine = MergeTree() ORDER BY pk;
@ -25,7 +25,7 @@ SHOW CREATE TABLE tab;
SELECT 'Test materialize and drop statistics:';
ALTER TABLE tab DROP STATISTICS a, b, c, d;
ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq;
ALTER TABLE tab ADD STATISTICS b TYPE countmin, uniq;
ALTER TABLE tab MATERIALIZE STATISTICS b;
SHOW CREATE TABLE tab;

View File

@ -1,5 +1,5 @@
-- Tags: no-fasttest
-- no-fasttest: 'count_min' sketches need a 3rd party library
-- no-fasttest: 'countmin' sketches need a 3rd party library
-- Tests that DDL statements which create / drop / materialize statistics
@ -71,29 +71,29 @@ CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree()
CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String
-- countmin requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String
-- These types work:
CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col UInt8 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col UInt256 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Float32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Decimal32(3) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Date STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Date32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col DateTime STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col DateTime64 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col IPv4 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col Nullable(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col String STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
CREATE TABLE tab (col FixedString(1) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab;
-- These types don't work:
CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col Array(Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col UUID STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
CREATE TABLE tab (col IPv6 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS }
-- minmax requires data_type.isValueRepresentedByInteger
-- These types work:
@ -187,17 +187,17 @@ ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATI
-- uniq
-- Works:
ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64;
ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64;
ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64;
-- Doesn't work:
ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS }
ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS }
-- count_min
-- countmin
-- Works:
ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64;
ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64;
ALTER TABLE tab ADD STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64;
ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64;
-- Doesn't work:
ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS }
ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS }
ALTER TABLE tab ADD STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS }
ALTER TABLE tab MODIFY STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS }
-- minmax
-- Works:
ALTER TABLE tab ADD STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64;
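As the comments above state, countmin statistics are only accepted for types represented by an integer and for (Fixed)String; a short sketch of how that surfaces outside the test harness, assuming $CLICKHOUSE_CLIENT from shell_config.sh and that the session enables experimental statistics (the allow_experimental_statistics setting name is an assumption, not taken from this diff):

$CLICKHOUSE_CLIENT --allow_experimental_statistics 1 -m -q "
    drop table if exists countmin_demo;
    create table countmin_demo (col String STATISTICS(countmin)) engine = MergeTree() order by tuple();  -- accepted
    drop table countmin_demo;
"
# A UUID column is rejected with ILLEGAL_STATISTICS, matching the error tag used above.
$CLICKHOUSE_CLIENT --allow_experimental_statistics 1 -q "create table countmin_demo (col UUID STATISTICS(countmin)) engine = MergeTree() order by tuple()" 2>&1 | grep -c ILLEGAL_STATISTICS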

View File

@ -1,5 +1,5 @@
-- Tags: no-fasttest
-- no-fasttest: 'count_min' sketches need a 3rd party library
-- no-fasttest: 'countmin' sketches need a 3rd party library
-- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types.
@ -13,27 +13,27 @@ CREATE TABLE tab
u64 UInt64,
u64_tdigest UInt64 STATISTICS(tdigest),
u64_minmax UInt64 STATISTICS(minmax),
u64_count_min UInt64 STATISTICS(count_min),
u64_countmin UInt64 STATISTICS(countmin),
u64_uniq UInt64 STATISTICS(uniq),
f64 Float64,
f64_tdigest Float64 STATISTICS(tdigest),
f64_minmax Float64 STATISTICS(minmax),
f64_count_min Float64 STATISTICS(count_min),
f64_countmin Float64 STATISTICS(countmin),
f64_uniq Float64 STATISTICS(uniq),
dt DateTime,
dt_tdigest DateTime STATISTICS(tdigest),
dt_minmax DateTime STATISTICS(minmax),
dt_count_min DateTime STATISTICS(count_min),
dt_countmin DateTime STATISTICS(countmin),
dt_uniq DateTime STATISTICS(uniq),
b Bool,
b_tdigest Bool STATISTICS(tdigest),
b_minmax Bool STATISTICS(minmax),
b_count_min Bool STATISTICS(count_min),
b_countmin Bool STATISTICS(countmin),
b_uniq Bool STATISTICS(uniq),
s String,
-- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest
-- s_minmax String STATISTICS(minmax), -- not supported by minmax
s_count_min String STATISTICS(count_min),
s_countmin String STATISTICS(countmin),
s_uniq String STATISTICS(uniq)
) Engine = MergeTree() ORDER BY tuple()
SETTINGS min_bytes_for_wide_part = 0;
@ -72,25 +72,25 @@ SELECT 'u64 and =';
SELECT count(*) FROM tab WHERE u64 = 7;
SELECT count(*) FROM tab WHERE u64_tdigest = 7;
SELECT count(*) FROM tab WHERE u64_minmax = 7;
SELECT count(*) FROM tab WHERE u64_count_min = 7;
SELECT count(*) FROM tab WHERE u64_countmin = 7;
SELECT count(*) FROM tab WHERE u64_uniq = 7;
SELECT count(*) FROM tab WHERE u64 = 7.7;
SELECT count(*) FROM tab WHERE u64_tdigest = 7.7;
SELECT count(*) FROM tab WHERE u64_minmax = 7.7;
SELECT count(*) FROM tab WHERE u64_count_min = 7.7;
SELECT count(*) FROM tab WHERE u64_countmin = 7.7;
SELECT count(*) FROM tab WHERE u64_uniq = 7.7;
SELECT count(*) FROM tab WHERE u64 = '7';
SELECT count(*) FROM tab WHERE u64_tdigest = '7';
SELECT count(*) FROM tab WHERE u64_minmax = '7';
SELECT count(*) FROM tab WHERE u64_count_min = '7';
SELECT count(*) FROM tab WHERE u64_countmin = '7';
SELECT count(*) FROM tab WHERE u64_uniq = '7';
SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_minmax = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_countmin = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH }
SELECT 'u64 and <';
@ -98,25 +98,25 @@ SELECT 'u64 and <';
SELECT count(*) FROM tab WHERE u64 < 7;
SELECT count(*) FROM tab WHERE u64_tdigest < 7;
SELECT count(*) FROM tab WHERE u64_minmax < 7;
SELECT count(*) FROM tab WHERE u64_count_min < 7;
SELECT count(*) FROM tab WHERE u64_countmin < 7;
SELECT count(*) FROM tab WHERE u64_uniq < 7;
SELECT count(*) FROM tab WHERE u64 < 7.7;
SELECT count(*) FROM tab WHERE u64_tdigest < 7.7;
SELECT count(*) FROM tab WHERE u64_minmax < 7.7;
SELECT count(*) FROM tab WHERE u64_count_min < 7.7;
SELECT count(*) FROM tab WHERE u64_countmin < 7.7;
SELECT count(*) FROM tab WHERE u64_uniq < 7.7;
SELECT count(*) FROM tab WHERE u64 < '7';
SELECT count(*) FROM tab WHERE u64_tdigest < '7';
SELECT count(*) FROM tab WHERE u64_minmax < '7';
SELECT count(*) FROM tab WHERE u64_count_min < '7';
SELECT count(*) FROM tab WHERE u64_countmin < '7';
SELECT count(*) FROM tab WHERE u64_uniq < '7';
SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_minmax < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_countmin < '7.7'; -- { serverError TYPE_MISMATCH }
SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH }
-- f64 ----------------------------------------------------
@ -126,25 +126,25 @@ SELECT 'f64 and =';
SELECT count(*) FROM tab WHERE f64 = 7;
SELECT count(*) FROM tab WHERE f64_tdigest = 7;
SELECT count(*) FROM tab WHERE f64_minmax = 7;
SELECT count(*) FROM tab WHERE f64_count_min = 7;
SELECT count(*) FROM tab WHERE f64_countmin = 7;
SELECT count(*) FROM tab WHERE f64_uniq = 7;
SELECT count(*) FROM tab WHERE f64 = 7.7;
SELECT count(*) FROM tab WHERE f64_tdigest = 7.7;
SELECT count(*) FROM tab WHERE f64_minmax = 7.7;
SELECT count(*) FROM tab WHERE f64_count_min = 7.7;
SELECT count(*) FROM tab WHERE f64_countmin = 7.7;
SELECT count(*) FROM tab WHERE f64_uniq = 7.7;
SELECT count(*) FROM tab WHERE f64 = '7';
SELECT count(*) FROM tab WHERE f64_tdigest = '7';
SELECT count(*) FROM tab WHERE f64_minmax = '7';
SELECT count(*) FROM tab WHERE f64_count_min = '7';
SELECT count(*) FROM tab WHERE f64_countmin = '7';
SELECT count(*) FROM tab WHERE f64_uniq = '7';
SELECT count(*) FROM tab WHERE f64 = '7.7';
SELECT count(*) FROM tab WHERE f64_tdigest = '7.7';
SELECT count(*) FROM tab WHERE f64_minmax = '7.7';
SELECT count(*) FROM tab WHERE f64_count_min = '7.7';
SELECT count(*) FROM tab WHERE f64_countmin = '7.7';
SELECT count(*) FROM tab WHERE f64_uniq = '7.7';
SELECT 'f64 and <';
@ -152,25 +152,25 @@ SELECT 'f64 and <';
SELECT count(*) FROM tab WHERE f64 < 7;
SELECT count(*) FROM tab WHERE f64_tdigest < 7;
SELECT count(*) FROM tab WHERE f64_minmax < 7;
SELECT count(*) FROM tab WHERE f64_count_min < 7;
SELECT count(*) FROM tab WHERE f64_countmin < 7;
SELECT count(*) FROM tab WHERE f64_uniq < 7;
SELECT count(*) FROM tab WHERE f64 < 7.7;
SELECT count(*) FROM tab WHERE f64_tdigest < 7.7;
SELECT count(*) FROM tab WHERE f64_minmax < 7.7;
SELECT count(*) FROM tab WHERE f64_count_min < 7.7;
SELECT count(*) FROM tab WHERE f64_countmin < 7.7;
SELECT count(*) FROM tab WHERE f64_uniq < 7.7;
SELECT count(*) FROM tab WHERE f64 < '7';
SELECT count(*) FROM tab WHERE f64_tdigest < '7';
SELECT count(*) FROM tab WHERE f64_minmax < '7';
SELECT count(*) FROM tab WHERE f64_count_min < '7';
SELECT count(*) FROM tab WHERE f64_countmin < '7';
SELECT count(*) FROM tab WHERE f64_uniq < '7';
SELECT count(*) FROM tab WHERE f64 < '7.7';
SELECT count(*) FROM tab WHERE f64_tdigest < '7.7';
SELECT count(*) FROM tab WHERE f64_minmax < '7.7';
SELECT count(*) FROM tab WHERE f64_count_min < '7.7';
SELECT count(*) FROM tab WHERE f64_countmin < '7.7';
SELECT count(*) FROM tab WHERE f64_uniq < '7.7';
-- dt ----------------------------------------------------
@ -180,13 +180,13 @@ SELECT 'dt and =';
SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_minmax = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_countmin = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt = 7;
SELECT count(*) FROM tab WHERE dt_tdigest = 7;
SELECT count(*) FROM tab WHERE dt_minmax = 7;
SELECT count(*) FROM tab WHERE dt_count_min = 7;
SELECT count(*) FROM tab WHERE dt_countmin = 7;
SELECT count(*) FROM tab WHERE dt_uniq = 7;
SELECT 'dt and <';
@ -194,13 +194,13 @@ SELECT 'dt and <';
SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_minmax < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_countmin < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13';
SELECT count(*) FROM tab WHERE dt < 7;
SELECT count(*) FROM tab WHERE dt_tdigest < 7;
SELECT count(*) FROM tab WHERE dt_minmax < 7;
SELECT count(*) FROM tab WHERE dt_count_min < 7;
SELECT count(*) FROM tab WHERE dt_countmin < 7;
SELECT count(*) FROM tab WHERE dt_uniq < 7;
-- b ----------------------------------------------------
@ -210,25 +210,25 @@ SELECT 'b and =';
SELECT count(*) FROM tab WHERE b = true;
SELECT count(*) FROM tab WHERE b_tdigest = true;
SELECT count(*) FROM tab WHERE b_minmax = true;
SELECT count(*) FROM tab WHERE b_count_min = true;
SELECT count(*) FROM tab WHERE b_countmin = true;
SELECT count(*) FROM tab WHERE b_uniq = true;
SELECT count(*) FROM tab WHERE b = 'true';
SELECT count(*) FROM tab WHERE b_tdigest = 'true';
SELECT count(*) FROM tab WHERE b_minmax = 'true';
SELECT count(*) FROM tab WHERE b_count_min = 'true';
SELECT count(*) FROM tab WHERE b_countmin = 'true';
SELECT count(*) FROM tab WHERE b_uniq = 'true';
SELECT count(*) FROM tab WHERE b = 1;
SELECT count(*) FROM tab WHERE b_tdigest = 1;
SELECT count(*) FROM tab WHERE b_minmax = 1;
SELECT count(*) FROM tab WHERE b_count_min = 1;
SELECT count(*) FROM tab WHERE b_countmin = 1;
SELECT count(*) FROM tab WHERE b_uniq = 1;
SELECT count(*) FROM tab WHERE b = 1.1;
SELECT count(*) FROM tab WHERE b_tdigest = 1.1;
SELECT count(*) FROM tab WHERE b_minmax = 1.1;
SELECT count(*) FROM tab WHERE b_count_min = 1.1;
SELECT count(*) FROM tab WHERE b_countmin = 1.1;
SELECT count(*) FROM tab WHERE b_uniq = 1.1;
-- s ----------------------------------------------------
@ -238,13 +238,13 @@ SELECT 's and =';
SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE }
-- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported
-- SELECT count(*) FROM tab WHERE s_minmax = 7; -- not supported
SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE }
SELECT count(*) FROM tab WHERE s_countmin = 7; -- { serverError NO_COMMON_TYPE }
SELECT count(*) FROM tab WHERE s_uniq = 7; -- { serverError NO_COMMON_TYPE }
SELECT count(*) FROM tab WHERE s = '7';
-- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported
-- SELECT count(*) FROM tab WHERE s_minmax = '7'; -- not supported
SELECT count(*) FROM tab WHERE s_count_min = '7';
SELECT count(*) FROM tab WHERE s_countmin = '7';
SELECT count(*) FROM tab WHERE s_uniq = '7';
DROP TABLE tab;

Some files were not shown because too many files have changed in this diff.