diff --git a/.gitmodules b/.gitmodules index 43c878427ec..37b22527eb4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -243,3 +243,6 @@ [submodule "contrib/s2geometry"] path = contrib/s2geometry url = https://github.com/ClickHouse-Extras/s2geometry.git +[submodule "contrib/bzip2"] + path = contrib/bzip2 + url = https://github.com/ClickHouse-Extras/bzip2.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 24022c256ec..d3cb5f70c83 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -543,6 +543,7 @@ include (cmake/find/nuraft.cmake) include (cmake/find/yaml-cpp.cmake) include (cmake/find/s2geometry.cmake) include (cmake/find/nlp.cmake) +include (cmake/find/bzip2.cmake) if(NOT USE_INTERNAL_PARQUET_LIBRARY) set (ENABLE_ORC OFF CACHE INTERNAL "") diff --git a/base/common/memory.h b/base/common/memory.h deleted file mode 100644 index e82c019ceab..00000000000 --- a/base/common/memory.h +++ /dev/null @@ -1,57 +0,0 @@ -#pragma once - -#include -#include "defines.h" - -#if USE_JEMALLOC -# include -#endif - -#if !USE_JEMALLOC || JEMALLOC_VERSION_MAJOR < 4 -# include -#endif - - -namespace Memory -{ - -inline ALWAYS_INLINE void * newImpl(std::size_t size) -{ - auto * ptr = malloc(size); - if (likely(ptr != nullptr)) - return ptr; - - /// @note no std::get_new_handler logic implemented - throw std::bad_alloc{}; -} - -inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept -{ - return malloc(size); -} - -inline ALWAYS_INLINE void deleteImpl(void * ptr) noexcept -{ - free(ptr); -} - -#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 4 - -inline ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size) noexcept -{ - if (unlikely(ptr == nullptr)) - return; - - sdallocx(ptr, size, 0); -} - -#else - -inline ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size [[maybe_unused]]) noexcept -{ - free(ptr); -} - -#endif - -} diff --git a/cmake/find/bzip2.cmake b/cmake/find/bzip2.cmake new file mode 100644 index 00000000000..15532a67c00 --- /dev/null +++ b/cmake/find/bzip2.cmake @@ -0,0 +1,19 @@ +option(ENABLE_BZIP2 "Enable bzip2 compression support" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_BZIP2) + message (STATUS "bzip2 compression disabled") + return() +endif() + +if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/bzip2/bzlib.h") + message (WARNING "submodule contrib/bzip2 is missing. 
to fix try run: \n git submodule update --init --recursive")
+    message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal bzip2 library")
+    set (USE_BZIP2 0)
+    return()
+endif ()
+
+set (USE_BZIP2 1)
+set (BZIP2_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/bzip2")
+set (BZIP2_LIBRARY bzip2)
+
+message (STATUS "Using bzip2=${USE_BZIP2}: ${BZIP2_INCLUDE_DIR} : ${BZIP2_LIBRARY}")
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 82cddb0ace0..e6e098a05b3 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -334,6 +334,10 @@ if (USE_NLP)
     add_subdirectory(lemmagen-c-cmake)
 endif()
 
+if (USE_BZIP2)
+    add_subdirectory(bzip2-cmake)
+endif()
+
 if (USE_SQLITE)
     add_subdirectory(sqlite-cmake)
 endif()
diff --git a/contrib/NuRaft b/contrib/NuRaft
index 0ce94900930..7ecb16844af 160000
--- a/contrib/NuRaft
+++ b/contrib/NuRaft
@@ -1 +1 @@
-Subproject commit 0ce9490093021c63564cca159571a8b27772ad48
+Subproject commit 7ecb16844af6a9c283ad432d85ecc2e7d1544676
diff --git a/contrib/bzip2 b/contrib/bzip2
new file mode 160000
index 00000000000..bf905ea2251
--- /dev/null
+++ b/contrib/bzip2
@@ -0,0 +1 @@
+Subproject commit bf905ea2251191ff9911ae7ec0cfc35d41f9f7f6
diff --git a/contrib/bzip2-cmake/CMakeLists.txt b/contrib/bzip2-cmake/CMakeLists.txt
new file mode 100644
index 00000000000..a9d2efa43c1
--- /dev/null
+++ b/contrib/bzip2-cmake/CMakeLists.txt
@@ -0,0 +1,23 @@
+set(BZIP2_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/bzip2")
+set(BZIP2_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/bzip2")
+
+set(SRCS
+    "${BZIP2_SOURCE_DIR}/blocksort.c"
+    "${BZIP2_SOURCE_DIR}/huffman.c"
+    "${BZIP2_SOURCE_DIR}/crctable.c"
+    "${BZIP2_SOURCE_DIR}/randtable.c"
+    "${BZIP2_SOURCE_DIR}/compress.c"
+    "${BZIP2_SOURCE_DIR}/decompress.c"
+    "${BZIP2_SOURCE_DIR}/bzlib.c"
+)
+
+# From bzip2/CMakeLists.txt
+set(BZ_VERSION "1.0.7")
+configure_file (
+    "${BZIP2_SOURCE_DIR}/bz_version.h.in"
+    "${BZIP2_BINARY_DIR}/bz_version.h"
+)
+
+add_library(bzip2 ${SRCS})
+
+target_include_directories(bzip2 PUBLIC "${BZIP2_SOURCE_DIR}" "${BZIP2_BINARY_DIR}")
diff --git a/contrib/croaring-cmake/CMakeLists.txt b/contrib/croaring-cmake/CMakeLists.txt
index f4a5d8a01dc..f0cb378864b 100644
--- a/contrib/croaring-cmake/CMakeLists.txt
+++ b/contrib/croaring-cmake/CMakeLists.txt
@@ -24,3 +24,19 @@ add_library(roaring ${SRCS})
 target_include_directories(roaring PRIVATE "${LIBRARY_DIR}/include/roaring")
 target_include_directories(roaring SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include")
 target_include_directories(roaring SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/cpp")
+
+# We redirect the malloc/free family of functions to different functions that will track memory in ClickHouse.
+# This makes the library depend on 'clickhouse_common_io', a dependency that is not declared explicitly via 'target_link_libraries'.
+# And we check that all library dependencies are satisfied and all symbols are resolved if we build with shared libraries.
+# That's why we enable it only in the static build.
+# Also note that we exploit implicit function declarations.
+
+if (USE_STATIC_LIBRARIES)
+    target_compile_definitions(roaring PRIVATE
+        -Dmalloc=clickhouse_malloc
+        -Dcalloc=clickhouse_calloc
+        -Drealloc=clickhouse_realloc
+        -Dreallocarray=clickhouse_reallocarray
+        -Dfree=clickhouse_free
+        -Dposix_memalign=clickhouse_posix_memalign)
+endif ()
diff --git a/contrib/zlib-ng b/contrib/zlib-ng
index db232d30b4c..6a5e93b9007 160000
--- a/contrib/zlib-ng
+++ b/contrib/zlib-ng
@@ -1 +1 @@
-Subproject commit db232d30b4c72fd58e6d7eae2d12cebf9c3d90db
+Subproject commit 6a5e93b9007782115f7f7e5235dedc81c4f1facb
diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh
index 6419ea3659c..a0dcf07c41d 100755
--- a/docker/test/fasttest/run.sh
+++ b/docker/test/fasttest/run.sh
@@ -312,6 +312,7 @@ function run_tests
         01798_uniq_theta_sketch
         01799_long_uniq_theta_sketch
         01890_stem # depends on libstemmer_c
+        02003_compress_bz2 # depends on bzip2
         collate
         collation
         _orc_
diff --git a/docs/en/getting-started/example-datasets/index.md b/docs/en/getting-started/example-datasets/index.md
index 53007c33306..669bdf31a61 100644
--- a/docs/en/getting-started/example-datasets/index.md
+++ b/docs/en/getting-started/example-datasets/index.md
@@ -15,6 +15,7 @@ The list of documented datasets:
 - [Recipes](../../getting-started/example-datasets/recipes.md)
 - [OnTime](../../getting-started/example-datasets/ontime.md)
 - [New York Taxi Data](../../getting-started/example-datasets/nyc-taxi.md)
+- [UK Property Price Paid](../../getting-started/example-datasets/uk-price-paid.md)
 - [Star Schema Benchmark](../../getting-started/example-datasets/star-schema.md)
 - [WikiStat](../../getting-started/example-datasets/wikistat.md)
 - [Terabyte of Click Logs from Criteo](../../getting-started/example-datasets/criteo.md)
diff --git a/docs/en/getting-started/example-datasets/uk-price-paid.md b/docs/en/getting-started/example-datasets/uk-price-paid.md
new file mode 100644
index 00000000000..f328ea51a24
--- /dev/null
+++ b/docs/en/getting-started/example-datasets/uk-price-paid.md
@@ -0,0 +1,325 @@
+---
+toc_priority: 20
+toc_title: UK Property Price Paid
+---
+
+# UK Property Price Paid
+
+The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since 1995.
+The dataset is about 4 GiB uncompressed and takes only about 226 MiB when stored in ClickHouse.
+
+Source: https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads
+Description of the fields: https://www.gov.uk/guidance/about-the-price-paid-data
+
+Contains HM Land Registry data © Crown copyright and database right 2021. This data is licensed under the Open Government Licence v3.0.
+
+## Download the Dataset
+
+```
+wget http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv
+```
+
+The download takes about 2 minutes with a good internet connection.
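Before committing to the full import, one can sanity-check the file straight from the source with the `url` table function (a sketch, not part of the original walkthrough; it assumes the ClickHouse server has outbound network access and reads every column as `String` to sidestep parsing issues; note it scans the whole 4 GiB file):

```
SELECT count()
FROM url(
    'http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
    'CSV',
    'uuid String, price String, time String, postcode String, a String, b String, c String,
     addr1 String, addr2 String, street String, locality String, town String, district String,
     county String, d String, e String')
```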
+
+## Create the Table
+
+```
+CREATE TABLE uk_price_paid
+(
+    price UInt32,
+    date Date,
+    postcode1 LowCardinality(String),
+    postcode2 LowCardinality(String),
+    type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0),
+    is_new UInt8,
+    duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0),
+    addr1 String,
+    addr2 String,
+    street LowCardinality(String),
+    locality LowCardinality(String),
+    town LowCardinality(String),
+    district LowCardinality(String),
+    county LowCardinality(String),
+    category UInt8
+) ENGINE = MergeTree ORDER BY (postcode1, postcode2, addr1, addr2);
+```
+
+## Preprocess and Import Data
+
+We will use the `clickhouse-local` tool for data preprocessing and `clickhouse-client` to upload it.
+
+In this example, we define the structure of the source data from the CSV file and specify a query to preprocess the data with `clickhouse-local`.
+
+The preprocessing is:
+- splitting the postcode into two different columns `postcode1` and `postcode2`, which is better for storage and queries;
+- converting the `time` field to date, as it only contains 00:00 time;
+- ignoring the `uuid` field, because we don't need it for analysis;
+- transforming `type` and `duration` to more readable Enum fields with the `transform` function (illustrated right after this section);
+- transforming the `is_new` and `category` fields from single-character strings (`Y`/`N` and `A`/`B`) to UInt8 fields with 0 and 1.
+
+Preprocessed data is piped directly to `clickhouse-client` and inserted into the ClickHouse table in streaming fashion.
+
+```
+clickhouse-local --input-format CSV --structure '
+    uuid String,
+    price UInt32,
+    time DateTime,
+    postcode String,
+    a String,
+    b String,
+    c String,
+    addr1 String,
+    addr2 String,
+    street String,
+    locality String,
+    town String,
+    district String,
+    county String,
+    d String,
+    e String
+' --query "
+    WITH splitByChar(' ', postcode) AS p
+    SELECT
+        price,
+        toDate(time) AS date,
+        p[1] AS postcode1,
+        p[2] AS postcode2,
+        transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
+        b = 'Y' AS is_new,
+        transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
+        addr1,
+        addr2,
+        street,
+        locality,
+        town,
+        district,
+        county,
+        d = 'B' AS category
+    FROM table" --date_time_input_format best_effort < pp-complete.csv | clickhouse-client --query "INSERT INTO uk_price_paid FORMAT TSV"
+```
+
+The import takes about 40 seconds.
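To see what the two key preprocessing functions do before running the full pipeline, here is a tiny illustrative query (the postcode is just an example value):

```
SELECT
    splitByChar(' ', 'SW1A 1AA') AS postcode_parts,   -- ['SW1A', '1AA']
    transform('T', ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,   -- 'terraced'
    'Y' = 'Y' AS is_new   -- 1
```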
+ +## Validate the Data + +``` +SELECT count() FROM uk_price_paid +26248711 +``` + +The size of dataset in ClickHouse is just 226 MiB: + +``` +SELECT formatReadableSize(total_bytes) FROM system.tables WHERE name = 'uk_price_paid' +226.40 MiB +``` + +## Run Some Queries + +### Average price per year: + +``` +SELECT toYear(date) AS year, round(avg(price)) AS price, bar(price, 0, 1000000, 80) FROM uk_price_paid GROUP BY year ORDER BY year + +┌─year─┬──price─┬─bar(round(avg(price)), 0, 1000000, 80)─┐ +│ 1995 │ 67932 │ █████▍ │ +│ 1996 │ 71505 │ █████▋ │ +│ 1997 │ 78532 │ ██████▎ │ +│ 1998 │ 85435 │ ██████▋ │ +│ 1999 │ 96036 │ ███████▋ │ +│ 2000 │ 107478 │ ████████▌ │ +│ 2001 │ 118886 │ █████████▌ │ +│ 2002 │ 137940 │ ███████████ │ +│ 2003 │ 155888 │ ████████████▍ │ +│ 2004 │ 178885 │ ██████████████▎ │ +│ 2005 │ 189350 │ ███████████████▏ │ +│ 2006 │ 203528 │ ████████████████▎ │ +│ 2007 │ 219377 │ █████████████████▌ │ +│ 2008 │ 217056 │ █████████████████▎ │ +│ 2009 │ 213419 │ █████████████████ │ +│ 2010 │ 236110 │ ██████████████████▊ │ +│ 2011 │ 232804 │ ██████████████████▌ │ +│ 2012 │ 238366 │ ███████████████████ │ +│ 2013 │ 256931 │ ████████████████████▌ │ +│ 2014 │ 279917 │ ██████████████████████▍ │ +│ 2015 │ 297264 │ ███████████████████████▋ │ +│ 2016 │ 313197 │ █████████████████████████ │ +│ 2017 │ 346070 │ ███████████████████████████▋ │ +│ 2018 │ 350117 │ ████████████████████████████ │ +│ 2019 │ 351010 │ ████████████████████████████ │ +│ 2020 │ 368974 │ █████████████████████████████▌ │ +│ 2021 │ 384351 │ ██████████████████████████████▋ │ +└──────┴────────┴────────────────────────────────────────┘ + +27 rows in set. Elapsed: 0.027 sec. Processed 26.25 million rows, 157.49 MB (955.96 million rows/s., 5.74 GB/s.) +``` + +### Average price per year in London: + +``` +SELECT toYear(date) AS year, round(avg(price)) AS price, bar(price, 0, 2000000, 100) FROM uk_price_paid WHERE town = 'LONDON' GROUP BY year ORDER BY year + +┌─year─┬───price─┬─bar(round(avg(price)), 0, 2000000, 100)───────────────┐ +│ 1995 │ 109112 │ █████▍ │ +│ 1996 │ 118667 │ █████▊ │ +│ 1997 │ 136518 │ ██████▋ │ +│ 1998 │ 152983 │ ███████▋ │ +│ 1999 │ 180633 │ █████████ │ +│ 2000 │ 215830 │ ██████████▋ │ +│ 2001 │ 232996 │ ███████████▋ │ +│ 2002 │ 263672 │ █████████████▏ │ +│ 2003 │ 278394 │ █████████████▊ │ +│ 2004 │ 304665 │ ███████████████▏ │ +│ 2005 │ 322875 │ ████████████████▏ │ +│ 2006 │ 356192 │ █████████████████▋ │ +│ 2007 │ 404055 │ ████████████████████▏ │ +│ 2008 │ 420741 │ █████████████████████ │ +│ 2009 │ 427754 │ █████████████████████▍ │ +│ 2010 │ 480306 │ ████████████████████████ │ +│ 2011 │ 496274 │ ████████████████████████▋ │ +│ 2012 │ 519441 │ █████████████████████████▊ │ +│ 2013 │ 616209 │ ██████████████████████████████▋ │ +│ 2014 │ 724144 │ ████████████████████████████████████▏ │ +│ 2015 │ 792112 │ ███████████████████████████████████████▌ │ +│ 2016 │ 843568 │ ██████████████████████████████████████████▏ │ +│ 2017 │ 982566 │ █████████████████████████████████████████████████▏ │ +│ 2018 │ 1016845 │ ██████████████████████████████████████████████████▋ │ +│ 2019 │ 1043277 │ ████████████████████████████████████████████████████▏ │ +│ 2020 │ 1003963 │ ██████████████████████████████████████████████████▏ │ +│ 2021 │ 940794 │ ███████████████████████████████████████████████ │ +└──────┴─────────┴───────────────────────────────────────────────────────┘ + +27 rows in set. Elapsed: 0.024 sec. Processed 26.25 million rows, 76.88 MB (1.08 billion rows/s., 3.15 GB/s.) +``` + +Something happened in 2013. I don't have a clue. 
Maybe you have a clue what happened in 2020? + +### The most expensive neighborhoods: + +``` +SELECT + town, + district, + count() AS c, + round(avg(price)) AS price, + bar(price, 0, 5000000, 100) +FROM uk_price_paid +WHERE date >= '2020-01-01' +GROUP BY + town, + district +HAVING c >= 100 +ORDER BY price DESC +LIMIT 100 + +Query id: df8c0a98-4713-4f0e-9690-5f73b52f7206 + +┌─town─────────────────┬─district───────────────┬────c─┬───price─┬─bar(round(avg(price)), 0, 5000000, 100)────────────────────────────┐ +│ LONDON │ CITY OF WESTMINSTER │ 3372 │ 3305225 │ ██████████████████████████████████████████████████████████████████ │ +│ LONDON │ CITY OF LONDON │ 257 │ 3294478 │ █████████████████████████████████████████████████████████████████▊ │ +│ LONDON │ KENSINGTON AND CHELSEA │ 2367 │ 2342422 │ ██████████████████████████████████████████████▋ │ +│ LEATHERHEAD │ ELMBRIDGE │ 108 │ 1927143 │ ██████████████████████████████████████▌ │ +│ VIRGINIA WATER │ RUNNYMEDE │ 142 │ 1868819 │ █████████████████████████████████████▍ │ +│ LONDON │ CAMDEN │ 2815 │ 1736788 │ ██████████████████████████████████▋ │ +│ THORNTON HEATH │ CROYDON │ 521 │ 1733051 │ ██████████████████████████████████▋ │ +│ WINDLESHAM │ SURREY HEATH │ 103 │ 1717255 │ ██████████████████████████████████▎ │ +│ BARNET │ ENFIELD │ 115 │ 1503458 │ ██████████████████████████████ │ +│ OXFORD │ SOUTH OXFORDSHIRE │ 298 │ 1275200 │ █████████████████████████▌ │ +│ LONDON │ ISLINGTON │ 2458 │ 1274308 │ █████████████████████████▍ │ +│ COBHAM │ ELMBRIDGE │ 364 │ 1260005 │ █████████████████████████▏ │ +│ LONDON │ HOUNSLOW │ 618 │ 1215682 │ ████████████████████████▎ │ +│ ASCOT │ WINDSOR AND MAIDENHEAD │ 379 │ 1215146 │ ████████████████████████▎ │ +│ LONDON │ RICHMOND UPON THAMES │ 654 │ 1207551 │ ████████████████████████▏ │ +│ BEACONSFIELD │ BUCKINGHAMSHIRE │ 307 │ 1186220 │ ███████████████████████▋ │ +│ RICHMOND │ RICHMOND UPON THAMES │ 805 │ 1100420 │ ██████████████████████ │ +│ LONDON │ HAMMERSMITH AND FULHAM │ 2888 │ 1062959 │ █████████████████████▎ │ +│ WEYBRIDGE │ ELMBRIDGE │ 607 │ 1027161 │ ████████████████████▌ │ +│ RADLETT │ HERTSMERE │ 265 │ 1015896 │ ████████████████████▎ │ +│ SALCOMBE │ SOUTH HAMS │ 124 │ 1014393 │ ████████████████████▎ │ +│ BURFORD │ WEST OXFORDSHIRE │ 102 │ 993100 │ ███████████████████▋ │ +│ ESHER │ ELMBRIDGE │ 454 │ 969770 │ ███████████████████▍ │ +│ HINDHEAD │ WAVERLEY │ 128 │ 967786 │ ███████████████████▎ │ +│ BROCKENHURST │ NEW FOREST │ 121 │ 967046 │ ███████████████████▎ │ +│ LEATHERHEAD │ GUILDFORD │ 191 │ 964489 │ ███████████████████▎ │ +│ GERRARDS CROSS │ BUCKINGHAMSHIRE │ 376 │ 958555 │ ███████████████████▏ │ +│ EAST MOLESEY │ ELMBRIDGE │ 181 │ 943457 │ ██████████████████▋ │ +│ OLNEY │ MILTON KEYNES │ 220 │ 942892 │ ██████████████████▋ │ +│ CHALFONT ST GILES │ BUCKINGHAMSHIRE │ 135 │ 926950 │ ██████████████████▌ │ +│ HENLEY-ON-THAMES │ SOUTH OXFORDSHIRE │ 509 │ 905732 │ ██████████████████ │ +│ KINGSTON UPON THAMES │ KINGSTON UPON THAMES │ 889 │ 899689 │ █████████████████▊ │ +│ BELVEDERE │ BEXLEY │ 313 │ 895336 │ █████████████████▊ │ +│ CRANBROOK │ TUNBRIDGE WELLS │ 404 │ 888190 │ █████████████████▋ │ +│ LONDON │ EALING │ 2460 │ 865893 │ █████████████████▎ │ +│ MAIDENHEAD │ BUCKINGHAMSHIRE │ 114 │ 863814 │ █████████████████▎ │ +│ LONDON │ MERTON │ 1958 │ 857192 │ █████████████████▏ │ +│ GUILDFORD │ WAVERLEY │ 131 │ 854447 │ █████████████████ │ +│ LONDON │ HACKNEY │ 3088 │ 846571 │ ████████████████▊ │ +│ LYMM │ WARRINGTON │ 285 │ 839920 │ ████████████████▋ │ +│ HARPENDEN │ ST ALBANS │ 606 │ 836994 │ ████████████████▋ │ +│ 
LONDON │ WANDSWORTH │ 6113 │ 832292 │ ████████████████▋ │ +│ LONDON │ SOUTHWARK │ 3612 │ 831319 │ ████████████████▋ │ +│ BERKHAMSTED │ DACORUM │ 502 │ 830356 │ ████████████████▌ │ +│ KINGS LANGLEY │ DACORUM │ 137 │ 821358 │ ████████████████▍ │ +│ TONBRIDGE │ TUNBRIDGE WELLS │ 339 │ 806736 │ ████████████████▏ │ +│ EPSOM │ REIGATE AND BANSTEAD │ 157 │ 805903 │ ████████████████ │ +│ WOKING │ GUILDFORD │ 161 │ 803283 │ ████████████████ │ +│ STOCKBRIDGE │ TEST VALLEY │ 168 │ 801973 │ ████████████████ │ +│ TEDDINGTON │ RICHMOND UPON THAMES │ 539 │ 798591 │ ███████████████▊ │ +│ OXFORD │ VALE OF WHITE HORSE │ 329 │ 792907 │ ███████████████▋ │ +│ LONDON │ BARNET │ 3624 │ 789583 │ ███████████████▋ │ +│ TWICKENHAM │ RICHMOND UPON THAMES │ 1090 │ 787760 │ ███████████████▋ │ +│ LUTON │ CENTRAL BEDFORDSHIRE │ 196 │ 786051 │ ███████████████▋ │ +│ TONBRIDGE │ MAIDSTONE │ 277 │ 785746 │ ███████████████▋ │ +│ TOWCESTER │ WEST NORTHAMPTONSHIRE │ 186 │ 783532 │ ███████████████▋ │ +│ LONDON │ LAMBETH │ 4832 │ 783422 │ ███████████████▋ │ +│ LUTTERWORTH │ HARBOROUGH │ 515 │ 781775 │ ███████████████▋ │ +│ WOODSTOCK │ WEST OXFORDSHIRE │ 135 │ 777499 │ ███████████████▌ │ +│ ALRESFORD │ WINCHESTER │ 196 │ 775577 │ ███████████████▌ │ +│ LONDON │ NEWHAM │ 2942 │ 768551 │ ███████████████▎ │ +│ ALDERLEY EDGE │ CHESHIRE EAST │ 168 │ 768280 │ ███████████████▎ │ +│ MARLOW │ BUCKINGHAMSHIRE │ 301 │ 762784 │ ███████████████▎ │ +│ BILLINGSHURST │ CHICHESTER │ 134 │ 760920 │ ███████████████▏ │ +│ LONDON │ TOWER HAMLETS │ 4183 │ 759635 │ ███████████████▏ │ +│ MIDHURST │ CHICHESTER │ 245 │ 759101 │ ███████████████▏ │ +│ THAMES DITTON │ ELMBRIDGE │ 227 │ 753347 │ ███████████████ │ +│ POTTERS BAR │ WELWYN HATFIELD │ 163 │ 752926 │ ███████████████ │ +│ REIGATE │ REIGATE AND BANSTEAD │ 555 │ 740961 │ ██████████████▋ │ +│ TADWORTH │ REIGATE AND BANSTEAD │ 477 │ 738997 │ ██████████████▋ │ +│ SEVENOAKS │ SEVENOAKS │ 1074 │ 734658 │ ██████████████▋ │ +│ PETWORTH │ CHICHESTER │ 138 │ 732432 │ ██████████████▋ │ +│ BOURNE END │ BUCKINGHAMSHIRE │ 127 │ 730742 │ ██████████████▌ │ +│ PURLEY │ CROYDON │ 540 │ 727721 │ ██████████████▌ │ +│ OXTED │ TANDRIDGE │ 320 │ 726078 │ ██████████████▌ │ +│ LONDON │ HARINGEY │ 2988 │ 724573 │ ██████████████▍ │ +│ BANSTEAD │ REIGATE AND BANSTEAD │ 373 │ 713834 │ ██████████████▎ │ +│ PINNER │ HARROW │ 480 │ 712166 │ ██████████████▏ │ +│ MALMESBURY │ WILTSHIRE │ 293 │ 707747 │ ██████████████▏ │ +│ RICKMANSWORTH │ THREE RIVERS │ 732 │ 705400 │ ██████████████ │ +│ SLOUGH │ BUCKINGHAMSHIRE │ 359 │ 705002 │ ██████████████ │ +│ GREAT MISSENDEN │ BUCKINGHAMSHIRE │ 214 │ 704904 │ ██████████████ │ +│ READING │ SOUTH OXFORDSHIRE │ 295 │ 701697 │ ██████████████ │ +│ HYTHE │ FOLKESTONE AND HYTHE │ 457 │ 700334 │ ██████████████ │ +│ WELWYN │ WELWYN HATFIELD │ 217 │ 699649 │ █████████████▊ │ +│ CHIGWELL │ EPPING FOREST │ 242 │ 697869 │ █████████████▊ │ +│ BARNET │ BARNET │ 906 │ 695680 │ █████████████▊ │ +│ HASLEMERE │ CHICHESTER │ 120 │ 694028 │ █████████████▊ │ +│ LEATHERHEAD │ MOLE VALLEY │ 748 │ 692026 │ █████████████▋ │ +│ LONDON │ BRENT │ 1945 │ 690799 │ █████████████▋ │ +│ HASLEMERE │ WAVERLEY │ 258 │ 690765 │ █████████████▋ │ +│ NORTHWOOD │ HILLINGDON │ 252 │ 690753 │ █████████████▋ │ +│ WALTON-ON-THAMES │ ELMBRIDGE │ 871 │ 689431 │ █████████████▋ │ +│ INGATESTONE │ BRENTWOOD │ 150 │ 688345 │ █████████████▋ │ +│ OXFORD │ OXFORD │ 1761 │ 686114 │ █████████████▋ │ +│ CHISLEHURST │ BROMLEY │ 410 │ 682892 │ █████████████▋ │ +│ KINGS LANGLEY │ THREE RIVERS │ 109 │ 682320 │ █████████████▋ │ +│ ASHTEAD │ MOLE VALLEY │ 
280 │  680483 │ █████████████▌ │
+│ WOKING               │ SURREY HEATH           │  269 │  679035 │ █████████████▌ │
+│ ASCOT                │ BRACKNELL FOREST       │  160 │  678632 │ █████████████▌ │
+└──────────────────────┴────────────────────────┴──────┴─────────┴────────────────────────────────────────────────────────────────────┘
+
+100 rows in set. Elapsed: 0.039 sec. Processed 26.25 million rows, 278.03 MB (674.32 million rows/s., 7.14 GB/s.)
+```
+
+### Test it in Playground
+
+The data is uploaded to ClickHouse Playground, [example](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIHRvd24sIGRpc3RyaWN0LCBjb3VudCgpIEFTIGMsIHJvdW5kKGF2ZyhwcmljZSkpIEFTIHByaWNlLCBiYXIocHJpY2UsIDAsIDUwMDAwMDAsIDEwMCkgRlJPTSB1a19wcmljZV9wYWlkIFdIRVJFIGRhdGUgPj0gJzIwMjAtMDEtMDEnIEdST1VQIEJZIHRvd24sIGRpc3RyaWN0IEhBVklORyBjID49IDEwMCBPUkRFUiBCWSBwcmljZSBERVNDIExJTUlUIDEwMA==).
diff --git a/docs/en/sql-reference/functions/geo/coordinates.md b/docs/en/sql-reference/functions/geo/coordinates.md
index fd1d46e48ff..2d9bb41fc27 100644
--- a/docs/en/sql-reference/functions/geo/coordinates.md
+++ b/docs/en/sql-reference/functions/geo/coordinates.md
@@ -41,6 +41,13 @@ SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)
 └───────────────────────────────────────────────────────────────────┘
 ```
 
+## geoDistance {#geodistance}
+
+Similar to `greatCircleDistance`, but calculates the distance on the WGS-84 ellipsoid instead of a sphere. This is a more precise approximation of the Earth's geoid.
+The performance is the same as for `greatCircleDistance` (no performance drawback). It is recommended to use `geoDistance` to calculate distances on Earth.
+
+Technical note: for close enough points, we calculate the distance using a planar approximation with the metric on the tangent plane at the midpoint of the coordinates.
+
 ## greatCircleAngle {#greatcircleangle}
 
 Calculates the central angle between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).
diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md
new file mode 100644
index 00000000000..07a13fc23c4
--- /dev/null
+++ b/docs/en/sql-reference/statements/alter/projection.md
@@ -0,0 +1,23 @@
+---
+toc_priority: 49
+toc_title: PROJECTION
+---
+
+# Manipulating Projections {#manipulations-with-projections}
+
+The following operations are available:
+
+- `ALTER TABLE [db].name ADD PROJECTION name AS SELECT [GROUP BY] [ORDER BY]` - Adds a projection description to the table's metadata.
+
+- `ALTER TABLE [db].name DROP PROJECTION name` - Removes a projection description from the table's metadata and deletes the projection files from disk.
+
+- `ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
+
+- `ALTER TABLE [db.]table CLEAR PROJECTION name IN PARTITION partition_name` - Deletes the projection files from disk without removing the description.
+
+The commands `ADD`, `DROP` and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
+
+They are also replicated, syncing projection metadata via ZooKeeper.
+
+!!! note "Note"
+    Projection manipulation is supported only for tables with the [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants).
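To make the command list above concrete, here is an illustrative sequence with a hypothetical `visits` table (the projection body is parenthesized, the same form used for projection declarations in `CREATE TABLE`; the partition name depends on your partitioning key):

```
ALTER TABLE visits ADD PROJECTION by_user (SELECT user_id, sum(duration) GROUP BY user_id);
ALTER TABLE visits MATERIALIZE PROJECTION by_user IN PARTITION '2021-08';
ALTER TABLE visits CLEAR PROJECTION by_user IN PARTITION '2021-08';
ALTER TABLE visits DROP PROJECTION by_user;
```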
diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md index 391d28d5a89..f23c2589c82 100644 --- a/docs/ru/development/developer-instruction.md +++ b/docs/ru/development/developer-instruction.md @@ -140,7 +140,7 @@ ClickHouse использует для сборки некоторое коли Впрочем, наша среда continuous integration проверяет около десятка вариантов сборки, включая gcc, но сборка с помощью gcc непригодна для использования в продакшене. -On Ubuntu/Debian you can use the automatic installation script (check [official webpage](https://apt.llvm.org/)) +На Ubuntu и Debian вы можете использовать скрипт для автоматической установки (см. [официальный сайт](https://apt.llvm.org/)) ```bash sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index 0e23c2f743f..921e1a9e73b 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -2090,9 +2090,9 @@ SELECT tcpPort(); ## currentProfiles {#current-profiles} -Возвращает список [профилей настроек](../../operations/access-rights.md#settings-profiles-management) для текущего пользователя. +Возвращает список [профилей настроек](../../operations/access-rights.md#settings-profiles-management) для текущего пользователя. -Для изменения текущего профиля настроек может быть использована команда [SET PROFILE](../../sql-reference/statements/set.md#set-statement#query-set). Если команда `SET PROFILE` не применялась, функция возвращает профили, указанные при определении текущего пользователя (см. [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement)). +Для изменения текущего профиля настроек может быть использована команда SET PROFILE. Если команда `SET PROFILE` не применялась, функция возвращает профили, указанные при определении текущего пользователя (см. [CREATE USER](../../sql-reference/statements/create/user.md#create-user-statement)). **Синтаксис** @@ -2102,7 +2102,7 @@ currentProfiles() **Возвращаемое значение** -- Список профилей настроек для текущего пользователя. +- Список профилей настроек для текущего пользователя. Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2118,7 +2118,7 @@ enabledProfiles() **Возвращаемое значение** -- Список доступных профилей для текущего пользователя. +- Список доступных профилей для текущего пользователя. Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). @@ -2134,6 +2134,6 @@ defaultProfiles() **Возвращаемое значение** -- Список профилей по умолчанию. +- Список профилей по умолчанию. -Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). \ No newline at end of file +Тип: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)). 
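Since the documentation above distinguishes three closely related profile functions, a side-by-side call shows the difference at a glance (illustrative; the output depends on the current user's settings profiles):

```
SELECT currentProfiles(), enabledProfiles(), defaultProfiles();
```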
diff --git a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h index 067daf6dc3a..95c7e6075d7 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h +++ b/src/AggregateFunctions/AggregateFunctionGroupBitmapData.h @@ -44,7 +44,7 @@ private: void toLarge() { - rb = std::make_unique(); + rb = std::make_shared(); for (const auto & x : small) rb->add(static_cast(x.getValue())); small.clear(); @@ -114,7 +114,7 @@ public: readVarUInt(size, in); std::unique_ptr buf(new char[size]); in.readStrict(buf.get(), size); - rb = std::make_unique(RoaringBitmap::read(buf.get())); + rb = std::make_shared(RoaringBitmap::read(buf.get())); } } @@ -141,7 +141,7 @@ public: */ std::shared_ptr getNewRoaringBitmapFromSmall() const { - std::shared_ptr ret = std::make_unique(); + std::shared_ptr ret = std::make_shared(); for (const auto & x : small) ret->add(static_cast(x.getValue())); return ret; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a99201e4aaa..6c10d3e2f2b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -158,6 +158,8 @@ else() target_link_libraries (clickhouse_new_delete PRIVATE clickhouse_common_io jemalloc) endif() +target_link_libraries (clickhouse_common_io PRIVATE jemalloc) + add_subdirectory(Common/ZooKeeper) add_subdirectory(Common/Config) @@ -479,6 +481,11 @@ if (USE_NLP) dbms_target_link_libraries (PUBLIC lemmagen) endif() +if (USE_BZIP2) + target_link_libraries (clickhouse_common_io PRIVATE ${BZIP2_LIBRARY}) + target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${BZIP2_INCLUDE_DIR}) +endif() + include ("${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake") if (ENABLE_TESTS AND USE_GTEST) diff --git a/src/Common/CurrentMemoryTracker.cpp b/src/Common/CurrentMemoryTracker.cpp index d38a5a9c70c..bf0745e667e 100644 --- a/src/Common/CurrentMemoryTracker.cpp +++ b/src/Common/CurrentMemoryTracker.cpp @@ -3,6 +3,7 @@ #include + namespace { @@ -36,6 +37,7 @@ namespace if (current_thread) { current_thread->untracked_memory += size; + if (current_thread->untracked_memory > current_thread->untracked_memory_limit) { /// Zero untracked before track. 
If tracker throws out-of-limit we would be able to alloc up to untracked_memory_limit bytes @@ -54,6 +56,12 @@ namespace } } +void check() +{ + if (auto * memory_tracker = getMemoryTracker()) + memory_tracker->allocImpl(0, true); +} + void alloc(Int64 size) { bool throw_if_memory_exceeded = true; diff --git a/src/Common/CurrentMemoryTracker.h b/src/Common/CurrentMemoryTracker.h index 5090b7c3687..2f9ace4291f 100644 --- a/src/Common/CurrentMemoryTracker.h +++ b/src/Common/CurrentMemoryTracker.h @@ -9,4 +9,5 @@ namespace CurrentMemoryTracker void allocNoThrow(Int64 size); void realloc(Int64 old_size, Int64 new_size); void free(Int64 size); + void check(); } diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index 7904d0ac61d..04f10fb536a 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -561,6 +561,8 @@ M(591, SQLITE_ENGINE_ERROR) \ M(592, DATA_ENCRYPTION_ERROR) \ M(593, ZERO_COPY_REPLICATION_ERROR) \ + M(594, BZIP2_STREAM_DECODER_FAILED) \ + M(595, BZIP2_STREAM_ENCODER_FAILED) \ \ M(998, POSTGRESQL_CONNECTION_FAILURE) \ M(999, KEEPER_EXCEPTION) \ diff --git a/src/Common/clickhouse_malloc.cpp b/src/Common/clickhouse_malloc.cpp new file mode 100644 index 00000000000..3f69ebdf58d --- /dev/null +++ b/src/Common/clickhouse_malloc.cpp @@ -0,0 +1,55 @@ +#include +#include + + +/** These functions can be substituted instead of regular ones when memory tracking is needed. + */ + +extern "C" void * clickhouse_malloc(size_t size) +{ + void * res = malloc(size); + if (res) + Memory::trackMemory(size); + return res; +} + +extern "C" void * clickhouse_calloc(size_t number_of_members, size_t size) +{ + void * res = calloc(number_of_members, size); + if (res) + Memory::trackMemory(number_of_members * size); + return res; +} + +extern "C" void * clickhouse_realloc(void * ptr, size_t size) +{ + if (ptr) + Memory::untrackMemory(ptr); + void * res = realloc(ptr, size); + if (res) + Memory::trackMemory(size); + return res; +} + +extern "C" void * clickhouse_reallocarray(void * ptr, size_t number_of_members, size_t size) +{ + size_t real_size = 0; + if (__builtin_mul_overflow(number_of_members, size, &real_size)) + return nullptr; + + return clickhouse_realloc(ptr, real_size); +} + +extern "C" void clickhouse_free(void * ptr) +{ + Memory::untrackMemory(ptr); + free(ptr); +} + +extern "C" int clickhouse_posix_memalign(void ** memptr, size_t alignment, size_t size) +{ + int res = posix_memalign(memptr, alignment, size); + if (res == 0) + Memory::trackMemory(size); + return res; +} diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 0665b1717ed..bf118e2507f 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -19,3 +19,4 @@ #cmakedefine01 USE_DATASKETCHES #cmakedefine01 USE_YAML_CPP #cmakedefine01 CLICKHOUSE_SPLIT_BINARY +#cmakedefine01 USE_BZIP2 diff --git a/src/Common/memory.cpp b/src/Common/memory.cpp new file mode 100644 index 00000000000..a79d3572071 --- /dev/null +++ b/src/Common/memory.cpp @@ -0,0 +1,25 @@ +#if defined(OS_DARWIN) && defined(BUNDLED_STATIC_JEMALLOC) + +extern "C" +{ + extern void zone_register(); +} + +struct InitializeJemallocZoneAllocatorForOSX +{ + InitializeJemallocZoneAllocatorForOSX() + { + /// In case of OSX jemalloc register itself as a default zone allocator. + /// + /// But when you link statically then zone_register() will not be called, + /// and even will be optimized out: + /// + /// It is ok to call it twice (i.e. 
in case of shared libraries) + /// Since zone_register() is a no-op if the default zone is already replaced with something. + /// + /// https://github.com/jemalloc/jemalloc/issues/708 + zone_register(); + } +} initializeJemallocZoneAllocatorForOSX; + +#endif diff --git a/src/Common/memory.h b/src/Common/memory.h new file mode 100644 index 00000000000..84c6af9a1be --- /dev/null +++ b/src/Common/memory.h @@ -0,0 +1,108 @@ +#pragma once + +#include +#include + +#include + +#if USE_JEMALLOC +# include +#endif + +#if !USE_JEMALLOC || JEMALLOC_VERSION_MAJOR < 4 +# include +#endif + + +namespace Memory +{ + +inline ALWAYS_INLINE void * newImpl(std::size_t size) +{ + auto * ptr = malloc(size); + if (likely(ptr != nullptr)) + return ptr; + + /// @note no std::get_new_handler logic implemented + throw std::bad_alloc{}; +} + +inline ALWAYS_INLINE void * newNoExept(std::size_t size) noexcept +{ + return malloc(size); +} + +inline ALWAYS_INLINE void deleteImpl(void * ptr) noexcept +{ + free(ptr); +} + +#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 4 + +inline ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size) noexcept +{ + if (unlikely(ptr == nullptr)) + return; + + sdallocx(ptr, size, 0); +} + +#else + +inline ALWAYS_INLINE void deleteSized(void * ptr, std::size_t size [[maybe_unused]]) noexcept +{ + free(ptr); +} + +#endif + +#if defined(OS_LINUX) +# include +#elif defined(OS_DARWIN) +# include +#endif + + +inline ALWAYS_INLINE size_t getActualAllocationSize(size_t size) +{ + size_t actual_size = size; + +#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 5 + /// The nallocx() function allocates no memory, but it performs the same size computation as the mallocx() function + /// @note je_mallocx() != je_malloc(). It's expected they don't differ much in allocation logic. + if (likely(size != 0)) + actual_size = nallocx(size, 0); +#endif + + return actual_size; +} + +inline ALWAYS_INLINE void trackMemory(std::size_t size) +{ + std::size_t actual_size = getActualAllocationSize(size); + CurrentMemoryTracker::allocNoThrow(actual_size); +} + +inline ALWAYS_INLINE void untrackMemory(void * ptr [[maybe_unused]], std::size_t size [[maybe_unused]] = 0) noexcept +{ + try + { +#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 5 + /// @note It's also possible to use je_malloc_usable_size() here. + if (likely(ptr != nullptr)) + CurrentMemoryTracker::free(sallocx(ptr, 0)); +#else + if (size) + CurrentMemoryTracker::free(size); +# if defined(_GNU_SOURCE) + /// It's innaccurate resource free for sanitizers. malloc_usable_size() result is greater or equal to allocated size. + else + CurrentMemoryTracker::free(malloc_usable_size(ptr)); +# endif +#endif + } + catch (...) + {} +} + +} diff --git a/src/Common/new_delete.cpp b/src/Common/new_delete.cpp index 56173fb108a..fa32d56b350 100644 --- a/src/Common/new_delete.cpp +++ b/src/Common/new_delete.cpp @@ -1,117 +1,34 @@ -#include -#include - -#include +#include #include -#if defined(OS_LINUX) -# include -#elif defined(OS_DARWIN) -# include -#endif - -#if defined(OS_DARWIN) && defined(BUNDLED_STATIC_JEMALLOC) -extern "C" -{ -extern void zone_register(); -} - -struct InitializeJemallocZoneAllocatorForOSX -{ - InitializeJemallocZoneAllocatorForOSX() - { - /// In case of OSX jemalloc register itself as a default zone allocator. - /// - /// But when you link statically then zone_register() will not be called, - /// and even will be optimized out: - /// - /// It is ok to call it twice (i.e. 
in case of shared libraries) - /// Since zone_register() is a no-op if the default zone is already replaced with something. - /// - /// https://github.com/jemalloc/jemalloc/issues/708 - zone_register(); - } -} initializeJemallocZoneAllocatorForOSX; -#endif /// Replace default new/delete with memory tracking versions. /// @sa https://en.cppreference.com/w/cpp/memory/new/operator_new /// https://en.cppreference.com/w/cpp/memory/new/operator_delete -namespace Memory -{ - -inline ALWAYS_INLINE size_t getActualAllocationSize(size_t size) -{ - size_t actual_size = size; - -#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 5 - /// The nallocx() function allocates no memory, but it performs the same size computation as the mallocx() function - /// @note je_mallocx() != je_malloc(). It's expected they don't differ much in allocation logic. - if (likely(size != 0)) - actual_size = nallocx(size, 0); -#endif - - return actual_size; -} - -inline ALWAYS_INLINE void trackMemory(std::size_t size) -{ - std::size_t actual_size = getActualAllocationSize(size); - CurrentMemoryTracker::allocNoThrow(actual_size); -} - -inline ALWAYS_INLINE void untrackMemory(void * ptr [[maybe_unused]], std::size_t size [[maybe_unused]] = 0) noexcept -{ - try - { -#if USE_JEMALLOC && JEMALLOC_VERSION_MAJOR >= 5 - /// @note It's also possible to use je_malloc_usable_size() here. - if (likely(ptr != nullptr)) - CurrentMemoryTracker::free(sallocx(ptr, 0)); -#else - if (size) - CurrentMemoryTracker::free(size); -# if defined(_GNU_SOURCE) - /// It's innaccurate resource free for sanitizers. malloc_usable_size() result is greater or equal to allocated size. - else - CurrentMemoryTracker::free(malloc_usable_size(ptr)); -# endif -#endif - } - catch (...) - {} -} - -} - /// new void * operator new(std::size_t size) { Memory::trackMemory(size); - return Memory::newImpl(size); } void * operator new[](std::size_t size) { Memory::trackMemory(size); - return Memory::newImpl(size); } void * operator new(std::size_t size, const std::nothrow_t &) noexcept { Memory::trackMemory(size); - return Memory::newNoExept(size); } void * operator new[](std::size_t size, const std::nothrow_t &) noexcept { Memory::trackMemory(size); - return Memory::newNoExept(size); } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 5b75ca0d3ab..623eb0298dd 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -169,6 +169,7 @@ class IColumn; M(Int64, os_thread_priority, 0, "If non zero - set corresponding 'nice' value for query processing threads. Can be used to adjust query priority for OS scheduler.", 0) \ \ M(Bool, log_queries, 1, "Log requests and write the log to the system table.", 0) \ + M(Bool, log_formatted_queries, 0, "Log formatted queries and write the log to the system table.", 0) \ M(LogQueriesType, log_queries_min_type, QueryLogElementType::QUERY_START, "Minimal type in query_log to log, possible values (from low to high): QUERY_START, QUERY_FINISH, EXCEPTION_BEFORE_START, EXCEPTION_WHILE_PROCESSING.", 0) \ M(Milliseconds, log_queries_min_query_duration_ms, 0, "Minimal time for the query to run, to get to the query_log/query_thread_log.", 0) \ M(UInt64, log_queries_cut_to_length, 100000, "If query length is greater than specified threshold (in bytes), then cut query when writing to query log. 
Also limit length of printed query in ordinary text log.", 0) \ @@ -499,6 +500,7 @@ class IColumn; M(Bool, enable_debug_queries, false, "Obsolete setting, does nothing.", 0) \ M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing.", 0) \ M(Bool, allow_experimental_bigint_types, true, "Obsolete setting, does nothing.", 0) \ + M(Bool, allow_experimental_window_functions, true, "Obsolete setting, does nothing.", 0) \ M(HandleKafkaErrorMode, handle_kafka_error_mode, HandleKafkaErrorMode::DEFAULT, "Obsolete setting, does nothing.", 0) \ M(Bool, database_replicated_ddl_output, true, "Obsolete setting, does nothing.", 0) \ /** The section above is for obsolete settings. Do not add anything there. */ diff --git a/src/DataStreams/ExecutionSpeedLimits.cpp b/src/DataStreams/ExecutionSpeedLimits.cpp index 6cc1b9006bf..81afd4fe984 100644 --- a/src/DataStreams/ExecutionSpeedLimits.cpp +++ b/src/DataStreams/ExecutionSpeedLimits.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include namespace ProfileEvents @@ -104,14 +105,18 @@ static bool handleOverflowMode(OverflowMode mode, const String & message, int co } } -bool ExecutionSpeedLimits::checkTimeLimit(UInt64 elapsed_ns, OverflowMode overflow_mode) const +bool ExecutionSpeedLimits::checkTimeLimit(const Stopwatch & stopwatch, OverflowMode overflow_mode) const { - if (max_execution_time != 0 - && elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) - return handleOverflowMode(overflow_mode, + if (max_execution_time != 0) + { + auto elapsed_ns = stopwatch.elapsed(); + + if (elapsed_ns > static_cast(max_execution_time.totalMicroseconds()) * 1000) + return handleOverflowMode(overflow_mode, "Timeout exceeded: elapsed " + toString(static_cast(elapsed_ns) / 1000000000ULL) + " seconds, maximum: " + toString(max_execution_time.totalMicroseconds() / 1000000.0), ErrorCodes::TIMEOUT_EXCEEDED); + } return true; } diff --git a/src/DataStreams/ExecutionSpeedLimits.h b/src/DataStreams/ExecutionSpeedLimits.h index 9ab58e12cf4..d52dc713c1a 100644 --- a/src/DataStreams/ExecutionSpeedLimits.h +++ b/src/DataStreams/ExecutionSpeedLimits.h @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { @@ -25,7 +26,7 @@ public: /// Pause execution in case if speed limits were exceeded. 
    void throttle(size_t read_rows, size_t read_bytes, size_t total_rows_to_read, UInt64 total_elapsed_microseconds) const;
 
-    bool checkTimeLimit(UInt64 elapsed_ns, OverflowMode overflow_mode) const;
+    bool checkTimeLimit(const Stopwatch & stopwatch, OverflowMode overflow_mode) const;
 };
 
 }
diff --git a/src/DataStreams/IBlockInputStream.cpp b/src/DataStreams/IBlockInputStream.cpp
index c3071cdcf20..e57d6903673 100644
--- a/src/DataStreams/IBlockInputStream.cpp
+++ b/src/DataStreams/IBlockInputStream.cpp
@@ -201,7 +201,7 @@ void IBlockInputStream::updateExtremes(Block & block)
 
 bool IBlockInputStream::checkTimeLimit() const
 {
-    return limits.speed_limits.checkTimeLimit(info.total_stopwatch.elapsed(), limits.timeout_overflow_mode);
+    return limits.speed_limits.checkTimeLimit(info.total_stopwatch, limits.timeout_overflow_mode);
 }
 
diff --git a/src/IO/Bzip2ReadBuffer.cpp b/src/IO/Bzip2ReadBuffer.cpp
new file mode 100644
index 00000000000..e264ce75444
--- /dev/null
+++ b/src/IO/Bzip2ReadBuffer.cpp
@@ -0,0 +1,97 @@
+#if !defined(ARCADIA_BUILD)
+#    include <Common/config.h>
+#endif
+
+#if USE_BZIP2
+#    include <IO/Bzip2ReadBuffer.h>
+#    include <bzlib.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int BZIP2_STREAM_DECODER_FAILED;
+}
+
+
+class Bzip2ReadBuffer::Bzip2StateWrapper
+{
+public:
+    Bzip2StateWrapper()
+    {
+        memset(&stream, 0, sizeof(stream));
+
+        int ret = BZ2_bzDecompressInit(&stream, 0, 0);
+
+        if (ret != BZ_OK)
+            throw Exception(
+                ErrorCodes::BZIP2_STREAM_DECODER_FAILED,
+                "bzip2 stream decoder init failed: error code: {}",
+                ret);
+    }
+
+    ~Bzip2StateWrapper()
+    {
+        BZ2_bzDecompressEnd(&stream);
+    }
+
+    bz_stream stream;
+};
+
+Bzip2ReadBuffer::Bzip2ReadBuffer(std::unique_ptr<ReadBuffer> in_, size_t buf_size, char * existing_memory, size_t alignment)
+    : BufferWithOwnMemory<ReadBuffer>(buf_size, existing_memory, alignment)
+    , in(std::move(in_))
+    , bz(std::make_unique<Bzip2StateWrapper>())
+    , eof(false)
+{
+}
+
+Bzip2ReadBuffer::~Bzip2ReadBuffer() = default;
+
+bool Bzip2ReadBuffer::nextImpl()
+{
+    if (eof)
+        return false;
+
+    if (!bz->stream.avail_in)
+    {
+        in->nextIfAtEnd();
+        bz->stream.avail_in = in->buffer().end() - in->position();
+        bz->stream.next_in = in->position();
+    }
+
+    bz->stream.avail_out = internal_buffer.size();
+    bz->stream.next_out = internal_buffer.begin();
+
+    int ret = BZ2_bzDecompress(&bz->stream);
+
+    in->position() = in->buffer().end() - bz->stream.avail_in;
+    working_buffer.resize(internal_buffer.size() - bz->stream.avail_out);
+
+    if (ret == BZ_STREAM_END)
+    {
+        if (in->eof())
+        {
+            eof = true;
+            return !working_buffer.empty();
+        }
+        else
+        {
+            throw Exception(
+                ErrorCodes::BZIP2_STREAM_DECODER_FAILED,
+                "bzip2 decoder finished, but input stream has not ended: error code: {}", ret);
+        }
+    }
+
+    if (ret != BZ_OK)
+        throw Exception(
+            ErrorCodes::BZIP2_STREAM_DECODER_FAILED,
+            "bzip2 stream decoder failed: error code: {}",
+            ret);
+
+    return true;
+}
+}
+
+#endif
diff --git a/src/IO/Bzip2ReadBuffer.h b/src/IO/Bzip2ReadBuffer.h
new file mode 100644
index 00000000000..dc113800683
--- /dev/null
+++ b/src/IO/Bzip2ReadBuffer.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <IO/ReadBuffer.h>
+#include <IO/BufferWithOwnMemory.h>
+
+
+namespace DB
+{
+
+class Bzip2ReadBuffer : public BufferWithOwnMemory<ReadBuffer>
+{
+public:
+    Bzip2ReadBuffer(
+        std::unique_ptr<ReadBuffer> in_,
+        size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
+        char * existing_memory = nullptr,
+        size_t alignment = 0);
+
+    ~Bzip2ReadBuffer() override;
+
+private:
+    bool nextImpl() override;
+
+    std::unique_ptr<ReadBuffer> in;
+
+    class Bzip2StateWrapper;
+    std::unique_ptr<Bzip2StateWrapper> bz;
+
+    bool eof;
+};
+
+}
+
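The new read and write buffers plug into ClickHouse's generic compression-method dispatch (see the CompressionMethod changes further below), so the `bz2` file extension becomes recognized by the `file` and `url` table functions. A minimal round-trip sketch, assuming the feature as wired in this PR and a hypothetical file under the server's `user_files` directory:

```
INSERT INTO TABLE FUNCTION file('test.csv.bz2', 'CSV', 'n UInt32, s String') VALUES (1, 'Hello');
-- the compression method is inferred from the .bz2 extension
SELECT * FROM file('test.csv.bz2', 'CSV', 'n UInt32, s String');
```

diff --git a/src/IO/Bzip2WriteBuffer.cpp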
b/src/IO/Bzip2WriteBuffer.cpp
new file mode 100644
index 00000000000..41cb972966c
--- /dev/null
+++ b/src/IO/Bzip2WriteBuffer.cpp
@@ -0,0 +1,138 @@
+#if !defined(ARCADIA_BUILD)
+#    include <Common/config.h>
+#endif
+
+#if USE_BZIP2
+#    include <IO/Bzip2WriteBuffer.h>
+#    include <bzlib.h>
+
+#include <Common/MemoryTracker.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int BZIP2_STREAM_ENCODER_FAILED;
+}
+
+
+class Bzip2WriteBuffer::Bzip2StateWrapper
+{
+public:
+    explicit Bzip2StateWrapper(int compression_level)
+    {
+        memset(&stream, 0, sizeof(stream));
+
+        int ret = BZ2_bzCompressInit(&stream, compression_level, 0, 0);
+
+        if (ret != BZ_OK)
+            throw Exception(
+                ErrorCodes::BZIP2_STREAM_ENCODER_FAILED,
+                "bzip2 stream encoder init failed: error code: {}",
+                ret);
+    }
+
+    ~Bzip2StateWrapper()
+    {
+        BZ2_bzCompressEnd(&stream);
+    }
+
+    bz_stream stream;
+};
+
+Bzip2WriteBuffer::Bzip2WriteBuffer(std::unique_ptr<WriteBuffer> out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment)
+    : BufferWithOwnMemory<WriteBuffer>(buf_size, existing_memory, alignment)
+    , bz(std::make_unique<Bzip2StateWrapper>(compression_level))
+    , out(std::move(out_))
+{
+}
+
+Bzip2WriteBuffer::~Bzip2WriteBuffer()
+{
+    /// FIXME move final flush into the caller
+    MemoryTracker::LockExceptionInThread lock(VariableContext::Global);
+    finish();
+}
+
+void Bzip2WriteBuffer::nextImpl()
+{
+    if (!offset())
+    {
+        return;
+    }
+
+    bz->stream.next_in = working_buffer.begin();
+    bz->stream.avail_in = offset();
+
+    try
+    {
+        do
+        {
+            out->nextIfAtEnd();
+            bz->stream.next_out = out->position();
+            bz->stream.avail_out = out->buffer().end() - out->position();
+
+            int ret = BZ2_bzCompress(&bz->stream, BZ_RUN);
+
+            out->position() = out->buffer().end() - bz->stream.avail_out;
+
+            if (ret != BZ_RUN_OK)
+                throw Exception(
+                    ErrorCodes::BZIP2_STREAM_ENCODER_FAILED,
+                    "bzip2 stream encoder failed: error code: {}",
+                    ret);
+
+        }
+        while (bz->stream.avail_in > 0);
+    }
+    catch (...)
+    {
+        /// Do not try to write next time after exception.
+        out->position() = out->buffer().begin();
+        throw;
+    }
+}
+
+void Bzip2WriteBuffer::finish()
+{
+    if (finished)
+        return;
+
+    try
+    {
+        finishImpl();
+        out->finalize();
+        finished = true;
+    }
+    catch (...)
+    {
+        /// Do not try to flush next time after exception.
+ out->position() = out->buffer().begin(); + finished = true; + throw; + } +} + +void Bzip2WriteBuffer::finishImpl() +{ + next(); + + out->nextIfAtEnd(); + bz->stream.next_out = out->position(); + bz->stream.avail_out = out->buffer().end() - out->position(); + + int ret = BZ2_bzCompress(&bz->stream, BZ_FINISH); + + out->position() = out->buffer().end() - bz->stream.avail_out; + + if (ret != BZ_STREAM_END && ret != BZ_FINISH_OK) + throw Exception( + ErrorCodes::BZIP2_STREAM_ENCODER_FAILED, + "bzip2 stream encoder failed: error code: {}", + ret); +} + +} + +#endif diff --git a/src/IO/Bzip2WriteBuffer.h b/src/IO/Bzip2WriteBuffer.h new file mode 100644 index 00000000000..72f14a344e3 --- /dev/null +++ b/src/IO/Bzip2WriteBuffer.h @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class Bzip2WriteBuffer : public BufferWithOwnMemory +{ +public: + Bzip2WriteBuffer( + std::unique_ptr out_, + int compression_level, + size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, + char * existing_memory = nullptr, + size_t alignment = 0); + + ~Bzip2WriteBuffer() override; + + void finalize() override { finish(); } + +private: + void nextImpl() override; + + void finish(); + void finishImpl(); + + class Bzip2StateWrapper; + std::unique_ptr bz; + + std::unique_ptr out; + + bool finished = false; +}; + +} diff --git a/src/IO/CompressionMethod.cpp b/src/IO/CompressionMethod.cpp index c6bb5232c0c..e02475c1d83 100644 --- a/src/IO/CompressionMethod.cpp +++ b/src/IO/CompressionMethod.cpp @@ -10,6 +10,8 @@ #include #include #include +#include +#include #if !defined(ARCADIA_BUILD) # include @@ -40,6 +42,8 @@ std::string toContentEncodingName(CompressionMethod method) return "xz"; case CompressionMethod::Zstd: return "zstd"; + case CompressionMethod::Bzip2: + return "bz2"; case CompressionMethod::None: return ""; } @@ -69,11 +73,13 @@ CompressionMethod chooseCompressionMethod(const std::string & path, const std::s return CompressionMethod::Xz; if (method_str == "zstd" || method_str == "zst") return CompressionMethod::Zstd; + if (method_str == "bz2") + return CompressionMethod::Bzip2; if (hint.empty() || hint == "auto" || hint == "none") return CompressionMethod::None; throw Exception( - "Unknown compression method " + hint + ". Only 'auto', 'none', 'gzip', 'deflate', 'br', 'xz', 'zstd' are supported as compression methods", + "Unknown compression method " + hint + ". 
Only 'auto', 'none', 'gzip', 'deflate', 'br', 'xz', 'zstd', 'bz2' are supported as compression methods", ErrorCodes::NOT_IMPLEMENTED); } @@ -91,7 +97,10 @@ std::unique_ptr wrapReadBufferWithCompressionMethod( return std::make_unique(std::move(nested), buf_size, existing_memory, alignment); if (method == CompressionMethod::Zstd) return std::make_unique(std::move(nested), buf_size, existing_memory, alignment); - +#if USE_BZIP2 + if (method == CompressionMethod::Bzip2) + return std::make_unique(std::move(nested), buf_size, existing_memory, alignment); +#endif if (method == CompressionMethod::None) return nested; @@ -114,7 +123,10 @@ std::unique_ptr wrapWriteBufferWithCompressionMethod( if (method == CompressionMethod::Zstd) return std::make_unique(std::move(nested), level, buf_size, existing_memory, alignment); - +#if USE_BZIP2 + if (method == CompressionMethod::Bzip2) + return std::make_unique(std::move(nested), level, buf_size, existing_memory, alignment); +#endif if (method == CompressionMethod::None) return nested; diff --git a/src/IO/CompressionMethod.h b/src/IO/CompressionMethod.h index 6f2d87b45cf..cf034229f77 100644 --- a/src/IO/CompressionMethod.h +++ b/src/IO/CompressionMethod.h @@ -31,7 +31,8 @@ enum class CompressionMethod /// Zstd compressor /// This option corresponds to HTTP Content-Encoding: zstd Zstd, - Brotli + Brotli, + Bzip2 }; /// How the compression method is named in HTTP. diff --git a/src/IO/ya.make b/src/IO/ya.make index 9e35a062a96..7723464be6f 100644 --- a/src/IO/ya.make +++ b/src/IO/ya.make @@ -23,6 +23,8 @@ SRCS( AIOContextPool.cpp BrotliReadBuffer.cpp BrotliWriteBuffer.cpp + Bzip2ReadBuffer.cpp + Bzip2WriteBuffer.cpp CascadeWriteBuffer.cpp CompressionMethod.cpp DoubleConverter.cpp diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 03fa756276e..61e484ff6f1 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -348,7 +348,7 @@ SetPtr makeExplicitSet( const ASTPtr & left_arg = args.children.at(0); const ASTPtr & right_arg = args.children.at(1); - auto column_name = left_arg->getColumnName(context->getSettingsRef()); + auto column_name = left_arg->getColumnName(); const auto & dag_node = actions.findInIndex(column_name); const DataTypePtr & left_arg_type = dag_node.result_type; @@ -641,7 +641,7 @@ std::optional ActionsMatcher::getNameAndTypeFromAST(const ASTPt { // If the argument is a literal, we generated a unique column name for it. // Use it instead of a generic display name. 
- auto child_column_name = ast->getColumnName(data.getContext()->getSettingsRef()); + auto child_column_name = ast->getColumnName(); const auto * as_literal = ast->as(); if (as_literal) { @@ -698,7 +698,7 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat auto func = makeASTFunction("tupleElement", tuple_ast, literal); auto function_builder = FunctionFactory::instance().get(func->name, data.getContext()); - data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName(data.getContext()->getSettingsRef())}, func->getColumnName(data.getContext()->getSettingsRef())); + data.addFunction(function_builder, {tuple_name_type->name, literal->getColumnName()}, func->getColumnName()); columns.push_back(std::move(func)); } @@ -762,7 +762,7 @@ void ActionsMatcher::visit(const ASTIdentifier & identifier, const ASTPtr &, Dat void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & data) { - auto column_name = ast->getColumnName(data.getContext()->getSettingsRef()); + auto column_name = ast->getColumnName(); if (data.hasColumn(column_name)) return; @@ -778,7 +778,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & ASTPtr arg = node.arguments->children.at(0); visit(arg, data); if (!data.only_consts) - data.addArrayJoin(arg->getColumnName(data.getContext()->getSettingsRef()), column_name); + data.addArrayJoin(arg->getColumnName(), column_name); return; } @@ -800,7 +800,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & /// We are in the part of the tree that we are not going to compute. You just need to define types. /// Do not subquery and create sets. We replace "in*" function to "in*IgnoreSet". - auto argument_name = node.arguments->children.at(0)->getColumnName(data.getContext()->getSettingsRef()); + auto argument_name = node.arguments->children.at(0)->getColumnName(); data.addFunction( FunctionFactory::instance().get(node.name + "IgnoreSet", data.getContext()), @@ -929,7 +929,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (!prepared_set->empty()) column.name = data.getUniqueName("__set"); else - column.name = child->getColumnName(data.getContext()->getSettingsRef()); + column.name = child->getColumnName(); if (!data.hasColumn(column.name)) { @@ -1008,7 +1008,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & visit(lambda->arguments->children.at(1), data); auto lambda_dag = data.actions_stack.popLevel(); - String result_name = lambda->arguments->children.at(1)->getColumnName(data.getContext()->getSettingsRef()); + String result_name = lambda->arguments->children.at(1)->getColumnName(); lambda_dag->removeUnusedActions(Names(1, result_name)); auto lambda_actions = std::make_shared( @@ -1023,7 +1023,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (findColumn(required_arg, lambda_arguments) == lambda_arguments.end()) captured.push_back(required_arg); - /// We can not name `getColumnName(data.getContext()->getSettingsRef())`, + /// We can not name `getColumnName()`, /// because it does not uniquely define the expression (the types of arguments can be different). String lambda_name = data.getUniqueName("__lambda"); @@ -1053,7 +1053,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (arguments_present) { /// Calculate column name here again, because AST may be changed here (in case of untuple). 
- data.addFunction(function_builder, argument_names, ast->getColumnName(data.getContext()->getSettingsRef())); + data.addFunction(function_builder, argument_names, ast->getColumnName()); } } @@ -1067,7 +1067,7 @@ void ActionsMatcher::visit(const ASTLiteral & literal, const ASTPtr & /* ast */, // AST here? Anyway, do not modify the column name if it is set already. if (literal.unique_column_name.empty()) { - const auto default_name = literal.getColumnName(data.getContext()->getSettingsRef()); + const auto default_name = literal.getColumnName(); const auto & index = data.actions_stack.getLastActionsIndex(); const auto * existing_column = index.tryGetNode(default_name); @@ -1147,7 +1147,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su } /// We get the stream of blocks for the subquery. Create Set and put it in place of the subquery. - String set_id = right_in_operand->getColumnName(data.getContext()->getSettingsRef()); + String set_id = right_in_operand->getColumnName(); SubqueryForSet & subquery_for_set = data.subqueries_for_sets[set_id]; @@ -1183,7 +1183,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su { const auto & last_actions = data.actions_stack.getLastActions(); const auto & index = data.actions_stack.getLastActionsIndex(); - if (index.contains(left_in_operand->getColumnName(data.getContext()->getSettingsRef()))) + if (index.contains(left_in_operand->getColumnName())) /// An explicit enumeration of values in parentheses. return makeExplicitSet(&node, last_actions, false, data.getContext(), data.set_size_limit, data.prepared_sets); else diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 7ffae761c0c..69ad1d56359 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1193,6 +1193,9 @@ bool Aggregator::checkLimits(size_t result_size, bool & no_more_keys) const } } + /// Some aggregate functions cannot throw exceptions on allocations (e.g. from C malloc) + /// but still tracks memory. Check it here. + CurrentMemoryTracker::check(); return true; } diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 81f6f224c7b..1c79dedd978 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -779,43 +779,60 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti uint64_t kb = 0; readText(kb, *meminfo); - if (kb) + + if (!kb) { - skipWhitespaceIfAny(*meminfo, true); - assertString("kB", *meminfo); + skipToNextLineOrEOF(*meminfo); + continue; + } - uint64_t bytes = kb * 1024; + skipWhitespaceIfAny(*meminfo, true); - if (name == "MemTotal:") - { - new_values["OSMemoryTotal"] = bytes; - } - else if (name == "MemFree:") - { - /// We cannot simply name this metric "Free", because it confuses users. - /// See https://www.linuxatemyram.com/ - /// For convenience we also provide OSMemoryFreePlusCached, that should be somewhat similar to OSMemoryAvailable. + /** + * Not all entries in /proc/meminfo contain the kB suffix, e.g. 
+ * HugePages_Total: 0 + * HugePages_Free: 0 + * We simply skip such entries as they're not needed + */ + if (*meminfo->position() == '\n') + { + skipToNextLineOrEOF(*meminfo); + continue; + } - free_plus_cached_bytes += bytes; - new_values["OSMemoryFreeWithoutCached"] = bytes; - } - else if (name == "MemAvailable:") - { - new_values["OSMemoryAvailable"] = bytes; - } - else if (name == "Buffers:") - { - new_values["OSMemoryBuffers"] = bytes; - } - else if (name == "Cached:") - { - free_plus_cached_bytes += bytes; - new_values["OSMemoryCached"] = bytes; - } - else if (name == "SwapCached:") - { - new_values["OSMemorySwapCached"] = bytes; - } + assertString("kB", *meminfo); + + uint64_t bytes = kb * 1024; + + if (name == "MemTotal:") + { + new_values["OSMemoryTotal"] = bytes; + } + else if (name == "MemFree:") + { + /// We cannot simply name this metric "Free", because it confuses users. + /// See https://www.linuxatemyram.com/ + /// For convenience we also provide OSMemoryFreePlusCached, that should be somewhat similar to OSMemoryAvailable. + + free_plus_cached_bytes += bytes; + new_values["OSMemoryFreeWithoutCached"] = bytes; + } + else if (name == "MemAvailable:") + { + new_values["OSMemoryAvailable"] = bytes; + } + else if (name == "Buffers:") + { + new_values["OSMemoryBuffers"] = bytes; + } + else if (name == "Cached:") + { + free_plus_cached_bytes += bytes; + new_values["OSMemoryCached"] = bytes; + } + else if (name == "SwapCached:") + { + new_values["OSMemorySwapCached"] = bytes; } skipToNextLineOrEOF(*meminfo); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 6245b297b36..77598e69c00 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -243,7 +243,7 @@ void ExpressionAnalyzer::analyzeAggregation() ssize_t size = group_asts.size(); getRootActionsNoMakeSet(group_asts[i], true, temp_actions, false); - const auto & column_name = group_asts[i]->getColumnName(getContext()->getSettingsRef()); + const auto & column_name = group_asts[i]->getColumnName(); const auto * node = temp_actions->tryFindInIndex(column_name); if (!node) throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER); @@ -408,7 +408,7 @@ void SelectQueryExpressionAnalyzer::makeSetsForIndex(const ASTPtr & node) auto temp_actions = std::make_shared(columns_after_join); getRootActions(left_in_operand, true, temp_actions); - if (temp_actions->tryFindInIndex(left_in_operand->getColumnName(getContext()->getSettingsRef()))) + if (temp_actions->tryFindInIndex(left_in_operand->getColumnName())) makeExplicitSet(func, *temp_actions, true, getContext(), settings.size_limits_for_set, prepared_sets); } } @@ -456,7 +456,7 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) if (node->arguments) getRootActionsNoMakeSet(node->arguments, true, actions); - aggregate.column_name = node->getColumnName(getContext()->getSettingsRef()); + aggregate.column_name = node->getColumnName(); const ASTs & arguments = node->arguments ? 
node->arguments->children : ASTs(); aggregate.argument_names.resize(arguments.size()); @@ -464,7 +464,7 @@ bool ExpressionAnalyzer::makeAggregateDescriptions(ActionsDAGPtr & actions) for (size_t i = 0; i < arguments.size(); ++i) { - const std::string & name = arguments[i]->getColumnName(getContext()->getSettingsRef()); + const std::string & name = arguments[i]->getColumnName(); const auto * dag_node = actions->tryFindInIndex(name); if (!dag_node) { @@ -645,7 +645,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) WindowFunctionDescription window_function; window_function.function_node = function_node; window_function.column_name - = window_function.function_node->getColumnName(getContext()->getSettingsRef()); + = window_function.function_node->getColumnName(); window_function.function_parameters = window_function.function_node->parameters ? getAggregateFunctionParametersArray( @@ -664,7 +664,7 @@ void ExpressionAnalyzer::makeWindowDescriptions(ActionsDAGPtr actions) window_function.argument_names.resize(arguments.size()); for (size_t i = 0; i < arguments.size(); ++i) { - const std::string & name = arguments[i]->getColumnName(getContext()->getSettingsRef()); + const std::string & name = arguments[i]->getColumnName(); const auto * node = actions->tryFindInIndex(name); if (!node) @@ -961,7 +961,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( auto & step = chain.lastStep(sourceColumns()); getRootActions(select_query->prewhere(), only_types, step.actions()); - String prewhere_column_name = select_query->prewhere()->getColumnName(getContext()->getSettingsRef()); + String prewhere_column_name = select_query->prewhere()->getColumnName(); step.addRequiredOutput(prewhere_column_name); const auto & node = step.actions()->findInIndex(prewhere_column_name); @@ -1047,7 +1047,7 @@ bool SelectQueryExpressionAnalyzer::appendWhere(ExpressionActionsChain & chain, getRootActions(select_query->where(), only_types, step.actions()); - auto where_column_name = select_query->where()->getColumnName(getContext()->getSettingsRef()); + auto where_column_name = select_query->where()->getColumnName(); step.addRequiredOutput(where_column_name); const auto & node = step.actions()->findInIndex(where_column_name); @@ -1072,7 +1072,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain ASTs asts = select_query->groupBy()->children; for (const auto & ast : asts) { - step.addRequiredOutput(ast->getColumnName(getContext()->getSettingsRef())); + step.addRequiredOutput(ast->getColumnName()); getRootActions(ast, only_types, step.actions()); } @@ -1100,7 +1100,7 @@ void SelectQueryExpressionAnalyzer::appendAggregateFunctionsArguments(Expression for (const auto & name : desc.argument_names) step.addRequiredOutput(name); - /// Collect aggregates removing duplicates by node.getColumnName(getContext()->getSettingsRef()) + /// Collect aggregates removing duplicates by node.getColumnName() /// It's not clear why we recollect aggregates (for query parts) while we're able to use previously collected ones (for entire query) /// @note The original recollection logic didn't remove duplicates. GetAggregatesVisitor::Data data; @@ -1155,7 +1155,7 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments( // (2b) Required function argument columns. 
for (const auto & a : f.function_node->arguments->children) { - step.addRequiredOutput(a->getColumnName(getContext()->getSettingsRef())); + step.addRequiredOutput(a->getColumnName()); } } @@ -1177,7 +1177,7 @@ bool SelectQueryExpressionAnalyzer::appendHaving(ExpressionActionsChain & chain, ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns); getRootActionsForHaving(select_query->having(), only_types, step.actions()); - step.addRequiredOutput(select_query->having()->getColumnName(getContext()->getSettingsRef())); + step.addRequiredOutput(select_query->having()->getColumnName()); return true; } @@ -1201,7 +1201,7 @@ void SelectQueryExpressionAnalyzer::appendSelect(ExpressionActionsChain & chain, continue; } - step.addRequiredOutput(child->getColumnName(getContext()->getSettingsRef())); + step.addRequiredOutput(child->getColumnName()); } } @@ -1229,7 +1229,7 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai if (!ast || ast->children.empty()) throw Exception("Bad order expression AST", ErrorCodes::UNKNOWN_TYPE_OF_AST_NODE); ASTPtr order_expression = ast->children.at(0); - step.addRequiredOutput(order_expression->getColumnName(getContext()->getSettingsRef())); + step.addRequiredOutput(order_expression->getColumnName()); if (ast->with_fill) with_fill = true; @@ -1279,7 +1279,7 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain for (const auto & child : select_query->limitBy()->children) { - auto child_name = child->getColumnName(getContext()->getSettingsRef()); + auto child_name = child->getColumnName(); if (!aggregated_names.count(child_name)) step.addRequiredOutput(std::move(child_name)); } @@ -1295,15 +1295,13 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendProjectResult(ExpressionActio NamesWithAliases result_columns; - const auto & settings = getContext()->getSettingsRef(); - ASTs asts = select_query->select()->children; for (const auto & ast : asts) { - String result_name = ast->getAliasOrColumnName(settings); + String result_name = ast->getAliasOrColumnName(); if (required_result_columns.empty() || required_result_columns.count(result_name)) { - std::string source_name = ast->getColumnName(settings); + std::string source_name = ast->getColumnName(); /* * For temporary columns created by ExpressionAnalyzer for literals, @@ -1345,7 +1343,7 @@ void ExpressionAnalyzer::appendExpression(ExpressionActionsChain & chain, const { ExpressionActionsChain::Step & step = chain.lastStep(sourceColumns()); getRootActions(expr, only_types, step.actions()); - step.addRequiredOutput(expr->getColumnName(getContext()->getSettingsRef())); + step.addRequiredOutput(expr->getColumnName()); } @@ -1362,13 +1360,12 @@ ActionsDAGPtr ExpressionAnalyzer::getActionsDAG(bool add_aliases, bool project_r else asts = ASTs(1, query); - const auto & settings = getContext()->getSettingsRef(); for (const auto & ast : asts) { - std::string name = ast->getColumnName(settings); + std::string name = ast->getColumnName(); std::string alias; if (add_aliases) - alias = ast->getAliasOrColumnName(settings); + alias = ast->getAliasOrColumnName(); else alias = name; result_columns.emplace_back(name, alias); @@ -1497,7 +1494,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( if (auto actions = query_analyzer.appendPrewhere(chain, !first_stage, additional_required_columns_after_prewhere)) { - prewhere_info = std::make_shared(actions, query.prewhere()->getColumnName(settings)); + prewhere_info = std::make_shared(actions, 
query.prewhere()->getColumnName()); if (allowEarlyConstantFolding(*prewhere_info->prewhere_actions, settings)) { @@ -1507,7 +1504,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( ExpressionActions( prewhere_info->prewhere_actions, ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); - auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName(settings)); + auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); /// If the filter column is a constant, record it. if (column_elem.column) prewhere_constant_filter_description = ConstantFilterDescription(*column_elem.column); @@ -1542,7 +1539,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( ExpressionActions( before_where, ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); - auto & column_elem = before_where_sample.getByName(query.where()->getColumnName(settings)); + auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); /// If the filter column is a constant, record it. if (column_elem.column) where_constant_filter_description = ConstantFilterDescription(*column_elem.column); @@ -1633,7 +1630,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( const auto * select_query = query_analyzer.getSelectQuery(); for (const auto & child : select_query->select()->children) { - step.addRequiredOutput(child->getColumnName(settings)); + step.addRequiredOutput(child->getColumnName()); } } @@ -1689,8 +1686,7 @@ void ExpressionAnalysisResult::finalize(const ExpressionActionsChain & chain, si if (hasWhere()) { - const auto & settings = chain.getContext()->getSettingsRef(); - where_column_name = query.where()->getColumnName(settings); + where_column_name = query.where()->getColumnName(); remove_where_filter = chain.steps.at(where_step_num)->required_output.find(where_column_name)->second; } } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 33f9deaf805..49ebd3d48b0 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -141,7 +141,7 @@ String InterpreterSelectQuery::generateFilterActions(ActionsDAGPtr & actions, co SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot); actions = analyzer.simpleSelectActions(); - auto column_name = expr_list->children.at(0)->getColumnName(context->getSettingsRef()); + auto column_name = expr_list->children.at(0)->getColumnName(); actions->removeUnusedActions(NameSet{column_name}); actions->projectInput(false); @@ -782,7 +782,7 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP order_descr.reserve(query.orderBy()->children.size()); for (const auto & elem : query.orderBy()->children) { - String name = elem->children.front()->getColumnName(context->getSettingsRef()); + String name = elem->children.front()->getColumnName(); const auto & order_by_elem = elem->as(); std::shared_ptr collator; @@ -801,14 +801,14 @@ static SortDescription getSortDescription(const ASTSelectQuery & query, ContextP return order_descr; } -static SortDescription getSortDescriptionFromGroupBy(const ASTSelectQuery & query, ContextPtr context) +static SortDescription getSortDescriptionFromGroupBy(const ASTSelectQuery & query) { SortDescription order_descr; order_descr.reserve(query.groupBy()->children.size()); for (const auto & elem : query.groupBy()->children) { - String name = 
elem->getColumnName(context->getSettingsRef()); + String name = elem->getColumnName(); order_descr.emplace_back(name, 1, 1); } @@ -1327,24 +1327,29 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu } bool apply_limit = options.to_stage != QueryProcessingStage::WithMergeableStateAfterAggregation; + bool apply_prelimit = apply_limit && + query.limitLength() && !query.limit_with_ties && + !hasWithTotalsInAnySubqueryInFromClause(query) && + !query.arrayJoinExpressionList() && + !query.distinct && + !expressions.hasLimitBy() && + !settings.extremes && + !has_withfill; bool apply_offset = options.to_stage != QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit; - bool has_prelimit = false; - if (apply_limit && - query.limitLength() && !query.limit_with_ties && !hasWithTotalsInAnySubqueryInFromClause(query) && - !query.arrayJoinExpressionList() && !query.distinct && !expressions.hasLimitBy() && !settings.extremes && - !has_withfill) + bool limit_applied = false; + if (apply_prelimit) { executePreLimit(query_plan, /* do_not_skip_offset= */!apply_offset); - has_prelimit = true; + limit_applied = true; } /** If there was more than one stream, * then DISTINCT needs to be performed once again after merging all streams. */ - if (query.distinct) + if (!from_aggregation_stage && query.distinct) executeDistinct(query_plan, false, expressions.selected_columns, false); - if (expressions.hasLimitBy()) + if (!from_aggregation_stage && expressions.hasLimitBy()) { executeExpression(query_plan, expressions.before_limit_by, "Before LIMIT BY"); executeLimitBy(query_plan); @@ -1354,10 +1359,10 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu /// If we have 'WITH TIES', we need execute limit before projection, /// because in that case columns from 'ORDER BY' are used. - if (query.limit_with_ties) + if (query.limit_with_ties && apply_offset) { executeLimit(query_plan); - has_prelimit = true; + limit_applied = true; } /// Projection not be done on the shards, since then initiator will not find column in blocks. @@ -1372,7 +1377,12 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu executeExtremes(query_plan); /// Limit is no longer needed if there is prelimit. - if (apply_limit && !has_prelimit) + /// + /// NOTE: LIMIT cannot be applied if OFFSET should not be applied, + /// since LIMIT will apply OFFSET too. + /// This is the case for various optimizations for distributed queries, + /// and when LIMIT cannot be applied it will be applied on the initiator anyway.
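The note above is the crux of the distributed LIMIT push-down: applying LIMIT on a shard implicitly applies OFFSET as well, so when the offset must be left to the initiator, the shard either skips LIMIT entirely or widens it to limit + offset (which is what the do_not_skip_offset flag passed to executePreLimit expresses). A self-contained sketch of the arithmetic, illustrative only and not ClickHouse code:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    // A shard that must preserve OFFSET for the initiator has to return the
    // first (limit + offset) rows; cutting to just `limit` would drop rows
    // the initiator still needs.
    std::vector<int> shardPreLimit(std::vector<int> rows, std::size_t limit,
                                   std::size_t offset, bool do_not_skip_offset)
    {
        std::size_t keep = do_not_skip_offset ? limit + offset : limit;
        rows.resize(std::min(rows.size(), keep));
        return rows;
    }

    // The initiator applies OFFSET and the exact LIMIT over the merged stream.
    std::vector<int> initiatorFinalLimit(const std::vector<int> & merged,
                                         std::size_t limit, std::size_t offset)
    {
        std::vector<int> out;
        for (std::size_t i = offset; i < merged.size() && out.size() < limit; ++i)
            out.push_back(merged[i]);
        return out;
    }

    int main()
    {
        std::vector<int> shard = {1, 2, 3, 4, 5, 6, 7, 8};   // one shard's ordered rows
        auto pre = shardPreLimit(shard, /*limit=*/2, /*offset=*/3, /*do_not_skip_offset=*/true);
        for (int x : initiatorFinalLimit(pre, 2, 3))
            std::cout << x << ' ';   // prints "4 5", same result as LIMIT 2 OFFSET 3 locally
        std::cout << '\n';
    }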
+ if (apply_limit && !limit_applied && apply_offset) executeLimit(query_plan); if (apply_offset) @@ -1918,13 +1928,13 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc { query_info.projection->order_optimizer = std::make_shared( query_info.projection->group_by_elements_actions, - getSortDescriptionFromGroupBy(query, context), + getSortDescriptionFromGroupBy(query), query_info.syntax_analyzer_result); } else { query_info.order_optimizer = std::make_shared( - analysis_result.group_by_elements_actions, getSortDescriptionFromGroupBy(query, context), query_info.syntax_analyzer_result); + analysis_result.group_by_elements_actions, getSortDescriptionFromGroupBy(query), query_info.syntax_analyzer_result); } } @@ -2005,7 +2015,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc void InterpreterSelectQuery::executeWhere(QueryPlan & query_plan, const ActionsDAGPtr & expression, bool remove_filter) { auto where_step = std::make_unique( - query_plan.getCurrentDataStream(), expression, getSelectQuery().where()->getColumnName(context->getSettingsRef()), remove_filter); + query_plan.getCurrentDataStream(), expression, getSelectQuery().where()->getColumnName(), remove_filter); where_step->setStepDescription("WHERE"); query_plan.addStep(std::move(where_step)); @@ -2054,7 +2064,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac SortDescription group_by_sort_description; if (group_by_info && settings.optimize_aggregation_in_order) - group_by_sort_description = getSortDescriptionFromGroupBy(getSelectQuery(), context); + group_by_sort_description = getSortDescriptionFromGroupBy(getSelectQuery()); else group_by_info = nullptr; @@ -2102,7 +2112,7 @@ void InterpreterSelectQuery::executeMergeAggregated(QueryPlan & query_plan, bool void InterpreterSelectQuery::executeHaving(QueryPlan & query_plan, const ActionsDAGPtr & expression) { auto having_step - = std::make_unique(query_plan.getCurrentDataStream(), expression, getSelectQuery().having()->getColumnName(context->getSettingsRef()), false); + = std::make_unique(query_plan.getCurrentDataStream(), expression, getSelectQuery().having()->getColumnName(), false); having_step->setStepDescription("HAVING"); query_plan.addStep(std::move(having_step)); @@ -2118,7 +2128,7 @@ void InterpreterSelectQuery::executeTotalsAndHaving( query_plan.getCurrentDataStream(), overflow_row, expression, - has_having ? getSelectQuery().having()->getColumnName(context->getSettingsRef()) : "", + has_having ? getSelectQuery().having()->getColumnName() : "", settings.totals_mode, settings.totals_auto_threshold, final); @@ -2429,7 +2439,10 @@ void InterpreterSelectQuery::executePreLimit(QueryPlan & query_plan, bool do_not } auto limit = std::make_unique(query_plan.getCurrentDataStream(), limit_length, limit_offset); - limit->setStepDescription("preliminary LIMIT"); + if (do_not_skip_offset) + limit->setStepDescription("preliminary LIMIT (with OFFSET)"); + else + limit->setStepDescription("preliminary LIMIT (without OFFSET)"); query_plan.addStep(std::move(limit)); } } @@ -2443,7 +2456,7 @@ void InterpreterSelectQuery::executeLimitBy(QueryPlan & query_plan) Names columns; for (const auto & elem : query.limitBy()->children) - columns.emplace_back(elem->getColumnName(context->getSettingsRef())); + columns.emplace_back(elem->getColumnName()); UInt64 length = getLimitUIntValue(query.limitByLength(), context, "LIMIT"); UInt64 offset = (query.limitByOffset() ? 
getLimitUIntValue(query.limitByOffset(), context, "OFFSET") : 0); diff --git a/src/Interpreters/QueryLog.cpp b/src/Interpreters/QueryLog.cpp index 3f668e5e0ab..0f7ff579f5d 100644 --- a/src/Interpreters/QueryLog.cpp +++ b/src/Interpreters/QueryLog.cpp @@ -57,6 +57,7 @@ NamesAndTypesList QueryLogElement::getNamesAndTypes() {"current_database", std::make_shared()}, {"query", std::make_shared()}, + {"formatted_query", std::make_shared()}, {"normalized_query_hash", std::make_shared()}, {"query_kind", std::make_shared(std::make_shared())}, {"databases", std::make_shared( @@ -151,6 +152,7 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insertData(current_database.data(), current_database.size()); columns[i++]->insertData(query.data(), query.size()); + columns[i++]->insertData(formatted_query.data(), formatted_query.size()); columns[i++]->insert(normalized_query_hash); columns[i++]->insertData(query_kind.data(), query_kind.size()); diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index 0aa02104306..aad3e56190b 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -51,6 +51,7 @@ struct QueryLogElement String current_database; String query; + String formatted_query; UInt64 normalized_query_hash{}; String query_kind; diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index cc345004f6f..9294cca7bb4 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -609,6 +609,27 @@ std::vector getWindowFunctions(ASTPtr & query, const ASTSel return data.window_functions; } +class MarkTupleLiteralsAsLegacyData +{ +public: + using TypeToVisit = ASTLiteral; + + static void visit(ASTLiteral & literal, ASTPtr &) + { + if (literal.value.getType() == Field::Types::Tuple) + literal.use_legacy_column_name_of_tuple = true; + } +}; + +using MarkTupleLiteralsAsLegacyMatcher = OneTypeMatcher; +using MarkTupleLiteralsAsLegacyVisitor = InDepthNodeVisitor; + +void markTupleLiteralsAsLegacy(ASTPtr & query) +{ + MarkTupleLiteralsAsLegacyVisitor::Data data; + MarkTupleLiteralsAsLegacyVisitor(data).visit(query); +} + } TreeRewriterResult::TreeRewriterResult( @@ -927,6 +948,9 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( /// Executing scalar subqueries - replacing them with constant values. executeScalarSubqueries(query, getContext(), subquery_depth, result.scalars, select_options.only_analyze); + if (settings.legacy_column_name_of_tuple_literal) + markTupleLiteralsAsLegacy(query); + TreeOptimizer::apply(query, result, tables_with_columns, getContext()); /// array_join_alias_to_name, array_join_result_to_source. @@ -994,6 +1018,9 @@ TreeRewriterResultPtr TreeRewriter::analyze( /// Executing scalar subqueries. Column defaults could be a scalar subquery. 
executeScalarSubqueries(query, getContext(), 0, result.scalars, false); + if (settings.legacy_column_name_of_tuple_literal) + markTupleLiteralsAsLegacy(query); + TreeOptimizer::optimizeIf(query, result.aliases, settings.optimize_if_chain_to_multiif); if (allow_aggregations) diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index f814e1d8c02..e46f644e836 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -39,7 +39,7 @@ std::pair> evaluateConstantExpression(co if (context->getSettingsRef().normalize_function_names) FunctionNameNormalizer().visit(ast.get()); - String name = ast->getColumnName(context->getSettingsRef()); + String name = ast->getColumnName(); auto syntax_result = TreeRewriter(context).analyze(ast, source_columns); ExpressionActionsPtr expr_for_constant_folding = ExpressionAnalyzer(ast, syntax_result, context).getConstActions(); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 3756f1b2765..1b59f3bc7df 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -265,7 +265,11 @@ static void onExceptionBeforeStart(const String & query_for_logging, ContextPtr // Try log query_kind if ast is valid if (ast) + { elem.query_kind = ast->getQueryKindString(); + if (settings.log_formatted_queries) + elem.formatted_query = queryToString(ast); + } // We don't calculate databases, tables and columns when the query isn't able to start @@ -641,6 +645,8 @@ static std::tuple executeQueryImpl( elem.current_database = context->getCurrentDatabase(); elem.query = query_for_logging; + if (settings.log_formatted_queries) + elem.formatted_query = queryToString(ast); elem.normalized_query_hash = normalizedQueryHash(query_for_logging); elem.client_info = client_info; diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index daae3e76aa1..1ff27c61836 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -24,16 +24,6 @@ namespace ErrorCodes } void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const -{ - appendColumnNameImpl(ostr, nullptr); -} - -void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const -{ - appendColumnNameImpl(ostr, &settings); -} - -void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr, const Settings * settings) const { if (name == "view") throw Exception("Table function view cannot be used as an expression", ErrorCodes::UNEXPECTED_EXPRESSION); @@ -48,10 +38,7 @@ void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr, const Settings * sett if (it != parameters->children.begin()) writeCString(", ", ostr); - if (settings) - (*it)->appendColumnName(ostr, *settings); - else - (*it)->appendColumnName(ostr); + (*it)->appendColumnName(ostr); } writeChar(')', ostr); } @@ -64,10 +51,7 @@ void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr, const Settings * sett if (it != arguments->children.begin()) writeCString(", ", ostr); - if (settings) - (*it)->appendColumnName(ostr, *settings); - else - (*it)->appendColumnName(ostr); + (*it)->appendColumnName(ostr); } } diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 8e657afbf6e..685aaaadd26 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -54,10 +54,6 @@ public: protected: void formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; void 
appendColumnNameImpl(WriteBuffer & ostr) const override; - void appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const override; - -private: - void appendColumnNameImpl(WriteBuffer & ostr, const Settings * settings) const; }; diff --git a/src/Parsers/ASTLiteral.cpp b/src/Parsers/ASTLiteral.cpp index c456cb3e933..93d490bc645 100644 --- a/src/Parsers/ASTLiteral.cpp +++ b/src/Parsers/ASTLiteral.cpp @@ -50,16 +50,14 @@ String FieldVisitorToColumnName::operator() (const Tuple & x) const } -void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const -{ - if (settings.legacy_column_name_of_tuple_literal) - appendColumnNameImplLegacy(ostr); - else - appendColumnNameImpl(ostr); -} - void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const { + if (use_legacy_column_name_of_tuple) + { + appendColumnNameImplLegacy(ostr); + return; + } + /// 100 - just arbitrary value. constexpr auto min_elements_for_hashing = 100; diff --git a/src/Parsers/ASTLiteral.h b/src/Parsers/ASTLiteral.h index c17310f719b..856bed61979 100644 --- a/src/Parsers/ASTLiteral.h +++ b/src/Parsers/ASTLiteral.h @@ -33,6 +33,10 @@ public: */ String unique_column_name; + /// For compatibility reasons in distributed queries, + /// we may need to use legacy column name for tuple literal. + bool use_legacy_column_name_of_tuple = false; + /** Get the text that identifies this element. */ String getID(char delim) const override { return "Literal" + (delim + applyVisitor(FieldVisitorDump(), value)); } @@ -44,7 +48,6 @@ protected: void formatImplWithoutAlias(const FormatSettings & settings, FormatState &, FormatStateStacked) const override; void appendColumnNameImpl(WriteBuffer & ostr) const override; - void appendColumnNameImpl(WriteBuffer & ostr, const Settings & settings) const override; private: /// Legacy version of 'appendColumnNameImpl'. It differs only with tuple literals. diff --git a/src/Parsers/ASTWithAlias.cpp b/src/Parsers/ASTWithAlias.cpp index 0f5b86763e0..88f6568a719 100644 --- a/src/Parsers/ASTWithAlias.cpp +++ b/src/Parsers/ASTWithAlias.cpp @@ -48,14 +48,6 @@ void ASTWithAlias::appendColumnName(WriteBuffer & ostr) const appendColumnNameImpl(ostr); } -void ASTWithAlias::appendColumnName(WriteBuffer & ostr, const Settings & settings) const -{ - if (prefer_alias_to_column_name && !alias.empty()) - writeString(alias, ostr); - else - appendColumnNameImpl(ostr, settings); -} - void ASTWithAlias::appendColumnNameWithoutAlias(WriteBuffer & ostr) const { appendColumnNameImpl(ostr); diff --git a/src/Parsers/ASTWithAlias.h b/src/Parsers/ASTWithAlias.h index 249be17b74c..ea4419402b0 100644 --- a/src/Parsers/ASTWithAlias.h +++ b/src/Parsers/ASTWithAlias.h @@ -21,10 +21,8 @@ public: using IAST::IAST; void appendColumnName(WriteBuffer & ostr) const final; - void appendColumnName(WriteBuffer & ostr, const Settings & settings) const final; void appendColumnNameWithoutAlias(WriteBuffer & ostr) const final; String getAliasOrColumnName() const override { return alias.empty() ? getColumnName() : alias; } - String getAliasOrColumnName(const Settings & settings) const override { return alias.empty() ? 
getColumnName(settings) : alias; } String tryGetAlias() const override { return alias; } void setAlias(const String & to) override { alias = to; } @@ -35,7 +33,6 @@ public: protected: virtual void appendColumnNameImpl(WriteBuffer & ostr) const = 0; - virtual void appendColumnNameImpl(WriteBuffer & ostr, const Settings &) const { appendColumnNameImpl(ostr); } }; /// helper for setting aliases and chaining result to other functions diff --git a/src/Parsers/IAST.cpp b/src/Parsers/IAST.cpp index 0f38fcf98dd..3a21d704eb9 100644 --- a/src/Parsers/IAST.cpp +++ b/src/Parsers/IAST.cpp @@ -109,14 +109,6 @@ String IAST::getColumnName() const } -String IAST::getColumnName(const Settings & settings) const -{ - WriteBufferFromOwnString write_buffer; - appendColumnName(write_buffer, settings); - return write_buffer.str(); -} - - String IAST::getColumnNameWithoutAlias() const { WriteBufferFromOwnString write_buffer; diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index 94a7b1a52ab..2f9212da632 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -42,7 +42,6 @@ public: /** Get the canonical name of the column if the element is a column */ String getColumnName() const; - String getColumnName(const Settings & settings) const; /** Same as the above but ensure no alias names are used. This is for index analysis */ String getColumnNameWithoutAlias() const; @@ -52,8 +51,6 @@ public: throw Exception("Trying to get name of not a column: " + getID(), ErrorCodes::LOGICAL_ERROR); } - virtual void appendColumnName(WriteBuffer & ostr, const Settings &) const { appendColumnName(ostr); } - virtual void appendColumnNameWithoutAlias(WriteBuffer &) const { throw Exception("Trying to get name of not a column: " + getID(), ErrorCodes::LOGICAL_ERROR); @@ -61,7 +58,6 @@ public: /** Get the alias, if any, or the canonical name of the column, if it is not. */ virtual String getAliasOrColumnName() const { return getColumnName(); } - virtual String getAliasOrColumnName(const Settings & settings) const { return getColumnName(settings); } /** Get the alias, if any, or an empty string if it does not exist, or if the element does not support aliases. */ virtual String tryGetAlias() const { return String(); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index f8c12449c7e..4276160f514 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -992,17 +992,14 @@ void ReadFromMergeTree::initializePipeline(QueryPipeline & pipeline, const Build }); } - Block cur_header = result_projection ? result_projection->getResultColumns() - : pipe.getHeader(); + Block cur_header = pipe.getHeader(); - auto append_actions = [&result_projection, &cur_header](ActionsDAGPtr actions) + auto append_actions = [&result_projection](ActionsDAGPtr actions) { if (!result_projection) result_projection = std::move(actions); else result_projection = ActionsDAG::merge(std::move(*result_projection), std::move(*actions)); - - cur_header = result_projection->getResultColumns(); }; /// By the way, if a distributed query or query to a Merge table is made, then the `_sample_factor` column can have different values. @@ -1017,6 +1014,9 @@ void ReadFromMergeTree::initializePipeline(QueryPipeline & pipeline, const Build append_actions(std::move(adding_column)); } + if (result_projection) + cur_header = result_projection->updateHeader(cur_header); + /// Extra columns may be returned (for example, if sampling is used). 
/// Convert pipe to step header structure. if (!isCompatibleHeader(cur_header, getOutputStream().header)) diff --git a/src/Processors/Sources/SourceWithProgress.cpp b/src/Processors/Sources/SourceWithProgress.cpp index 66168e7d73a..647ad0f205f 100644 --- a/src/Processors/Sources/SourceWithProgress.cpp +++ b/src/Processors/Sources/SourceWithProgress.cpp @@ -49,7 +49,7 @@ void SourceWithProgress::setProcessListElement(QueryStatus * elem) void SourceWithProgress::work() { - if (!limits.speed_limits.checkTimeLimit(total_stopwatch.elapsed(), limits.timeout_overflow_mode)) + if (!limits.speed_limits.checkTimeLimit(total_stopwatch, limits.timeout_overflow_mode)) { cancel(); } diff --git a/src/Processors/Transforms/LimitsCheckingTransform.cpp b/src/Processors/Transforms/LimitsCheckingTransform.cpp index 9e021e8e59d..64b6b64ccd9 100644 --- a/src/Processors/Transforms/LimitsCheckingTransform.cpp +++ b/src/Processors/Transforms/LimitsCheckingTransform.cpp @@ -32,7 +32,7 @@ void LimitsCheckingTransform::transform(Chunk & chunk) info.started = true; } - if (!limits.speed_limits.checkTimeLimit(info.total_stopwatch.elapsed(), limits.timeout_overflow_mode)) + if (!limits.speed_limits.checkTimeLimit(info.total_stopwatch, limits.timeout_overflow_mode)) { stopReading(); return; diff --git a/src/Server/KeeperTCPHandler.cpp b/src/Server/KeeperTCPHandler.cpp index 2d5f41fe666..df40a78749b 100644 --- a/src/Server/KeeperTCPHandler.cpp +++ b/src/Server/KeeperTCPHandler.cpp @@ -195,8 +195,8 @@ KeeperTCPHandler::KeeperTCPHandler(IServer & server_, const Poco::Net::StreamSoc , log(&Poco::Logger::get("NuKeeperTCPHandler")) , global_context(Context::createCopy(server.context())) , keeper_dispatcher(global_context->getKeeperStorageDispatcher()) - , operation_timeout(0, global_context->getConfigRef().getUInt("test_keeper_server.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000) - , session_timeout(0, global_context->getConfigRef().getUInt("test_keeper_server.session_timeout_ms", Coordination::DEFAULT_SESSION_TIMEOUT_MS) * 1000) + , operation_timeout(0, global_context->getConfigRef().getUInt("keeper_server.operation_timeout_ms", Coordination::DEFAULT_OPERATION_TIMEOUT_MS) * 1000) + , session_timeout(0, global_context->getConfigRef().getUInt("keeper_server.session_timeout_ms", Coordination::DEFAULT_SESSION_TIMEOUT_MS) * 1000) , poll_wrapper(std::make_unique(socket_)) , responses(std::make_unique()) { diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 60ff3d094b7..4730bf9f47c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -3213,8 +3213,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, ContextPtr loc if (!partition_ast.value) { - if (!MergeTreePartInfo::validatePartitionID(partition_ast.id, format_version)) - throw Exception("Invalid partition format: " + partition_ast.id, ErrorCodes::INVALID_PARTITION_VALUE); + MergeTreePartInfo::validatePartitionID(partition_ast.id, format_version); return partition_ast.id; } @@ -3225,10 +3224,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, ContextPtr loc if (partition_lit && partition_lit->value.getType() == Field::Types::String) { String partition_id = partition_lit->value.get(); - if (partition_id.size() != 6 || !std::all_of(partition_id.begin(), partition_id.end(), isNumericASCII)) - throw Exception( - "Invalid partition format: " + partition_id + ". 
Partition should consist of 6 digits: YYYYMM", - ErrorCodes::INVALID_PARTITION_VALUE); + MergeTreePartInfo::validatePartitionID(partition_id, format_version); return partition_id; } } diff --git a/src/Storages/MergeTree/MergeTreePartInfo.cpp b/src/Storages/MergeTree/MergeTreePartInfo.cpp index ccb26a0999e..6a98e666c34 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.cpp +++ b/src/Storages/MergeTree/MergeTreePartInfo.cpp @@ -9,6 +9,7 @@ namespace DB namespace ErrorCodes { extern const int BAD_DATA_PART_NAME; + extern const int INVALID_PARTITION_VALUE; } @@ -21,38 +22,25 @@ MergeTreePartInfo MergeTreePartInfo::fromPartName(const String & part_name, Merg } -bool MergeTreePartInfo::validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version) +void MergeTreePartInfo::validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version) { if (partition_id.empty()) - return false; - - ReadBufferFromString in(partition_id); + throw Exception(ErrorCodes::INVALID_PARTITION_VALUE, "Partition id is empty"); if (format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - UInt32 min_yyyymmdd = 0; - UInt32 max_yyyymmdd = 0; - if (!tryReadIntText(min_yyyymmdd, in) - || !checkChar('_', in) - || !tryReadIntText(max_yyyymmdd, in) - || !checkChar('_', in)) - { - return false; - } + if (partition_id.size() != 6 || !std::all_of(partition_id.begin(), partition_id.end(), isNumericASCII)) + throw Exception(ErrorCodes::INVALID_PARTITION_VALUE, + "Invalid partition format: {}. Partition should consist of 6 digits: YYYYMM", + partition_id); } else { - while (!in.eof()) - { - char c; - readChar(c, in); - - if (c == '_') - break; - } + auto is_valid_char = [](char c) { return c == '-' || isAlphaNumericASCII(c); }; + if (!std::all_of(partition_id.begin(), partition_id.end(), is_valid_char)) + throw Exception(ErrorCodes::INVALID_PARTITION_VALUE, "Invalid partition format: {}", partition_id); } - return in.eof(); } bool MergeTreePartInfo::tryParsePartName(const String & part_name, MergeTreePartInfo * part_info, MergeTreeDataFormatVersion format_version) diff --git a/src/Storages/MergeTree/MergeTreePartInfo.h b/src/Storages/MergeTree/MergeTreePartInfo.h index 87f96ed5038..be856c1f157 100644 --- a/src/Storages/MergeTree/MergeTreePartInfo.h +++ b/src/Storages/MergeTree/MergeTreePartInfo.h @@ -88,7 +88,7 @@ struct MergeTreePartInfo } /// Simple sanity check for partition ID. Checking that it's not too long or too short, doesn't contain a lot of '_'. 
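The rewritten validatePartitionID above now throws instead of returning a bool and distinguishes the two partition-id formats. A standalone sketch of the same rules, with std::invalid_argument standing in for DB::Exception and a bool standing in for the format_version comparison:

    #include <algorithm>
    #include <cctype>
    #include <stdexcept>
    #include <string>

    void validatePartitionID(const std::string & partition_id, bool custom_partitioning)
    {
        if (partition_id.empty())
            throw std::invalid_argument("Partition id is empty");

        if (!custom_partitioning)
        {
            // Month-based format: exactly six ASCII digits, YYYYMM.
            bool ok = partition_id.size() == 6
                && std::all_of(partition_id.begin(), partition_id.end(),
                               [](unsigned char c) { return std::isdigit(c) != 0; });
            if (!ok)
                throw std::invalid_argument("Invalid partition format: " + partition_id
                    + ". Partition should consist of 6 digits: YYYYMM");
        }
        else
        {
            // Custom partitioning: only alphanumeric characters and '-'.
            auto is_valid_char = [](unsigned char c) { return c == '-' || std::isalnum(c) != 0; };
            if (!std::all_of(partition_id.begin(), partition_id.end(), is_valid_char))
                throw std::invalid_argument("Invalid partition format: " + partition_id);
        }
    }

Centralizing the check also removes the near-duplicate YYYYMM validation that MergeTreeData::getPartitionIDFromQuery carried, as seen in the hunk above.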
- static bool validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version); + static void validatePartitionID(const String & partition_id, MergeTreeDataFormatVersion format_version); static MergeTreePartInfo fromPartName(const String & part_name, MergeTreeDataFormatVersion format_version); // -V1071 diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index d018059c248..531091bb7f9 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -124,7 +124,7 @@ struct Settings; M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \ M(String, storage_policy, "default", "Name of storage disk policy", 0) \ M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \ - M(Bool, allow_remote_fs_zero_copy_replication, false, "Allow Zero-copy replication over remote fs", 0) \ + M(Bool, allow_remote_fs_zero_copy_replication, true, "Allow Zero-copy replication over remote fs", 0) \ M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm", 0) \ M(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \ M(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited. This setting is the default that can be overridden by the query-level setting with the same name.", 0) \ diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp index 13e05681fd9..4d8261da2ee 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeMergeStrategyPicker.cpp @@ -144,9 +144,14 @@ void ReplicatedMergeTreeMergeStrategyPicker::refreshState() if (current_replica_index_tmp < 0 || active_replicas_tmp.size() < 2) { - LOG_WARNING(storage.log, "Can't find current replica in the active replicas list, or too few active replicas to use execute_merges_on_single_replica_time_threshold!"); - /// we can reset the settings w/o lock (it's atomic) - execute_merges_on_single_replica_time_threshold = 0; + if (execute_merges_on_single_replica_time_threshold > 0) + { + LOG_WARNING(storage.log, "Can't find current replica in the active replicas list, or too few active replicas to use 'execute_merges_on_single_replica_time_threshold'"); + /// we can reset the settings w/o lock (it's atomic) + execute_merges_on_single_replica_time_threshold = 0; + } + /// default value of remote_fs_execute_merges_on_single_replica_time_threshold is not 0 + /// so we write no warning in log here remote_fs_execute_merges_on_single_replica_time_threshold = 0; return; } diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 9aaa6692560..bad2abf3cdc 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -284,86 +284,6 @@ void replaceConstantExpressions( visitor.visit(node); } -/// This is the implementation of optimize_distributed_group_by_sharding_key. 
-/// It returns up to which stage the query can be processed on a shard, which -/// is one of the following: -/// - QueryProcessingStage::Complete -/// - QueryProcessingStage::WithMergeableStateAfterAggregation -/// - QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit -/// - none (in this case regular WithMergeableState should be used) -std::optional getOptimizedQueryProcessingStage(const SelectQueryInfo & query_info, bool extremes, const Names & sharding_key_columns) -{ - const auto & select = query_info.query->as(); - - auto sharding_block_has = [&](const auto & exprs) -> bool - { - std::unordered_set expr_columns; - for (auto & expr : exprs) - { - auto id = expr->template as(); - if (!id) - continue; - expr_columns.emplace(id->name()); - } - - for (const auto & column : sharding_key_columns) - { - if (!expr_columns.contains(column)) - return false; - } - - return true; - }; - - // GROUP BY qualifiers - // - TODO: WITH TOTALS can be implemented - // - TODO: WITH ROLLUP can be implemented (I guess) - if (select.group_by_with_totals || select.group_by_with_rollup || select.group_by_with_cube) - return {}; - - // Window functions are not supported. - if (query_info.has_window) - return {}; - - // TODO: extremes support can be implemented - if (extremes) - return {}; - - // DISTINCT - if (select.distinct) - { - if (!sharding_block_has(select.select()->children)) - return {}; - } - - // GROUP BY - const ASTPtr group_by = select.groupBy(); - if (!group_by) - { - if (!select.distinct) - return {}; - } - else - { - if (!sharding_block_has(group_by->children)) - return {}; - } - - // ORDER BY - const ASTPtr order_by = select.orderBy(); - if (order_by) - return QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit; - - // LIMIT BY - // LIMIT - // OFFSET - if (select.limitBy() || select.limitLength() || select.limitOffset()) - return QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit; - - // Only simple SELECT FROM GROUP BY sharding_key can use Complete state. - return QueryProcessingStage::Complete; -} - size_t getClusterQueriedNodes(const Settings & settings, const ClusterPtr & cluster) { size_t num_local_shards = cluster->getLocalShardCount(); @@ -527,13 +447,12 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage( { /// NOTE: distributed_group_by_no_merge=1 does not respect distributed_push_down_limit /// (since in this case queries processed separately and the initiator is just a proxy in this case). + if (to_stage != QueryProcessingStage::Complete) + throw Exception("Queries with distributed_group_by_no_merge=1 should be processed to Complete stage", ErrorCodes::LOGICAL_ERROR); return QueryProcessingStage::Complete; } } - if (settings.distributed_push_down_limit) - return QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit; - /// Nested distributed query cannot return Complete stage, /// since the parent query need to aggregate the results after. if (to_stage == QueryProcessingStage::WithMergeableState) @@ -542,24 +461,107 @@ QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage( /// If there is only one node, the query can be fully processed by the /// shard, initiator will work as a proxy only. 
if (getClusterQueriedNodes(settings, cluster) == 1) - return QueryProcessingStage::Complete; - - if (settings.optimize_skip_unused_shards && - settings.optimize_distributed_group_by_sharding_key && - has_sharding_key && - (settings.allow_nondeterministic_optimize_skip_unused_shards || sharding_key_is_deterministic)) { - auto stage = getOptimizedQueryProcessingStage(query_info, settings.extremes, sharding_key_expr->getRequiredColumns()); - if (stage) - { - LOG_DEBUG(log, "Force processing stage to {}", QueryProcessingStage::toString(*stage)); - return *stage; - } + /// In case the query was processed to + /// WithMergeableStateAfterAggregation/WithMergeableStateAfterAggregationAndLimit + /// (which are greater than the Complete stage) + /// we cannot return Complete (will break aliases and similar), + /// relevant for Distributed over Distributed + return std::max(to_stage, QueryProcessingStage::Complete); + } + + auto optimized_stage = getOptimizedQueryProcessingStage(query_info, settings); + if (optimized_stage) + { + if (*optimized_stage == QueryProcessingStage::Complete) + return std::min(to_stage, *optimized_stage); + return *optimized_stage; } return QueryProcessingStage::WithMergeableState; } +std::optional StorageDistributed::getOptimizedQueryProcessingStage(const SelectQueryInfo & query_info, const Settings & settings) const +{ + bool optimize_sharding_key_aggregation = + settings.optimize_skip_unused_shards && + settings.optimize_distributed_group_by_sharding_key && + has_sharding_key && + (settings.allow_nondeterministic_optimize_skip_unused_shards || sharding_key_is_deterministic); + + QueryProcessingStage::Enum default_stage = QueryProcessingStage::WithMergeableStateAfterAggregation; + if (settings.distributed_push_down_limit) + default_stage = QueryProcessingStage::WithMergeableStateAfterAggregationAndLimit; + + const auto & select = query_info.query->as(); + + auto expr_contains_sharding_key = [&](const auto & exprs) -> bool + { + std::unordered_set expr_columns; + for (auto & expr : exprs) + { + auto id = expr->template as(); + if (!id) + continue; + expr_columns.emplace(id->name()); + } + + for (const auto & column : sharding_key_expr->getRequiredColumns()) + { + if (!expr_columns.contains(column)) + return false; + } + + return true; + }; + + // GROUP BY qualifiers + // - TODO: WITH TOTALS can be implemented + // - TODO: WITH ROLLUP can be implemented (I guess) + if (select.group_by_with_totals || select.group_by_with_rollup || select.group_by_with_cube) + return {}; + // Window functions are not supported.
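The expr_contains_sharding_key lambda above is the safety check for this optimization: every column of the sharding key must appear as a plain identifier in the clause being examined, otherwise different shards could hold rows of the same group and merging on the initiator would still be required. A rough standalone equivalent over plain strings (illustrative, not the actual AST-based code):

    #include <string>
    #include <unordered_set>
    #include <vector>

    // True only if every sharding key column is listed verbatim among the
    // identifiers of a GROUP BY / DISTINCT / LIMIT BY clause.
    bool exprContainsShardingKey(const std::vector<std::string> & clause_identifiers,
                                 const std::vector<std::string> & sharding_key_columns)
    {
        std::unordered_set<std::string> expr_columns(clause_identifiers.begin(),
                                                     clause_identifiers.end());
        for (const auto & column : sharding_key_columns)
            if (expr_columns.count(column) == 0)
                return false;   // a key column is not grouped on: fall back
        return true;
    }

Note that the real lambda skips non-identifier expressions when collecting names, so something like GROUP BY cityHash64(user_id) with sharding key user_id (hypothetical names) is conservatively rejected rather than analyzed.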
+ if (query_info.has_window) + return {}; + // TODO: extremes support can be implemented + if (settings.extremes) + return {}; + + // DISTINCT + if (select.distinct) + { + if (!optimize_sharding_key_aggregation || !expr_contains_sharding_key(select.select()->children)) + return {}; + } + + // GROUP BY + const ASTPtr group_by = select.groupBy(); + if (!query_info.syntax_analyzer_result->aggregates.empty() || group_by) + { + if (!optimize_sharding_key_aggregation || !group_by || !expr_contains_sharding_key(group_by->children)) + return {}; + } + + // LIMIT BY + if (const ASTPtr limit_by = select.limitBy()) + { + if (!optimize_sharding_key_aggregation || !expr_contains_sharding_key(limit_by->children)) + return {}; + } + + // ORDER BY + if (const ASTPtr order_by = select.orderBy()) + return default_stage; + + // LIMIT + // OFFSET + if (select.limitLength() || select.limitOffset()) + return default_stage; + + // Only simple SELECT FROM GROUP BY sharding_key can use Complete state. + return QueryProcessingStage::Complete; +} + Pipe StorageDistributed::read( const Names & column_names, const StorageMetadataPtr & metadata_snapshot, diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 4331817386e..f8b16dec7be 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -177,6 +177,24 @@ private: ClusterPtr skipUnusedShards(ClusterPtr cluster, const ASTPtr & query_ptr, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) const; + /// This method returns optimal query processing stage. + /// + /// Here is the list of stages (from the less optimal to more optimal): + /// - WithMergeableState + /// - WithMergeableStateAfterAggregation + /// - WithMergeableStateAfterAggregationAndLimit + /// - Complete + /// + /// Some simple queries w/o GROUP BY/DISTINCT can use more optimal stage. + /// + /// Also in case of optimize_distributed_group_by_sharding_key=1 the queries + /// with GROUP BY/DISTINCT sharding_key can also use more optimal stage. 
+ /// (see also optimize_skip_unused_shards/allow_nondeterministic_optimize_skip_unused_shards) + /// + /// @return QueryProcessingStage or empty std::optional + /// (in this case regular WithMergeableState should be used) + std::optional getOptimizedQueryProcessingStage(const SelectQueryInfo & query_info, const Settings & settings) const; + size_t getRandomShardIndex(const Cluster::ShardsInfo & shards); const DistributedSettings & getDistributedSettingsRef() const { return distributed_settings; } diff --git a/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in b/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in index 8fe574da643..6bb97355151 100644 --- a/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.generated.cpp.in @@ -50,6 +50,7 @@ const char * auto_config_build[] "USE_LDAP", "@USE_LDAP@", "TZDATA_VERSION", "@TZDATA_VERSION@", "USE_KRB5", "@USE_KRB5@", + "USE_BZIP2", "@USE_BZIP2@", nullptr, nullptr }; diff --git a/tests/clickhouse-test b/tests/clickhouse-test index d83b3f08c42..b734af0bdea 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -11,6 +11,7 @@ import copy import traceback from argparse import ArgumentParser +from typing import Tuple, Union, Optional, TextIO import shlex import subprocess from subprocess import Popen @@ -20,10 +21,12 @@ from subprocess import TimeoutExpired from datetime import datetime from time import time, sleep from errno import ESRCH + try: import termcolor except ImportError: termcolor = None + import random import string import multiprocessing @@ -51,7 +54,7 @@ MESSAGES_TO_RETRY = [ "DB::Exception: New table appeared in database being dropped or detached. Try again", "is already started to be removing by another replica right now", "Shutdown is called for table", # It happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized. - DISTRIBUTED_DDL_TIMEOUT_MSG # FIXME + DISTRIBUTED_DDL_TIMEOUT_MSG # FIXME ] MAX_RETRIES = 3 @@ -81,7 +84,7 @@ def stop_tests(): def json_minify(string): """ Removes all js-style comments from json string. Allows to have comments in skip_list.json. - The code taken from https://github.com/getify/JSON.minify/tree/python under the MIT license. + The code was taken from https://github.com/getify/JSON.minify/tree/python under the MIT license.
""" tokenizer = re.compile(r'"|(/\*)|(\*/)|(//)|\n|\r') @@ -148,13 +151,17 @@ def remove_control_characters(s): s = re.sub(r"[\x00-\x08\x0b\x0e-\x1f\x7f]", "", s) return s + def get_db_engine(args, database_name): if args.replicated_database: - return " ON CLUSTER test_cluster_database_replicated ENGINE=Replicated('/test/clickhouse/db/{}', '{{shard}}', '{{replica}}')".format(database_name) + return f" ON CLUSTER test_cluster_database_replicated \ + ENGINE=Replicated('/test/clickhouse/db/{database_name}', \ + '{{shard}}', '{{replica}}')" if args.db_engine: return " ENGINE=" + args.db_engine return "" # Will use default engine + def configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file): testcase_args = copy.deepcopy(args) @@ -166,7 +173,6 @@ def configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file): database = testcase_args.database os.environ.setdefault("CLICKHOUSE_DATABASE", database) os.environ.setdefault("CLICKHOUSE_TMP", suite_tmp_dir) - else: # If --database is not specified, we will create temporary database with unique name # And we will recreate and drop it for each test @@ -176,8 +182,14 @@ def configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file): database = 'test_{suffix}'.format(suffix=random_str()) with open(stderr_file, 'w') as stderr: - client_cmd = testcase_args.testcase_client + " " + get_additional_client_options(args) - clickhouse_proc_create = Popen(shlex.split(client_cmd), stdin=PIPE, stdout=PIPE, stderr=stderr, universal_newlines=True) + client_cmd = testcase_args.testcase_client + " " \ + + get_additional_client_options(args) + + clickhouse_proc_create = open_client_process( + universal_newlines=True, + client_args=client_cmd, + stderr_file=stderr) + try: clickhouse_proc_create.communicate(("CREATE DATABASE " + database + get_db_engine(testcase_args, database)), timeout=testcase_args.timeout) except TimeoutExpired: @@ -237,8 +249,10 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std if need_drop_database: with open(stderr_file, 'a') as stderr: - clickhouse_proc_create = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=stderr, universal_newlines=True) + clickhouse_proc_create = open_client_process(client, universal_newlines=True, stderr_file=stderr) + seconds_left = max(args.timeout - (datetime.now() - start_time).total_seconds(), 20) + try: drop_database_query = "DROP DATABASE " + database if args.replicated_database: @@ -254,7 +268,7 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std raise total_time = (datetime.now() - start_time).total_seconds() - return clickhouse_proc_create, "", "Timeout dropping database {} after test".format(database), total_time + return clickhouse_proc_create, "", f"Timeout dropping database {database} after test", total_time shutil.rmtree(args.test_tmp_dir) @@ -286,12 +300,16 @@ def need_retry(stdout, stderr): def get_processlist(args): try: query = b"SHOW PROCESSLIST FORMAT Vertical" + if args.replicated_database: query = b"SELECT materialize((hostName(), tcpPort())) as host, * " \ b"FROM clusterAllReplicas('test_cluster_database_replicated', system.processes) " \ b"WHERE query NOT LIKE '%system.processes%' FORMAT Vertical" - clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + + clickhouse_proc = open_client_process(args.client) + (stdout, _) = clickhouse_proc.communicate((query), timeout=20) + return False, stdout.decode('utf-8') except Exception as ex: print("Exception", ex) @@ 
-301,47 +319,90 @@ def get_processlist(args): # collect server stacktraces using gdb def get_stacktraces_from_gdb(server_pid): try: - cmd = "gdb -batch -ex 'thread apply all backtrace' -p {}".format(server_pid) + cmd = f"gdb -batch -ex 'thread apply all backtrace' -p {server_pid}" return subprocess.check_output(cmd, shell=True).decode('utf-8') - except Exception as ex: - print("Error occured while receiving stack traces from gdb: {}".format(str(ex))) + except Exception as e: + print(f"Error occurred while receiving stack traces from gdb: {e}") return None # collect server stacktraces from system.stack_trace table # it does not work in Sandbox def get_stacktraces_from_clickhouse(client, replicated_database=False): - try: - if replicated_database: - return subprocess.check_output("{} --allow_introspection_functions=1 --skip_unavailable_shards=1 --query " - "\"SELECT materialize((hostName(), tcpPort())) as host, thread_id, " - "arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), arrayMap(x -> addressToLine(x), trace), " - "arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace " - "FROM clusterAllReplicas('test_cluster_database_replicated', 'system.stack_trace') " - "ORDER BY host, thread_id format Vertical\"".format(client), shell=True, stderr=subprocess.STDOUT).decode('utf-8') + replicated_msg = \ + "{} --allow_introspection_functions=1 --skip_unavailable_shards=1 --query \ + \"SELECT materialize((hostName(), tcpPort())) as host, thread_id, \ + arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), \ + arrayMap(x -> addressToLine(x), trace), \ + arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace \ + FROM clusterAllReplicas('test_cluster_database_replicated', 'system.stack_trace') \ + ORDER BY host, thread_id FORMAT Vertical\"".format(client) - return subprocess.check_output("{} --allow_introspection_functions=1 --query " - "\"SELECT arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), arrayMap(x -> addressToLine(x), trace), " - "arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace " - "FROM system.stack_trace format Vertical\"".format(client), shell=True, stderr=subprocess.STDOUT).decode('utf-8') - except Exception as ex: - print("Error occured while receiving stack traces from client: {}".format(str(ex))) + msg = \ + "{} --allow_introspection_functions=1 --query \ + \"SELECT arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), \ + arrayMap(x -> addressToLine(x), trace), \ + arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace \ + FROM system.stack_trace FORMAT Vertical\"".format(client) + + try: + return subprocess.check_output( + replicated_msg if replicated_database else msg, + shell=True, stderr=subprocess.STDOUT).decode('utf-8') + except Exception as e: + print(f"Error occurred while receiving stack traces from client: {e}") return None -def get_server_pid(server_tcp_port): + +def print_stacktraces() -> None: + server_pid = get_server_pid() + + bt = None + + if server_pid and not args.replicated_database: + print("") + print(f"Located ClickHouse server process {server_pid} listening at TCP port {args.tcp_port}") + print("Collecting stacktraces from all running threads with gdb:") + + bt = get_stacktraces_from_gdb(server_pid) + + if len(bt) < 1000: + print("Got suspiciously small stacktraces: ", bt) + bt = None + + if bt is None: + print("\nCollecting stacktraces from system.stacktraces table:") + + bt = get_stacktraces_from_clickhouse( + args.client, args.replicated_database) + + if bt is not None: + print(bt) + return 
+ + print(colored( + f"\nUnable to locate ClickHouse server process listening at TCP port {args.tcp_port}. " + "It must have crashed or exited prematurely!", + args, "red", attrs=["bold"])) + + +def get_server_pid(): # lsof does not work in stress tests for some reason - cmd_lsof = "lsof -i tcp:{port} -s tcp:LISTEN -Fp | awk '/^p[0-9]+$/{{print substr($0, 2)}}'".format(port=server_tcp_port) + cmd_lsof = f"lsof -i tcp:{args.tcp_port} -s tcp:LISTEN -Fp | awk '/^p[0-9]+$/{{print substr($0, 2)}}'" cmd_pidof = "pidof -s clickhouse-server" + commands = [cmd_lsof, cmd_pidof] output = None + for cmd in commands: try: output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True) if output: return int(output) except Exception as e: - print("Cannot get server pid with {}, got {}: {}".format(cmd, output, e)) - return None # most likely server dead + print(f"Cannot get server pid with {cmd}, got {output}: {e}") + + return None # most likely server is dead def colored(text, args, color=None, on_color=None, attrs=None): @@ -357,6 +418,75 @@ server_died = multiprocessing.Event() stop_tests_triggered_lock = multiprocessing.Lock() stop_tests_triggered = multiprocessing.Event() queue = multiprocessing.Queue(maxsize=1) + + +def print_test_time(test_time) -> str: + if args.print_time: + return " {0:.2f} sec.".format(test_time) + else: + return '' + + +def should_skip_test_by_name(name: str, test_ext: str) -> Tuple[bool, str]: + if args.skip and any(s in name for s in args.skip): + return True, "skip" + + if not args.zookeeper and ('zookeeper' in name or 'replica' in name): + return True, "no zookeeper" + + if not args.shard and \ + ('shard' in name or 'distributed' in name or 'global' in name): + return True, "no shard" + + # Tests for races and deadlocks usually are run in a loop for a significant + # amount of time + if args.no_long and \ + ('long' in name or 'deadlock' in name or 'race' in name): + return True, "no long" + + if not USE_JINJA and test_ext.endswith("j2"): + return True, "no jinja" + + return False, "" + + +def should_skip_disabled_test(name: str, suite_dir: str) -> Tuple[bool, str]: + disabled_file = os.path.join(suite_dir, name) + '.disabled' + + if os.path.exists(disabled_file) and not args.disabled: + return True, open(disabled_file, 'r').read() + + return False, "" + + +# should skip test, should increment skipped_total, skip reason +def should_skip_test(name: str, test_ext: str, suite_dir: str) -> Tuple[bool, bool, str]: + should_skip, skip_reason = should_skip_test_by_name(name, test_ext) + + if should_skip: + return True, True, skip_reason + + should_skip, skip_reason = should_skip_disabled_test(name, suite_dir) + + return should_skip, False, skip_reason + + +def send_test_name_failed(suite: str, case: str) -> bool: + clickhouse_proc = open_client_process(args.client, universal_newlines=True) + + failed_to_check = False + + pid = os.getpid() + query = f"SELECT 'Running test {suite}/{case} from pid={pid}';" + + try: + clickhouse_proc.communicate((query), timeout=20) + except: + failed_to_check = True + + return failed_to_check or clickhouse_proc.returncode != 0 + + restarted_tests = [] # (test, stderr) # def run_tests_array(all_tests, suite, suite_dir, suite_tmp_dir, run_total): @@ -385,15 +515,10 @@ def run_tests_array(all_tests_with_params): client_options = get_additional_client_options(args) - def print_test_time(test_time): - if args.print_time: - return " {0:.2f} sec.".format(test_time) - else: - return '' - if num_tests > 0: about = 'about ' 
if is_concurrent else '' - print(f"\nRunning {about}{num_tests} {suite} tests ({multiprocessing.current_process().name}).\n") + proc_name = multiprocessing.current_process().name + print(f"\nRunning {about}{num_tests} {suite} tests ({proc_name}).\n") while True: if is_concurrent: @@ -430,162 +555,135 @@ def run_tests_array(all_tests_with_params): else: status = "{0:72}".format(removesuffix(name, ".gen", ".sql") + ": ") - if args.skip and any(s in name for s in args.skip): - status += MSG_SKIPPED + " - skip\n" - skipped_total += 1 - elif not args.zookeeper and ('zookeeper' in name - or 'replica' in name): - status += MSG_SKIPPED + " - no zookeeper\n" - skipped_total += 1 - elif not args.shard and ('shard' in name - or 'distributed' in name - or 'global' in name): - status += MSG_SKIPPED + " - no shard\n" - skipped_total += 1 - elif not args.no_long and ('long' in name - # Tests for races and deadlocks usually are runned in loop - # for significant amount of time - or 'deadlock' in name - or 'race' in name): - status += MSG_SKIPPED + " - no long\n" - skipped_total += 1 - elif not USE_JINJA and ext.endswith("j2"): - status += MSG_SKIPPED + " - no jinja\n" - skipped_total += 1 + skip_test, increment_skip_count, skip_reason = \ + should_skip_test(name, ext, suite_dir) + + if skip_test: + status += MSG_SKIPPED + f" - {skip_reason}\n" + + if increment_skip_count: + skipped_total += 1 else: - disabled_file = os.path.join(suite_dir, name) + '.disabled' + if args.testname and send_test_name_failed(suite, case): + failures += 1 + print("Server does not respond to health check") + server_died.set() + stop_tests() + break - if os.path.exists(disabled_file) and not args.disabled: - message = open(disabled_file, 'r').read() - status += MSG_SKIPPED + " - " + message + "\n" + file_suffix = ('.' 
+ str(os.getpid())) if is_concurrent and args.test_runs > 1 else '' + reference_file = get_reference_file(suite_dir, name) + stdout_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stdout' + stderr_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stderr' + + testcase_args = configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file) + proc, stdout, stderr, total_time = run_single_test(testcase_args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file) + + if proc.returncode is None: + try: + proc.kill() + except OSError as e: + if e.errno != ESRCH: + raise + + failures += 1 + status += MSG_FAIL + status += print_test_time(total_time) + status += " - Timeout!\n" + if stderr: + status += stderr + status += 'Database: ' + testcase_args.testcase_database else: - - if args.testname: - clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) - failed_to_check = False - try: - clickhouse_proc.communicate(("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite)), timeout=20) - except: - failed_to_check = True - - if failed_to_check or clickhouse_proc.returncode != 0: - failures += 1 - print("Server does not respond to health check") - server_died.set() - stop_tests() + counter = 1 + while need_retry(stdout, stderr): + restarted_tests.append((case_file, stderr)) + testcase_args = configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file) + proc, stdout, stderr, total_time = run_single_test(testcase_args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file) + sleep(2**counter) + counter += 1 + if MAX_RETRIES < counter: + if args.replicated_database: + if DISTRIBUTED_DDL_TIMEOUT_MSG in stderr: + server_died.set() break - file_suffix = ('.' + str(os.getpid())) if is_concurrent and args.test_runs > 1 else '' - reference_file = get_reference_file(suite_dir, name) - stdout_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stdout' - stderr_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stderr' - - testcase_args = configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file) - proc, stdout, stderr, total_time = run_single_test(testcase_args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file) - - if proc.returncode is None: - try: - proc.kill() - except OSError as e: - if e.errno != ESRCH: - raise - + if proc.returncode != 0: failures += 1 + failures_chain += 1 status += MSG_FAIL status += print_test_time(total_time) - status += " - Timeout!\n" + status += ' - return code {}\n'.format(proc.returncode) + if stderr: status += stderr + + # Stop on fatal errors like segmentation fault. They are sent to client via logs. 
+                        if ' <Fatal> ' in stderr:
+                            server_died.set()
+
+                        if testcase_args.stop \
+                            and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) \
+                            and 'Received exception from server' not in stderr:
+                            server_died.set()
+
+                        if os.path.isfile(stdout_file):
+                            status += ", result:\n\n"
+                            status += '\n'.join(
+                                open(stdout_file).read().split('\n')[:100])
+                            status += '\n'
+
+                        status += 'Database: ' + testcase_args.testcase_database
+
+                    elif stderr:
+                        failures += 1
+                        failures_chain += 1
+                        status += MSG_FAIL
+                        status += print_test_time(total_time)
+                        status += " - having stderr:\n{}\n".format(
+                            '\n'.join(stderr.split('\n')[:100]))
+                        status += 'Database: ' + testcase_args.testcase_database
+                    elif 'Exception' in stdout:
+                        failures += 1
+                        failures_chain += 1
+                        status += MSG_FAIL
+                        status += print_test_time(total_time)
+                        status += " - having exception:\n{}\n".format(
+                            '\n'.join(stdout.split('\n')[:100]))
+                        status += 'Database: ' + testcase_args.testcase_database
+                    elif reference_file is None:
+                        status += MSG_UNKNOWN
+                        status += print_test_time(total_time)
+                        status += " - no reference file\n"
                         status += 'Database: ' + testcase_args.testcase_database
                     else:
-                        counter = 1
-                        while need_retry(stdout, stderr):
-                            restarted_tests.append((case_file, stderr))
-                            testcase_args = configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file)
-                            proc, stdout, stderr, total_time = run_single_test(testcase_args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file)
-                            sleep(2**counter)
-                            counter += 1
-                            if MAX_RETRIES < counter:
-                                if args.replicated_database:
-                                    if DISTRIBUTED_DDL_TIMEOUT_MSG in stderr:
-                                        server_died.set()
-                                break
+                        result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout=PIPE)

-                        if proc.returncode != 0:
+                        if result_is_different:
+                            diff = Popen(['diff', '-U', str(testcase_args.unified), reference_file, stdout_file], stdout=PIPE, universal_newlines=True).communicate()[0]
                             failures += 1
-                            failures_chain += 1
                             status += MSG_FAIL
                             status += print_test_time(total_time)
-                            status += ' - return code {}\n'.format(proc.returncode)
-
-                            if stderr:
-                                status += stderr
-
-                            # Stop on fatal errors like segmentation fault. They are sent to client via logs.
-                            if ' <Fatal> ' in stderr:
-                                server_died.set()
-
-                            if testcase_args.stop and ('Connection refused' in stderr or 'Attempt to read after eof' in stderr) and not 'Received exception from server' in stderr:
-                                server_died.set()
-
-                            if os.path.isfile(stdout_file):
-                                status += ", result:\n\n"
-                                status += '\n'.join(
-                                    open(stdout_file).read().split('\n')[:100])
-                                status += '\n'
-
-                            status += 'Database: ' + testcase_args.testcase_database
-
-                        elif stderr:
-                            failures += 1
-                            failures_chain += 1
-                            status += MSG_FAIL
-                            status += print_test_time(total_time)
-                            status += " - having stderror:\n{}\n".format(
-                                '\n'.join(stderr.split('\n')[:100]))
-                            status += 'Database: ' + testcase_args.testcase_database
-                        elif 'Exception' in stdout:
-                            failures += 1
-                            failures_chain += 1
-                            status += MSG_FAIL
-                            status += print_test_time(total_time)
-                            status += " - having exception:\n{}\n".format(
-                                '\n'.join(stdout.split('\n')[:100]))
-                            status += 'Database: ' + testcase_args.testcase_database
-                        elif reference_file is None:
-                            status += MSG_UNKNOWN
-                            status += print_test_time(total_time)
-                            status += " - no reference file\n"
+                            status += " - result differs from reference:\n{}\n".format(diff)
                             status += 'Database: ' + testcase_args.testcase_database
                         else:
-                            result_is_different = subprocess.call(['diff', '-q', reference_file, stdout_file], stdout=PIPE)
-
-                            if result_is_different:
-                                diff = Popen(['diff', '-U', str(testcase_args.unified), reference_file, stdout_file], stdout=PIPE, universal_newlines=True).communicate()[0]
+                            if testcase_args.test_runs > 1 and total_time > 60 and 'long' not in name:
+                                # We're in Flaky Check mode, check the run time as well while we're at it.
                                 failures += 1
+                                failures_chain += 1
                                 status += MSG_FAIL
                                 status += print_test_time(total_time)
-                                status += " - result differs with reference:\n{}\n".format(diff)
+                                status += " - Test runs too long (> 60s). 
Make it faster.\n" - status += 'Database: ' + testcase_args.testcase_database - else: - passed_total += 1 - failures_chain = 0 - status += MSG_OK - status += print_test_time(total_time) - status += "\n" - if os.path.exists(stdout_file): - os.remove(stdout_file) - if os.path.exists(stderr_file): - os.remove(stderr_file) + passed_total += 1 + failures_chain = 0 + status += MSG_OK + status += print_test_time(total_time) + status += "\n" + if os.path.exists(stdout_file): + os.remove(stdout_file) + if os.path.exists(stderr_file): + os.remove(stderr_file) if status and not status.endswith('\n'): status += '\n' @@ -599,7 +697,12 @@ def run_tests_array(all_tests_with_params): except: exc_type, exc_value, tb = sys.exc_info() failures += 1 - print("{0} - Test internal error: {1}\n{2}\n{3}".format(MSG_FAIL, exc_type.__name__, exc_value, "\n".join(traceback.format_tb(tb, 10)))) + + exc_name = exc_type.__name__ + traceback_str = "\n".join(traceback.format_tb(tb, 10)) + + print(f"{MSG_FAIL} - Test internal error: {exc_name}") + print(f"{exc_value}\n{traceback_str}") if failures_chain >= 20: stop_tests() @@ -627,9 +730,11 @@ server_logs_level = "warning" def check_server_started(client, retry_count): print("Connecting to ClickHouse server...", end='') + sys.stdout.flush() + while retry_count > 0: - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(client) (stdout, stderr) = clickhouse_proc.communicate(b"SELECT 1") if clickhouse_proc.returncode == 0 and stdout.startswith(b"1"): @@ -639,27 +744,30 @@ def check_server_started(client, retry_count): if clickhouse_proc.returncode == 210: # Connection refused, retry - print('.', end = '') + print('.', end='') sys.stdout.flush() retry_count -= 1 sleep(0.5) continue - # Other kind of error, fail. - print('') - print("Client invocation failed with code ", clickhouse_proc.returncode, ": ") + # FIXME Some old comment, maybe now CH supports Python3 ? # We can't print this, because for some reason this is python 2, # and args appeared in 3.3. To hell with it. # print(''.join(clickhouse_proc.args)) - print("stdout: ") - print(stdout) - print("stderr: ") - print(stderr) + + # Other kind of error, fail. 
+ + code: int = clickhouse_proc.returncode + + print(f"\nClient invocation failed with code {code}:\n\ + stdout: {stdout}\n\ + stderr: {stderr}") + sys.stdout.flush() + return False - print('') - print('All connection tries failed') + print('\nAll connection tries failed') sys.stdout.flush() return False @@ -679,7 +787,7 @@ class BuildFlags(): def collect_build_flags(client): - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(client) (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'") result = [] @@ -695,7 +803,7 @@ def collect_build_flags(client): else: raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(client) (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'") if clickhouse_proc.returncode == 0: @@ -706,7 +814,7 @@ def collect_build_flags(client): else: raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(client) (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.build_options WHERE name = 'UNBUNDLED'") if clickhouse_proc.returncode == 0: @@ -715,7 +823,7 @@ def collect_build_flags(client): else: raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(client) (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.settings WHERE name = 'default_database_engine'") if clickhouse_proc.returncode == 0: @@ -724,7 +832,7 @@ def collect_build_flags(client): else: raise Exception("Cannot get information about build from server errorcode {}, stderr {}".format(clickhouse_proc.returncode, stderr)) - clickhouse_proc = Popen(shlex.split(client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(client) (stdout, stderr) = clickhouse_proc.communicate(b"SELECT value FROM system.merge_tree_settings WHERE name = 'min_bytes_for_wide_part'") if clickhouse_proc.returncode == 0: @@ -736,6 +844,56 @@ def collect_build_flags(client): return result +def suite_key_func(item: str) -> Union[int, Tuple[int, str]]: + if args.order == 'random': + return random.random() + + if -1 == item.find('_'): + return 99998, '' + + prefix, suffix = item.split('_', 1) + + try: + return int(prefix), suffix + except ValueError: + return 99997, '' + + +def tests_in_suite_key_func(item: str) -> int: + if args.order == 'random': + return random.random() + + reverse = 1 if args.order == 'asc' else -1 + + if -1 == item.find('_'): + return 99998 + + prefix, _ = item.split('_', 1) + + try: + return reverse * int(prefix) + except ValueError: + return 99997 + + +def extract_key(key: str) -> str: + return subprocess.getstatusoutput( + args.extract_from_config + + " --try --config " + + args.configserver + key)[1] + + +def open_client_process( + client_args: str, + universal_newlines: bool = False, + stderr_file: Optional[TextIO] = None): + return Popen( + 
shlex.split(client_args), stdin=PIPE, stdout=PIPE, + stderr=stderr_file if stderr_file is not None else PIPE, + universal_newlines=True if universal_newlines else None) + + + def do_run_tests(jobs, suite, suite_dir, suite_tmp_dir, all_tests, parallel_tests, sequential_tests, parallel): if jobs > 1 and len(parallel_tests) > 0: print("Found", len(parallel_tests), "parallel tests and", len(sequential_tests), "sequential tests") @@ -790,7 +948,7 @@ def removesuffix(text, *suffixes): Added in python 3.9 https://www.python.org/dev/peps/pep-0616/ - This version can work with severtal possible suffixes + This version can work with several possible suffixes """ for suffix in suffixes: if suffix and text.endswith(suffix): @@ -808,7 +966,7 @@ def render_test_template(j2env, suite_dir, test_name): test_base_name = removesuffix(test_name, ".sql.j2", ".sql") - reference_file_name = test_base_name + ".reference.j2" + reference_file_name = test_base_name + ".reference.j2" reference_file_path = os.path.join(suite_dir, reference_file_name) if os.path.isfile(reference_file_path): tpl = j2env.get_template(reference_file_name) @@ -875,7 +1033,7 @@ def main(args): global server_logs_level def is_data_present(): - clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE) + clickhouse_proc = open_client_process(args.client) (stdout, stderr) = clickhouse_proc.communicate(b"EXISTS TABLE test.hits") if clickhouse_proc.returncode != 0: raise CalledProcessError(clickhouse_proc.returncode, args.client, stderr) @@ -885,16 +1043,17 @@ def main(args): if not check_server_started(args.client, args.server_check_retries): raise Exception( "Server is not responding. Cannot execute 'SELECT 1' query. \ - Note: if you are using split build, you may have to specify -c option.") + If you are using split build, you have to specify -c option.") build_flags = collect_build_flags(args.client) + if args.replicated_database: build_flags.append(BuildFlags.DATABASE_REPLICATED) if args.use_skip_list: tests_to_skip_from_list = collect_tests_to_skip(args.skip_list_path, build_flags) else: - tests_to_skip_from_list = set([]) + tests_to_skip_from_list = set() if args.skip: args.skip = set(args.skip) | tests_to_skip_from_list @@ -909,8 +1068,9 @@ def main(args): # Keep same default values as in queries/shell_config.sh os.environ.setdefault("CLICKHOUSE_BINARY", args.binary) - #os.environ.setdefault("CLICKHOUSE_CLIENT", args.client) + # os.environ.setdefault("CLICKHOUSE_CLIENT", args.client) os.environ.setdefault("CLICKHOUSE_CONFIG", args.configserver) + if args.configclient: os.environ.setdefault("CLICKHOUSE_CONFIG_CLIENT", args.configclient) @@ -923,58 +1083,41 @@ def main(args): stop_time = time() + args.global_time_limit if args.zookeeper is None: - _, out = subprocess.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key zookeeper | grep . | wc -l') try: - if int(out) > 0: - args.zookeeper = True - else: - args.zookeeper = False + args.zookeeper = int(extract_key(" --key zookeeper | grep . 
| wc -l")) > 0 except ValueError: args.zookeeper = False if args.shard is None: - _, out = subprocess.getstatusoutput(args.extract_from_config + " --try --config " + args.configserver + ' --key listen_host | grep -E "127.0.0.2|::"') - if out: - args.shard = True - else: - args.shard = False + args.shard = bool(extract_key(' --key listen_host | grep -E "127.0.0.2|::"')) def create_common_database(args, db_name): create_database_retries = 0 while create_database_retries < MAX_RETRIES: client_cmd = args.client + " " + get_additional_client_options(args) - clickhouse_proc_create = Popen(shlex.split(client_cmd), stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) + + clickhouse_proc_create = open_client_process(client_cmd, universal_newlines=True) + (stdout, stderr) = clickhouse_proc_create.communicate(("CREATE DATABASE IF NOT EXISTS " + db_name + get_db_engine(args, db_name))) + if not need_retry(stdout, stderr): break create_database_retries += 1 if args.database and args.database != "test": create_common_database(args, args.database) + create_common_database(args, "test") - def sute_key_func(item): - if args.order == 'random': - return random.random() - - if -1 == item.find('_'): - return 99998, '' - - prefix, suffix = item.split('_', 1) - - try: - return int(prefix), suffix - except ValueError: - return 99997, '' - total_tests_run = 0 - for suite in sorted(os.listdir(base_dir), key=sute_key_func): + + for suite in sorted(os.listdir(base_dir), key=suite_key_func): if server_died.is_set(): break suite_dir = os.path.join(base_dir, suite) suite_re_obj = re.search('^[0-9]+_(.*)$', suite) - if not suite_re_obj: #skip .gitignore and so on + if not suite_re_obj: # skip .gitignore and so on continue suite_tmp_dir = os.path.join(tmp_dir, suite) @@ -982,8 +1125,8 @@ def main(args): os.makedirs(suite_tmp_dir) suite = suite_re_obj.group(1) - if os.path.isdir(suite_dir): + if os.path.isdir(suite_dir): if 'stateful' in suite and not args.no_stateful and not is_data_present(): print("Won't run stateful tests because test data wasn't loaded.") continue @@ -994,29 +1137,14 @@ def main(args): print("Won't run stateful tests because they were manually disabled.") continue - # Reverse sort order: we want run newest test first. 
-            # And not reverse subtests
-            def key_func(item):
-                if args.order == 'random':
-                    return random.random()
-
-                reverse = 1 if args.order == 'asc' else -1
-
-                if -1 == item.find('_'):
-                    return 99998
-
-                prefix, _ = item.split('_', 1)
-
-                try:
-                    return reverse * int(prefix)
-                except ValueError:
-                    return 99997
-
-            all_tests = get_tests_list(suite_dir, args.test, args.test_runs, key_func)
+            all_tests = get_tests_list(
+                suite_dir, args.test, args.test_runs, tests_in_suite_key_func)

             jobs = args.jobs
+
             parallel_tests = []
             sequential_tests = []
+
             for test in all_tests:
                 if any(s in test for s in args.sequential):
                     sequential_tests.append(test)
@@ -1042,38 +1170,17 @@
         else:
             print(colored("Seems like server hung and cannot respond to queries", args, "red", attrs=["bold"]))

-            clickhouse_tcp_port = os.getenv("CLICKHOUSE_PORT_TCP", '9000')
-            server_pid = get_server_pid(clickhouse_tcp_port)
-            bt = None
-            if server_pid and not args.replicated_database:
-                print("\nLocated ClickHouse server process {} listening at TCP port {}".format(server_pid, clickhouse_tcp_port))
-                print("\nCollecting stacktraces from all running threads with gdb:")
-                bt = get_stacktraces_from_gdb(server_pid)
-                if len(bt) < 1000:
-                    print("Got suspiciously small stacktraces: ", bt)
-                    bt = None
-            if bt is None:
-                print("\nCollecting stacktraces from system.stacktraces table:")
-                bt = get_stacktraces_from_clickhouse(args.client, args.replicated_database)
-            if bt is None:
-                print(
-                    colored(
-                        "\nUnable to locate ClickHouse server process listening at TCP port {}. "
-                        "It must have crashed or exited prematurely!".format(clickhouse_tcp_port),
-                        args, "red", attrs=["bold"]))
-            else:
-                print(bt)
+            print_stacktraces()

             exit_code.value = 1
     else:
         print(colored("\nNo queries hung.", args, "green", attrs=["bold"]))

     if len(restarted_tests) > 0:
         print("\nSome tests were restarted:\n")
+
         for (test_case, stderr) in restarted_tests:
-            print(test_case)
-            print(stderr)
-            print("\n")
+            print(test_case + "\n" + stderr + "\n")

     if total_tests_run == 0:
         print("No tests were run.")
@@ -1116,18 +1223,23 @@ def get_additional_client_options_url(args):

 def collect_tests_to_skip(skip_list_path, build_flags):
     result = set([])
+
     if not os.path.exists(skip_list_path):
         return result

     with open(skip_list_path, 'r') as skip_list_file:
         content = skip_list_file.read()
+
         # allows to have comments in skip_list.json
         skip_dict = json.loads(json_minify(content))
+
         for build_flag in build_flags:
             result |= set(skip_dict[build_flag])

-    if len(result) > 0:
-        print("Found file with skip-list {}, {} test will be skipped".format(skip_list_path, len(result)))
+    count = len(result)
+
+    if count > 0:
+        print(f"Found file with skip-list {skip_list_path}, {count} tests will be skipped")

     return result

@@ -1154,7 +1266,7 @@ if __name__ == '__main__':
     signal.signal(signal.SIGINT, signal_handler)
     signal.signal(signal.SIGHUP, signal_handler)

-    parser=ArgumentParser(description='ClickHouse functional tests')
+    parser = ArgumentParser(description='ClickHouse functional tests')
     parser.add_argument('-q', '--queries', help='Path to queries dir')
     parser.add_argument('--tmp', help='Path to tmp dir')
@@ -1166,7 +1278,7 @@
     parser.add_argument('--extract_from_config', help='extract-from-config program')
     parser.add_argument('--configclient', help='Client config (if you use not default ports)')
-    parser.add_argument('--configserver', default= '/etc/clickhouse-server/config.xml', help='Preprocessed server config')
+    parser.add_argument('--configserver', 
default='/etc/clickhouse-server/config.xml', help='Preprocessed server config') parser.add_argument('-o', '--output', help='Output xUnit compliant test report directory') parser.add_argument('-t', '--timeout', type=int, default=600, help='Timeout for each test case in seconds') parser.add_argument('--global_time_limit', type=int, help='Stop if executing more than specified time (after current test finished)') @@ -1193,20 +1305,22 @@ if __name__ == '__main__': parser.add_argument('--no-stateful', action='store_true', help='Disable all stateful tests') parser.add_argument('--skip', nargs='+', help="Skip these tests") parser.add_argument('--sequential', nargs='+', help="Run these tests sequentially even if --parallel specified") - parser.add_argument('--no-long', action='store_false', dest='no_long', help='Do not run long tests') + parser.add_argument('--no-long', action='store_true', dest='no_long', help='Do not run long tests') parser.add_argument('--client-option', nargs='+', help='Specify additional client argument') parser.add_argument('--print-time', action='store_true', dest='print_time', help='Print test time') - group=parser.add_mutually_exclusive_group(required=False) + + group = parser.add_mutually_exclusive_group(required=False) group.add_argument('--zookeeper', action='store_true', default=None, dest='zookeeper', help='Run zookeeper related tests') group.add_argument('--no-zookeeper', action='store_false', default=None, dest='zookeeper', help='Do not run zookeeper related tests') - group=parser.add_mutually_exclusive_group(required=False) + + group = parser.add_mutually_exclusive_group(required=False) group.add_argument('--shard', action='store_true', default=None, dest='shard', help='Run sharding related tests (required to clickhouse-server listen 127.0.0.2 127.0.0.3)') group.add_argument('--no-shard', action='store_false', default=None, dest='shard', help='Do not run shard related tests') args = parser.parse_args() if args.queries and not os.path.isdir(args.queries): - print("Cannot access the specified directory with queries (" + args.queries + ")", file=sys.stderr) + print(f"Cannot access the specified directory with queries ({args.queries})", file=sys.stderr) sys.exit(1) # Autodetect the directory with queries if not specified @@ -1215,7 +1329,7 @@ if __name__ == '__main__': if not os.path.isdir(args.queries): # If we're running from the repo - args.queries = os.path.join(os.path.dirname(os.path.abspath( __file__ )), 'queries') + args.queries = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'queries') if not os.path.isdir(args.queries): # Next we're going to try some system directories, don't write 'stdout' files into them. 
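The '--no-long' change in the hunk above is a behaviour fix, not a style fix: with action='store_false' the option defaulted to True and passing the flag cleared it, the inverse of the meaning that should_skip_test_by_name relies on. A minimal sketch of the corrected semantics (a standalone parser for illustration only, not the runner's full argument set):

from argparse import ArgumentParser

parser = ArgumentParser()
# store_true: no_long defaults to False, and passing --no-long sets it to
# True, which the skip logic reads as "do not run long/deadlock/race tests".
parser.add_argument('--no-long', action='store_true', dest='no_long')

assert parser.parse_args([]).no_long is False
assert parser.parse_args(['--no-long']).no_long is True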
@@ -1257,10 +1371,13 @@ if __name__ == '__main__': if args.configclient: args.client += ' --config-file=' + args.configclient + if os.getenv("CLICKHOUSE_HOST"): args.client += ' --host=' + os.getenv("CLICKHOUSE_HOST") - if os.getenv("CLICKHOUSE_PORT_TCP"): - args.client += ' --port=' + os.getenv("CLICKHOUSE_PORT_TCP") + + args.tcp_port = int(os.getenv("CLICKHOUSE_PORT_TCP", "9000")) + args.client += f" --port={args.tcp_port}" + if os.getenv("CLICKHOUSE_DATABASE"): args.client += ' --database=' + os.getenv("CLICKHOUSE_DATABASE") diff --git a/tests/integration/runner b/tests/integration/runner index 36cb4f22f9a..2143d7ebf29 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -277,7 +277,7 @@ if __name__ == "__main__": --volume={base_cfg}:/clickhouse-config --volume={cases_dir}:/ClickHouse/tests/integration \ --volume={src_dir}/Server/grpc_protos:/ClickHouse/src/Server/grpc_protos \ {dockerd_internal_volume} -e DOCKER_CLIENT_TIMEOUT=300 -e COMPOSE_HTTP_TIMEOUT=600 \ - {env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list}' {img} {command}".format( + {env_tags} {env_cleanup} -e PYTEST_OPTS='{parallel} {opts} {tests_list} -vvv' {img} {command}".format( net=net, tty=tty, bin=args.binary, diff --git a/tests/integration/test_library_bridge/test.py b/tests/integration/test_library_bridge/test.py index 607afb6db5f..97b2ccfbdbe 100644 --- a/tests/integration/test_library_bridge/test.py +++ b/tests/integration/test_library_bridge/test.py @@ -100,6 +100,7 @@ def test_load_ids(ch_cluster): if instance.is_built_with_memory_sanitizer(): pytest.skip("Memory Sanitizer cannot work with third-party shared libraries") + instance.query('DROP DICTIONARY IF EXISTS lib_dict_c') instance.query(''' CREATE DICTIONARY lib_dict_c (key UInt64, value1 UInt64, value2 UInt64, value3 UInt64) PRIMARY KEY key SOURCE(library(PATH '/etc/clickhouse-server/config.d/dictionaries_lib/dict_lib.so')) @@ -263,6 +264,7 @@ def test_bridge_dies_with_parent(ch_cluster): assert clickhouse_pid is None assert bridge_pid is None instance.start_clickhouse(20) + instance.query('DROP DICTIONARY lib_dict_c') if __name__ == '__main__': diff --git a/tests/integration/test_merge_tree_s3_failover/test.py b/tests/integration/test_merge_tree_s3_failover/test.py index 4dec1bc713f..56d9441aba6 100644 --- a/tests/integration/test_merge_tree_s3_failover/test.py +++ b/tests/integration/test_merge_tree_s3_failover/test.py @@ -68,17 +68,22 @@ def drop_table(cluster): # S3 request will be failed for an appropriate part file write. FILES_PER_PART_BASE = 5 # partition.dat, default_compression_codec.txt, count.txt, columns.txt, checksums.txt FILES_PER_PART_WIDE = FILES_PER_PART_BASE + 1 + 1 + 3 * 2 # Primary index, MinMax, Mark and data file for column(s) + +# In debug build there are additional requests (from MergeTreeDataPartWriterWide.cpp:554 due to additional validation). 
+FILES_PER_PART_WIDE_DEBUG = 2 # Additional requests to S3 in debug build
+
 FILES_PER_PART_COMPACT = FILES_PER_PART_BASE + 1 + 1 + 2
+FILES_PER_PART_COMPACT_DEBUG = 0


 @pytest.mark.parametrize(
-    "min_bytes_for_wide_part,request_count",
+    "min_bytes_for_wide_part,request_count,debug_request_count",
     [
-        (0, FILES_PER_PART_WIDE),
-        (1024 * 1024, FILES_PER_PART_COMPACT)
+        (0, FILES_PER_PART_WIDE, FILES_PER_PART_WIDE_DEBUG),
+        (1024 * 1024, FILES_PER_PART_COMPACT, FILES_PER_PART_COMPACT_DEBUG)
     ]
 )
-def test_write_failover(cluster, min_bytes_for_wide_part, request_count):
+def test_write_failover(cluster, min_bytes_for_wide_part, request_count, debug_request_count):
     node = cluster.instances["node"]

     node.query(
@@ -95,17 +100,24 @@ def test_write_failover(cluster, min_bytes_for_wide_part, request_count):
         .format(min_bytes_for_wide_part)
     )

-    for request in range(request_count + 1):
+    is_debug_mode = False
+    success_count = 0
+
+    for request in range(request_count + debug_request_count + 1):
         # Fail N-th request to S3.
         fail_request(cluster, request + 1)

         data = "('2020-03-01',0,'data'),('2020-03-01',1,'data')"
-        positive = request == request_count
+        positive = request >= (request_count + debug_request_count if is_debug_mode else request_count)

         try:
             node.query("INSERT INTO s3_failover_test VALUES {}".format(data))

-            assert positive, "Insert query should be failed, request {}".format(request)
+            success_count += 1
         except QueryRuntimeException as e:
+            if not is_debug_mode and positive:
+                is_debug_mode = True
+                positive = False
+
+            assert not positive, "Insert query shouldn't have failed, request {}".format(request)
             assert str(e).find("Expected Error") != -1, "Unexpected error {}".format(str(e))

@@ -114,7 +126,9 @@
     fail_request(cluster, 0)

     assert node.query("CHECK TABLE s3_failover_test") == '1\n'
-    assert node.query("SELECT * FROM s3_failover_test FORMAT Values") == data
+    assert success_count > 1 or node.query("SELECT * FROM s3_failover_test FORMAT Values") == data
+
+    assert success_count == (1 if is_debug_mode else debug_request_count + 1), "Insert query should have succeeded the expected number of times"


 # Check that second data part move is ended successfully if first attempt was failed.
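As a sanity check of the constants in the hunk above, the expected per-part S3 request counts can be recomputed directly from the comments in this diff (the factor of 3 is the test table's column count, as the wide-part comment states):

# partition.dat, default_compression_codec.txt, count.txt, columns.txt, checksums.txt
FILES_PER_PART_BASE = 5

# Wide part: primary index + MinMax index, then a mark file and a data file
# for each of the 3 columns.
assert FILES_PER_PART_BASE + 1 + 1 + 3 * 2 == 13

# Compact part: all columns share a single mark file and a single data file.
assert FILES_PER_PART_BASE + 1 + 1 + 2 == 9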
diff --git a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml index 46a11a8fe16..1405c7274ca 100644 --- a/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml +++ b/tests/integration/test_replicated_merge_tree_hdfs_zero_copy/configs/config.d/storage_conf.xml @@ -59,7 +59,6 @@ 1024000 1 - 1 diff --git a/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml b/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml index 0cf9191c4af..20b750ffff3 100644 --- a/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml +++ b/tests/integration/test_replicated_merge_tree_s3_zero_copy/configs/config.d/storage_conf.xml @@ -21,7 +21,6 @@ 0 - 1 diff --git a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml index 89c97aa3360..e3d3d3543fa 100644 --- a/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml +++ b/tests/integration/test_s3_zero_copy_replication/configs/config.d/s3.xml @@ -66,7 +66,6 @@ 1024 1 - 1 diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index b9fc0b2272f..947b71b5f96 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -2,7 +2,6 @@ import json import os.path as p import random import socket -import subprocess import threading import time import logging @@ -181,32 +180,6 @@ def avro_confluent_message(schema_registry_client, value): }) return serializer.encode_record_with_schema('test_subject', schema, value) -# Since everything is async and shaky when receiving messages from Kafka, -# we may want to try and check results multiple times in a loop. 
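The removed comment above explains why Kafka results are checked in a loop: message delivery is asynchronous, so a single query may observe a partial result. The bounded retry added later in this diff (sleep(3), at most 10 iterations) makes that explicit; a standalone sketch of the pattern, where query_fn and matches_fn are hypothetical placeholder callables rather than names from this patch:

import time

def poll_for_result(query_fn, matches_fn, attempts=10, delay=3.0):
    # Kafka delivery is asynchronous, so re-run the query a bounded number
    # of times instead of looping forever on a missing message.
    result = query_fn()
    while not matches_fn(result) and attempts > 0:
        time.sleep(delay)
        attempts -= 1
        result = query_fn()
    return result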
-def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'): - fpath = p.join(p.dirname(__file__), ref_file) - with open(fpath) as reference: - if check: - assert TSV(result) == TSV(reference) - else: - return TSV(result) == TSV(reference) - -def describe_consumer_group(kafka_cluster, name): - admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)) - consumer_groups = admin_client.describe_consumer_groups([name]) - res = [] - for member in consumer_groups[0].members: - member_info = {} - member_info['member_id'] = member.member_id - member_info['client_id'] = member.client_id - member_info['client_host'] = member.client_host - member_topics_assignment = [] - for (topic, partitions) in member.member_assignment.assignment: - member_topics_assignment.append({'topic': topic, 'partitions': partitions}) - member_info['assignment'] = member_topics_assignment - res.append(member_info) - return res - # Fixtures @pytest.fixture(scope="module") @@ -262,7 +235,7 @@ kafka_topic_old old kafka_check_result(result, True) - members = describe_consumer_group('old') + members = describe_consumer_group(kafka_cluster, 'old') assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka' # text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose")) @@ -302,7 +275,7 @@ def test_kafka_settings_new_syntax(kafka_cluster): kafka_check_result(result, True) - members = describe_consumer_group('new') + members = describe_consumer_group(kafka_cluster, 'new') assert members[0]['client_id'] == 'instance test 1234' @@ -734,82 +707,6 @@ def kafka_setup_teardown(): # Tests - -def test_kafka_settings_old_syntax(kafka_cluster): - assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro", - ignore_error=True)) == TSV('''kafka_broker kafka1 -kafka_client_id instance -kafka_format_json_each_row JSONEachRow -kafka_group_name_new new -kafka_group_name_old old -kafka_topic_new new -kafka_topic_old old -''') - - instance.query(''' - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n'); - ''') - - # Don't insert malformed messages since old settings syntax - # doesn't support skipping of broken messages. 
- messages = [] - for i in range(50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'old', messages) - - result = '' - while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) - if kafka_check_result(result): - break - - kafka_check_result(result, True) - - members = describe_consumer_group(kafka_cluster, 'old') - assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka' - # text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:{} --describe --members --group old --verbose".format(cluster.kafka_port))) - - -def test_kafka_settings_new_syntax(kafka_cluster): - instance.query(''' - CREATE TABLE test.kafka (key UInt64, value UInt64) - ENGINE = Kafka - SETTINGS kafka_broker_list = '{kafka_broker}:19092', - kafka_topic_list = '{kafka_topic_new}', - kafka_group_name = '{kafka_group_name_new}', - kafka_format = '{kafka_format_json_each_row}', - kafka_row_delimiter = '\\n', - kafka_client_id = '{kafka_client_id} test 1234', - kafka_skip_broken_messages = 1; - ''') - - messages = [] - for i in range(25): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'new', messages) - - # Insert couple of malformed messages. - kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,']) - kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,']) - - messages = [] - for i in range(25, 50): - messages.append(json.dumps({'key': i, 'value': i})) - kafka_produce(kafka_cluster, 'new', messages) - - result = '' - while True: - result += instance.query('SELECT * FROM test.kafka', ignore_error=True) - if kafka_check_result(result): - break - - kafka_check_result(result, True) - - members = describe_consumer_group(kafka_cluster, 'new') - assert members[0]['client_id'] == 'instance test 1234' - - def test_kafka_issue11308(kafka_cluster): # Check that matview does respect Kafka SETTINGS kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }', @@ -1585,18 +1482,21 @@ def test_kafka_virtual_columns_with_materialized_view(kafka_cluster): messages.append(json.dumps({'key': i, 'value': i})) kafka_produce(kafka_cluster, 'virt2', messages, 0) - while True: - result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view') - if kafka_check_result(result, False, 'test_kafka_virtual2.reference'): - break + sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key' + result = instance.query(sql) + iterations = 0 + while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and iterations < 10: + time.sleep(3) + iterations += 1 + result = instance.query(sql) + + kafka_check_result(result, True, 'test_kafka_virtual2.reference') instance.query(''' DROP TABLE test.consumer; DROP TABLE test.view; ''') - kafka_check_result(result, True, 'test_kafka_virtual2.reference') - def test_kafka_insert(kafka_cluster): instance.query(''' diff --git a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper_long.reference similarity index 84% rename from tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference rename to tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper_long.reference index cb61ab3e9b9..59e3e1e4a3f 100644 --- 
a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.reference +++ b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper_long.reference @@ -1,22 +1,22 @@ d Date k UInt64 i32 Int32 -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 10 42 d Date k UInt64 i32 Int32 dt DateTime(\'UTC\') -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\')\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\')\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime(\'UTC\') -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\')\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\')\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 9 41 1992-01-01 08:00:00 2015-01-01 10 42 1970-01-01 00:00:00 d Date @@ -25,14 +25,14 @@ i32 Int32 dt DateTime(\'UTC\') n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime(\'UTC\') n.ui8 Array(UInt8) n.s Array(String) -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 2015-01-01 9 41 1992-01-01 08:00:00 [] [] 2015-01-01 10 42 1970-01-01 00:00:00 [] [] @@ -43,7 +43,7 @@ dt DateTime(\'UTC\') n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` 
Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -51,7 +51,7 @@ dt DateTime(\'UTC\') n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['1970-01-01','1970-01-01','1970-01-01'] 2015-01-01 9 41 1992-01-01 08:00:00 [] [] [] @@ -64,7 +64,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -73,7 +73,7 @@ n.ui8 Array(UInt8) n.s Array(String) n.d Array(Date) s String DEFAULT \'0\' -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `n.d` Array(Date),\n `s` String DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] ['2000-01-01','2000-01-01','2000-01-03'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] ['2000-01-01','2000-01-01','2000-01-03'] 0 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] ['1970-01-01','1970-01-01','1970-01-01'] 0 @@ -86,7 +86,7 @@ dt DateTime(\'UTC\') n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` 
DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -94,7 +94,7 @@ dt DateTime(\'UTC\') n.ui8 Array(UInt8) n.s Array(String) s Int64 DEFAULT \'0\' -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` Int64 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 @@ -108,7 +108,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -117,7 +117,7 @@ n.ui8 Array(UInt8) n.s Array(String) s UInt32 DEFAULT \'0\' n.d Array(Date) -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.ui8` Array(UInt8),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\',\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 [10,20,30] ['asd','qwe','qwe'] 100500 ['1970-01-01','1970-01-01','1970-01-01'] 2015-01-01 7 39 2014-07-14 13:26:50 [10,20,30] ['120','130','140'] 0 ['1970-01-01','1970-01-01','1970-01-01'] 2015-01-01 8 40 2012-12-12 12:12:12 [1,2,3] ['12','13','14'] 0 ['1970-01-01','1970-01-01','1970-01-01'] @@ -129,14 +129,14 @@ i32 Int32 dt DateTime(\'UTC\') n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime(\'UTC\') n.s Array(String) s UInt32 DEFAULT \'0\' -CREATE 
TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `n.s` Array(String),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 ['asd','qwe','qwe'] 100500 2015-01-01 7 39 2014-07-14 13:26:50 ['120','130','140'] 0 2015-01-01 8 40 2012-12-12 12:12:12 ['12','13','14'] 0 @@ -147,13 +147,13 @@ k UInt64 i32 Int32 dt DateTime(\'UTC\') s UInt32 DEFAULT \'0\' -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime(\'UTC\') s UInt32 DEFAULT \'0\' -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 2015-01-01 7 39 2014-07-14 13:26:50 0 2015-01-01 8 40 2012-12-12 12:12:12 0 @@ -166,7 +166,7 @@ dt DateTime(\'UTC\') s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 @@ -174,7 +174,7 @@ dt DateTime(\'UTC\') s UInt32 DEFAULT \'0\' n.s Array(String) n.d Array(Date) -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\',\n `n.s` Array(String),\n `n.d` Array(Date)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 [] [] 2015-01-01 7 39 2014-07-14 13:26:50 0 [] [] 2015-01-01 8 40 2012-12-12 12:12:12 0 [] [] @@ -185,13 +185,13 @@ k UInt64 i32 Int32 dt DateTime(\'UTC\') s UInt32 DEFAULT \'0\' -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` 
DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt DateTime(\'UTC\') s UInt32 DEFAULT \'0\' -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` DateTime(\'UTC\'),\n `s` UInt32 DEFAULT \'0\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 13:26:50 100500 2015-01-01 7 39 2014-07-14 13:26:50 0 2015-01-01 8 40 2012-12-12 12:12:12 0 @@ -202,13 +202,13 @@ k UInt64 i32 Int32 dt Date s DateTime(\'UTC\') DEFAULT \'1970-01-01 00:00:00\' -CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` Date,\n `s` DateTime(\'UTC\') DEFAULT \'1970-01-01 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r1\', d, k, 8192) +CREATE TABLE default.replicated_alter1\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` Date,\n `s` DateTime(\'UTC\') DEFAULT \'1970-01-01 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r1\', d, k, 8192) d Date k UInt64 i32 Int32 dt Date s DateTime(\'UTC\') DEFAULT \'1970-01-01 00:00:00\' -CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` Date,\n `s` DateTime(\'UTC\') DEFAULT \'1970-01-01 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00062/alter\', \'r2\', d, k, 8192) +CREATE TABLE default.replicated_alter2\n(\n `d` Date,\n `k` UInt64,\n `i32` Int32,\n `dt` Date,\n `s` DateTime(\'UTC\') DEFAULT \'1970-01-01 00:00:00\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00062/alter\', \'r2\', d, k, 8192) 2015-01-01 6 38 2014-07-15 1970-01-02 03:55:00 2015-01-01 7 39 2014-07-14 1970-01-01 00:00:00 2015-01-01 8 40 2012-12-12 1970-01-01 00:00:00 diff --git a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper_long.sql similarity index 95% rename from tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql rename to tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper_long.sql index ac56b3416cd..9901dfad9c8 100644 --- a/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper.sql +++ b/tests/queries/0_stateless/00062_replicated_merge_tree_alter_zookeeper_long.sql @@ -3,8 +3,8 @@ DROP TABLE IF EXISTS replicated_alter2; SET replication_alter_partitions_sync = 2; -CREATE TABLE replicated_alter1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00062/alter', 'r1', d, k, 8192); -CREATE TABLE replicated_alter2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00062/alter', 'r2', d, k, 8192); +CREATE TABLE replicated_alter1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00062/alter', 'r1', d, k, 8192); +CREATE TABLE replicated_alter2 (d Date, k UInt64, i32 Int32) 
ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00062/alter', 'r2', d, k, 8192); INSERT INTO replicated_alter1 VALUES ('2015-01-01', 10, 42); diff --git a/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00083_create_merge_tree_zookeeper.reference rename to tests/queries/0_stateless/00083_create_merge_tree_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper_long.sql similarity index 97% rename from tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql rename to tests/queries/0_stateless/00083_create_merge_tree_zookeeper_long.sql index 998a4517163..65247d7b12c 100644 --- a/tests/queries/0_stateless/00083_create_merge_tree_zookeeper.sql +++ b/tests/queries/0_stateless/00083_create_merge_tree_zookeeper_long.sql @@ -56,7 +56,7 @@ CREATE TABLE aggregating_merge_tree_with_sampling (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = AggregatingMergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); CREATE TABLE replicated_merge_tree - (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00083/01/replicated_merge_tree/', 'r1', d, (a, b), 111); + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_merge_tree/', 'r1', d, (a, b), 111); CREATE TABLE replicated_collapsing_merge_tree (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/test_00083/01/replicated_collapsing_merge_tree/', 'r1', d, (a, b), 111, y); CREATE TABLE replicated_versioned_collapsing_merge_tree @@ -69,7 +69,7 @@ CREATE TABLE replicated_aggregating_merge_tree (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/test_00083/01/replicated_aggregating_merge_tree/', 'r1', d, (a, b), 111); CREATE TABLE replicated_merge_tree_with_sampling - (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00083/01/replicated_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); CREATE TABLE replicated_collapsing_merge_tree_with_sampling (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/test_00083/01/replicated_collapsing_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111, y); CREATE TABLE replicated_versioned_collapsing_merge_tree_with_sampling diff --git a/tests/queries/0_stateless/00121_drop_column_zookeeper.sql b/tests/queries/0_stateless/00121_drop_column_zookeeper.sql index 7ccf69e46b2..2aee56135f2 100644 --- a/tests/queries/0_stateless/00121_drop_column_zookeeper.sql +++ b/tests/queries/0_stateless/00121_drop_column_zookeeper.sql @@ -1,12 +1,12 @@ DROP TABLE IF EXISTS alter_00121; -CREATE TABLE alter_00121 (d Date, x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/alter_00121/t1', 'r1', d, (d), 8192); +CREATE TABLE alter_00121 (d Date, x UInt8) ENGINE = 
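
-- A minimal sketch of what the new paths buy us (hypothetical database/table
-- names, not part of the patch): {database} expands to the database the table
-- lives in, so tests running in parallel databases no longer collide on a
-- single ZooKeeper path.
CREATE TABLE db1.collisions (x UInt32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/collisions', 'r1')
    ORDER BY x;
-- SHOW CREATE TABLE prints the expanded path, /clickhouse/tables/db1/collisions,
-- which is why the .reference files above now carry "default" in the path.
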
ReplicatedMergeTree('/clickhouse/tables/{database}/test/alter_00121/t1', 'r1', d, (d), 8192); INSERT INTO alter_00121 VALUES ('2014-01-01', 1); ALTER TABLE alter_00121 DROP COLUMN x; DROP TABLE alter_00121; -CREATE TABLE alter_00121 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/alter_00121/t2', 'r1', d, (d), 8192); +CREATE TABLE alter_00121 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/alter_00121/t2', 'r1', d, (d), 8192); INSERT INTO alter_00121 VALUES ('2014-01-01'); SELECT * FROM alter_00121 ORDER BY d; diff --git a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference index b667c57a14c..b2b0b43e490 100644 --- a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference +++ b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.reference @@ -25,6 +25,8 @@ ORDER BY LIMIT LIMIT BY 0 1 +0 +1 LIMIT BY LIMIT 0 GROUP BY ORDER BY diff --git a/tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference b/tests/queries/0_stateless/00215_primary_key_order_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00215_primary_key_order_zookeeper.reference rename to tests/queries/0_stateless/00215_primary_key_order_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql b/tests/queries/0_stateless/00215_primary_key_order_zookeeper_long.sql similarity index 84% rename from tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql rename to tests/queries/0_stateless/00215_primary_key_order_zookeeper_long.sql index 8e36cbc85e0..86b84f3f63c 100644 --- a/tests/queries/0_stateless/00215_primary_key_order_zookeeper.sql +++ b/tests/queries/0_stateless/00215_primary_key_order_zookeeper_long.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS primary_key; -CREATE TABLE primary_key (d Date DEFAULT today(), x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00215/primary_key', 'r1', d, -x, 1); +CREATE TABLE primary_key (d Date DEFAULT today(), x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00215/primary_key', 'r1', d, -x, 1); INSERT INTO primary_key (x) VALUES (1), (2), (3); INSERT INTO primary_key (x) VALUES (1), (3), (2); diff --git a/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts_long.reference similarity index 100% rename from tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.reference rename to tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts_long.reference diff --git a/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts_long.sql similarity index 89% rename from tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql rename to tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts_long.sql index c14ce53d4a3..d70f337213e 100644 --- a/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts.sql +++ b/tests/queries/0_stateless/00226_zookeeper_deduplication_and_unexpected_parts_long.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS deduplication; -CREATE TABLE deduplication (d Date DEFAULT '2015-01-01', x Int8) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/test_00226/deduplication', 'r1', d, x, 1); +CREATE TABLE deduplication (d Date DEFAULT '2015-01-01', x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00226/deduplication', 'r1', d, x, 1); INSERT INTO deduplication (x) VALUES (1); INSERT INTO deduplication (x) VALUES (1); diff --git a/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.reference rename to tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper_long.sql similarity index 77% rename from tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql rename to tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper_long.sql index 95a5c7c97f1..96a131514f8 100644 --- a/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper.sql +++ b/tests/queries/0_stateless/00236_replicated_drop_on_non_leader_zookeeper_long.sql @@ -3,8 +3,8 @@ SET replication_alter_partitions_sync = 2; DROP TABLE IF EXISTS attach_r1; DROP TABLE IF EXISTS attach_r2; -CREATE TABLE attach_r1 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00236/01/attach', 'r1', d, d, 8192); -CREATE TABLE attach_r2 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00236/01/attach', 'r2', d, d, 8192); +CREATE TABLE attach_r1 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00236/01/attach', 'r1', d, d, 8192); +CREATE TABLE attach_r2 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00236/01/attach', 'r2', d, d, 8192); INSERT INTO attach_r1 VALUES ('2014-01-01'), ('2014-02-01'), ('2014-03-01'); diff --git a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.reference rename to tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper_long.sql similarity index 91% rename from tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql rename to tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper_long.sql index 5d8c4de1c06..e4d60f1b960 100644 --- a/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper.sql +++ b/tests/queries/0_stateless/00446_clear_column_in_partition_zookeeper_long.sql @@ -24,8 +24,8 @@ SELECT '===Replicated case==='; DROP TABLE IF EXISTS clear_column1; DROP TABLE IF EXISTS clear_column2; SELECT sleep(1) FORMAT Null; -CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; -CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column1 (d Date, 
i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; INSERT INTO clear_column1 (d) VALUES ('2000-01-01'), ('2000-02-01'); SYSTEM SYNC REPLICA clear_column2; diff --git a/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.reference rename to tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql similarity index 84% rename from tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql rename to tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql index a2a771e6cc0..bd92fcefcff 100644 --- a/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper.sql +++ b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql @@ -4,8 +4,8 @@ SELECT '*** Not partitioned ***'; DROP TABLE IF EXISTS not_partitioned_replica1_00502; DROP TABLE IF EXISTS not_partitioned_replica2_00502; -CREATE TABLE not_partitioned_replica1_00502(x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/not_partitioned_00502', '1') ORDER BY x; -CREATE TABLE not_partitioned_replica2_00502(x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/not_partitioned_00502', '2') ORDER BY x; +CREATE TABLE not_partitioned_replica1_00502(x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/not_partitioned_00502', '1') ORDER BY x; +CREATE TABLE not_partitioned_replica2_00502(x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/not_partitioned_00502', '2') ORDER BY x; INSERT INTO not_partitioned_replica1_00502 VALUES (1), (2), (3); INSERT INTO not_partitioned_replica1_00502 VALUES (4), (5); @@ -30,8 +30,8 @@ SELECT '*** Partitioned by week ***'; DROP TABLE IF EXISTS partitioned_by_week_replica1; DROP TABLE IF EXISTS partitioned_by_week_replica2; -CREATE TABLE partitioned_by_week_replica1(d Date, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/partitioned_by_week_00502', '1') PARTITION BY toMonday(d) ORDER BY x; -CREATE TABLE partitioned_by_week_replica2(d Date, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/partitioned_by_week_00502', '2') PARTITION BY toMonday(d) ORDER BY x; +CREATE TABLE partitioned_by_week_replica1(d Date, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_week_00502', '1') PARTITION BY toMonday(d) ORDER BY x; +CREATE TABLE partitioned_by_week_replica2(d Date, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_week_00502', '2') PARTITION BY toMonday(d) ORDER BY x; -- 2000-01-03 belongs to a different week than 2000-01-01 and 2000-01-02 INSERT INTO partitioned_by_week_replica1 VALUES ('2000-01-01', 1), ('2000-01-02', 2), ('2000-01-03', 3); @@ -57,8 +57,8 @@ SELECT '*** Partitioned by a (Date, UInt8) tuple ***'; DROP TABLE IF EXISTS 
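
-- Worked example for the week-partitioning comment above: toMonday rounds a
-- date down to its week's Monday, and 2000-01-01 was a Saturday.
SELECT
    toMonday(toDate('2000-01-01')),  -- 1999-12-27
    toMonday(toDate('2000-01-02')),  -- 1999-12-27, same week
    toMonday(toDate('2000-01-03'));  -- 2000-01-03, itself a Monday, hence a new partition
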
partitioned_by_tuple_replica1_00502; DROP TABLE IF EXISTS partitioned_by_tuple_replica2_00502; -CREATE TABLE partitioned_by_tuple_replica1_00502(d Date, x UInt8, y UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/partitioned_by_tuple_00502', '1') ORDER BY x PARTITION BY (d, x); -CREATE TABLE partitioned_by_tuple_replica2_00502(d Date, x UInt8, y UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/partitioned_by_tuple_00502', '2') ORDER BY x PARTITION BY (d, x); +CREATE TABLE partitioned_by_tuple_replica1_00502(d Date, x UInt8, y UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_tuple_00502', '1') ORDER BY x PARTITION BY (d, x); +CREATE TABLE partitioned_by_tuple_replica2_00502(d Date, x UInt8, y UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_tuple_00502', '2') ORDER BY x PARTITION BY (d, x); INSERT INTO partitioned_by_tuple_replica1_00502 VALUES ('2000-01-01', 1, 1), ('2000-01-01', 2, 2), ('2000-01-02', 1, 3); INSERT INTO partitioned_by_tuple_replica1_00502 VALUES ('2000-01-02', 1, 4), ('2000-01-01', 1, 5); @@ -84,8 +84,8 @@ SELECT '*** Partitioned by String ***'; DROP TABLE IF EXISTS partitioned_by_string_replica1; DROP TABLE IF EXISTS partitioned_by_string_replica2; -CREATE TABLE partitioned_by_string_replica1(s String, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/partitioned_by_string_00502', '1') PARTITION BY s ORDER BY x; -CREATE TABLE partitioned_by_string_replica2(s String, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/partitioned_by_string_00502', '2') PARTITION BY s ORDER BY x; +CREATE TABLE partitioned_by_string_replica1(s String, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_string_00502', '1') PARTITION BY s ORDER BY x; +CREATE TABLE partitioned_by_string_replica2(s String, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_string_00502', '2') PARTITION BY s ORDER BY x; INSERT INTO partitioned_by_string_replica1 VALUES ('aaa', 1), ('aaa', 2), ('bbb', 3); INSERT INTO partitioned_by_string_replica1 VALUES ('bbb', 4), ('aaa', 5); @@ -110,8 +110,8 @@ SELECT '*** Table without columns with fixed size ***'; DROP TABLE IF EXISTS without_fixed_size_columns_replica1; DROP TABLE IF EXISTS without_fixed_size_columns_replica2; -CREATE TABLE without_fixed_size_columns_replica1(s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/without_fixed_size_columns_00502', '1') PARTITION BY length(s) ORDER BY s; -CREATE TABLE without_fixed_size_columns_replica2(s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/without_fixed_size_columns_00502', '2') PARTITION BY length(s) ORDER BY s; +CREATE TABLE without_fixed_size_columns_replica1(s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/without_fixed_size_columns_00502', '1') PARTITION BY length(s) ORDER BY s; +CREATE TABLE without_fixed_size_columns_replica2(s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/without_fixed_size_columns_00502', '2') PARTITION BY length(s) ORDER BY s; INSERT INTO without_fixed_size_columns_replica1 VALUES ('a'), ('aa'), ('b'), ('cc'); diff --git a/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql b/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql index ef8655a1861..48678329404 100644 --- a/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql +++ 
b/tests/queries/0_stateless/00509_extended_storage_definition_syntax_zookeeper.sql @@ -5,7 +5,7 @@ SELECT '*** Replicated with sampling ***'; DROP TABLE IF EXISTS replicated_with_sampling; CREATE TABLE replicated_with_sampling(x UInt8) - ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00509/replicated_with_sampling', 'r1') + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00509/replicated_with_sampling', 'r1') ORDER BY x SAMPLE BY x; @@ -72,7 +72,7 @@ SELECT '*** Table definition with SETTINGS ***'; DROP TABLE IF EXISTS with_settings; CREATE TABLE with_settings(x UInt32) - ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00509/with_settings', 'r1') + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00509/with_settings', 'r1') ORDER BY x SETTINGS replicated_can_become_leader = 0; diff --git a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql index 8df012a8588..24f64a577f1 100644 --- a/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql +++ b/tests/queries/0_stateless/00510_materizlized_view_and_deduplication_zookeeper.sql @@ -4,9 +4,9 @@ DROP TABLE IF EXISTS with_deduplication_mv; DROP TABLE IF EXISTS without_deduplication_mv; CREATE TABLE with_deduplication(x UInt32) - ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00510/with_deduplication', 'r1') ORDER BY x; + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication', 'r1') ORDER BY x; CREATE TABLE without_deduplication(x UInt32) - ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00510/without_deduplication', 'r1') ORDER BY x SETTINGS replicated_deduplication_window = 0; + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/without_deduplication', 'r1') ORDER BY x SETTINGS replicated_deduplication_window = 0; CREATE MATERIALIZED VIEW with_deduplication_mv UUID '00000510-1000-4000-8000-000000000001' ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/test_00510/with_deduplication_mv', 'r1') ORDER BY dummy diff --git a/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql b/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql index 2a9ebb992a8..71ed99f78fa 100644 --- a/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql +++ b/tests/queries/0_stateless/00516_deduplication_after_drop_partition_zookeeper.sql @@ -1,6 +1,6 @@ DROP TABLE IF EXISTS deduplication_by_partition; CREATE TABLE deduplication_by_partition(d Date, x UInt32) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/test_00516/deduplication_by_partition', 'r1', d, x, 8192); + ReplicatedMergeTree('/clickhouse/tables/{database}/test_00516/deduplication_by_partition', 'r1', d, x, 8192); INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 1); INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 2), ('2000-01-01', 3); diff --git a/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.reference rename to tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper_long.sql similarity 
index 91% rename from tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql rename to tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper_long.sql index 9c00ee68ba9..4cfbc1d0fb0 100644 --- a/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper.sql +++ b/tests/queries/0_stateless/00563_insert_into_remote_and_zookeeper_long.sql @@ -1,6 +1,6 @@ -- Check that settings are correctly passed through Distributed table DROP TABLE IF EXISTS simple; -CREATE TABLE simple (d Int8) ENGINE = ReplicatedMergeTree('/clickhouse/test_00563/tables/simple', '1') ORDER BY d; +CREATE TABLE simple (d Int8) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_00563/tables/simple', '1') ORDER BY d; -- TODO: replace '127.0.0.2' -> '127.0.0.1' after a fix INSERT INTO TABLE FUNCTION remote('127.0.0.2', currentDatabase(), 'simple') VALUES (1); diff --git a/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.reference rename to tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper_long.sql similarity index 83% rename from tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql rename to tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper_long.sql index 0f6d51b8716..06e88754888 100644 --- a/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper.sql +++ b/tests/queries/0_stateless/00623_replicated_truncate_table_zookeeper_long.sql @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS replicated_truncate1; DROP TABLE IF EXISTS replicated_truncate2; -CREATE TABLE replicated_truncate1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00623/truncate', 'r1', d, k, 8192); -CREATE TABLE replicated_truncate2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00623/truncate', 'r2', d, k, 8192); +CREATE TABLE replicated_truncate1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00623/truncate', 'r1', d, k, 8192); +CREATE TABLE replicated_truncate2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00623/truncate', 'r2', d, k, 8192); SELECT '======Before Truncate======'; INSERT INTO replicated_truncate1 VALUES ('2015-01-01', 10, 42); diff --git a/tests/queries/0_stateless/00643_cast_zookeeper.reference b/tests/queries/0_stateless/00643_cast_zookeeper_long.reference similarity index 79% rename from tests/queries/0_stateless/00643_cast_zookeeper.reference rename to tests/queries/0_stateless/00643_cast_zookeeper_long.reference index 9123463de1a..4eb4a4e0365 100644 --- a/tests/queries/0_stateless/00643_cast_zookeeper.reference +++ b/tests/queries/0_stateless/00643_cast_zookeeper_long.reference @@ -3,7 +3,7 @@ CREATE TABLE default.cast1 `x` UInt8, `e` Enum8('hello' = 1, 'world' = 2) DEFAULT CAST(x, 'Enum8(\'hello\' = 1, \'world\' = 2)') ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00643/cast', 'r1') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/default/test_00643/cast', 'r1') ORDER BY e SETTINGS index_granularity = 8192 x UInt8 diff --git a/tests/queries/0_stateless/00643_cast_zookeeper.sql 
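
-- Hypothetical sketch of the mechanism 00563 exercises (the setting here is
-- chosen only for illustration): session settings accompany the query that
-- remote() sends, so the replicated table on the other end observes them.
SET insert_deduplicate = 0;
INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') VALUES (1);
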
b/tests/queries/0_stateless/00643_cast_zookeeper_long.sql similarity index 79% rename from tests/queries/0_stateless/00643_cast_zookeeper.sql rename to tests/queries/0_stateless/00643_cast_zookeeper_long.sql index c9760f00ca7..21e7a89c603 100644 --- a/tests/queries/0_stateless/00643_cast_zookeeper.sql +++ b/tests/queries/0_stateless/00643_cast_zookeeper_long.sql @@ -22,7 +22,7 @@ CREATE TABLE cast1 'world' = 2 ) ) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00643/cast', 'r1') ORDER BY e; +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00643/cast', 'r1') ORDER BY e; SHOW CREATE TABLE cast1 FORMAT TSVRaw; DESC TABLE cast1; @@ -30,7 +30,7 @@ DESC TABLE cast1; INSERT INTO cast1 (x) VALUES (1); SELECT * FROM cast1; -CREATE TABLE cast2 AS cast1 ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00643/cast', 'r2') ORDER BY e; +CREATE TABLE cast2 AS cast1 ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00643/cast', 'r2') ORDER BY e; SYSTEM SYNC REPLICA cast2; diff --git a/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.reference rename to tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper_long.sql similarity index 84% rename from tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql rename to tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper_long.sql index 498896d31a7..50f51510d61 100644 --- a/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper.sql +++ b/tests/queries/0_stateless/00721_force_by_identical_result_after_merge_zookeeper_long.sql @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS byte_identical_r1; DROP TABLE IF EXISTS byte_identical_r2; -CREATE TABLE byte_identical_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00721/byte_identical', 'r1') ORDER BY x; -CREATE TABLE byte_identical_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00721/byte_identical', 'r2') ORDER BY x; +CREATE TABLE byte_identical_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00721/byte_identical', 'r1') ORDER BY x; +CREATE TABLE byte_identical_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00721/byte_identical', 'r2') ORDER BY x; INSERT INTO byte_identical_r1(x) VALUES (1), (2), (3); SYSTEM SYNC REPLICA byte_identical_r2; diff --git a/tests/queries/0_stateless/00725_comment_columns.reference b/tests/queries/0_stateless/00725_comment_columns_long.reference similarity index 100% rename from tests/queries/0_stateless/00725_comment_columns.reference rename to tests/queries/0_stateless/00725_comment_columns_long.reference diff --git a/tests/queries/0_stateless/00725_comment_columns.sql b/tests/queries/0_stateless/00725_comment_columns_long.sql similarity index 98% rename from tests/queries/0_stateless/00725_comment_columns.sql rename to tests/queries/0_stateless/00725_comment_columns_long.sql index 6fe095d9935..3eec0ba31b7 100644 --- a/tests/queries/0_stateless/00725_comment_columns.sql +++ 
b/tests/queries/0_stateless/00725_comment_columns_long.sql @@ -87,4 +87,4 @@ FORMAT PrettyCompactNoEscapes; DROP TABLE IF EXISTS check_query_comment_column; --- TODO: add here tests with ReplicatedMergeTree +-- TODO: add here tests with ReplicatedMergeTree({database}) diff --git a/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql similarity index 80% rename from tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql index a1fc25fbf0b..a92646a41fc 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_have_data_before_quorum_zookeeper_long.sql @@ -3,8 +3,8 @@ SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS quorum1; DROP TABLE IF EXISTS quorum2; -CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_have_data', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_have_data', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_have_data', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_have_data', '2') ORDER BY x PARTITION BY y; INSERT INTO quorum1 VALUES (1, '1990-11-15'); INSERT INTO quorum1 VALUES (2, '1990-11-15'); diff --git a/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql similarity index 82% rename from tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql index 914d98ff3f5..4f9afb93f3a 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long.sql @@ -3,8 +3,8 @@ SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS quorum1; DROP TABLE IF EXISTS quorum2; -CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_lost_alive', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE 
ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_lost_alive', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost_alive', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost_alive', '2') ORDER BY x PARTITION BY y; SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; diff --git a/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql similarity index 81% rename from tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql index 18cad0dda41..1374fa776ca 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_lost_part_zookeeper_long.sql @@ -3,8 +3,8 @@ SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS quorum1; DROP TABLE IF EXISTS quorum2; -CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_lost', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_lost', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost', '2') ORDER BY x PARTITION BY y; SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; diff --git a/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql similarity index 79% rename from tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql index 11c8324ac25..0cc162b6010 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long.sql @@ -3,8 +3,8 @@ SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS quorum1; DROP TABLE IF EXISTS quorum2; -CREATE TABLE 
quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_old_data', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum_old_data', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_old_data', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_old_data', '2') ORDER BY x PARTITION BY y; INSERT INTO quorum1 VALUES (1, '1990-11-15'); INSERT INTO quorum1 VALUES (2, '1990-11-15'); diff --git a/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql similarity index 84% rename from tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql index a4e262ce3c4..0ceca3743b2 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_simple_test_1_parts_zookeeper_long.sql @@ -3,8 +3,8 @@ SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS quorum1; DROP TABLE IF EXISTS quorum2; -CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum1', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum1', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum1', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum1', '2') ORDER BY x PARTITION BY y; SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; diff --git a/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.reference rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql similarity index 76% rename from tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql rename to tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql index 40c08fd926f..a95d992705e 100644 --- a/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper.sql +++ b/tests/queries/0_stateless/00732_quorum_insert_simple_test_2_parts_zookeeper_long.sql @@ -3,8 +3,8 @@ SET send_logs_level = 'fatal'; DROP TABLE 
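
-- What the repeated SETs in these quorum tests mean, in one sketch: with a
-- quorum of two, an INSERT is acknowledged only after both replicas have the
-- part, and sequential consistency restricts SELECTs to quorum-confirmed parts.
SET insert_quorum = 2, insert_quorum_parallel = 0;
SET select_sequential_consistency = 1;
INSERT INTO quorum1 VALUES (1, '1990-11-15');  -- waits until quorum2 has the part too
SELECT x FROM quorum2;  -- sees the row or throws; never returns stale data
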
IF EXISTS quorum1; DROP TABLE IF EXISTS quorum2; -CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum2', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00732/quorum2', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum2', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum2', '2') ORDER BY x PARTITION BY y; SET insert_quorum=2, insert_quorum_parallel=0; SET select_sequential_consistency=1; diff --git a/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh index d19288f65d8..37d5755fb0b 100755 --- a/tests/queries/0_stateless/00738_lock_for_inner_table.sh +++ b/tests/queries/0_stateless/00738_lock_for_inner_table.sh @@ -11,10 +11,9 @@ uuid=$(${CLICKHOUSE_CLIENT} --query "SELECT reinterpretAsUUID(currentDatabase()) echo "DROP TABLE IF EXISTS tab_00738 SYNC; DROP TABLE IF EXISTS mv SYNC; --- create table with fsync and 20 partitions for slower INSERT --- (since increasing number of records will make it significantly slower in debug build, but not in release) -CREATE TABLE tab_00738(a Int) ENGINE = MergeTree() ORDER BY a PARTITION BY a%20 SETTINGS fsync_after_insert=1; -CREATE MATERIALIZED VIEW mv UUID '$uuid' ENGINE = Log AS SELECT a FROM tab_00738;" | ${CLICKHOUSE_CLIENT} -n +CREATE TABLE tab_00738(a Int) ENGINE = MergeTree() ORDER BY a; +-- The matview will take at least 2 seconds to be finished (10000000 * 0.0000002) +CREATE MATERIALIZED VIEW mv UUID '$uuid' ENGINE = Log AS SELECT sleepEachRow(0.0000002) FROM tab_00738;" | ${CLICKHOUSE_CLIENT} -n ${CLICKHOUSE_CLIENT} --query_id insert_$CLICKHOUSE_DATABASE --query "INSERT INTO tab_00738 SELECT number FROM numbers(10000000)" & diff --git a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference index 74aa60a41f2..9bf0304d7d7 100644 --- a/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference +++ b/tests/queries/0_stateless/00753_comment_columns_zookeeper.reference @@ -1,6 +1,6 @@ -CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'comment\',\n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_00753/comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'comment\',\n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00753/comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 column_name1 UInt8 DEFAULT 1 comment column_name2 UInt8 non default comment -CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\',\n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'clickhouse/tables/test_00753/comments\', \'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 +CREATE TABLE default.check_comments\n(\n `column_name1` UInt8 DEFAULT 1 COMMENT \'another comment\',\n `column_name2` UInt8 COMMENT \'non default comment\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00753/comments\', 
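
-- Sanity check on the new throttle in 00738: the materialized view sleeps
-- 0.0000002 s per row, so the INSERT of 10,000,000 rows keeps the inner table
-- busy for about 10000000 * 0.0000002 = 2 seconds, long enough for the DROP
-- to contend for the lock.
SELECT 10000000 * 0.0000002 AS matview_busy_seconds;  -- 2
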
\'r1\')\nORDER BY column_name1\nSETTINGS index_granularity = 8192 column_name1 UInt8 DEFAULT 1 another comment column_name2 UInt8 non default comment diff --git a/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql b/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql index e98630bb00d..93e2c9fb9d5 100644 --- a/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql +++ b/tests/queries/0_stateless/00753_comment_columns_zookeeper.sql @@ -4,7 +4,7 @@ CREATE TABLE check_comments ( column_name1 UInt8 DEFAULT 1 COMMENT 'comment', column_name2 UInt8 COMMENT 'non default comment' - ) ENGINE = ReplicatedMergeTree('clickhouse/tables/test_00753/comments', 'r1') + ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00753/comments', 'r1') ORDER BY column_name1; SHOW CREATE check_comments; diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper_long.reference similarity index 62% rename from tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference rename to tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper_long.reference index 48fe2d30bf3..a1fecd72e30 100644 --- a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.reference +++ b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper_long.reference @@ -9,6 +9,6 @@ 1 2 1 30 1 2 4 90 *** Check SHOW CREATE TABLE *** -CREATE TABLE default.summing_r2\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test_00754/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.summing_r2\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/default/test_00754/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, -z)\nSETTINGS index_granularity = 8192 *** Check SHOW CREATE TABLE after offline ALTER *** -CREATE TABLE default.summing_r2\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `t` UInt32,\n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/test_00754/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, t * t)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.summing_r2\n(\n `x` UInt32,\n `y` UInt32,\n `z` UInt32,\n `t` UInt32,\n `val` UInt32\n)\nENGINE = ReplicatedSummingMergeTree(\'/clickhouse/tables/default/test_00754/summing\', \'r2\')\nPRIMARY KEY (x, y)\nORDER BY (x, y, t * t)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper_long.sql similarity index 88% rename from tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql rename to tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper_long.sql index 809adfaa498..78986338cd9 100644 --- a/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper.sql +++ b/tests/queries/0_stateless/00754_alter_modify_order_by_replicated_zookeeper_long.sql @@ -3,14 +3,14 @@ SET optimize_on_insert = 0; SET send_logs_level = 'fatal'; DROP TABLE IF EXISTS old_style; -CREATE TABLE old_style(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00754/old_style', 'r1', d, x, 8192); +CREATE TABLE old_style(d Date, x UInt32) 
ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00754/old_style', 'r1', d, x, 8192); ALTER TABLE old_style ADD COLUMN y UInt32, MODIFY ORDER BY (x, y); -- { serverError 36 } DROP TABLE old_style; DROP TABLE IF EXISTS summing_r1; DROP TABLE IF EXISTS summing_r2; -CREATE TABLE summing_r1(x UInt32, y UInt32, val UInt32) ENGINE ReplicatedSummingMergeTree('/clickhouse/tables/test_00754/summing', 'r1') ORDER BY (x, y); -CREATE TABLE summing_r2(x UInt32, y UInt32, val UInt32) ENGINE ReplicatedSummingMergeTree('/clickhouse/tables/test_00754/summing', 'r2') ORDER BY (x, y); +CREATE TABLE summing_r1(x UInt32, y UInt32, val UInt32) ENGINE ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00754/summing', 'r1') ORDER BY (x, y); +CREATE TABLE summing_r2(x UInt32, y UInt32, val UInt32) ENGINE ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00754/summing', 'r2') ORDER BY (x, y); /* Can't add an expression with existing column to ORDER BY. */ ALTER TABLE summing_r1 MODIFY ORDER BY (x, y, -val); -- { serverError 36 } diff --git a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper_long.reference similarity index 56% rename from tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference rename to tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper_long.reference index 838bd93ebaf..fbe0e7f564f 100644 --- a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.reference +++ b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper_long.reference @@ -1,5 +1,5 @@ -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 2 1 2 @@ -14,8 +14,8 @@ CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n I 3 2 19 9 65 75 -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE 
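
-- The serverError 36 cases above follow one rule: MODIFY ORDER BY may only
-- append expressions over columns added in the same ALTER; existing columns
-- cannot enter the sorting key retroactively. A sketch along the lines of
-- what 00754 itself does:
ALTER TABLE summing_r1 ADD COLUMN t UInt32, MODIFY ORDER BY (x, y, t * t);  -- OK
ALTER TABLE summing_r1 MODIFY ORDER BY (x, y, -val);  -- rejected, serverError 36
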
default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 4 1 5 @@ -28,10 +28,10 @@ CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n I 3 2 19 9 65 75 -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter1\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 4 1 5 @@ -44,14 +44,14 @@ CREATE TABLE default.minmax_idx_r\n(\n `u64` UInt64,\n `i32` Int32,\n I 3 2 19 9 65 75 -CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx2_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = 
ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx2_r\n(\n `u64` UInt64,\n `i32` Int32,\n INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10,\n INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 3 1 2 1 3 -CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 -CREATE TABLE default.minmax_idx2_r\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00836/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx2\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter2\', \'r1\')\nORDER BY u64\nSETTINGS index_granularity = 8192 +CREATE TABLE default.minmax_idx2_r\n(\n `u64` UInt64,\n `i32` Int32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00836/indices_alter2\', \'r2\')\nORDER BY u64\nSETTINGS index_granularity = 8192 1 2 1 3 1 2 diff --git a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper_long.sql similarity index 88% rename from tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql rename to tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper_long.sql index e038d2d425e..6274a70a381 100644 --- a/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper.sql +++ b/tests/queries/0_stateless/00836_indices_alter_replicated_zookeeper_long.sql @@ -9,14 +9,14 @@ CREATE TABLE minmax_idx ( u64 UInt64, i32 Int32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00836/indices_alter1', 'r1') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter1', 'r1') ORDER BY u64; CREATE TABLE minmax_idx_r ( u64 UInt64, i32 Int32 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00836/indices_alter1', 'r2') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter1', 'r2') ORDER BY u64; INSERT INTO minmax_idx VALUES (1, 2); @@ -74,7 +74,7 @@ CREATE TABLE minmax_idx2 i32 Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00836/indices_alter2', 'r1') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter2', 'r1') ORDER BY u64; CREATE TABLE minmax_idx2_r @@ -83,7 +83,7 @@ CREATE TABLE minmax_idx2_r i32 Int32, INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00836/indices_alter2', 'r2') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter2', 'r2') ORDER BY u64; diff --git a/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference 
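
-- The reference diffs above come from 00836 adding and dropping skip indices
-- at runtime; the DDL replicates, so SHOW CREATE changes on both replicas in
-- step. Sketch of the statements the test revolves around:
ALTER TABLE minmax_idx ADD INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10;
ALTER TABLE minmax_idx DROP INDEX idx1;
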
b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.reference rename to tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper_long.sql similarity index 93% rename from tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql rename to tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper_long.sql index 2213ccecd4e..0dd8dd41f97 100644 --- a/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper.sql +++ b/tests/queries/0_stateless/00837_minmax_index_replicated_zookeeper_long.sql @@ -15,7 +15,7 @@ CREATE TABLE minmax_idx1 idx_all (i32, i32 + f64, d, s, e, dt) TYPE minmax GRANULARITY 1, INDEX idx_2 (u64 + toYear(dt), substring(s, 2, 4)) TYPE minmax GRANULARITY 3 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00837/minmax', 'r1') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00837/minmax', 'r1') ORDER BY u64 SETTINGS index_granularity = 2; @@ -32,7 +32,7 @@ CREATE TABLE minmax_idx2 idx_all (i32, i32 + f64, d, s, e, dt) TYPE minmax GRANULARITY 1, INDEX idx_2 (u64 + toYear(dt), substring(s, 2, 4)) TYPE minmax GRANULARITY 3 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00837/minmax', 'r2') +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00837/minmax', 'r2') ORDER BY u64 SETTINGS index_granularity = 2; diff --git a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated_long.reference similarity index 87% rename from tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference rename to tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated_long.reference index 29e7b23d3dd..3b7faecbba4 100644 --- a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.reference +++ b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated_long.reference @@ -20,7 +20,7 @@ 274972506.6 9175437371954010821 9175437371954010821 -CREATE TABLE default.compression_codec_multiple_more_types_replicated\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00910/compression_codec_multiple_more_types_replicated\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.compression_codec_multiple_more_types_replicated\n(\n `id` Decimal(38, 13) CODEC(ZSTD(1), LZ4, ZSTD(1), ZSTD(1), Delta(2), Delta(4), Delta(1), LZ4HC(0)),\n `data` FixedString(12) CODEC(ZSTD(1), ZSTD(1), Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC(0)),\n `ddd.age` Array(UInt8) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8)),\n `ddd.Name` Array(String) CODEC(LZ4, LZ4HC(0), NONE, NONE, NONE, ZSTD(1), Delta(8))\n)\nENGINE = 
ReplicatedMergeTree(\'/clickhouse/tables/default/test_00910/compression_codec_multiple_more_types_replicated\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1.5555555555555 hello world! [77] ['John'] 7.1000000000000 xxxxxxxxxxxx [127] ['Henry'] ! diff --git a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated_long.sql similarity index 87% rename from tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql rename to tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated_long.sql index 52eb1d4e411..3fe121edc94 100644 --- a/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated.sql +++ b/tests/queries/0_stateless/00910_zookeeper_custom_compression_codecs_replicated_long.sql @@ -11,7 +11,7 @@ CREATE TABLE compression_codec_replicated1( somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/compression_codec_replicated', '1') ORDER BY tuple(); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_replicated', '1') ORDER BY tuple(); CREATE TABLE compression_codec_replicated2( id UInt64 CODEC(LZ4), @@ -20,7 +20,7 @@ CREATE TABLE compression_codec_replicated2( somenum Float64 CODEC(ZSTD(2)), somestr FixedString(3) CODEC(LZ4HC(7)), othernum Int64 CODEC(Delta) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/compression_codec_replicated', '2') ORDER BY tuple(); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_replicated', '2') ORDER BY tuple(); INSERT INTO compression_codec_replicated1 VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); @@ -57,14 +57,14 @@ CREATE TABLE compression_codec_multiple_replicated1 ( data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/compression_codec_multiple', '1') ORDER BY tuple(); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple', '1') ORDER BY tuple(); CREATE TABLE compression_codec_multiple_replicated2 ( id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/compression_codec_multiple', '2') ORDER BY tuple(); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple', '2') ORDER BY tuple(); INSERT INTO compression_codec_multiple_replicated2 VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); @@ -106,7 +106,7 @@ CREATE TABLE compression_codec_multiple_more_types_replicated ( id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), data FixedString(12) CODEC(ZSTD, ZSTD, Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC), ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) -) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/test_00910/compression_codec_multiple_more_types_replicated', '1') ORDER BY tuple(); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple_more_types_replicated', '1') ORDER BY tuple(); SHOW CREATE TABLE compression_codec_multiple_more_types_replicated; @@ -124,7 +124,7 @@ CREATE TABLE compression_codec_multiple_with_key_replicated ( somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta), id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta), data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/compression_codec_multiple_with_key_replicated', '1') PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple_with_key_replicated', '1') PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; INSERT INTO compression_codec_multiple_with_key_replicated VALUES(toDate('2018-10-12'), 100000, 'hello'), (toDate('2018-10-12'), 100002, 'world'), (toDate('2018-10-12'), 1111, '!'); diff --git a/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs_long.sql b/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs_long.sql index 548f26eadd0..e4994ff8d75 100644 --- a/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs_long.sql +++ b/tests/queries/0_stateless/00910_zookeeper_test_alter_compression_codecs_long.sql @@ -7,12 +7,12 @@ DROP TABLE IF EXISTS alter_compression_codec2; CREATE TABLE alter_compression_codec1 ( somedate Date CODEC(LZ4), id UInt64 CODEC(NONE) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/'||currentDatabase()||'alter_compression_codecs/{shard}', '1_{replica}') PARTITION BY somedate ORDER BY id; +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/'||currentDatabase()||'alter_compression_codecs/{shard}', '1_{replica}') PARTITION BY somedate ORDER BY id; CREATE TABLE alter_compression_codec2 ( somedate Date CODEC(LZ4), id UInt64 CODEC(NONE) -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00910/'||currentDatabase()||'alter_compression_codecs/{shard}', '2_{replica}') PARTITION BY somedate ORDER BY id; +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/'||currentDatabase()||'alter_compression_codecs/{shard}', '2_{replica}') PARTITION BY somedate ORDER BY id; INSERT INTO alter_compression_codec1 VALUES('2018-01-01', 1); INSERT INTO alter_compression_codec1 VALUES('2018-01-01', 2); diff --git a/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final_long.reference similarity index 100% rename from tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.reference rename to tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final_long.reference diff --git a/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final_long.sql similarity index 62% rename from tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql rename to tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final_long.sql index e227222a939..2b119836ebf 100644 --- 
a/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final.sql +++ b/tests/queries/0_stateless/00925_zookeeper_empty_replicated_merge_tree_optimize_final_long.sql @@ -1,7 +1,7 @@ DROP TABLE IF EXISTS replicated_optimize1; DROP TABLE IF EXISTS replicated_optimize2; -CREATE TABLE replicated_optimize1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00925/optimize', 'r1', d, k, 8192); -CREATE TABLE replicated_optimize2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_00925/optimize', 'r2', d, k, 8192); +CREATE TABLE replicated_optimize1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00925/optimize', 'r1', d, k, 8192); +CREATE TABLE replicated_optimize2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00925/optimize', 'r2', d, k, 8192); OPTIMIZE TABLE replicated_optimize1 FINAL; diff --git a/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.reference similarity index 100% rename from tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.reference rename to tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.reference diff --git a/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql similarity index 83% rename from tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql rename to tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql index 92b34a243a4..1e04679e1f3 100644 --- a/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree.sql +++ b/tests/queries/0_stateless/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long.sql @@ -8,14 +8,14 @@ CREATE TABLE zero_rows_per_granule1 ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00926/zero_rows_in_granule', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0; +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/zero_rows_in_granule', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0; CREATE TABLE zero_rows_per_granule2 ( p Date, k UInt64, v1 UInt64, v2 Int64 -) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00926/zero_rows_in_granule', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0; +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/zero_rows_in_granule', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0; INSERT INTO zero_rows_per_granule1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -68,14 +68,14 @@ CREATE TABLE four_rows_per_granule1 ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00926/four_rows_in_granule', 
'1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/four_rows_in_granule', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; CREATE TABLE four_rows_per_granule2 ( p Date, k UInt64, v1 UInt64, v2 Int64 -) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00926/four_rows_in_granule', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100 ,write_final_mark = 0; +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/four_rows_in_granule', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100 ,write_final_mark = 0; INSERT INTO four_rows_per_granule1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); @@ -135,14 +135,14 @@ CREATE TABLE adaptive_granularity_alter1 ( k UInt64, v1 UInt64, v2 Int64 -) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00926/adaptive_granularity_alter', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/adaptive_granularity_alter', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; CREATE TABLE adaptive_granularity_alter2 ( p Date, k UInt64, v1 UInt64, v2 Int64 -) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00926/adaptive_granularity_alter', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/adaptive_granularity_alter', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; INSERT INTO adaptive_granularity_alter1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference deleted file mode 100644 index e84c3beabee..00000000000 --- a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.reference +++ /dev/null @@ -1,3 +0,0 @@ -200 -400 -CREATE TABLE default.ttl_repl2\n(\n `d` Date,\n `x` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_00933/ttl_repl\', \'2\')\nPARTITION BY toDayOfMonth(d)\nORDER BY x\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.reference b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.reference new file mode 100644 index 00000000000..3dc07236d2b --- /dev/null +++ b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.reference @@ -0,0 +1,3 @@ +200 +400 +CREATE TABLE default.ttl_repl2\n(\n `d` Date,\n `x` UInt32\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_00933/ttl_repl\', \'2\')\nPARTITION BY toDayOfMonth(d)\nORDER BY x\nTTL d + toIntervalDay(1)\nSETTINGS index_granularity = 8192 diff --git 
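The freshly added 00933 reference above spells out '/clickhouse/tables/default/test_00933/ttl_repl', and the renamed .sql below shows where that comes from: SHOW CREATE TABLE prints the ZooKeeper path with the macro already substituted, which is why every .reference file embedding a path changes in lockstep with its .sql file. A sketch of that round trip (the 'default' expansion assumes the stateless runner's default database):

    CREATE TABLE ttl_repl1 (d Date, x UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '1')
        PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY;

    SHOW CREATE TABLE ttl_repl1;
    -- ... ReplicatedMergeTree('/clickhouse/tables/default/test_00933/ttl_repl', '1') ...
    -- the macro is expanded once, at CREATE time, and stored in the table definition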
a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.sql similarity index 88% rename from tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql rename to tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.sql index dbbbe887e9f..6085545dc02 100644 --- a/tests/queries/0_stateless/00933_ttl_replicated_zookeeper.sql +++ b/tests/queries/0_stateless/00933_ttl_replicated_zookeeper_long.sql @@ -1,9 +1,9 @@ DROP TABLE IF EXISTS ttl_repl1; DROP TABLE IF EXISTS ttl_repl2; -CREATE TABLE ttl_repl1(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00933/ttl_repl', '1') +CREATE TABLE ttl_repl1(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '1') PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY; -CREATE TABLE ttl_repl2(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_00933/ttl_repl', '2') +CREATE TABLE ttl_repl2(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00933/ttl_repl', '2') PARTITION BY toDayOfMonth(d) ORDER BY x TTL d + INTERVAL 1 DAY; INSERT INTO TABLE ttl_repl1 VALUES (toDate('2000-10-10 00:00:00'), 100); diff --git a/tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference b/tests/queries/0_stateless/00988_constraints_replication_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/00988_constraints_replication_zookeeper.reference rename to tests/queries/0_stateless/00988_constraints_replication_zookeeper_long.reference diff --git a/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql b/tests/queries/0_stateless/00988_constraints_replication_zookeeper_long.sql similarity index 86% rename from tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql rename to tests/queries/0_stateless/00988_constraints_replication_zookeeper_long.sql index f8895843dd3..c36bee7fe4f 100644 --- a/tests/queries/0_stateless/00988_constraints_replication_zookeeper.sql +++ b/tests/queries/0_stateless/00988_constraints_replication_zookeeper_long.sql @@ -6,14 +6,14 @@ CREATE TABLE replicated_constraints1 a UInt32, b UInt32, CONSTRAINT a_constraint CHECK a < 10 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00988/alter_constraints', 'r1') ORDER BY (a); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00988/alter_constraints', 'r1') ORDER BY (a); CREATE TABLE replicated_constraints2 ( a UInt32, b UInt32, CONSTRAINT a_constraint CHECK a < 10 -) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00988/alter_constraints', 'r2') ORDER BY (a); +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00988/alter_constraints', 'r2') ORDER BY (a); INSERT INTO replicated_constraints1 VALUES (1, 2); INSERT INTO replicated_constraints2 VALUES (3, 4); diff --git a/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql b/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql index 60c6d55c4f5..262ccecc7d4 100644 --- a/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql +++ b/tests/queries/0_stateless/01037_zookeeper_check_table_empty_pk.sql @@ -13,7 +13,7 @@ DROP TABLE IF EXISTS mt_without_pk; DROP TABLE IF EXISTS replicated_mt_without_pk; -CREATE TABLE replicated_mt_without_pk (SomeField1 Int64, SomeField2 Double) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01037/replicated_mt_without_pk', '1') ORDER BY tuple(); +CREATE TABLE 
replicated_mt_without_pk (SomeField1 Int64, SomeField2 Double) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01037/replicated_mt_without_pk', '1') ORDER BY tuple(); INSERT INTO replicated_mt_without_pk VALUES (1, 2); diff --git a/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations_long.reference similarity index 100% rename from tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.reference rename to tests/queries/0_stateless/01049_zookeeper_synchronous_mutations_long.reference diff --git a/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations_long.sql similarity index 88% rename from tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql rename to tests/queries/0_stateless/01049_zookeeper_synchronous_mutations_long.sql index c84fb22043f..e369b500c35 100644 --- a/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations.sql +++ b/tests/queries/0_stateless/01049_zookeeper_synchronous_mutations_long.sql @@ -3,9 +3,9 @@ DROP TABLE IF EXISTS table_for_synchronous_mutations2; SELECT 'Replicated'; -CREATE TABLE table_for_synchronous_mutations1(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01049/table_for_synchronous_mutations', '1') ORDER BY k; +CREATE TABLE table_for_synchronous_mutations1(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01049/table_for_synchronous_mutations', '1') ORDER BY k; -CREATE TABLE table_for_synchronous_mutations2(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01049/table_for_synchronous_mutations', '2') ORDER BY k; +CREATE TABLE table_for_synchronous_mutations2(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01049/table_for_synchronous_mutations', '2') ORDER BY k; INSERT INTO table_for_synchronous_mutations1 select number, number from numbers(100000); diff --git a/tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper.reference b/tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper.reference rename to tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper.sql b/tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper_long.sql similarity index 93% rename from tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper.sql rename to tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper_long.sql index b830c549dc3..53d7100a38e 100644 --- a/tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper.sql +++ b/tests/queries/0_stateless/01062_alter_on_mutataion_zookeeper_long.sql @@ -6,7 +6,7 @@ CREATE TABLE test_alter_on_mutation key UInt64, value String ) -ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01062/alter_on_mutation', '1') +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01062/alter_on_mutation', '1') ORDER BY key PARTITION BY date; INSERT INTO test_alter_on_mutation select toDate('2020-01-05'), number, toString(number) from system.numbers limit 100; @@ -58,7 +58,7 @@ DROP TABLE IF EXISTS test_alter_on_mutation; DROP TABLE IF EXISTS nested_alter; -CREATE TABLE nested_alter (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` 
Array(Date), `s` String DEFAULT '0') ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01062/nested_alter', 'r2', d, k, 8192); +CREATE TABLE nested_alter (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` Array(String), `n.d` Array(Date), `s` String DEFAULT '0') ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01062/nested_alter', 'r2', d, k, 8192); INSERT INTO nested_alter VALUES ('2015-01-01', 6,38,'2014-07-15 13:26:50',[10,20,30],['asd','qwe','qwe'],['2000-01-01','2000-01-01','2000-01-03'],'100500'); diff --git a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference b/tests/queries/0_stateless/01079_alter_default_zookeeper_long.reference similarity index 54% rename from tests/queries/0_stateless/01079_alter_default_zookeeper.reference rename to tests/queries/0_stateless/01079_alter_default_zookeeper_long.reference index 7dd539b2683..32d31af0058 100644 --- a/tests/queries/0_stateless/01079_alter_default_zookeeper.reference +++ b/tests/queries/0_stateless/01079_alter_default_zookeeper_long.reference @@ -1,11 +1,11 @@ -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` String DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` String DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 1000 -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT \'10\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 1000 -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt16 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt64 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt16 DEFAULT 100\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 10000 -CREATE TABLE 
default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10,\n `better_column` UInt8 DEFAULT \'1\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 -CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10,\n `better_column` UInt8 DEFAULT \'1\',\n `other_date` String DEFAULT 1\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10,\n `better_column` UInt8 DEFAULT \'1\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.alter_default\n(\n `date` Date,\n `key` UInt64,\n `value` UInt8 DEFAULT 10,\n `better_column` UInt8 DEFAULT \'1\',\n `other_date` String DEFAULT 1\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01079/alter_default\', \'1\')\nORDER BY key\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01079_alter_default_zookeeper.sql b/tests/queries/0_stateless/01079_alter_default_zookeeper_long.sql similarity index 94% rename from tests/queries/0_stateless/01079_alter_default_zookeeper.sql rename to tests/queries/0_stateless/01079_alter_default_zookeeper_long.sql index 6fa9d2bf4e0..7257f86c4e8 100644 --- a/tests/queries/0_stateless/01079_alter_default_zookeeper.sql +++ b/tests/queries/0_stateless/01079_alter_default_zookeeper_long.sql @@ -5,7 +5,7 @@ CREATE TABLE alter_default date Date, key UInt64 ) -ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01079/alter_default', '1') +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01079/alter_default', '1') ORDER BY key; INSERT INTO alter_default select toDate('2020-01-05'), number from system.numbers limit 100; diff --git a/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.reference similarity index 100% rename from tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.reference rename to tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.reference diff --git a/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql similarity index 72% rename from tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql rename to tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql index 5597bf523e4..d38c639458d 100644 --- a/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum.sql +++ b/tests/queries/0_stateless/01090_zookeeper_mutations_and_insert_quorum_long.sql @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS mutations_and_quorum1; DROP TABLE IF EXISTS mutations_and_quorum2; -CREATE TABLE 
mutations_and_quorum1 (`server_date` Date, `something` String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01090/mutations_and_quorum', '1') PARTITION BY toYYYYMM(server_date) ORDER BY (server_date, something); -CREATE TABLE mutations_and_quorum2 (`server_date` Date, `something` String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01090/mutations_and_quorum', '2') PARTITION BY toYYYYMM(server_date) ORDER BY (server_date, something); +CREATE TABLE mutations_and_quorum1 (`server_date` Date, `something` String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01090/mutations_and_quorum', '1') PARTITION BY toYYYYMM(server_date) ORDER BY (server_date, something); +CREATE TABLE mutations_and_quorum2 (`server_date` Date, `something` String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01090/mutations_and_quorum', '2') PARTITION BY toYYYYMM(server_date) ORDER BY (server_date, something); SET insert_quorum=2, insert_quorum_parallel=0; diff --git a/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference b/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference index cf69f1c5896..30cdc08e87d 100644 --- a/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference +++ b/tests/queries/0_stateless/01135_default_and_alter_zookeeper.reference @@ -1,2 +1,2 @@ 4 -CREATE TABLE default.default_table\n(\n `id` UInt64,\n `enum_column` Enum8(\'undefined\' = 0, \'fox\' = 1, \'index\' = 2) DEFAULT \'fox\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01135/default_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.default_table\n(\n `id` UInt64,\n `enum_column` Enum8(\'undefined\' = 0, \'fox\' = 1, \'index\' = 2) DEFAULT \'fox\'\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01135/default_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01135_default_and_alter_zookeeper.sql b/tests/queries/0_stateless/01135_default_and_alter_zookeeper.sql index 982c72eb333..ea8c943b9a4 100644 --- a/tests/queries/0_stateless/01135_default_and_alter_zookeeper.sql +++ b/tests/queries/0_stateless/01135_default_and_alter_zookeeper.sql @@ -5,7 +5,7 @@ CREATE TABLE default_table id UInt64, enum_column Enum8('undefined' = 0, 'fox' = 1, 'index' = 2) ) -ENGINE ReplicatedMergeTree('/clickhouse/test_01135/default_table', '1') +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test_01135/default_table', '1') ORDER BY tuple(); INSERT INTO default_table VALUES(1, 'index'), (2, 'fox'); diff --git a/tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper.reference b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper.reference rename to tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper.sql b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper_long.sql similarity index 92% rename from tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper.sql rename to tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper_long.sql index 2aa903d99f0..9464ac24dff 100644 --- a/tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper.sql +++ 
b/tests/queries/0_stateless/01201_drop_column_compact_part_replicated_zookeeper_long.sql @@ -3,7 +3,7 @@ set replication_alter_partitions_sync = 2; drop table if exists mt_compact; create table mt_compact(a UInt64, b UInt64 DEFAULT a * a, s String, n Nested(x UInt32, y String), lc LowCardinality(String)) -engine = ReplicatedMergeTree('/clickhouse/test_01201/mt_compact_replicated', '1') +engine = ReplicatedMergeTree('/clickhouse/{database}/test_01201/mt_compact_replicated', '1') order by a partition by a % 10 settings index_granularity = 8, min_rows_for_wide_part = 10; diff --git a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.reference rename to tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper_long.sql similarity index 89% rename from tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql rename to tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper_long.sql index 616a213e46c..91dd6e67512 100644 --- a/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper.sql +++ b/tests/queries/0_stateless/01213_alter_rename_primary_key_zookeeper_long.sql @@ -9,7 +9,7 @@ CREATE TABLE table_for_rename_pk value1 String, value2 String ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01213/table_for_rename_pk1', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01213/table_for_rename_pk1', '1') PARTITION BY date ORDER BY (key1, pow(key2, 2), key3); @@ -37,7 +37,7 @@ CREATE TABLE table_for_rename_with_primary_key value2 String, INDEX idx (value1) TYPE set(1) GRANULARITY 1 ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01213/table_for_rename_pk2', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01213/table_for_rename_pk2', '1') PARTITION BY date ORDER BY (key1, key2, key3) PRIMARY KEY (key1, key2); diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper_long.reference similarity index 74% rename from tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference rename to tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper_long.reference index da3dad5cb16..2a6b00cdddb 100644 --- a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.reference +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper_long.reference @@ -8,10 +8,10 @@ Hello 1 Word 1 date1 date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl\n(\n `date1` Date,\n `date2` Date,\n `value1` String,\n `value2` String TTL date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01213/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `date1` Date,\n `date2` Date,\n `value1` String,\n `value2` String TTL date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01213/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 
8192 renamed_date1 date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date,\n `date2` Date,\n `value1` String,\n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01213/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date,\n `date2` Date,\n `value1` String,\n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01213/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 renamed_date1 renamed_date2 value1 value2 2019-10-02 2018-10-02 1 1 -CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date,\n `renamed_date2` Date,\n `value1` String,\n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01213/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL renamed_date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_rename_with_ttl\n(\n `renamed_date1` Date,\n `renamed_date2` Date,\n `value1` String,\n `value2` String TTL renamed_date1 + toIntervalMonth(10000)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01213/table_rename_with_ttl\', \'1\')\nORDER BY tuple()\nTTL renamed_date2 + toIntervalMonth(10000)\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper_long.sql similarity index 95% rename from tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql rename to tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper_long.sql index e5701077770..cb9e918e335 100644 --- a/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper.sql +++ b/tests/queries/0_stateless/01213_alter_rename_with_default_zookeeper_long.sql @@ -38,7 +38,7 @@ CREATE TABLE table_rename_with_ttl value1 String, value2 String TTL date1 + INTERVAL 10000 MONTH ) -ENGINE = ReplicatedMergeTree('/clickhouse/test_01213/table_rename_with_ttl', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01213/table_rename_with_ttl', '1') ORDER BY tuple() TTL date2 + INTERVAL 10000 MONTH; diff --git a/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.reference index 4442b0b6b61..a4a6b87de25 100644 --- a/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.reference +++ b/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.reference @@ -57,7 +57,9 @@ LIMIT 1 0 LIMIT OFFSET 1 1 -OFFSET +OFFSET distributed_push_down_limit=0 +1 1 +OFFSET distributed_push_down_limit=1 1 1 1 0 1 1 @@ -65,6 +67,8 @@ WHERE LIMIT OFFSET 1 1 LIMIT BY 1 1 0 +1 0 +1 1 1 1 GROUP BY (Distributed-over-Distributed) 4 0 diff --git a/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql b/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql index 1dcdd795bc1..a8dc0d91c37 100644 --- a/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql +++ b/tests/queries/0_stateless/01244_optimize_distributed_group_by_sharding_key.sql @@ -60,8 +60,10 @@ select 'LIMIT'; select 
count(), * from dist_01247 group by number limit 1; select 'LIMIT OFFSET'; select count(), * from dist_01247 group by number limit 1 offset 1; -select 'OFFSET'; -select count(), * from dist_01247 group by number offset 1; +select 'OFFSET distributed_push_down_limit=0'; +select count(), * from dist_01247 group by number offset 1 settings distributed_push_down_limit=0; +select 'OFFSET distributed_push_down_limit=1'; +select count(), * from dist_01247 group by number offset 1 settings distributed_push_down_limit=1; -- this will emulate different data on for different shards select 'WHERE LIMIT OFFSET'; select count(), * from dist_01247 where number = _shard_num-1 group by number order by number limit 1 offset 1; diff --git a/tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper.reference b/tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper.reference rename to tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper.sql b/tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper_long.sql similarity index 83% rename from tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper.sql rename to tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper_long.sql index d96085bc086..6e7f4d55a53 100644 --- a/tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper.sql +++ b/tests/queries/0_stateless/01267_alter_default_key_columns_zookeeper_long.sql @@ -8,8 +8,8 @@ DROP TABLE test_alter; DROP TABLE IF EXISTS test_alter_r1; DROP TABLE IF EXISTS test_alter_r2; -CREATE TABLE test_alter_r1 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01267/alter', 'r1') ORDER BY s PARTITION BY x; -CREATE TABLE test_alter_r2 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01267/alter', 'r2') ORDER BY s PARTITION BY x; +CREATE TABLE test_alter_r1 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01267/alter', 'r1') ORDER BY s PARTITION BY x; +CREATE TABLE test_alter_r2 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01267/alter', 'r2') ORDER BY s PARTITION BY x; ALTER TABLE test_alter_r1 MODIFY COLUMN s DEFAULT 'Hello' SETTINGS replication_alter_partitions_sync = 2; ALTER TABLE test_alter_r2 MODIFY COLUMN x DEFAULT '2000-01-01' SETTINGS replication_alter_partitions_sync = 2; diff --git a/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference b/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper_long.reference similarity index 90% rename from tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference rename to tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper_long.reference index 84ca8273128..382ccb592af 100644 --- a/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.reference +++ b/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper_long.reference @@ -7,7 +7,7 @@ 2019-10-01 6 6 7 8 2019-10-02 7 7 8 9 2019-10-03 8 8 9 10 -CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n `value4` String,\n `value5` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value4) < toInt64(value5),\n CONSTRAINT cs_value2 CHECK toInt64(value5) < 
toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01277/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n `value4` String,\n `value5` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value4) < toInt64(value5),\n CONSTRAINT cs_value2 CHECK toInt64(value5) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01277/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2 2019-10-02 1 1 2 3 2019-10-03 2 2 3 4 @@ -38,7 +38,7 @@ CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n 2019-10-01 18 18 19 20 2019-10-02 19 19 20 21 -- rename columns back -- -CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2),\n CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/test_01277/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 +CREATE TABLE default.table_for_rename1\n(\n `date` Date,\n `key` UInt64,\n `value1` String,\n `value2` String,\n `value3` String,\n CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2),\n CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/test_01277/test_for_rename\', \'1\')\nPARTITION BY date\nORDER BY key\nSETTINGS index_granularity = 8192 2019-10-01 0 0 1 2 2019-10-02 1 1 2 3 2019-10-03 2 2 3 4 diff --git a/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.sql b/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper_long.sql similarity index 95% rename from tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.sql rename to tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper_long.sql index 28f17dced97..10ab75e42bd 100644 --- a/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper.sql +++ b/tests/queries/0_stateless/01277_alter_rename_column_constraint_zookeeper_long.sql @@ -10,7 +10,7 @@ CREATE TABLE table_for_rename1 CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2), CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3) ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01277/test_for_rename', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01277/test_for_rename', '1') PARTITION BY date ORDER BY key; diff --git a/tests/queries/0_stateless/01300_client_save_history_when_terminated_long.expect b/tests/queries/0_stateless/01300_client_save_history_when_terminated_long.expect index 7fed383dc38..0c53f5658d1 100755 --- a/tests/queries/0_stateless/01300_client_save_history_when_terminated_long.expect +++ b/tests/queries/0_stateless/01300_client_save_history_when_terminated_long.expect @@ -27,7 +27,7 @@ close spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT" expect ":) " send -- "\[A" -expect "SELECT 'for the history'" +expect "for the history" # Will check that Ctrl+C clears current line. 
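Stepping back from the individual hunks: the common thread is that every replicated test now registers its replica metadata under a database-scoped ZooKeeper prefix. When debugging a stray test, the expansion can also be checked against ZooKeeper itself; a sketch, assuming a ZooKeeper-backed server and that the test ran in the default database (both the path and the expected child znode are illustrative):

    SELECT name
    FROM system.zookeeper
    WHERE path = '/clickhouse/tables/default/test_00836'
    -- lists the znodes created under the scoped prefix, e.g. 'indices_alter1';
    -- system.zookeeper requires an explicit path condition in WHERE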
send -- "\3" diff --git a/tests/queries/0_stateless/01319_manual_write_to_replicas.reference b/tests/queries/0_stateless/01319_manual_write_to_replicas_long.reference similarity index 100% rename from tests/queries/0_stateless/01319_manual_write_to_replicas.reference rename to tests/queries/0_stateless/01319_manual_write_to_replicas_long.reference diff --git a/tests/queries/0_stateless/01319_manual_write_to_replicas.sql b/tests/queries/0_stateless/01319_manual_write_to_replicas_long.sql similarity index 85% rename from tests/queries/0_stateless/01319_manual_write_to_replicas.sql rename to tests/queries/0_stateless/01319_manual_write_to_replicas_long.sql index 5388f0017c0..7fb4d0b7d61 100644 --- a/tests/queries/0_stateless/01319_manual_write_to_replicas.sql +++ b/tests/queries/0_stateless/01319_manual_write_to_replicas_long.sql @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS r1; DROP TABLE IF EXISTS r2; -CREATE TABLE r1 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/r', 'r1') ORDER BY x; -CREATE TABLE r2 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/r', 'r2') ORDER BY x; +CREATE TABLE r1 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/r', 'r1') ORDER BY x; +CREATE TABLE r2 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/r', 'r2') ORDER BY x; SYSTEM STOP REPLICATED SENDS; diff --git a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper.reference b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper.reference rename to tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper.sql b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql similarity index 92% rename from tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper.sql rename to tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql index d185973f564..d0b6fbe3dde 100644 --- a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper.sql +++ b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS test; DROP TABLE IF EXISTS test2; -CREATE TABLE test (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01346/table', 'r1') PARTITION BY x ORDER BY y; -CREATE TABLE test2 (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01346/table', 'r2') PARTITION BY x ORDER BY y; +CREATE TABLE test (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01346/table', 'r1') PARTITION BY x ORDER BY y; +CREATE TABLE test2 (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01346/table', 'r2') PARTITION BY x ORDER BY y; INSERT INTO test VALUES ('hello', 'test'); SELECT * FROM test; diff --git a/tests/queries/0_stateless/01378_alter_rename_with_ttl_zookeeper.sql b/tests/queries/0_stateless/01378_alter_rename_with_ttl_zookeeper.sql index 0cd6feb9da1..79d1276ae61 100644 --- a/tests/queries/0_stateless/01378_alter_rename_with_ttl_zookeeper.sql +++ b/tests/queries/0_stateless/01378_alter_rename_with_ttl_zookeeper.sql @@ -5,7 +5,7 @@ CREATE TABLE 
table_rename_with_ttl date1 Date, value1 String ) -ENGINE = ReplicatedMergeTree('/clickhouse/test/table_rename_with_ttl_01378', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test/table_rename_with_ttl_01378', '1') ORDER BY tuple(); INSERT INTO table_rename_with_ttl SELECT toDate('2018-10-01') + number % 3, toString(number) from numbers(9); diff --git a/tests/queries/0_stateless/01430_modify_sample_by_zookeeper.reference b/tests/queries/0_stateless/01430_modify_sample_by_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01430_modify_sample_by_zookeeper.reference rename to tests/queries/0_stateless/01430_modify_sample_by_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01430_modify_sample_by_zookeeper.sql b/tests/queries/0_stateless/01430_modify_sample_by_zookeeper_long.sql similarity index 96% rename from tests/queries/0_stateless/01430_modify_sample_by_zookeeper.sql rename to tests/queries/0_stateless/01430_modify_sample_by_zookeeper_long.sql index dc7f5017bfd..288fa97d218 100644 --- a/tests/queries/0_stateless/01430_modify_sample_by_zookeeper.sql +++ b/tests/queries/0_stateless/01430_modify_sample_by_zookeeper_long.sql @@ -11,7 +11,7 @@ SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample SAMPLE 0 ALTER TABLE modify_sample MODIFY SAMPLE BY x; SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample SAMPLE 0.1; -CREATE TABLE modify_sample_replicated (d Date DEFAULT '2000-01-01', x UInt8, y UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01430', 'modify_sample') PARTITION BY d ORDER BY (x, y); +CREATE TABLE modify_sample_replicated (d Date DEFAULT '2000-01-01', x UInt8, y UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01430', 'modify_sample') PARTITION BY d ORDER BY (x, y); INSERT INTO modify_sample_replicated (x, y) SELECT toUInt8(number) AS x, toUInt64(number) as y FROM system.numbers LIMIT 256; diff --git a/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum.reference b/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.reference similarity index 100% rename from tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum.reference rename to tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.reference diff --git a/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum.sql b/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql similarity index 84% rename from tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum.sql rename to tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql index 72e588c5c02..2c6bafe4936 100644 --- a/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum.sql +++ b/tests/queries/0_stateless/01451_replicated_detach_drop_and_quorum_long.sql @@ -4,8 +4,8 @@ SET replication_alter_partitions_sync = 2; DROP TABLE IF EXISTS replica1; DROP TABLE IF EXISTS replica2; -CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/01451/quorum', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; -CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/01451/quorum', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; +CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/01451/quorum', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; +CREATE TABLE replica2 (v 
UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/01451/quorum', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; INSERT INTO replica1 VALUES (0); diff --git a/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql b/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql index cd8267ce59a..5da86f45fde 100644 --- a/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql +++ b/tests/queries/0_stateless/01451_replicated_detach_drop_part_long.sql @@ -3,8 +3,8 @@ SET replication_alter_partitions_sync = 2; DROP TABLE IF EXISTS replica1; DROP TABLE IF EXISTS replica2; -CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/'||currentDatabase()||'test/01451/attach', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; -CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/'||currentDatabase()||'test/01451/attach', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; +CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/'||currentDatabase()||'test/01451/attach', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; +CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/'||currentDatabase()||'test/01451/attach', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; INSERT INTO replica1 VALUES (0); INSERT INTO replica1 VALUES (1); diff --git a/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper.reference b/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper_long.reference similarity index 58% rename from tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper.reference rename to tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper_long.reference index 82f6fbd6615..a770460c8b8 100644 --- a/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper.reference +++ b/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper_long.reference @@ -1,4 +1,4 @@ CREATE TABLE default.no_prop_table\n(\n `some_column` UInt64\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 CREATE TABLE default.no_prop_table\n(\n `some_column` UInt64\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_no_prop_table\n(\n `some_column` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/01493_r_no_prop_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_no_prop_table\n(\n `some_column` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test/01493_r_no_prop_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_no_prop_table\n(\n `some_column` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test/01493_r_no_prop_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_no_prop_table\n(\n `some_column` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test/01493_r_no_prop_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper.sql b/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper_long.sql similarity index 95% rename from tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper.sql rename to 
tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper_long.sql index aceb4e8140d..8d37900aaf8 100644 --- a/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper.sql +++ b/tests/queries/0_stateless/01493_alter_remove_no_property_zookeeper_long.sql @@ -29,7 +29,7 @@ CREATE TABLE r_no_prop_table ( some_column UInt64 ) -ENGINE ReplicatedMergeTree('/clickhouse/test/01493_r_no_prop_table', '1') +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test/01493_r_no_prop_table', '1') ORDER BY tuple(); SHOW CREATE TABLE r_no_prop_table; diff --git a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.reference b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.reference index 7cd2c81b460..42f9b24715f 100644 --- a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.reference +++ b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.reference @@ -1,21 +1,21 @@ -CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date COMMENT \'Some comment\',\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date COMMENT \'Some comment\',\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date COMMENT \'Some comment\',\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date COMMENT \'Some comment\',\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 ====== remove column comment ====== -CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date,\n `column_ttl` UInt64 TTL 
column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String CODEC(ZSTD(10)),\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 ====== remove column codec ====== -CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64 DEFAULT 42,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 ====== remove column default ====== 42 str 1 0 tsr 2 -CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64 TTL column_comment + toIntervalMonth(1)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + 
toIntervalMonth(2)\nSETTINGS index_granularity = 8192 ====== remove column TTL ====== -CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nTTL column_comment + toIntervalMonth(2)\nSETTINGS index_granularity = 8192 ====== remove table TTL ====== -CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 -CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table1\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.r_prop_table2\n(\n `column_default` UInt64,\n `column_codec` String,\n `column_comment` Date,\n `column_ttl` UInt64\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/test_01493/r_prop_table\', \'2\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 3 3 diff --git a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql index b810e2a8737..55f610eb7d0 100644 --- a/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql +++ b/tests/queries/0_stateless/01493_alter_remove_properties_zookeeper.sql @@ -10,7 +10,7 @@ CREATE TABLE r_prop_table1 column_comment Date COMMENT 'Some comment', column_ttl UInt64 TTL column_comment + INTERVAL 1 MONTH ) -ENGINE ReplicatedMergeTree('/clickhouse/test_01493/r_prop_table', '1') +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test_01493/r_prop_table', '1') ORDER BY tuple() TTL column_comment + INTERVAL 2 MONTH; @@ -21,7 +21,7 @@ CREATE TABLE r_prop_table2 column_comment Date COMMENT 'Some comment', column_ttl UInt64 TTL column_comment + INTERVAL 1 MONTH ) -ENGINE ReplicatedMergeTree('/clickhouse/test_01493/r_prop_table', '2') +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test_01493/r_prop_table', '2') ORDER BY tuple() TTL column_comment + INTERVAL 2 MONTH; 
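A note on the pattern that recurs through these hunks: ClickHouse expands the {database} macro in ReplicatedMergeTree engine arguments to the database of the table being created, so each test database gets its own ZooKeeper subtree and parallel runs of the same test stop colliding on shared znodes. A minimal sketch of the expansion, with illustrative names (test_abc123 and t are not taken from any hunk here):

-- Created inside database test_abc123:
CREATE TABLE test_abc123.t (x UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t', 'r1')
ORDER BY x;
-- The replica registers under /clickhouse/tables/test_abc123/t, so the same
-- statement run in another database lands on a disjoint ZooKeeper path.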
diff --git a/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas.reference b/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.reference similarity index 100% rename from tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas.reference rename to tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.reference diff --git a/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas.sql b/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql similarity index 91% rename from tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas.sql rename to tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql index 16c4a4df936..ba2b774b718 100644 --- a/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas.sql +++ b/tests/queries/0_stateless/01509_parallel_quorum_insert_no_replicas_long.sql @@ -4,13 +4,13 @@ DROP TABLE IF EXISTS r2 SYNC; CREATE TABLE r1 ( key UInt64, value String ) -ENGINE = ReplicatedMergeTree('/clickhouse/01509_parallel_quorum_insert_no_replicas', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01509_parallel_quorum_insert_no_replicas', '1') ORDER BY tuple(); CREATE TABLE r2 ( key UInt64, value String ) -ENGINE = ReplicatedMergeTree('/clickhouse/01509_parallel_quorum_insert_no_replicas', '2') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01509_parallel_quorum_insert_no_replicas', '2') ORDER BY tuple(); SET insert_quorum_parallel=1; diff --git a/tests/queries/0_stateless/01512_create_replicate_merge_tree_one_arg.sql b/tests/queries/0_stateless/01512_create_replicate_merge_tree_one_arg.sql index f33c1534a44..2ee73865122 100644 --- a/tests/queries/0_stateless/01512_create_replicate_merge_tree_one_arg.sql +++ b/tests/queries/0_stateless/01512_create_replicate_merge_tree_one_arg.sql @@ -1,3 +1,3 @@ -CREATE TABLE mt (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01497/mt') +CREATE TABLE mt (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01497/mt') ORDER BY tuple() -- { serverError 36 } diff --git a/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper.reference b/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper.reference rename to tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper.sql b/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql similarity index 79% rename from tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper.sql rename to tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql index 0eb9f20ad91..925dd042435 100644 --- a/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper.sql +++ b/tests/queries/0_stateless/01513_count_without_select_sequence_consistency_zookeeper_long.sql @@ -4,9 +4,9 @@ DROP TABLE IF EXISTS quorum1 SYNC; DROP TABLE IF EXISTS quorum2 SYNC; DROP TABLE IF EXISTS quorum3 SYNC; -CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01513/sequence_consistency', '1') ORDER BY x PARTITION BY y; -CREATE TABLE quorum2(x UInt32, y Date) ENGINE 
ReplicatedMergeTree('/clickhouse/tables/test_01513/sequence_consistency', '2') ORDER BY x PARTITION BY y; -CREATE TABLE quorum3(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/test_01513/sequence_consistency', '3') ORDER BY x PARTITION BY y; +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01513/sequence_consistency', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01513/sequence_consistency', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum3(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01513/sequence_consistency', '3') ORDER BY x PARTITION BY y; INSERT INTO quorum1 VALUES (1, '1990-11-15'); INSERT INTO quorum1 VALUES (2, '1990-11-15'); diff --git a/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql b/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql index d5fb25a9241..07fe00693d1 100644 --- a/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql +++ b/tests/queries/0_stateless/01530_drop_database_atomic_sync.sql @@ -1,33 +1,33 @@ drop database if exists db_01530_atomic sync; create database db_01530_atomic Engine=Atomic; -create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/db_01530_atomic/data', 'test') order by key; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; drop database db_01530_atomic sync; create database db_01530_atomic Engine=Atomic; -create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/db_01530_atomic/data', 'test') order by key; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; drop database db_01530_atomic sync; set database_atomic_wait_for_drop_and_detach_synchronously=1; create database db_01530_atomic Engine=Atomic; -create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/db_01530_atomic/data', 'test') order by key; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; drop database db_01530_atomic; create database db_01530_atomic Engine=Atomic; -create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/db_01530_atomic/data', 'test') order by key; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; drop database db_01530_atomic; set database_atomic_wait_for_drop_and_detach_synchronously=0; create database db_01530_atomic Engine=Atomic; -create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/db_01530_atomic/data', 'test') order by key; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; drop database db_01530_atomic; create database db_01530_atomic Engine=Atomic; -create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/db_01530_atomic/data', 'test') order by key; -- { serverError 253; } +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { serverError 253; } set database_atomic_wait_for_drop_and_detach_synchronously=1;
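Why the hunk above still works as a test: the table always lives in db_01530_atomic, and {database} resolves to the database of the table itself, not to the session's current database, so every recreation points at the same ZooKeeper path. The final CREATE relies on exactly that: with database_atomic_wait_for_drop_and_detach_synchronously=0 the preceding DROP returns before the replica's znodes are gone, and re-creating the table at the unchanged path fails with error 253 (REPLICA_IS_ALREADY_EXIST, as I read the expectation). A condensed sketch of the race, using the statements from the hunk:

create database db_01530_atomic Engine=Atomic;
create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key;
drop database db_01530_atomic; -- asynchronous here: returns while znodes are still being removed
create database db_01530_atomic Engine=Atomic;
create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { serverError 253 }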
diff --git a/tests/queries/0_stateless/01532_execute_merges_on_single_replica.reference b/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.reference similarity index 100% rename from tests/queries/0_stateless/01532_execute_merges_on_single_replica.reference rename to tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.reference diff --git a/tests/queries/0_stateless/01532_execute_merges_on_single_replica.sql b/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql similarity index 96% rename from tests/queries/0_stateless/01532_execute_merges_on_single_replica.sql rename to tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql index 69369321d26..1acae560c93 100644 --- a/tests/queries/0_stateless/01532_execute_merges_on_single_replica.sql +++ b/tests/queries/0_stateless/01532_execute_merges_on_single_replica_long.sql @@ -1,7 +1,7 @@ DROP TABLE IF EXISTS execute_on_single_replica_r1 NO DELAY; DROP TABLE IF EXISTS execute_on_single_replica_r2 NO DELAY; -/* that test requires fixed zookeeper path */ +/* that test requires fixed zookeeper path, so we cannot use ReplicatedMergeTree({database}) */ CREATE TABLE execute_on_single_replica_r1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r1') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10; CREATE TABLE execute_on_single_replica_r2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r2') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10; @@ -124,4 +124,4 @@ ORDER BY part_name FORMAT Vertical; DROP TABLE execute_on_single_replica_r1 NO DELAY; -DROP TABLE execute_on_single_replica_r2 NO DELAY; \ No newline at end of file +DROP TABLE execute_on_single_replica_r2 NO DELAY; diff --git a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference index 0eb7e06f724..bf9bff06959 100644 --- a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference +++ b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference @@ -5,7 +5,7 @@ FROM test_order_by ORDER BY timestamp ASC LIMIT 10 Expression (Projection) - Limit (preliminary LIMIT) + Limit (preliminary LIMIT (without OFFSET)) MergingSorted (Merge sorted streams for ORDER BY) MergeSorting (Merge sorted blocks for ORDER BY) PartialSorting (Sort each block for ORDER BY) @@ -19,7 +19,7 @@ FROM test_order_by ORDER BY toDate(timestamp) ASC LIMIT 10 Expression (Projection) - Limit (preliminary LIMIT) + Limit (preliminary LIMIT (without OFFSET)) FinishSorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) @@ -33,7 +33,7 @@ ORDER BY timestamp ASC LIMIT 10 Expression (Projection) - Limit (preliminary LIMIT) + Limit (preliminary LIMIT (without OFFSET)) FinishSorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.reference b/tests/queries/0_stateless/01576_alias_column_rewrite.reference index c9a4c04b352..2a824e62158 100644 --- a/tests/queries/0_stateless/01576_alias_column_rewrite.reference +++ b/tests/queries/0_stateless/01576_alias_column_rewrite.reference @@ -22,7 +22,7 @@ lambda 1 optimize_read_in_order
Expression (Projection) - Limit (preliminary LIMIT) + Limit (preliminary LIMIT (without OFFSET)) MergingSorted (Merge sorted streams for ORDER BY) MergeSorting (Merge sorted blocks for ORDER BY) PartialSorting (Sort each block for ORDER BY) @@ -30,13 +30,13 @@ Expression (Projection) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromMergeTree Expression (Projection) - Limit (preliminary LIMIT) + Limit (preliminary LIMIT (without OFFSET)) FinishSorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromMergeTree Expression (Projection) - Limit (preliminary LIMIT) + Limit (preliminary LIMIT (without OFFSET)) FinishSorting Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) diff --git a/tests/queries/0_stateless/01581_deduplicate_by_columns_replicated.reference b/tests/queries/0_stateless/01581_deduplicate_by_columns_replicated_long.reference similarity index 100% rename from tests/queries/0_stateless/01581_deduplicate_by_columns_replicated.reference rename to tests/queries/0_stateless/01581_deduplicate_by_columns_replicated_long.reference diff --git a/tests/queries/0_stateless/01581_deduplicate_by_columns_replicated.sql b/tests/queries/0_stateless/01581_deduplicate_by_columns_replicated_long.sql similarity index 92% rename from tests/queries/0_stateless/01581_deduplicate_by_columns_replicated.sql rename to tests/queries/0_stateless/01581_deduplicate_by_columns_replicated_long.sql index 9779dca90a2..cc8c721b6a8 100644 --- a/tests/queries/0_stateless/01581_deduplicate_by_columns_replicated.sql +++ b/tests/queries/0_stateless/01581_deduplicate_by_columns_replicated_long.sql @@ -11,11 +11,11 @@ SET replication_alter_partitions_sync = 2; -- IRL insert_replica_id were filled from hostname CREATE TABLE IF NOT EXISTS replicated_deduplicate_by_columns_r1 ( id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock() -) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01581/replicated_deduplicate', 'r1') ORDER BY id; +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_01581/replicated_deduplicate', 'r1') ORDER BY id; CREATE TABLE IF NOT EXISTS replicated_deduplicate_by_columns_r2 ( id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock() -) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01581/replicated_deduplicate', 'r2') ORDER BY id; +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_01581/replicated_deduplicate', 'r2') ORDER BY id; -- insert some data, 2 records: (3, 1003), (4, 1004) are duplicated and have difference in unique_value / insert_replica_id diff --git a/tests/queries/0_stateless/01646_system_restart_replicas_smoke.sql b/tests/queries/0_stateless/01646_system_restart_replicas_smoke.sql index cfd70df8dd4..0b7fba5b02e 100644 --- a/tests/queries/0_stateless/01646_system_restart_replicas_smoke.sql +++ b/tests/queries/0_stateless/01646_system_restart_replicas_smoke.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS data_01646; -CREATE TABLE data_01646 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01646/data_01646', 'r') ORDER BY s PARTITION BY x; +CREATE TABLE data_01646 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01646/data_01646', 'r') ORDER BY s PARTITION BY x; SYSTEM RESTART REPLICAS; DESCRIBE TABLE data_01646; DROP TABLE data_01646; diff --git a/tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper.reference 
b/tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper.reference rename to tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper.sql b/tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper_long.sql similarity index 69% rename from tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper.sql rename to tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper_long.sql index c3e459dfc49..a839883e683 100644 --- a/tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper.sql +++ b/tests/queries/0_stateless/01650_drop_part_and_deduplication_zookeeper_long.sql @@ -5,7 +5,7 @@ CREATE TABLE partitioned_table ( partitioner UInt8, value String ) -ENGINE ReplicatedMergeTree('/clickhouse/01650_drop_part_and_deduplication_partitioned_table', '1') +ENGINE ReplicatedMergeTree('/clickhouse/{database}/01650_drop_part_and_deduplication_partitioned_table', '1') ORDER BY key PARTITION BY partitioner; @@ -16,24 +16,24 @@ INSERT INTO partitioned_table VALUES (11, 1, 'AA'), (22, 2, 'BB'), (33, 3, 'CC') SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name; -SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; +SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/' || currentDatabase() || '/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; INSERT INTO partitioned_table VALUES (33, 3, 'CC'); -- must be deduplicated SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name; -SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; +SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/' || currentDatabase() || '/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; ALTER TABLE partitioned_table DROP PART '3_1_1_0'; SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name; -SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; +SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/' || currentDatabase() || '/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; INSERT INTO partitioned_table VALUES (33, 3, 'CC'); -- mustn't be deduplicated SELECT partition_id, name FROM system.parts WHERE table = 'partitioned_table' AND database = currentDatabase() ORDER BY name; -SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; +SELECT substring(name, 1, 2), value FROM system.zookeeper WHERE path='/clickhouse/' || currentDatabase() || '/01650_drop_part_and_deduplication_partitioned_table/blocks/' ORDER BY value; DROP TABLE IF EXISTS partitioned_table; diff --git a/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path.reference 
b/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.reference similarity index 100% rename from tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path.reference rename to tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.reference diff --git a/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path.sql b/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql similarity index 74% rename from tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path.sql rename to tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql index 2c50a7a8516..85b37cee15a 100644 --- a/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path.sql +++ b/tests/queries/0_stateless/01650_fetch_patition_with_macro_in_zk_path_long.sql @@ -2,17 +2,17 @@ DROP TABLE IF EXISTS test_01640; DROP TABLE IF EXISTS restore_01640; CREATE TABLE test_01640(i Int64, d Date, s String) -ENGINE = ReplicatedMergeTree('/clickhouse/{shard}/tables/test_01640','{replica}') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/{shard}/tables/test_01640','{replica}') PARTITION BY toYYYYMM(d) ORDER BY i; insert into test_01640 values (1, '2021-01-01','some'); CREATE TABLE restore_01640(i Int64, d Date, s String) -ENGINE = ReplicatedMergeTree('/clickhouse/{shard}/tables/restore_01640','{replica}') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/{shard}/tables/restore_01640','{replica}') PARTITION BY toYYYYMM(d) ORDER BY i; -ALTER TABLE restore_01640 FETCH PARTITION tuple(toYYYYMM(toDate('2021-01-01'))) - FROM '/clickhouse/{shard}/tables/test_01640'; +ALTER TABLE restore_01640 FETCH PARTITION tuple(toYYYYMM(toDate('2021-01-01'))) + FROM '/clickhouse/{database}/{shard}/tables/test_01640'; SELECT partition_id FROM system.detached_parts diff --git a/tests/queries/0_stateless/01666_blns.reference b/tests/queries/0_stateless/01666_blns_long.reference similarity index 100% rename from tests/queries/0_stateless/01666_blns.reference rename to tests/queries/0_stateless/01666_blns_long.reference diff --git a/tests/queries/0_stateless/01666_blns.sql b/tests/queries/0_stateless/01666_blns_long.sql similarity index 99% rename from tests/queries/0_stateless/01666_blns.sql rename to tests/queries/0_stateless/01666_blns_long.sql index be9632092bc..19caf45832f 100644 --- a/tests/queries/0_stateless/01666_blns.sql +++ b/tests/queries/0_stateless/01666_blns_long.sql @@ -554,9 +554,9 @@ SELECT count() FROM test; DROP TABLE IF EXISTS test_r1; DROP TABLE IF EXISTS test_r2; -CREATE TABLE test_r1 AS test ENGINE = ReplicatedMergeTree('/clickhouse/test_01666', 'r1') ORDER BY "\\" SETTINGS min_bytes_for_wide_part = '100G'; +CREATE TABLE test_r1 AS test ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01666', 'r1') ORDER BY "\\" SETTINGS min_bytes_for_wide_part = '100G'; INSERT INTO test_r1 SELECT * FROM test; -CREATE TABLE test_r2 AS test ENGINE = ReplicatedMergeTree('/clickhouse/test_01666', 'r2') ORDER BY "\\" SETTINGS min_bytes_for_wide_part = '100G'; +CREATE TABLE test_r2 AS test ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01666', 'r2') ORDER BY "\\" SETTINGS min_bytes_for_wide_part = '100G'; SYSTEM SYNC REPLICA test_r2; diff --git a/tests/queries/0_stateless/01669_columns_declaration_serde.reference b/tests/queries/0_stateless/01669_columns_declaration_serde_long.reference similarity index 100% rename from tests/queries/0_stateless/01669_columns_declaration_serde.reference rename to 
tests/queries/0_stateless/01669_columns_declaration_serde_long.reference diff --git a/tests/queries/0_stateless/01669_columns_declaration_serde.sql b/tests/queries/0_stateless/01669_columns_declaration_serde_long.sql similarity index 81% rename from tests/queries/0_stateless/01669_columns_declaration_serde.sql rename to tests/queries/0_stateless/01669_columns_declaration_serde_long.sql index a6bf1184e9f..b1a85754fce 100644 --- a/tests/queries/0_stateless/01669_columns_declaration_serde.sql +++ b/tests/queries/0_stateless/01669_columns_declaration_serde_long.sql @@ -22,12 +22,12 @@ DROP TABLE IF EXISTS test_r1; DROP TABLE IF EXISTS test_r2; CREATE TABLE test_r1 (x UInt64, "\\" String DEFAULT '\r\n\t\\' || ' -') ENGINE = ReplicatedMergeTree('/clickhouse/test_01669', 'r1') ORDER BY "\\"; +') ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01669', 'r1') ORDER BY "\\"; INSERT INTO test_r1 ("\\") VALUES ('\\'); CREATE TABLE test_r2 (x UInt64, "\\" String DEFAULT '\r\n\t\\' || ' -') ENGINE = ReplicatedMergeTree('/clickhouse/test_01669', 'r2') ORDER BY "\\"; +') ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01669', 'r2') ORDER BY "\\"; SYSTEM SYNC REPLICA test_r2; diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference index e491dd9e091..dcee18b33e0 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.reference @@ -12,3 +12,5 @@ blocks failed_parts last_part parallel +shared +shared diff --git a/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql b/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql index 02457a956a1..a147952dfe1 100644 --- a/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql +++ b/tests/queries/0_stateless/01700_system_zookeeper_path_in.sql @@ -3,20 +3,20 @@ DROP TABLE IF EXISTS sample_table; CREATE TABLE sample_table ( key UInt64 ) -ENGINE ReplicatedMergeTree('/clickhouse/01700_system_zookeeper_path_in/{shard}', '{replica}') +ENGINE ReplicatedMergeTree('/clickhouse/{database}/01700_system_zookeeper_path_in/{shard}', '{replica}') ORDER BY tuple(); -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/01700_system_zookeeper_path_in/s1' AND name like 'block%' ORDER BY name; -SELECT name FROM system.zookeeper WHERE path = '/clickhouse/01700_system_zookeeper_path_in/s1/replicas' AND name LIKE '%r1%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1' AND name like 'block%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas' AND name LIKE '%r1%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/01700_system_zookeeper_path_in/s1') AND name LIKE 'block%' ORDER BY name; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE '%r1%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1') AND name LIKE 'block%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE '%r1%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/01700_system_zookeeper_path_in/s1', - '/clickhouse/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE 'block%' ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1', + '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/replicas') AND name LIKE 'block%' ORDER BY name; SELECT '========'; -SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/01700_system_zookeeper_path_in/s1/', name) - FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND path = '/clickhouse/01700_system_zookeeper_path_in/s1')) ORDER BY name; +SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1/', name) + FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/s1')) ORDER BY name; DROP TABLE IF EXISTS sample_table;
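The asymmetry in the hunk above is worth noting: {database} is substituted only in the engine arguments at CREATE time, while a string literal in a query against system.zookeeper gets no macro expansion, which is why these checks rebuild the path with currentDatabase() and the || concatenation operator. A small sketch of the two sides (path and table name are illustrative):

-- Engine argument: the macro is expanded when the table is created.
CREATE TABLE t (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t', 'r1') ORDER BY x;
-- Query-time literal: no expansion, so the database name is concatenated explicitly.
SELECT name FROM system.zookeeper WHERE path = '/clickhouse/tables/' || currentDatabase() || '/t' ORDER BY name;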
diff --git a/tests/queries/0_stateless/01713_table_ttl_old_syntax_zookeeper.sql b/tests/queries/0_stateless/01713_table_ttl_old_syntax_zookeeper.sql index 7d4c83c9d3a..71898bdedcb 100644 --- a/tests/queries/0_stateless/01713_table_ttl_old_syntax_zookeeper.sql +++ b/tests/queries/0_stateless/01713_table_ttl_old_syntax_zookeeper.sql @@ -5,7 +5,7 @@ CREATE TABLE ttl_table date Date, value UInt64 ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01713_table_ttl', '1', date, date, 8192) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01713_table_ttl', '1', date, date, 8192) TTL date + INTERVAL 2 MONTH; --{ serverError 36 } CREATE TABLE ttl_table @@ -13,7 +13,7 @@ CREATE TABLE ttl_table date Date, value UInt64 ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01713_table_ttl', '1', date, date, 8192) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01713_table_ttl', '1', date, date, 8192) PARTITION BY date; --{ serverError 42 } CREATE TABLE ttl_table @@ -21,7 +21,7 @@ CREATE TABLE ttl_table date Date, value UInt64 ) -ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_01713_table_ttl', '1', date, date, 8192) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01713_table_ttl', '1', date, date, 8192) ORDER BY value; --{ serverError 42 } SELECT 1; diff --git a/tests/queries/0_stateless/01715_background_checker_blather_zookeeper.reference b/tests/queries/0_stateless/01715_background_checker_blather_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01715_background_checker_blather_zookeeper.reference rename to tests/queries/0_stateless/01715_background_checker_blather_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01715_background_checker_blather_zookeeper.sql b/tests/queries/0_stateless/01715_background_checker_blather_zookeeper_long.sql similarity index 74% rename from tests/queries/0_stateless/01715_background_checker_blather_zookeeper.sql rename to tests/queries/0_stateless/01715_background_checker_blather_zookeeper_long.sql index 66b53369517..c0335ae3753 100644 --- a/tests/queries/0_stateless/01715_background_checker_blather_zookeeper.sql +++ b/tests/queries/0_stateless/01715_background_checker_blather_zookeeper_long.sql @@ -2,11 +2,11 @@ DROP TABLE IF EXISTS i20203_1; DROP TABLE IF EXISTS i20203_2; CREATE TABLE i20203_1 (a Int8) -ENGINE = ReplicatedMergeTree('/clickhouse/01715_background_checker_i20203', 'r1') +ENGINE =
ReplicatedMergeTree('/clickhouse/{database}/01715_background_checker_i20203', 'r1') ORDER BY tuple(); CREATE TABLE i20203_2 (a Int8) -ENGINE = ReplicatedMergeTree('/clickhouse/01715_background_checker_i20203', 'r2') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01715_background_checker_i20203', 'r2') ORDER BY tuple(); DETACH TABLE i20203_2; diff --git a/tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper.reference b/tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper_long.reference similarity index 100% rename from tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper.reference rename to tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper_long.reference diff --git a/tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper.sql b/tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper_long.sql similarity index 95% rename from tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper.sql rename to tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper_long.sql index 759c8ba3a0b..033a66a477f 100644 --- a/tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper.sql +++ b/tests/queries/0_stateless/01747_alter_partition_key_enum_zookeeper_long.sql @@ -39,7 +39,7 @@ CREATE TABLE replicated_report `branch` String, `generated_time` DateTime ) -ENGINE = ReplicatedMergeTree('/clickhouse/01747_alter_partition_key/t', '1') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01747_alter_partition_key/t', '1') PARTITION BY (product, toYYYYMM(generated_time)) ORDER BY (product, machine, branch, generated_time); diff --git a/tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference b/tests/queries/0_stateless/01761_alter_decimal_zookeeper_long.reference similarity index 62% rename from tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference rename to tests/queries/0_stateless/01761_alter_decimal_zookeeper_long.reference index 5dcc95fd7b7..ea3f608b6c7 100644 --- a/tests/queries/0_stateless/01761_alter_decimal_zookeeper.reference +++ b/tests/queries/0_stateless/01761_alter_decimal_zookeeper_long.reference @@ -1,9 +1,9 @@ 1 5.00000000 2 6.00000000 -CREATE TABLE default.test_alter_decimal\n(\n `n` UInt64,\n `d` Decimal(18, 8)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/01761_alter_decimal_zookeeper\', \'r1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test_alter_decimal\n(\n `n` UInt64,\n `d` Decimal(18, 8)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/01761_alter_decimal_zookeeper\', \'r1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 5.00000000 2 6.00000000 -CREATE TABLE default.test_alter_decimal\n(\n `n` UInt64,\n `d` Decimal(18, 8)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/01761_alter_decimal_zookeeper\', \'r1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.test_alter_decimal\n(\n `n` UInt64,\n `d` Decimal(18, 8)\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/default/01761_alter_decimal_zookeeper\', \'r1\')\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 5.00000000 2 6.00000000 3 7.00000000 diff --git a/tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql b/tests/queries/0_stateless/01761_alter_decimal_zookeeper_long.sql similarity index 89% rename from tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql rename to tests/queries/0_stateless/01761_alter_decimal_zookeeper_long.sql index 01766f0d6c2..d1bd3b65182 100644 --- 
a/tests/queries/0_stateless/01761_alter_decimal_zookeeper.sql +++ b/tests/queries/0_stateless/01761_alter_decimal_zookeeper_long.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS test_alter_decimal; CREATE TABLE test_alter_decimal (n UInt64, d Decimal(15, 8)) -ENGINE = ReplicatedMergeTree('/clickhouse/01761_alter_decimal_zookeeper', 'r1') +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01761_alter_decimal_zookeeper', 'r1') ORDER BY tuple(); INSERT INTO test_alter_decimal VALUES (1, toDecimal32(5, 5)); diff --git a/tests/queries/0_stateless/01814_distributed_push_down_limit.reference b/tests/queries/0_stateless/01814_distributed_push_down_limit.reference index f879f2cbd21..c542b5b7325 100644 --- a/tests/queries/0_stateless/01814_distributed_push_down_limit.reference +++ b/tests/queries/0_stateless/01814_distributed_push_down_limit.reference @@ -12,18 +12,6 @@ distributed_push_down_limit=1 8 9 40 40 -auto-distributed_push_down_limit -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -40 40 distributed_push_down_limit=1 with OFFSET 97 96 diff --git a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh index 93321646037..24b27e74ba5 100755 --- a/tests/queries/0_stateless/01814_distributed_push_down_limit.sh +++ b/tests/queries/0_stateless/01814_distributed_push_down_limit.sh @@ -86,9 +86,11 @@ function test_distributed_push_down_limit_0() function test_distributed_push_down_limit_1() { local args=( - "remote('127.{2,3}', $CLICKHOUSE_DATABASE, data_01814)" + "remote('127.{2,3}', $CLICKHOUSE_DATABASE, data_01814, key)" 0 # offset --distributed_push_down_limit 1 + --optimize_skip_unused_shards 1 + --optimize_distributed_group_by_sharding_key 1 ) test_distributed_push_down_limit_with_query_log "${args[@]}" } @@ -97,22 +99,11 @@ function test_distributed_push_down_limit_1_offset() { local settings_and_opts=( --distributed_push_down_limit 1 - ) - - $CLICKHOUSE_CLIENT "${settings_and_opts[@]}" -q "select * from remote('127.{2,3}', $CLICKHOUSE_DATABASE, data_01814) group by key order by key desc limit 5, 10" -} - -function test_auto_distributed_push_down_limit() -{ - local args=( - dist_01814 - 0 # offset --optimize_skip_unused_shards 1 --optimize_distributed_group_by_sharding_key 1 - --prefer_localhost_replica 0 - --distributed_push_down_limit 0 ) - test_distributed_push_down_limit_with_query_log "${args[@]}" + + $CLICKHOUSE_CLIENT "${settings_and_opts[@]}" -q "select * from remote('127.{2,3}', $CLICKHOUSE_DATABASE, data_01814, key) group by key order by key desc limit 5, 10" } function main() @@ -151,16 +142,6 @@ function main() done echo "$out" - echo 'auto-distributed_push_down_limit' - for ((i = 0; i < max_tries; ++i)); do - out=$(test_auto_distributed_push_down_limit) - out_lines=( $out ) - if [[ ${#out_lines[@]} -gt 2 ]] && [[ ${out_lines[-1]} = 40 ]] && [[ ${out_lines[-2]} = 40 ]]; then - break - fi - done - echo "$out" - echo 'distributed_push_down_limit=1 with OFFSET' test_distributed_push_down_limit_1_offset } diff --git a/tests/queries/0_stateless/01901_test_attach_partition_from.sql b/tests/queries/0_stateless/01901_test_attach_partition_from.sql index 0ffa4cdecf2..c15edacc6c8 100644 --- a/tests/queries/0_stateless/01901_test_attach_partition_from.sql +++ b/tests/queries/0_stateless/01901_test_attach_partition_from.sql @@ -5,7 +5,7 @@ CREATE TABLE test_alter_attach_01901S (A Int64, D date) ENGINE = MergeTree PARTI INSERT INTO test_alter_attach_01901S VALUES (1, '2020-01-01'); CREATE TABLE test_alter_attach_01901D (A Int64, D date) 
-Engine=ReplicatedMergeTree('/clickhouse/tables/test_alter_attach_01901D', 'r1') +Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/test_alter_attach_01901D', 'r1') PARTITION BY D ORDER BY A; ALTER TABLE test_alter_attach_01901D ATTACH PARTITION '2020-01-01' FROM test_alter_attach_01901S; diff --git a/tests/queries/0_stateless/01925_broken_partition_id_zookeeper.sql b/tests/queries/0_stateless/01925_broken_partition_id_zookeeper.sql index baf6c1fbf8f..07e490d0ce0 100644 --- a/tests/queries/0_stateless/01925_broken_partition_id_zookeeper.sql +++ b/tests/queries/0_stateless/01925_broken_partition_id_zookeeper.sql @@ -14,3 +14,13 @@ ALTER TABLE broken_partition DROP PARTITION ID '20210325_0_13241_6_12747'; --{se ALTER TABLE broken_partition DROP PARTITION ID '20210325_0_13241_6_12747'; --{serverError 248} DROP TABLE IF EXISTS broken_partition; + +DROP TABLE IF EXISTS old_partition_key; + +CREATE TABLE old_partition_key (sd Date, dh UInt64, ak UInt32, ed Date) ENGINE=MergeTree(sd, dh, (ak, ed, dh), 8192); + +ALTER TABLE old_partition_key DROP PARTITION ID '20210325_0_13241_6_12747'; --{serverError 248} + +ALTER TABLE old_partition_key DROP PARTITION ID '202103'; + +DROP TABLE old_partition_key; diff --git a/tests/queries/0_stateless/01951_distributed_push_down_limit.reference b/tests/queries/0_stateless/01951_distributed_push_down_limit.reference new file mode 100644 index 00000000000..ca3bbc4cb33 --- /dev/null +++ b/tests/queries/0_stateless/01951_distributed_push_down_limit.reference @@ -0,0 +1,32 @@ +-- { echo } +explain select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=0; +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + MergingSorted (Merge sorted streams after aggregation stage for ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=1; +Expression (Projection) + Limit (preliminary LIMIT (without OFFSET)) + MergingSorted (Merge sorted streams after aggregation stage for ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + Limit (preliminary LIMIT (with OFFSET)) + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) diff --git a/tests/queries/0_stateless/01951_distributed_push_down_limit.sql 
b/tests/queries/0_stateless/01951_distributed_push_down_limit.sql new file mode 100644 index 00000000000..0d6e2069215 --- /dev/null +++ b/tests/queries/0_stateless/01951_distributed_push_down_limit.sql @@ -0,0 +1,3 @@ +-- { echo } +explain select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=0; +explain select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=1; diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference new file mode 100644 index 00000000000..10787068f43 --- /dev/null +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference @@ -0,0 +1,115 @@ +-- { echo } +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +Expression (Projection) + Distinct + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + Expression (Projection) + Distinct + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +Expression (Projection) + LimitBy + Expression (Before LIMIT BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + LimitBy + Expression ((Before LIMIT BY + Before ORDER BY)) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + Expression (Projection) + LimitBy + Expression ((Before LIMIT BY + Before ORDER BY)) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + 
SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +Expression (Projection) + Distinct + MergingSorted (Merge sorted streams for ORDER BY, without aggregation) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +Expression (Projection) + MergingSorted (Merge sorted streams after aggregation stage for ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + Distinct + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Distinct (Preliminary DISTINCT) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +Expression (Projection) + LimitBy + Expression (Before LIMIT BY) + MergingSorted (Merge sorted streams for ORDER BY, without aggregation) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + LimitBy + Expression (Before LIMIT BY) + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Expression (Before ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +Expression (Projection) + MergingSorted (Merge sorted streams after aggregation stage for ORDER BY) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + Union + LimitBy + Expression (Before LIMIT BY) + MergingSorted (Merge sorted streams for ORDER BY) + MergeSorting (Merge sorted blocks for ORDER BY) + PartialSorting (Sort each block for ORDER BY) + Expression (Before ORDER BY) + 
SettingQuotaAndLimits (Set limits and quota after reading from storage) + Expression ((Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) + SettingQuotaAndLimits (Set limits and quota after reading from storage) + ReadFromStorage (SystemNumbers) + ReadFromRemote (Read from remote replica) diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql new file mode 100644 index 00000000000..2ae872f72b0 --- /dev/null +++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.sql @@ -0,0 +1,13 @@ +set optimize_skip_unused_shards=1; +set optimize_distributed_group_by_sharding_key=1; + +-- { echo } +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized + +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.reference b/tests/queries/0_stateless/01961_roaring_memory_tracking.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql new file mode 100644 index 00000000000..57d71cdc91c --- /dev/null +++ b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql @@ -0,0 +1,2 @@ +SET max_memory_usage = '100M'; +SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(2000000000) GROUP BY n; -- { serverError 241 } diff --git a/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.reference b/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.reference new file mode 100644 index 00000000000..6c680840239 --- /dev/null +++ b/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.reference @@ -0,0 +1,8 @@ +-- { echo } +select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=0; +0 +select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=1; +0 +select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_group_by_no_merge=1; +0 +0 diff --git 
diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.reference b/tests/queries/0_stateless/01961_roaring_memory_tracking.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql
new file mode 100644
index 00000000000..57d71cdc91c
--- /dev/null
+++ b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql
@@ -0,0 +1,2 @@
+SET max_memory_usage = '100M';
+SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(2000000000) GROUP BY n; -- { serverError 241 }
diff --git a/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.reference b/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.reference
new file mode 100644
index 00000000000..6c680840239
--- /dev/null
+++ b/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.reference
@@ -0,0 +1,8 @@
+-- { echo }
+select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=0;
+0
+select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=1;
+0
+select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_group_by_no_merge=1;
+0
+0
diff --git a/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.sql b/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.sql
new file mode 100644
index 00000000000..0925df1888d
--- /dev/null
+++ b/tests/queries/0_stateless/02001_dist_on_dist_WithMergeableStateAfterAggregation.sql
@@ -0,0 +1,6 @@
+drop table if exists dist;
+create table dist as system.one engine=Distributed('test_shard_localhost', system, one);
+-- { echo }
+select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=0;
+select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=1;
+select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_group_by_no_merge=1;
diff --git a/tests/queries/0_stateless/02002_sampling_and_unknown_column_bug.reference b/tests/queries/0_stateless/02002_sampling_and_unknown_column_bug.reference
new file mode 100644
index 00000000000..9315e86b328
--- /dev/null
+++ b/tests/queries/0_stateless/02002_sampling_and_unknown_column_bug.reference
@@ -0,0 +1,2 @@
+1
+1 1 1
diff --git a/tests/queries/0_stateless/02002_sampling_and_unknown_column_bug.sql b/tests/queries/0_stateless/02002_sampling_and_unknown_column_bug.sql
new file mode 100644
index 00000000000..838d7a5526b
--- /dev/null
+++ b/tests/queries/0_stateless/02002_sampling_and_unknown_column_bug.sql
@@ -0,0 +1,20 @@
+drop table if exists sessions;
+CREATE TABLE sessions
+(
+    `user_id` UInt64
+)
+ENGINE = MergeTree
+ORDER BY user_id
+SAMPLE BY user_id;
+
+insert into sessions values(1);
+
+SELECT
+    sum(user_id * _sample_factor)
+FROM sessions
+SAMPLE 10000000;
+
+SELECT
+    uniq(user_id) a, min(_sample_factor) x, a*x
+FROM sessions
+SAMPLE 10000000;
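The second query in the sampling test pairs uniq(user_id) with min(_sample_factor): _sample_factor is a virtual column holding the reciprocal of the effective sampling ratio, so multiplying an aggregate by it scales the sampled result back to the full table. A hedged sketch of the same idea against the sessions table above (the ratio is hypothetical):

-- Reading roughly a tenth of the rows makes _sample_factor about 10, so summing it
-- over the sampled rows estimates the total row count of the unsampled table:
SELECT sum(_sample_factor) AS estimated_rows
FROM sessions
SAMPLE 1 / 10;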
"$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "SELECT count() > 0 FROM system.tables \ + WHERE database IN ('system', '$CLICKHOUSE_DATABASE') \ + SETTINGS legacy_column_name_of_tuple_literal = 1" diff --git a/tests/queries/0_stateless/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET.reference b/tests/queries/0_stateless/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET.reference new file mode 100644 index 00000000000..6cdb80167d1 --- /dev/null +++ b/tests/queries/0_stateless/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET.reference @@ -0,0 +1,38 @@ +-- { echo } +SELECT * +FROM remote('127.{1,2}', view( + SELECT number%20 number + FROM numbers(40) + WHERE (number % 2) = (shardNum() - 1) +), number) +GROUP BY number +ORDER BY number ASC +LIMIT 1 BY number +LIMIT 5, 5 +SETTINGS + optimize_skip_unused_shards=1, + optimize_distributed_group_by_sharding_key=1, + distributed_push_down_limit=1; +5 +6 +7 +8 +9 +SELECT * +FROM remote('127.{1,2}', view( + SELECT number%20 number + FROM numbers(40) + WHERE (number % 2) = (shardNum() - 1) +), number) +GROUP BY number +ORDER BY number ASC +LIMIT 1 BY number +LIMIT 5, 5 +SETTINGS + distributed_group_by_no_merge=2, + distributed_push_down_limit=1; +5 +6 +7 +8 +9 diff --git a/tests/queries/0_stateless/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET.sql b/tests/queries/0_stateless/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET.sql new file mode 100644 index 00000000000..1a446a80603 --- /dev/null +++ b/tests/queries/0_stateless/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET.sql @@ -0,0 +1,36 @@ +-- Here we use a trick with shardNum() to generate unique data on each shard. +-- Since distributed_group_by_no_merge=2 will use WithMergeableStateAfterAggregationAndLimit, +-- which assume that the data on shards is unique +-- (LIMIT BY will be applied only on shards, not on the initiator). + +-- To distinguish echoing from the comments above we use SELECT FORMAT Null. +SELECT '' FORMAT Null; + +-- { echo } +SELECT * +FROM remote('127.{1,2}', view( + SELECT number%20 number + FROM numbers(40) + WHERE (number % 2) = (shardNum() - 1) +), number) +GROUP BY number +ORDER BY number ASC +LIMIT 1 BY number +LIMIT 5, 5 +SETTINGS + optimize_skip_unused_shards=1, + optimize_distributed_group_by_sharding_key=1, + distributed_push_down_limit=1; +SELECT * +FROM remote('127.{1,2}', view( + SELECT number%20 number + FROM numbers(40) + WHERE (number % 2) = (shardNum() - 1) +), number) +GROUP BY number +ORDER BY number ASC +LIMIT 1 BY number +LIMIT 5, 5 +SETTINGS + distributed_group_by_no_merge=2, + distributed_push_down_limit=1; diff --git a/tests/queries/0_stateless/02003_compress_bz2.reference b/tests/queries/0_stateless/02003_compress_bz2.reference new file mode 100644 index 00000000000..8ab686eafeb --- /dev/null +++ b/tests/queries/0_stateless/02003_compress_bz2.reference @@ -0,0 +1 @@ +Hello, World! diff --git a/tests/queries/0_stateless/02003_compress_bz2.sh b/tests/queries/0_stateless/02003_compress_bz2.sh new file mode 100755 index 00000000000..b65ce2f233b --- /dev/null +++ b/tests/queries/0_stateless/02003_compress_bz2.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +WORKING_FOLDER_02003="${USER_FILES_PATH}/${CLICKHOUSE_DATABASE}" + +rm -rf "${WORKING_FOLDER_02003}" +mkdir "${WORKING_FOLDER_02003}" + +${CLICKHOUSE_CLIENT} --query "SELECT * FROM (SELECT 'Hello, World!' as String) INTO OUTFILE '${WORKING_FOLDER_02003}/bz2_compression.bz2'" +bzip2 -t ${WORKING_FOLDER_02003}/bz2_compression.bz2 +${CLICKHOUSE_CLIENT} --query "SELECT * FROM file('${WORKING_FOLDER_02003}/bz2_compression.bz2', 'TabSeparated', 'col String')" + +rm -rf "${WORKING_FOLDER_02003}" diff --git a/tests/queries/0_stateless/02005_log_formatted_queries.reference b/tests/queries/0_stateless/02005_log_formatted_queries.reference new file mode 100644 index 00000000000..3ddd8b0d64f --- /dev/null +++ b/tests/queries/0_stateless/02005_log_formatted_queries.reference @@ -0,0 +1,3 @@ +02005_log_formatted_queries.sql +select \'02005_log_formatted_queries.sql\' from system.one; SELECT \'02005_log_formatted_queries.sql\' FROM system.one +select \'02005_log_formatted_queries.sql\' from system.one; SELECT \'02005_log_formatted_queries.sql\' FROM system.one diff --git a/tests/queries/0_stateless/02005_log_formatted_queries.sql b/tests/queries/0_stateless/02005_log_formatted_queries.sql new file mode 100644 index 00000000000..62f839af0f0 --- /dev/null +++ b/tests/queries/0_stateless/02005_log_formatted_queries.sql @@ -0,0 +1,5 @@ +set log_formatted_queries = 1; + +select '02005_log_formatted_queries.sql' from system.one; +system flush logs; +select query, formatted_query from system.query_log where current_database = currentDatabase() and query = 'select \'02005_log_formatted_queries.sql\' from system.one;' and event_date >= yesterday() and event_time > now() - interval 5 minute; diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json index 579a2636ad5..859f848a256 100644 --- a/tests/queries/skip_list.json +++ b/tests/queries/skip_list.json @@ -193,29 +193,23 @@ [ /// Pessimistic list of tests which work badly in parallel. /// Probably they need better investigation. 
- "00062_replicated_merge_tree_alter_zookeeper", "00080_show_tables_and_system_tables", "00101_materialized_views_and_insert_without_explicit_database", "00109_shard_totals_after_having", "00110_external_sort", "00116_storage_set", - "00121_drop_column_zookeeper", "00133_long_shard_memory_tracker_and_exception_safety", "00158_buffer_and_nonexistent_table", "00180_attach_materialized_view", - "00226_zookeeper_deduplication_and_unexpected_parts", - "00236_replicated_drop_on_non_leader_zookeeper", "00305_http_and_readonly", "00311_array_primary_key", "00417_kill_query", "00423_storage_log_single_thread", "00429_long_http_bufferization", "00446_clear_column_in_partition_concurrent_zookeeper", - "00446_clear_column_in_partition_zookeeper", "00463_long_sessions_in_http_interface", "00505_shard_secure", "00508_materialized_view_to", - "00516_deduplication_after_drop_partition_zookeeper", "00534_functions_bad_arguments10", "00552_or_nullable", "00564_versioned_collapsing_merge_tree", @@ -233,7 +227,6 @@ "00626_replace_partition_from_table", "00626_replace_partition_from_table_zookeeper", "00633_materialized_view_and_too_many_parts_zookeeper", - "00643_cast_zookeeper", "00652_mergetree_mutations", "00652_replicated_mutations_zookeeper", "00682_empty_parts_merge", @@ -249,7 +242,6 @@ "00751_default_databasename_for_view", "00753_alter_attach", "00754_alter_modify_column_partitions", - "00754_alter_modify_order_by_replicated_zookeeper", "00763_long_lock_buffer_alter_destination_table", "00800_versatile_storage_join", "00804_test_alter_compression_codecs", @@ -264,22 +256,18 @@ "00857_global_joinsavel_table_alias", "00899_long_attach_memory_limit", "00910_buffer_prewhere", - "00910_zookeeper_custom_compression_codecs_replicated", "00926_adaptive_index_granularity_merge_tree", "00926_adaptive_index_granularity_pk", "00926_adaptive_index_granularity_replacing_merge_tree", - "00926_zookeeper_adaptive_index_granularity_replicated_merge_tree", "00933_alter_ttl", "00933_reserved_word", "00933_test_fix_extra_seek_on_compressed_cache", - "00933_ttl_replicated_zookeeper", "00933_ttl_with_default", "00950_dict_get", "00955_test_final_mark", "00976_ttl_with_old_parts", "00980_merge_alter_settings", "00980_zookeeper_merge_tree_alter_settings", - "00988_constraints_replication_zookeeper", "00989_parallel_parts_loading", "00992_system_parts_race_condition_zookeeper_long", "00993_system_parts_race_condition_drop_zookeeper", @@ -351,7 +339,6 @@ "01125_dict_ddl_cannot_add_column", "01127_month_partitioning_consistency_select", "01130_in_memory_parts_partitons", - "01135_default_and_alter_zookeeper", "01148_zookeeper_path_macros_unfolding", "01150_ddl_guard_rwr", "01153_attach_mv_uuid", @@ -376,11 +363,9 @@ "01254_dict_load_after_detach_attach", "01257_dictionary_mismatch_types", "01259_dictionary_custom_settings_ddl", - "01267_alter_default_key_columns_zookeeper", "01268_dictionary_direct_layout", "01269_alias_type_differs", "01272_suspicious_codecs", - "01277_alter_rename_column_constraint_zookeeper", "01280_ssd_complex_key_dictionary", "01280_ttl_where_group_by", "01281_group_by_limit_memory_tracking", @@ -394,7 +379,6 @@ "01305_replica_create_drop_zookeeper", "01307_multiple_leaders_zookeeper", "01318_long_unsuccessful_mutation_zookeeper", - "01319_manual_write_to_replicas", "01320_create_sync_race_condition_zookeeper", "01338_long_select_and_alter", "01338_long_select_and_alter_zookeeper", @@ -403,7 +387,6 @@ "01357_version_collapsing_attach_detach_zookeeper", "01375_compact_parts_codecs", 
"01376_GROUP_BY_injective_elimination_dictGet", - "01378_alter_rename_with_ttl_zookeeper", "01383_remote_ambiguous_column_shard", "01388_clear_all_columns", "01391_join_on_dict_crash", @@ -415,7 +398,6 @@ "01415_sticking_mutations", "01417_freeze_partition_verbose", "01417_freeze_partition_verbose_zookeeper", - "01430_modify_sample_by_zookeeper", "01444_create_table_drop_database_race", "01454_storagememory_data_race_challenge", "01455_rank_correlation_spearman", @@ -427,7 +409,6 @@ "01470_show_databases_like", "01471_calculate_ttl_during_merge", "01487_distributed_in_not_default_db", - "01493_alter_remove_properties_zookeeper", "01493_storage_set_persistency", "01494_storage_join_persistency", "01501_cache_dictionary_all_fields", @@ -439,7 +420,6 @@ "01526_complex_key_dict_direct_layout", "01527_clickhouse_local_optimize", "01527_dist_sharding_key_dictGet_reload", - "01530_drop_database_atomic_sync", "01541_max_memory_usage_for_user_long", "01542_dictionary_load_exception_race", "01545_system_errors", // looks at the difference of values in system.errors @@ -455,30 +435,23 @@ "01601_detach_permanently", "01602_show_create_view", "01603_rename_overwrite_bug", - "01666_blns", "01646_system_restart_replicas_smoke", // system restart replicas is a global query "01656_test_query_log_factories_info", "01658_read_file_to_stringcolumn", - "01669_columns_declaration_serde", "01676_dictget_in_default_expression", "01681_cache_dictionary_simple_key", "01682_cache_dictionary_complex_key", "01683_flat_dictionary", "01684_ssd_cache_dictionary_simple_key", "01685_ssd_cache_dictionary_complex_key", - "01700_system_zookeeper_path_in", "01702_system_query_log", // It's ok to execute in parallel with oter tests but not several instances of the same test. "01702_system_query_log", // Runs many global system queries - "01715_background_checker_blather_zookeeper", "01721_engine_file_truncate_on_insert", // It's ok to execute in parallel but not several instances of the same test. "01722_long_brotli_http_compression_json_format", // It is broken in some unimaginable way with the genius error 'cannot write to ofstream'. Not sure how to debug this - "01747_alter_partition_key_enum_zookeeper", "01748_dictionary_table_dot", // creates database "01760_polygon_dictionaries", "01760_system_dictionaries", - "01761_alter_decimal_zookeeper", "01360_materialized_view_with_join_on_query_log", // creates and drops MVs on query_log, which may interrupt flushes. - "01509_parallel_quorum_insert_no_replicas", // It's ok to execute in parallel with oter tests but not several instances of the same test. "attach", "ddl_dictionaries", "dictionary", @@ -510,7 +483,6 @@ "01804_dictionary_decimal256_type", "01850_dist_INSERT_preserve_error", // uses cluster with different static databases shard_0/shard_1 "01821_table_comment", - "01710_projection_fetch", "01824_prefer_global_in_and_join", "01870_modulo_partition_key", "01870_buffer_flush", // creates database @@ -528,7 +500,12 @@ "01902_table_function_merge_db_repr", "01946_test_zstd_decompression_with_escape_sequence_at_the_end_of_buffer", "01946_test_wrong_host_name_access", - "01213_alter_rename_with_default_zookeeper", /// Warning: Removing leftovers from table. 
+ "01493_alter_remove_properties_zookeeper", + "00510_materizlized_view_and_deduplication_zookeeper", /// static UUID + "00509_extended_storage_definition_syntax_zookeeper", /// leftovers + "00083_create_merge_tree_zookeeper", /// leftovers + "01532_execute_merges_on_single_replica", /// static zk path + "01530_drop_database_atomic_sync", /// creates database "02001_add_default_database_to_system_users" ///create user ] } diff --git a/utils/check-style/check-style b/utils/check-style/check-style index 551368e5fd8..05b8a1485e4 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -89,7 +89,7 @@ find $ROOT_PATH/tests/queries -iname '*fail*' | # Queries to system.query_log/system.query_thread_log should have current_database = currentDatabase() condition # NOTE: it is not that accuate, but at least something. tests_with_query_log=( $( - find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' | + find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' | grep -vP $EXCLUDE_DIRS | xargs grep --with-filename -e system.query_log -e system.query_thread_log | cut -d: -f1 | sort -u ) ) @@ -102,7 +102,7 @@ done # Queries to system.tables/system.parts/system.detached_parts/system.parts_columns/system.columns should have database = currentDatabase() condition # NOTE: it is not that accuate, but at least something. tests_with_database_column=( $( - find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' | + find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' | grep -vP $EXCLUDE_DIRS | grep -v -x -e $ROOT_PATH/tests/queries/query_test.py | xargs grep --with-filename -e system.tables -e system.parts -e system.detached_parts -e system.parts_columns -e system.columns | cut -d: -f1 | sort -u @@ -121,25 +121,21 @@ done # Queries with ReplicatedMergeTree # NOTE: it is not that accuate, but at least something. tests_with_replicated_merge_tree=( $( - find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' | + find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' | grep -vP $EXCLUDE_DIRS | xargs grep --with-filename -e ReplicatedMergeTree | cut -d: -f1 | sort -u ) ) for test_case in "${tests_with_replicated_merge_tree[@]}"; do case "$test_case" in + *.gen.*) + ;; *.sh) test_case_zk_prefix="\$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX" - grep -q -e "ReplicatedMergeTree.*$test_case_zk_prefix" "$test_case" || echo "ReplicatedMergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)" + grep -q -e "ReplicatedMergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "ReplicatedMergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)" ;; - *.sql) - # NOTE: *.sql is not supported because it is not possible right now, because: - # - ReplicatedMergeTree supports only ASTLiteral for zookeeper path - # (and adding support of other nodes, with evaluating them are not that easy, due to zk_prefix is "optional") - # - Hence concat(currentDatabase(), 'foo') - # - Also params cannot be used, because the are wrapped with CAST() - # - # But hopefully they will not be a problem - # (since they do not do any "stressing" and overlap probability should be lower). 
+ *.sql|*.sql.j2) + test_case_zk_prefix="\({database}\|currentDatabase()\)" + grep -q -e "ReplicatedMergeTree[ ]*(.*$test_case_zk_prefix" "$test_case" || echo "ReplicatedMergeTree should contain '$test_case_zk_prefix' in zookeeper path to avoid overlaps ($test_case)" ;; *.py) # Right now there is not such tests anyway @@ -186,7 +182,7 @@ find $ROOT_PATH/{src,programs,utils} -name '*.h' | while read file; do [[ $(head -n1 $file) != '#pragma once' ]] && echo "File $file must have '#pragma once' in first line"; done # Check for executable bit on non-executable files -find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -P '.' && echo "These files should not be executable." +find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} '(' -name '*.cpp' -or -name '*.h' -or -name '*.sql' -or -name '*.j2' -or -name '*.xml' -or -name '*.reference' -or -name '*.txt' -or -name '*.md' ')' -and -executable | grep -P '.' && echo "These files should not be executable." # Check for BOM find $ROOT_PATH/{src,base,programs,utils,tests,docs,website,cmake} -name '*.md' -or -name '*.cpp' -or -name '*.h' | xargs grep -l -F $'\xEF\xBB\xBF' | grep -P '.' && echo "Files should not have UTF-8 BOM"
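The practical consequence of the relaxed *.sql rule above: a test may now create a replicated table directly in SQL, provided the ZooKeeper path embeds {database} (or currentDatabase()), which is unique per test run and therefore keeps parallel runs from colliding on the same path. A minimal compliant sketch (the table name is hypothetical):

CREATE TABLE data_rmt (key UInt64)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/data_rmt', 'replica1')
ORDER BY key;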