diff --git a/README.md b/README.md index 9561458ba37..87fb4920222 100644 --- a/README.md +++ b/README.md @@ -22,12 +22,13 @@ curl https://clickhouse.com/ | sh ## Upcoming Events -* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - Jun 8 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release. -* [**ClickHouse Meetup in Bangalore**](https://www.meetup.com/clickhouse-bangalore-user-group/events/293740066/) - Jun 7 -* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7 +* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release. +* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4 +* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18 +* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19 +* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20 - -Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler clickhouse com. +Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler clickhouse com. ## Recent Recordings * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. 
Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments" diff --git a/base/base/find_symbols.h b/base/base/find_symbols.h index a8747ecc9b7..83232669c04 100644 --- a/base/base/find_symbols.h +++ b/base/base/find_symbols.h @@ -2,6 +2,7 @@ #include #include +#include #if defined(__SSE2__) #include diff --git a/base/base/interpolate.h b/base/base/interpolate.h index 1d4fc0b6257..4c27f70c95b 100644 --- a/base/base/interpolate.h +++ b/base/base/interpolate.h @@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio) assert(min > 0 && ratio >= 0 && ratio <= 1); return min * std::pow(max / min, ratio); } + +constexpr double interpolateLinear(double min, double max, double ratio) +{ + return std::lerp(min, max, ratio); +} diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 16198887075..5fe942d1cd0 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -116,43 +116,79 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A # ARROW_ORC + adapters/orc/CMakefiles set(ORC_SRCS "${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h" - "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc" - "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc" - "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" - "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" - "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc" + "${ORC_SOURCE_SRC_DIR}/Adaptor.cc" + "${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" + "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc" + "${ORC_SOURCE_SRC_DIR}/BlockBuffer.hh" + "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc" + "${ORC_SOURCE_SRC_DIR}/BloomFilter.hh" + "${ORC_SOURCE_SRC_DIR}/Bpacking.hh" + "${ORC_SOURCE_SRC_DIR}/BpackingDefault.cc" + "${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh" "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc" + "${ORC_SOURCE_SRC_DIR}/ByteRLE.hh" + "${ORC_SOURCE_SRC_DIR}/CMakeLists.txt" "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc" "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnReader.hh" "${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc" + "${ORC_SOURCE_SRC_DIR}/ColumnWriter.hh" "${ORC_SOURCE_SRC_DIR}/Common.cc" "${ORC_SOURCE_SRC_DIR}/Compression.cc" + "${ORC_SOURCE_SRC_DIR}/Compression.hh" + "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.cc" + "${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.hh" + "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.cc" + "${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.hh" + "${ORC_SOURCE_SRC_DIR}/Dispatch.hh" + "${ORC_SOURCE_SRC_DIR}/Exceptions.cc" "${ORC_SOURCE_SRC_DIR}/Int128.cc" "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc" + "${ORC_SOURCE_SRC_DIR}/LzoDecompressor.hh" "${ORC_SOURCE_SRC_DIR}/MemoryPool.cc" + "${ORC_SOURCE_SRC_DIR}/Murmur3.cc" + "${ORC_SOURCE_SRC_DIR}/Murmur3.hh" + "${ORC_SOURCE_SRC_DIR}/Options.hh" + "${ORC_SOURCE_SRC_DIR}/OrcFile.cc" "${ORC_SOURCE_SRC_DIR}/RLE.cc" + "${ORC_SOURCE_SRC_DIR}/RLE.hh" + "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc" + "${ORC_SOURCE_SRC_DIR}/RLEV2Util.hh" "${ORC_SOURCE_SRC_DIR}/RLEv1.cc" + "${ORC_SOURCE_SRC_DIR}/RLEv1.hh" + "${ORC_SOURCE_SRC_DIR}/RLEv2.hh" + "${ORC_SOURCE_SRC_DIR}/Reader.cc" + "${ORC_SOURCE_SRC_DIR}/Reader.hh" "${ORC_SOURCE_SRC_DIR}/RleDecoderV2.cc" "${ORC_SOURCE_SRC_DIR}/RleEncoderV2.cc" - "${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc" +
"${ORC_SOURCE_SRC_DIR}/SchemaEvolution.cc" + "${ORC_SOURCE_SRC_DIR}/SchemaEvolution.hh" "${ORC_SOURCE_SRC_DIR}/Statistics.cc" + "${ORC_SOURCE_SRC_DIR}/Statistics.hh" "${ORC_SOURCE_SRC_DIR}/StripeStream.cc" + "${ORC_SOURCE_SRC_DIR}/StripeStream.hh" "${ORC_SOURCE_SRC_DIR}/Timezone.cc" + "${ORC_SOURCE_SRC_DIR}/Timezone.hh" "${ORC_SOURCE_SRC_DIR}/TypeImpl.cc" + "${ORC_SOURCE_SRC_DIR}/TypeImpl.hh" + "${ORC_SOURCE_SRC_DIR}/Utils.hh" "${ORC_SOURCE_SRC_DIR}/Vector.cc" "${ORC_SOURCE_SRC_DIR}/Writer.cc" - "${ORC_SOURCE_SRC_DIR}/Adaptor.cc" - "${ORC_SOURCE_SRC_DIR}/BloomFilter.cc" - "${ORC_SOURCE_SRC_DIR}/Murmur3.cc" - "${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc" - "${ORC_SOURCE_SRC_DIR}/wrap/orc-proto-wrapper.cc" "${ORC_SOURCE_SRC_DIR}/io/InputStream.cc" + "${ORC_SOURCE_SRC_DIR}/io/InputStream.hh" "${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc" - "${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc" + "${ORC_SOURCE_SRC_DIR}/io/OutputStream.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc" + "${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.hh" + "${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc" ) add_library(_orc ${ORC_SRCS}) diff --git a/contrib/orc b/contrib/orc index c5d7755ba0b..568d1d60c25 160000 --- a/contrib/orc +++ b/contrib/orc @@ -1 +1 @@ -Subproject commit c5d7755ba0b9a95631c8daea4d094101f26ec761 +Subproject commit 568d1d60c250af1890f226c182bc15bd8cc94cf1 diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index 951c443c30d..e72c28b7167 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -59,6 +59,8 @@ install_packages previous_release_package_folder # available for dump via clickhouse-local configure +# it contains some new settings, but we can safely remove it +rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml start @@ -85,6 +87,8 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1 export ZOOKEEPER_FAULT_INJECTION=0 configure +# it contains some new settings, but we can safely remove it +rm /etc/clickhouse-server/config.d/merge_tree.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml start @@ -115,6 +119,13 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c install_packages package_folder export ZOOKEEPER_FAULT_INJECTION=1 configure + +# Just in case previous version left some garbage in zk +sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \ + | sed "s|>1<|>0<|g" \ + > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp +sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml + start 500 clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \ || (rg --text ".*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \ diff --git a/docs/changelogs/v23.3.5.9-lts.md b/docs/changelogs/v23.3.5.9-lts.md new file mode 100644 index 00000000000..df1aab541ec --- /dev/null +++ b/docs/changelogs/v23.3.5.9-lts.md @@ -0,0 +1,19 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.3.5.9-lts (f5fbc2fd2b3) FIXME as compared to 
v23.3.4.17-lts (2c99b73ff40) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)). +* Cleanup moving parts [#50489](https://github.com/ClickHouse/ClickHouse/pull/50489) ([vdimir](https://github.com/vdimir)). +* Do not apply projection if read-in-order was enabled. [#50923](https://github.com/ClickHouse/ClickHouse/pull/50923) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Increase max array size in group bitmap [#50620](https://github.com/ClickHouse/ClickHouse/pull/50620) ([Kruglov Pavel](https://github.com/Avogar)). + diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 5fd2b82375f..79baf04d75d 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1298,8 +1298,8 @@ For output it uses the following correspondence between ClickHouse types and BSO | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x04` array | | [Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x03` document | | [Map](/docs/en/sql-reference/data-types/map.md) | `\x03` document | -| [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `\x10` int32 | -| [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `\x05` binary, `\x00` binary subtype | +| [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `\x10` int32 | +| [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `\x05` binary, `\x00` binary subtype | For input it uses the following correspondence between BSON types and ClickHouse types: @@ -1309,7 +1309,7 @@ For input it uses the following correspondence between BSON types and ClickHouse | `\x02` string | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | | `\x03` document | [Map](/docs/en/sql-reference/data-types/map.md)/[Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | | `\x04` array | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | -| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | +| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/ipv6.md) | | `\x05` binary, `\x02` old binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | | `\x05` binary, `\x03` old uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) | | `\x05` binary, `\x04` uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) | @@ -1319,7 +1319,7 @@ For input it uses the following correspondence between BSON types and ClickHouse | `\x0A` null value | [NULL](/docs/en/sql-reference/data-types/nullable.md) | | `\x0D` JavaScript code | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | | `\x0E` symbol | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | -| `\x10` int32 | 
[Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) | +| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) | | `\x12` int64 | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal64](/docs/en/sql-reference/data-types/decimal.md)/[DateTime64](/docs/en/sql-reference/data-types/datetime64.md) | Other BSON types are not supported. Also, it performs conversion between different integer types (for example, you can insert BSON int32 value into ClickHouse UInt8). @@ -1669,8 +1669,8 @@ The table below shows supported data types and how they match ClickHouse [data t | `ENUM` | [Enum(8/16)](/docs/en/sql-reference/data-types/enum.md) | `ENUM` | | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` | | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` | -| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` | -| `DATA` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `DATA` | +| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` | +| `DATA` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `DATA` | | `DATA` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `DATA` | | `DATA` | [Decimal128/Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `DATA` | | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | [Map](/docs/en/sql-reference/data-types/map.md) | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | @@ -1872,8 +1872,8 @@ The table below shows supported data types and how they match ClickHouse [data t | `long (timestamp-millis)` \** | [DateTime64(3)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \** | | `long (timestamp-micros)` \** | [DateTime64(6)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \** | | `bytes (decimal)` \** | [DateTime64(N)](/docs/en/sql-reference/data-types/datetime.md) | `bytes (decimal)` \** | -| `int` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `int` | -| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `fixed(16)` | +| `int` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `int` | +| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `fixed(16)` | | `bytes (decimal)` \** | [Decimal(P, S)](/docs/en/sql-reference/data-types/decimal.md) | `bytes (decimal)` \** | | `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** | | `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` | @@ -2026,9 +2026,9 @@ The table below shows supported data types and how they match ClickHouse [data t | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` | | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` | | `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` | -| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` | -| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` | -| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | 
[Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` | +| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` | +| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` | +| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` | Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested. @@ -2082,7 +2082,7 @@ Special format for reading Parquet file metadata (https://parquet.apache.org/doc - logical_type - column logical type - compression - compression used for this column - total_uncompressed_size - total uncompressed bytes size of the column, calculated as the sum of total_uncompressed_size of the column from all row groups - - total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups + - total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups - space_saved - percent of space saved by compression, calculated as (1 - total_compressed_size/total_uncompressed_size). - encodings - the list of encodings used for this column - row_groups - the list of row groups metadata with the next structure: @@ -2229,9 +2229,9 @@ The table below shows supported data types and how they match ClickHouse [data t | `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` | | `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` | | `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` | -| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` | -| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_SIZE_BINARY` | -| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` | +| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` | +| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_SIZE_BINARY` | +| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` | Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested. 
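As a concrete illustration of the IPv4/IPv6 mappings in the tables above, the following sketch round-trips both types through a Parquet file; the table name `ips` and the file name `ips.parquet` are hypothetical, not part of this patch:

```sql
-- Illustrative sketch: on export, IPv4 is written as UINT32 and
-- IPv6 as FIXED_LENGTH_BYTE_ARRAY; names here are hypothetical.
CREATE TABLE ips (v4 IPv4, v6 IPv6) ENGINE = MergeTree ORDER BY v4;
INSERT INTO ips VALUES ('116.106.34.242', '2a02:aa08:e000:3100::2');

-- Export to Parquet and read it back; the ClickHouse types are restored.
SELECT * FROM ips INTO OUTFILE 'ips.parquet' FORMAT Parquet;
INSERT INTO ips FROM INFILE 'ips.parquet' FORMAT Parquet;
```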
@@ -2297,7 +2297,7 @@ The table below shows supported data types and how they match ClickHouse [data t | `Struct` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `Struct` | | `Map` | [Map](/docs/en/sql-reference/data-types/map.md) | `Map` | | `Int` | [IPv4](/docs/en/sql-reference/data-types/int-uint.md) | `Int` | -| `Binary` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `Binary` | +| `Binary` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `Binary` | | `Binary` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `Binary` | | `Binary` | [Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `Binary` | @@ -2510,7 +2510,7 @@ ClickHouse supports reading and writing [MessagePack](https://msgpack.org/) data | `uint 64` | [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `uint 64` | | `fixarray`, `array 16`, `array 32` | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | `fixarray`, `array 16`, `array 32` | | `fixmap`, `map 16`, `map 32` | [Map](/docs/en/sql-reference/data-types/map.md) | `fixmap`, `map 16`, `map 32` | -| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `uint 32` | +| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `uint 32` | | `bin 8` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8` | | `int 8` | [Enum8](/docs/en/sql-reference/data-types/enum.md) | `int 8` | | `bin 8` | [(U)Int128/(U)Int256](/docs/en/sql-reference/data-types/int-uint.md) | `bin 8` | diff --git a/docs/en/operations/configuration-files.md b/docs/en/operations/configuration-files.md index b3583e156ad..d3e21cb2364 100644 --- a/docs/en/operations/configuration-files.md +++ b/docs/en/operations/configuration-files.md @@ -6,32 +6,43 @@ sidebar_label: Configuration Files # Configuration Files -ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note, that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have main configs as `config.xml` and `users.xml` and write additional files in `config.d` and `users.d` directories in `.yaml`. +The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as its default configuration file, but it is also possible to specify the location of the configuration file manually at server startup using command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in the ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`. -All XML files should have the same root element, usually `<clickhouse>`. As for YAML, `clickhouse:` should not be present, the parser will insert it automatically.
+It is possible to mix XML and YAML configuration files, for example you could have a main configuration file `config.xml` and additional configuration files `config.d/network.xml`, `config.d/timezone.yaml` and `config.d/keeper.yaml`. Mixing XML and YAML within a single configuration file is not supported. XML configuration files should use `<clickhouse>...</clickhouse>` as top-level tag. In YAML configuration files, `clickhouse:` is optional, the parser inserts it implicitly if absent. -## Override {#override} +## Overriding Configuration {#override} -Some settings specified in the main configuration file can be overridden in other configuration files: +The merge of configuration files behaves as one intuitively expects: The contents of both files are combined recursively, children with the same name are replaced by the element of the more specific configuration file. The merge can be customized using attributes `replace` and `remove`. +- Attribute `replace` means that the element is replaced by the specified one. +- Attribute `remove` means that the element is deleted. -- The `replace` or `remove` attributes can be specified for the elements of these configuration files. -- If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children. -- If `replace` is specified, it replaces the entire element with the specified one. -- If `remove` is specified, it deletes the element. +To specify that a value of an element should be replaced by the value of an environment variable, you can use attribute `from_env`. -You can also declare attributes as coming from environment variables by using `from_env="VARIABLE_NAME"`: +Example with `$MAX_QUERY_SIZE = 150000`: ```xml -<macros> - <replica from_env="REPLICA" /> - <layer from_env="LAYER" /> - <shard from_env="SHARD" /> -</macros> +<clickhouse> + <profiles> + <default> + <max_query_size from_env="MAX_QUERY_SIZE"/> + </default> + </profiles> +</clickhouse> ``` -## Substitution {#substitution} +which is equal to + +``` xml +<clickhouse> + <profiles> + <default> + <max_query_size>150000</max_query_size> + </default> + </profiles> +</clickhouse> +``` + +## Substituting Configuration {#substitution} The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/clickhouse/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](../operations/server-configuration-parameters/settings.md#macros)). diff --git a/docs/en/operations/named-collections.md b/docs/en/operations/named-collections.md index a521a369721..02f52b6f8bf 100644 --- a/docs/en/operations/named-collections.md +++ b/docs/en/operations/named-collections.md @@ -50,7 +50,7 @@ To manage named collections with DDL a user must have the `named_control_collect ``` :::tip -In the above example the `passowrd_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. +In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password.
This configuration for the user `default` has the attribute `replace=true` because the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user. ::: ## Storing named collections in configuration files diff --git a/docs/en/operations/optimizing-performance/index.md b/docs/en/operations/optimizing-performance/index.md deleted file mode 100644 index 83e9430ed27..00000000000 --- a/docs/en/operations/optimizing-performance/index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -slug: /en/operations/optimizing-performance/ -sidebar_label: Optimizing Performance -sidebar_position: 52 ---- - -# Optimizing Performance - -- [Sampling query profiler](../../operations/optimizing-performance/sampling-query-profiler.md) diff --git a/docs/en/operations/server-configuration-parameters/index.md b/docs/en/operations/server-configuration-parameters/index.md deleted file mode 100644 index d4b941c0819..00000000000 --- a/docs/en/operations/server-configuration-parameters/index.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -slug: /en/operations/server-configuration-parameters/ -sidebar_position: 54 -sidebar_label: Server Configuration Parameters -pagination_next: en/operations/server-configuration-parameters/settings ---- - -# Server Configuration Parameters - -This section contains descriptions of server settings that cannot be changed at the session or query level. - -These settings are stored in the `config.xml` file on the ClickHouse server. - -Other settings are described in the “[Settings](../../operations/settings/index.md#session-settings-intro)” section. - -Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes). diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 83756097cfa..ce62d436813 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -7,6 +7,14 @@ description: This section contains descriptions of server settings that cannot b # Server Settings +This section contains descriptions of server settings that cannot be changed at the session or query level. + +These settings are stored in the `config.xml` file on the ClickHouse server. + +Other settings are described in the “[Settings](../../operations/settings/index.md#session-settings-intro)” section. + +Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes). + ## allow_use_jemalloc_memory Allows to use jemalloc memory. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index f0668c1d58f..baefbb2cf6f 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -2941,7 +2941,7 @@ Default value: `0`. ## mutations_sync {#mutations_sync} -Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously. +Allows to execute `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
Possible values: diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md index 71e1452cef1..b9fdd19c643 100644 --- a/docs/en/operations/system-tables/query_log.md +++ b/docs/en/operations/system-tables/query_log.md @@ -71,11 +71,11 @@ Columns: - 0 — Query was initiated by another query as part of distributed query execution. - `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query. - `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution). - `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution). diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index cdd23bb15db..a6d5632ade9 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -40,11 +40,11 @@ Columns: - 0 — Query was initiated by another query for distributed query execution. - `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the query. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the parent query. 
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Interface that the query was initiated from. Possible values: - 1 — TCP. diff --git a/docs/en/operations/system-tables/session_log.md b/docs/en/operations/system-tables/session_log.md index 661d34677e4..5b1a2b2a489 100644 --- a/docs/en/operations/system-tables/session_log.md +++ b/docs/en/operations/system-tables/session_log.md @@ -28,7 +28,7 @@ Columns: - `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users. - `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied. - `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out. -- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — The IP address that was used to log in/out. +- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — The IP address that was used to log in/out. - `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to log in/out. - `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — The interface from which the login was initiated. Possible values: - `TCP` diff --git a/docs/en/operations/system-tables/zookeeper_connection.md b/docs/en/operations/system-tables/zookeeper_connection.md index 9438cda1808..2f0ed303ce3 100644 --- a/docs/en/operations/system-tables/zookeeper_connection.md +++ b/docs/en/operations/system-tables/zookeeper_connection.md @@ -11,7 +11,8 @@ Columns: - `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to. - `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to. - `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config. -- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established +- `connected_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the connection was established +- `session_uptime_elapsed_seconds` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Seconds elapsed since the connection was established - `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired. - `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version. - `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection. 
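The `address` and `initial_address` columns in the system tables above are typed as `IPv6`, so they can be compared against IP values directly; IPv4 clients typically appear as IPv4-mapped IPv6 values. A small illustrative query, not part of this patch:

```sql
-- List recent queries that arrived from localhost (IPv4-mapped form).
SELECT event_time, user, query_id
FROM system.query_log
WHERE address = toIPv6('::ffff:127.0.0.1')
ORDER BY event_time DESC
LIMIT 10;
```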
@@ -23,7 +24,7 @@ SELECT * FROM system.zookeeper_connection; ``` ``` text -┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐ -│ default_zookeeper │ 127.0.0.1 │ 2181 │ 0 │ 2023-05-19 14:30:16 │ 0 │ 0 │ 216349144108826660 │ -└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘ +┌─name────┬─host──────┬─port─┬─index─┬──────connected_time─┬─session_uptime_elapsed_seconds─┬─is_expired─┬─keeper_api_version─┬─client_id─┐ +│ default │ 127.0.0.1 │ 9181 │ 0 │ 2023-06-15 14:36:01 │ 3058 │ 0 │ 3 │ 5 │ +└─────────┴───────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────┴────────────────────┴───────────┘ ``` diff --git a/docs/en/operations/system-tables/zookeeper_log.md b/docs/en/operations/system-tables/zookeeper_log.md index b7cc4e22cd6..dce5be29f62 100644 --- a/docs/en/operations/system-tables/zookeeper_log.md +++ b/docs/en/operations/system-tables/zookeeper_log.md @@ -15,7 +15,7 @@ Columns with request parameters: - `Finalize` — The connection is lost, no response was received. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the event happened. - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — The date and time when the event happened. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address of ZooKeeper server that was used to make the request. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address of ZooKeeper server that was used to make the request. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The port of ZooKeeper server that was used to make the request. - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — The session ID that the ZooKeeper server sets for each connection. - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — The ID of the request within the session. This is usually a sequential request number. It is the same for the request row and the paired `response`/`finalize` row. diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index 508307a0543..ffd063590fa 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -28,6 +28,6 @@ ClickHouse data types include: - **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell - **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type. 
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column setting its default value for the data type) -- **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses +- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses - **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon` - **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md) diff --git a/docs/en/sql-reference/data-types/domains/ipv4.md b/docs/en/sql-reference/data-types/ipv4.md similarity index 60% rename from docs/en/sql-reference/data-types/domains/ipv4.md rename to docs/en/sql-reference/data-types/ipv4.md index b34814211fc..288806f47b3 100644 --- a/docs/en/sql-reference/data-types/domains/ipv4.md +++ b/docs/en/sql-reference/data-types/ipv4.md @@ -1,12 +1,12 @@ --- -slug: /en/sql-reference/data-types/domains/ipv4 +slug: /en/sql-reference/data-types/ipv4 sidebar_position: 59 sidebar_label: IPv4 --- ## IPv4 -`IPv4` is a domain based on `UInt32` type and serves as a typed replacement for storing IPv4 values. It provides compact storage with the human-friendly input-output format and column type information on inspection. +IPv4 addresses. Stored in 4 bytes as UInt32. ### Basic Usage @@ -57,25 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴───────────┘ ``` -Domain values are not implicitly convertible to types other than `UInt32`. -If you want to convert `IPv4` value to a string, you have to do that explicitly with `IPv4NumToString()` function: +**See Also** -``` sql -SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1; -``` - - ┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ - │ String │ 183.247.232.58 │ - └───────────────────────────────────┴────────────────┘ - -Or cast to a `UInt32` value: - -``` sql -SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ -│ UInt32 │ 3086477370 │ -└──────────────────────────────────┴────────────┘ -``` +- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md) diff --git a/docs/en/sql-reference/data-types/domains/ipv6.md b/docs/en/sql-reference/data-types/ipv6.md similarity index 61% rename from docs/en/sql-reference/data-types/domains/ipv6.md rename to docs/en/sql-reference/data-types/ipv6.md index dcb22e3cb6d..97959308b58 100644 --- a/docs/en/sql-reference/data-types/domains/ipv6.md +++ b/docs/en/sql-reference/data-types/ipv6.md @@ -1,12 +1,12 @@ --- -slug: /en/sql-reference/data-types/domains/ipv6 +slug: /en/sql-reference/data-types/ipv6 sidebar_position: 60 sidebar_label: IPv6 --- ## IPv6 -`IPv6` is a domain based on `FixedString(16)` type and serves as a typed replacement for storing IPv6 values. It provides compact storage with the human-friendly input-output format and column type information on inspection. +IPv6 addresses. Stored in 16 bytes as UInt128 big-endian. ### Basic Usage @@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴──────────────────────────────────┘ ``` -Domain values are not implicitly convertible to types other than `FixedString(16)`.
-If you want to convert `IPv6` value to a string, you have to do that explicitly with `IPv6NumToString()` function: +**See Also** -``` sql -SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐ -│ String │ 2001:44c8:129:2632:33:0:252:2 │ -└───────────────────────────────────┴───────────────────────────────┘ -``` - -Or cast to a `FixedString(16)` value: - -``` sql -SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐ -│ FixedString(16) │ ��� │ -└───────────────────────────────────────────┴─────────┘ -``` +- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md) diff --git a/docs/en/sql-reference/functions/ip-address-functions.md b/docs/en/sql-reference/functions/ip-address-functions.md index 0dc1db1161b..33c788a632e 100644 --- a/docs/en/sql-reference/functions/ip-address-functions.md +++ b/docs/en/sql-reference/functions/ip-address-functions.md @@ -248,7 +248,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32); ## toIPv4(string) -An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/domains/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`. +An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`. ``` sql WITH @@ -296,7 +296,7 @@ Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns null ## toIPv6 -Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/domains/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value. +Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value. Similar to [IPv6StringToNum](#ipv6stringtonums) function, which converts IPv6 address to binary format. If the input string contains a valid IPv4 address, then the IPv6 equivalent of the IPv4 address is returned. @@ -315,7 +315,7 @@ toIPv6(string) - IP address. -Type: [IPv6](../../sql-reference/data-types/domains/ipv6.md). +Type: [IPv6](../../sql-reference/data-types/ipv6.md). **Examples** diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 378f41c1199..dae2c7dd1d3 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -232,6 +232,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL; Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`). It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` executing turns out to be expensive. +Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). 
Syntax: diff --git a/docs/en/sql-reference/statements/alter/index.md b/docs/en/sql-reference/statements/alter/index.md index 7a687a067aa..7dadc2be5b2 100644 --- a/docs/en/sql-reference/statements/alter/index.md +++ b/docs/en/sql-reference/statements/alter/index.md @@ -60,7 +60,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time, specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown. ::: -For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting. +For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting. ## Related content diff --git a/docs/en/sql-reference/statements/alter/projection.md b/docs/en/sql-reference/statements/alter/projection.md index b7399442d41..fb438927089 100644 --- a/docs/en/sql-reference/statements/alter/projection.md +++ b/docs/en/sql-reference/statements/alter/projection.md @@ -142,19 +142,19 @@ The following operations with [projections](/docs/en/engines/table-engines/merge ## ADD PROJECTION -`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata. +`ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata. ## DROP PROJECTION -`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). ## MATERIALIZE PROJECTION -`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). ## CLEAR PROJECTION -`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files. 
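Combined with the `mutations_sync` setting described earlier, a typical projection lifecycle might look like the following sketch; `hits` is a hypothetical MergeTree table, not part of this patch:

```sql
-- Add a projection, then build it for data that already exists.
ALTER TABLE hits ADD PROJECTION IF NOT EXISTS p_by_user
(
    SELECT UserID, count() GROUP BY UserID
);

-- MATERIALIZE PROJECTION runs as a mutation; mutations_sync = 2 makes the
-- statement wait until the mutation finishes on all replicas.
SET mutations_sync = 2;
ALTER TABLE hits MATERIALIZE PROJECTION p_by_user;
```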
diff --git a/docs/en/sql-reference/statements/alter/skipping-index.md b/docs/en/sql-reference/statements/alter/skipping-index.md index 4194731d33a..42fd12d9487 100644 --- a/docs/en/sql-reference/statements/alter/skipping-index.md +++ b/docs/en/sql-reference/statements/alter/skipping-index.md @@ -10,15 +10,25 @@ sidebar_label: INDEX The following operations are available: -- `ALTER TABLE [db].table_name [ON CLUSTER cluster] ADD INDEX name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata. +## ADD INDEX -- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). +`ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata. -- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. +## DROP INDEX -The first two commands are lightweight in a sense that they only change metadata or remove files. +`ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). -Also, they are replicated, syncing indices metadata via ZooKeeper. +## MATERIALIZE INDEX + +`ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data. + +## CLEAR INDEX + +`ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Deletes the secondary index files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). + + +The commands `ADD`, `DROP`, and `CLEAR` are lightweight in the sense that they only change metadata or remove files. +Also, they are replicated, syncing indices metadata via ClickHouse Keeper or ZooKeeper. :::note Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants). diff --git a/docs/en/sql-reference/statements/create/dictionary.md b/docs/en/sql-reference/statements/create/dictionary.md index 29c72d62f24..c0a153c5660 100644 --- a/docs/en/sql-reference/statements/create/dictionary.md +++ b/docs/en/sql-reference/statements/create/dictionary.md @@ -82,6 +82,35 @@ LIFETIME(MIN 0 MAX 1000) LAYOUT(FLAT()) ``` +:::note +When using the SQL console in [ClickHouse Cloud](https://clickhouse.com), you must specify a user (`default` or any other user with the role `default_role`) and password when creating a dictionary. 
+::: + +```sql +CREATE USER IF NOT EXISTS clickhouse_admin +IDENTIFIED WITH sha256_password BY 'passworD43$x'; + +GRANT default_role TO clickhouse_admin; + +CREATE DATABASE foo_db; + +CREATE TABLE foo_db.source_table ( + id UInt64, + value String +) ENGINE = MergeTree +PRIMARY KEY id; + +CREATE DICTIONARY foo_db.id_value_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'source_table' USER 'clickhouse_admin' PASSWORD 'passworD43$x' DB 'foo_db' )) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); +``` + ### Create a dictionary from a table in a remote ClickHouse service Input table (in the remote ClickHouse service) `source_table`: diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index de44a001472..1a72f89fb1f 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -380,11 +380,15 @@ High compression levels are useful for asymmetric scenarios, like compress once, `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply: -- DEFLATE_QPL is experimental and can only be used after setting configuration parameter `allow_experimental_codecs=1`. +- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`. - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details. - DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details. - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled. +:::note +DEFLATE_QPL is not available in ClickHouse Cloud. +::: + ### Specialized Codecs These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themselves. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation. diff --git a/docs/en/sql-reference/statements/set.md b/docs/en/sql-reference/statements/set.md index 14f523adc3b..3e5e86eccf7 100644 --- a/docs/en/sql-reference/statements/set.md +++ b/docs/en/sql-reference/statements/set.md @@ -10,7 +10,7 @@ sidebar_label: SET SET param = value ``` -Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/index.md) this way. +Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/settings.md) this way. You can also set all the values from the specified settings profile in a single query.
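For illustration, both uses of `SET` in one session; the profile name `web` is hypothetical and must exist on the server:

```sql
-- Change a single session setting.
SET max_threads = 8;

-- Apply all values from a settings profile in a single query.
SET profile = 'web';
```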
diff --git a/docs/en/sql-reference/table-functions/remote.md b/docs/en/sql-reference/table-functions/remote.md index bf0abd49fc6..8dcae6a0a79 100644 --- a/docs/en/sql-reference/table-functions/remote.md +++ b/docs/en/sql-reference/table-functions/remote.md @@ -114,9 +114,8 @@ This example uses one table from a sample dataset. The database is `imdb`, and `first_name` String, `last_name` String, `gender` FixedString(1)) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') - ORDER BY (id, first_name, last_name, gender) - SETTINGS index_granularity = 8192 + ENGINE = MergeTree + ORDER BY (id, first_name, last_name, gender); ``` #### On the destination ClickHouse system: @@ -132,9 +131,8 @@ This example uses one table from a sample dataset. The database is `imdb`, and `first_name` String, `last_name` String, `gender` FixedString(1)) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}') - ORDER BY (id, first_name, last_name, gender) - SETTINGS index_granularity = 8192 + ENGINE = MergeTree + ORDER BY (id, first_name, last_name, gender); ``` #### Back on the source deployment: @@ -142,7 +140,7 @@ This example uses one table from a sample dataset. The database is `imdb`, and Insert into the new database and table created on the remote system. You will need the host, port, username, password, destination database, and destination table. ```sql INSERT INTO FUNCTION -remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD', rand()) +remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD') SELECT * from imdb.actors ``` diff --git a/docs/redirects.txt b/docs/redirects.txt index cea138f7237..ebeda125e01 100644 --- a/docs/redirects.txt +++ b/docs/redirects.txt @@ -1,453 +1,6 @@ -agg_functions/combinators.md query-language/agg-functions/combinators.md -agg_functions/index.md query-language/agg-functions/index.md -agg_functions/parametric_functions.md query-language/agg-functions/parametric-functions.md -agg_functions/reference.md query-language/agg-functions/reference.md -changelog/2017.md whats-new/changelog/2017.md -changelog/2018.md whats-new/changelog/2018.md -changelog/2019.md whats-new/changelog/2019.md -changelog/index.md whats-new/changelog/index.md -commercial/cloud.md https://clickhouse.com/cloud/ -data_types/array.md sql-reference/data-types/array.md -data_types/boolean.md sql-reference/data-types/boolean.md -data_types/date.md sql-reference/data-types/date.md -data_types/datetime.md sql-reference/data-types/datetime.md -data_types/datetime64.md sql-reference/data-types/datetime64.md -data_types/decimal.md sql-reference/data-types/decimal.md -data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md -data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md -data_types/domains/overview.md sql-reference/data-types/domains/overview.md -data_types/enum.md sql-reference/data-types/enum.md -data_types/fixedstring.md sql-reference/data-types/fixedstring.md -data_types/float.md sql-reference/data-types/float.md -data_types/index.md sql-reference/data-types/index.md -data_types/int_uint.md sql-reference/data-types/int-uint.md -data_types/nested_data_structures/aggregatefunction.md sql-reference/data-types/aggregatefunction.md -data_types/nested_data_structures/index.md sql-reference/data-types/nested-data-structures/index.md -data_types/nested_data_structures/nested.md sql-reference/data-types/nested-data-structures/nested.md -data_types/nullable.md sql-reference/data-types/nullable.md 
-data_types/special_data_types/expression.md sql-reference/data-types/special-data-types/expression.md -data_types/special_data_types/index.md sql-reference/data-types/special-data-types/index.md -data_types/special_data_types/interval.md sql-reference/data-types/special-data-types/interval.md -data_types/special_data_types/nothing.md sql-reference/data-types/special-data-types/nothing.md -data_types/special_data_types/set.md sql-reference/data-types/special-data-types/set.md -data_types/string.md sql-reference/data-types/string.md -data_types/tuple.md sql-reference/data-types/tuple.md -data_types/uuid.md sql-reference/data-types/uuid.md -database_engines/index.md engines/database-engines/index.md -database_engines/lazy.md engines/database-engines/lazy.md -database_engines/mysql.md engines/database-engines/mysql.md -development/browse_code.md development/browse-code.md -development/build_cross_arm.md development/build-cross-arm.md -development/build_cross_osx.md development/build-cross-osx.md -development/build_osx.md development/build-osx.md -development/developer_instruction.md development/developer-instruction.md -dicts/external_dicts.md query-language/dicts/external-dicts.md -dicts/external_dicts_dict.md query-language/dicts/external-dicts-dict.md -dicts/external_dicts_dict_layout.md query-language/dicts/external-dicts-dict-layout.md -dicts/external_dicts_dict_lifetime.md query-language/dicts/external-dicts-dict-lifetime.md -dicts/external_dicts_dict_sources.md query-language/dicts/external-dicts-dict-sources.md -dicts/external_dicts_dict_structure.md query-language/dicts/external-dicts-dict-structure.md -dicts/index.md query-language/dicts/index.md -dicts/internal_dicts.md query-language/dicts/internal-dicts.md -engines/database_engines/index.md engines/database-engines/index.md -engines/database_engines/lazy.md engines/database-engines/lazy.md -engines/database_engines/mysql.md engines/database-engines/mysql.md -engines/table-engines/log-family/log-family.md engines/table-engines/log-family/index.md -engines/table_engines/index.md engines/table-engines/index.md -engines/table_engines/integrations/hdfs.md engines/table-engines/integrations/hdfs.md -engines/table_engines/integrations/index.md engines/table-engines/integrations/index.md -engines/table_engines/integrations/jdbc.md engines/table-engines/integrations/jdbc.md -engines/table_engines/integrations/kafka.md engines/table-engines/integrations/kafka.md -engines/table_engines/integrations/mysql.md engines/table-engines/integrations/mysql.md -engines/table_engines/integrations/odbc.md engines/table-engines/integrations/odbc.md -engines/table_engines/log_family/index.md engines/table-engines/log-family/index.md -engines/table_engines/log_family/log.md engines/table-engines/log-family/log.md -engines/table_engines/log_family/log_family.md engines/table-engines/log-family/log-family.md -engines/table_engines/log_family/stripelog.md engines/table-engines/log-family/stripelog.md -engines/table_engines/log_family/tinylog.md engines/table-engines/log-family/tinylog.md -engines/table_engines/mergetree_family/aggregatingmergetree.md engines/table-engines/mergetree-family/aggregatingmergetree.md -engines/table_engines/mergetree_family/collapsingmergetree.md engines/table-engines/mergetree-family/collapsingmergetree.md -engines/table_engines/mergetree_family/custom_partitioning_key.md engines/table-engines/mergetree-family/custom-partitioning-key.md -engines/table_engines/mergetree_family/graphitemergetree.md 
engines/table-engines/mergetree-family/graphitemergetree.md -engines/table_engines/mergetree_family/index.md engines/table-engines/mergetree-family/index.md -engines/table_engines/mergetree_family/mergetree.md engines/table-engines/mergetree-family/mergetree.md -engines/table_engines/mergetree_family/replacingmergetree.md engines/table-engines/mergetree-family/replacingmergetree.md -engines/table_engines/mergetree_family/replication.md engines/table-engines/mergetree-family/replication.md -engines/table_engines/mergetree_family/summingmergetree.md engines/table-engines/mergetree-family/summingmergetree.md -engines/table_engines/mergetree_family/versionedcollapsingmergetree.md engines/table-engines/mergetree-family/versionedcollapsingmergetree.md -engines/table_engines/special/buffer.md engines/table-engines/special/buffer.md -engines/table_engines/special/dictionary.md engines/table-engines/special/dictionary.md -engines/table_engines/special/distributed.md engines/table-engines/special/distributed.md -engines/table_engines/special/external_data.md engines/table-engines/special/external-data.md -engines/table_engines/special/file.md engines/table-engines/special/file.md -engines/table_engines/special/generate.md engines/table-engines/special/generate.md -engines/table_engines/special/index.md engines/table-engines/special/index.md -engines/table_engines/special/join.md engines/table-engines/special/join.md -engines/table_engines/special/materializedview.md engines/table-engines/special/materializedview.md -engines/table_engines/special/memory.md engines/table-engines/special/memory.md -engines/table_engines/special/merge.md engines/table-engines/special/merge.md -engines/table_engines/special/null.md engines/table-engines/special/null.md -engines/table_engines/special/set.md engines/table-engines/special/set.md -engines/table_engines/special/url.md engines/table-engines/special/url.md -engines/table_engines/special/view.md engines/table-engines/special/view.md -extended_roadmap.md whats-new/extended-roadmap.md -formats.md interfaces/formats.md -formats/capnproto.md interfaces/formats.md -formats/csv.md interfaces/formats.md -formats/csvwithnames.md interfaces/formats.md -formats/json.md interfaces/formats.md -formats/jsoncompact.md interfaces/formats.md -formats/jsoneachrow.md interfaces/formats.md -formats/native.md interfaces/formats.md -formats/null.md interfaces/formats.md -formats/pretty.md interfaces/formats.md -formats/prettycompact.md interfaces/formats.md -formats/prettycompactmonoblock.md interfaces/formats.md -formats/prettynoescapes.md interfaces/formats.md -formats/prettyspace.md interfaces/formats.md -formats/rowbinary.md interfaces/formats.md -formats/tabseparated.md interfaces/formats.md -formats/tabseparatedraw.md interfaces/formats.md -formats/tabseparatedwithnames.md interfaces/formats.md -formats/tabseparatedwithnamesandtypes.md interfaces/formats.md -formats/tskv.md interfaces/formats.md -formats/values.md interfaces/formats.md -formats/vertical.md interfaces/formats.md -formats/verticalraw.md interfaces/formats.md -formats/xml.md interfaces/formats.md -functions/arithmetic_functions.md query-language/functions/arithmetic-functions.md -functions/array_functions.md query-language/functions/array-functions.md -functions/array_join.md query-language/functions/array-join.md -functions/bit_functions.md query-language/functions/bit-functions.md -functions/bitmap_functions.md query-language/functions/bitmap-functions.md -functions/comparison_functions.md 
query-language/functions/comparison-functions.md -functions/conditional_functions.md query-language/functions/conditional-functions.md -functions/date_time_functions.md query-language/functions/date-time-functions.md -functions/encoding_functions.md query-language/functions/encoding-functions.md -functions/ext_dict_functions.md query-language/functions/ext-dict-functions.md -functions/hash_functions.md query-language/functions/hash-functions.md -functions/higher_order_functions.md query-language/functions/higher-order-functions.md -functions/in_functions.md query-language/functions/in-functions.md -functions/index.md query-language/functions/index.md -functions/ip_address_functions.md query-language/functions/ip-address-functions.md -functions/json_functions.md query-language/functions/json-functions.md -functions/logical_functions.md query-language/functions/logical-functions.md -functions/math_functions.md query-language/functions/math-functions.md -functions/other_functions.md query-language/functions/other-functions.md -functions/random_functions.md query-language/functions/random-functions.md -functions/rounding_functions.md query-language/functions/rounding-functions.md -functions/splitting_merging_functions.md query-language/functions/splitting-merging-functions.md -functions/string_functions.md query-language/functions/string-functions.md -functions/string_replace_functions.md query-language/functions/string-replace-functions.md -functions/string_search_functions.md query-language/functions/string-search-functions.md -functions/type_conversion_functions.md query-language/functions/type-conversion-functions.md -functions/url_functions.md query-language/functions/url-functions.md -functions/ym_dict_functions.md query-language/functions/ym-dict-functions.md -getting_started/example_datasets/amplab_benchmark.md getting-started/example-datasets/amplab-benchmark.md -getting_started/example_datasets/criteo.md getting-started/example-datasets/criteo.md -getting_started/example_datasets/index.md getting-started/example-datasets/index.md -getting_started/example_datasets/metrica.md getting-started/example-datasets/metrica.md -getting_started/example_datasets/nyc_taxi.md getting-started/example-datasets/nyc-taxi.md -getting_started/example_datasets/ontime.md getting-started/example-datasets/ontime.md -getting_started/example_datasets/star_schema.md getting-started/example-datasets/star-schema.md -getting_started/example_datasets/wikistat.md getting-started/example-datasets/wikistat.md -getting_started/index.md getting-started/index.md -getting_started/install.md getting-started/install.md -getting_started/playground.md getting-started/playground.md -getting_started/tutorial.md getting-started/tutorial.md -images/column_oriented.gif images/column-oriented.gif -images/row_oriented.gif images/row-oriented.gif -interfaces/http_interface.md interfaces/http.md -interfaces/third-party/client_libraries.md interfaces/third-party/client-libraries.md -interfaces/third-party_client_libraries.md interfaces/third-party/client-libraries.md -interfaces/third-party_gui.md interfaces/third-party/gui.md -interfaces/third_party/index.md interfaces/third-party/index.md -introduction/index.md -introduction/distinctive_features.md introduction/distinctive-features.md -introduction/features_considered_disadvantages.md introduction/distinctive-features.md -introduction/possible_silly_questions.md faq/general.md -introduction/ya_metrika_task.md introduction/history.md -operations/access_rights.md 
operations/access-rights.md -operations/configuration_files.md operations/configuration-files.md -operations/optimizing_performance/index.md operations/optimizing-performance/index.md -operations/optimizing_performance/sampling_query_profiler.md operations/optimizing-performance/sampling-query-profiler.md -operations/performance/sampling_query_profiler.md operations/optimizing-performance/sampling-query-profiler.md -operations/performance_test.md operations/performance-test.md -operations/server_configuration_parameters/index.md operations/server-configuration-parameters/index.md -operations/server_configuration_parameters/settings.md operations/server-configuration-parameters/settings.md -operations/server_settings/index.md operations/server-configuration-parameters/index.md -operations/server_settings/settings.md operations/server-configuration-parameters/settings.md -operations/settings/constraints_on_settings.md operations/settings/constraints-on-settings.md -operations/settings/permissions_for_queries.md operations/settings/permissions-for-queries.md -operations/settings/query_complexity.md operations/settings/query-complexity.md -operations/settings/settings_profiles.md operations/settings/settings-profiles.md -operations/settings/settings_users.md operations/settings/settings-users.md -operations/system_tables.md operations/system-tables.md -operations/table_engines/aggregatingmergetree.md engines/table-engines/mergetree-family/aggregatingmergetree.md -operations/table_engines/buffer.md engines/table-engines/special/buffer.md -operations/table_engines/collapsingmergetree.md engines/table-engines/mergetree-family/collapsingmergetree.md -operations/table_engines/custom_partitioning_key.md engines/table-engines/mergetree-family/custom-partitioning-key.md -operations/table_engines/dictionary.md engines/table-engines/special/dictionary.md -operations/table_engines/distributed.md engines/table-engines/special/distributed.md -operations/table_engines/external_data.md engines/table-engines/special/external-data.md -operations/table_engines/file.md engines/table-engines/special/file.md -operations/table_engines/generate.md engines/table-engines/special/generate.md -operations/table_engines/graphitemergetree.md engines/table-engines/mergetree-family/graphitemergetree.md -operations/table_engines/hdfs.md engines/table-engines/integrations/hdfs.md -operations/table_engines/index.md engines/table-engines/index.md -operations/table_engines/jdbc.md engines/table-engines/integrations/jdbc.md -operations/table_engines/join.md engines/table-engines/special/join.md -operations/table_engines/kafka.md engines/table-engines/integrations/kafka.md -operations/table_engines/log.md engines/table-engines/log-family/log.md -operations/table_engines/log_family.md engines/table-engines/log-family/log-family.md -operations/table_engines/materializedview.md engines/table-engines/special/materializedview.md -operations/table_engines/memory.md engines/table-engines/special/memory.md -operations/table_engines/merge.md engines/table-engines/special/merge.md -operations/table_engines/mergetree.md engines/table-engines/mergetree-family/mergetree.md -operations/table_engines/mysql.md engines/table-engines/integrations/mysql.md -operations/table_engines/null.md engines/table-engines/special/null.md -operations/table_engines/odbc.md engines/table-engines/integrations/odbc.md -operations/table_engines/replacingmergetree.md engines/table-engines/mergetree-family/replacingmergetree.md -operations/table_engines/replication.md 
engines/table-engines/mergetree-family/replication.md -operations/table_engines/set.md engines/table-engines/special/set.md -operations/table_engines/stripelog.md engines/table-engines/log-family/stripelog.md -operations/table_engines/summingmergetree.md engines/table-engines/mergetree-family/summingmergetree.md -operations/table_engines/tinylog.md engines/table-engines/log-family/tinylog.md -operations/table_engines/url.md engines/table-engines/special/url.md -operations/table_engines/versionedcollapsingmergetree.md engines/table-engines/mergetree-family/versionedcollapsingmergetree.md -operations/table_engines/view.md engines/table-engines/special/view.md -operations/utils/clickhouse-benchmark.md operations/utilities/clickhouse-benchmark.md -operations/utils/clickhouse-copier.md operations/utilities/clickhouse-copier.md -operations/utils/clickhouse-local.md operations/utilities/clickhouse-local.md -operations/utils/index.md operations/utilities/index.md -query_language/agg_functions/combinators.md sql-reference/aggregate-functions/combinators.md -query_language/agg_functions/index.md sql-reference/aggregate-functions/index.md -query_language/agg_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md -query_language/agg_functions/reference.md sql-reference/aggregate-functions/reference.md -query_language/alter.md sql-reference/statements/alter.md -query_language/create.md sql-reference/statements/create.md -query_language/dicts/external_dicts.md sql-reference/dictionaries/external-dictionaries/external-dicts.md -query_language/dicts/external_dicts_dict.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md -query_language/dicts/external_dicts_dict_hierarchical.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md -query_language/dicts/external_dicts_dict_layout.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md -query_language/dicts/external_dicts_dict_lifetime.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md -query_language/dicts/external_dicts_dict_sources.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md -query_language/dicts/external_dicts_dict_structure.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md -query_language/dicts/index.md sql-reference/dictionaries/index.md -query_language/dicts/internal_dicts.md sql-reference/dictionaries/internal-dicts.md -query_language/functions/arithmetic_functions.md sql-reference/functions/arithmetic-functions.md -query_language/functions/array_functions.md sql-reference/functions/array-functions.md -query_language/functions/array_join.md sql-reference/functions/array-join.md -query_language/functions/bit_functions.md sql-reference/functions/bit-functions.md -query_language/functions/bitmap_functions.md sql-reference/functions/bitmap-functions.md -query_language/functions/comparison_functions.md sql-reference/functions/comparison-functions.md -query_language/functions/conditional_functions.md sql-reference/functions/conditional-functions.md -query_language/functions/date_time_functions.md sql-reference/functions/date-time-functions.md -query_language/functions/encoding_functions.md sql-reference/functions/encoding-functions.md -query_language/functions/ext_dict_functions.md sql-reference/functions/ext-dict-functions.md -query_language/functions/functions_for_nulls.md sql-reference/functions/functions-for-nulls.md 
-query_language/functions/geo.md sql-reference/functions/geo.md -query_language/functions/hash_functions.md sql-reference/functions/hash-functions.md -query_language/functions/higher_order_functions.md sql-reference/functions/higher-order-functions.md -query_language/functions/in_functions.md sql-reference/functions/in-functions.md -query_language/functions/index.md sql-reference/functions/index.md -query_language/functions/introspection.md sql-reference/functions/introspection.md -query_language/functions/ip_address_functions.md sql-reference/functions/ip-address-functions.md -query_language/functions/json_functions.md sql-reference/functions/json-functions.md -query_language/functions/logical_functions.md sql-reference/functions/logical-functions.md -query_language/functions/machine_learning_functions.md sql-reference/functions/machine-learning-functions.md -query_language/functions/math_functions.md sql-reference/functions/math-functions.md -query_language/functions/other_functions.md sql-reference/functions/other-functions.md -query_language/functions/random_functions.md sql-reference/functions/random-functions.md -query_language/functions/rounding_functions.md sql-reference/functions/rounding-functions.md -query_language/functions/splitting_merging_functions.md sql-reference/functions/splitting-merging-functions.md -query_language/functions/string_functions.md sql-reference/functions/string-functions.md -query_language/functions/string_replace_functions.md sql-reference/functions/string-replace-functions.md -query_language/functions/string_search_functions.md sql-reference/functions/string-search-functions.md -query_language/functions/type_conversion_functions.md sql-reference/functions/type-conversion-functions.md -query_language/functions/url_functions.md sql-reference/functions/url-functions.md -query_language/functions/uuid_functions.md sql-reference/functions/uuid-functions.md -query_language/functions/ym_dict_functions.md sql-reference/functions/ym-dict-functions.md -query_language/index.md sql-reference/index.md -query_language/insert_into.md sql-reference/statements/insert-into.md -query_language/misc.md sql-reference/statements/misc.md -query_language/operators.md sql-reference/operators.md -query_language/queries.md query-language.md -query_language/select.md sql-reference/statements/select.md -query_language/show.md sql-reference/statements/show.md -query_language/syntax.md sql-reference/syntax.md -query_language/system.md sql-reference/statements/system.md -query_language/table_functions/file.md sql-reference/table-functions/file.md -query_language/table_functions/generate.md sql-reference/table-functions/generate.md -query_language/table_functions/hdfs.md sql-reference/table-functions/hdfs.md -query_language/table_functions/index.md sql-reference/table-functions/index.md -query_language/table_functions/input.md sql-reference/table-functions/input.md -query_language/table_functions/jdbc.md sql-reference/table-functions/jdbc.md -query_language/table_functions/merge.md sql-reference/table-functions/merge.md -query_language/table_functions/mysql.md sql-reference/table-functions/mysql.md -query_language/table_functions/numbers.md sql-reference/table-functions/numbers.md -query_language/table_functions/odbc.md sql-reference/table-functions/odbc.md -query_language/table_functions/remote.md sql-reference/table-functions/remote.md -query_language/table_functions/url.md sql-reference/table-functions/url.md -roadmap.md whats-new/roadmap.md -security_changelog.md 
whats-new/security-changelog.md -sql-reference/data-types/domains/overview.md sql-reference/data-types/domains/index.md -sql_reference/aggregate_functions/combinators.md sql-reference/aggregate-functions/combinators.md -sql_reference/aggregate_functions/index.md sql-reference/aggregate-functions/index.md -sql_reference/aggregate_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md -sql_reference/aggregate_functions/reference.md sql-reference/aggregate-functions/reference.md -sql_reference/ansi.md sql-reference/ansi.md -sql_reference/data_types/aggregatefunction.md sql-reference/data-types/aggregatefunction.md -sql_reference/data_types/array.md sql-reference/data-types/array.md -sql_reference/data_types/boolean.md sql-reference/data-types/boolean.md -sql_reference/data_types/date.md sql-reference/data-types/date.md -sql_reference/data_types/datetime.md sql-reference/data-types/datetime.md -sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md -sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md -sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md -sql_reference/data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md -sql_reference/data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md -sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md -sql_reference/data_types/enum.md sql-reference/data-types/enum.md -sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md -sql_reference/data_types/float.md sql-reference/data-types/float.md -sql_reference/data_types/index.md sql-reference/data-types/index.md -sql_reference/data_types/int_uint.md sql-reference/data-types/int-uint.md -sql_reference/data_types/nested_data_structures/index.md sql-reference/data-types/nested-data-structures/index.md -sql_reference/data_types/nested_data_structures/nested.md sql-reference/data-types/nested-data-structures/nested.md -sql_reference/data_types/nullable.md sql-reference/data-types/nullable.md -sql_reference/data_types/simpleaggregatefunction.md sql-reference/data-types/simpleaggregatefunction.md -sql_reference/data_types/special_data_types/expression.md sql-reference/data-types/special-data-types/expression.md -sql_reference/data_types/special_data_types/index.md sql-reference/data-types/special-data-types/index.md -sql_reference/data_types/special_data_types/interval.md sql-reference/data-types/special-data-types/interval.md -sql_reference/data_types/special_data_types/nothing.md sql-reference/data-types/special-data-types/nothing.md -sql_reference/data_types/special_data_types/set.md sql-reference/data-types/special-data-types/set.md -sql_reference/data_types/string.md sql-reference/data-types/string.md -sql_reference/data_types/tuple.md sql-reference/data-types/tuple.md -sql_reference/data_types/uuid.md sql-reference/data-types/uuid.md -sql_reference/dictionaries/external_dictionaries/external_dicts.md sql-reference/dictionaries/external-dictionaries/external-dicts.md -sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md -sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md -sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md 
sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md -sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md -sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md -sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md -sql_reference/dictionaries/external_dictionaries/index.md sql-reference/dictionaries/external-dictionaries/index.md -sql_reference/dictionaries/index.md sql-reference/dictionaries/index.md -sql_reference/dictionaries/internal_dicts.md sql-reference/dictionaries/internal-dicts.md -sql_reference/functions/arithmetic_functions.md sql-reference/functions/arithmetic-functions.md -sql_reference/functions/array_functions.md sql-reference/functions/array-functions.md -sql_reference/functions/array_join.md sql-reference/functions/array-join.md -sql_reference/functions/bit_functions.md sql-reference/functions/bit-functions.md -sql_reference/functions/bitmap_functions.md sql-reference/functions/bitmap-functions.md -sql_reference/functions/comparison_functions.md sql-reference/functions/comparison-functions.md -sql_reference/functions/conditional_functions.md sql-reference/functions/conditional-functions.md -sql_reference/functions/date_time_functions.md sql-reference/functions/date-time-functions.md -sql_reference/functions/encoding_functions.md sql-reference/functions/encoding-functions.md -sql_reference/functions/ext_dict_functions.md sql-reference/functions/ext-dict-functions.md -sql_reference/functions/functions_for_nulls.md sql-reference/functions/functions-for-nulls.md -sql_reference/functions/geo.md sql-reference/functions/geo.md -sql_reference/functions/hash_functions.md sql-reference/functions/hash-functions.md -sql_reference/functions/higher_order_functions.md sql-reference/functions/higher-order-functions.md -sql_reference/functions/in_functions.md sql-reference/functions/in-functions.md -sql_reference/functions/index.md sql-reference/functions/index.md -sql_reference/functions/introspection.md sql-reference/functions/introspection.md -sql_reference/functions/ip_address_functions.md sql-reference/functions/ip-address-functions.md -sql_reference/functions/json_functions.md sql-reference/functions/json-functions.md -sql_reference/functions/logical_functions.md sql-reference/functions/logical-functions.md -sql_reference/functions/machine_learning_functions.md sql-reference/functions/machine-learning-functions.md -sql_reference/functions/math_functions.md sql-reference/functions/math-functions.md -sql_reference/functions/other_functions.md sql-reference/functions/other-functions.md -sql_reference/functions/random_functions.md sql-reference/functions/random-functions.md -sql_reference/functions/rounding_functions.md sql-reference/functions/rounding-functions.md -sql_reference/functions/splitting_merging_functions.md sql-reference/functions/splitting-merging-functions.md -sql_reference/functions/string_functions.md sql-reference/functions/string-functions.md -sql_reference/functions/string_replace_functions.md sql-reference/functions/string-replace-functions.md -sql_reference/functions/string_search_functions.md sql-reference/functions/string-search-functions.md -sql_reference/functions/type_conversion_functions.md 
sql-reference/functions/type-conversion-functions.md -sql_reference/functions/url_functions.md sql-reference/functions/url-functions.md -sql_reference/functions/uuid_functions.md sql-reference/functions/uuid-functions.md -sql_reference/functions/ym_dict_functions.md sql-reference/functions/ym-dict-functions.md -sql_reference/index.md sql-reference/index.md -sql_reference/operators.md sql-reference/operators.md -sql_reference/statements/alter.md sql-reference/statements/alter.md -sql_reference/statements/create.md sql-reference/statements/create.md -sql_reference/statements/index.md sql-reference/statements/index.md -sql_reference/statements/insert_into.md sql-reference/statements/insert-into.md -sql_reference/statements/misc.md sql-reference/statements/misc.md -sql_reference/statements/select.md sql-reference/statements/select.md -sql_reference/statements/show.md sql-reference/statements/show.md -sql_reference/statements/system.md sql-reference/statements/system.md -sql_reference/syntax.md sql-reference/syntax.md -sql_reference/table_functions/file.md sql-reference/table-functions/file.md -sql_reference/table_functions/generate.md sql-reference/table-functions/generate.md -sql_reference/table_functions/hdfs.md sql-reference/table-functions/hdfs.md -sql_reference/table_functions/index.md sql-reference/table-functions/index.md -sql_reference/table_functions/input.md sql-reference/table-functions/input.md -sql_reference/table_functions/jdbc.md sql-reference/table-functions/jdbc.md -sql_reference/table_functions/merge.md sql-reference/table-functions/merge.md -sql_reference/table_functions/mysql.md sql-reference/table-functions/mysql.md -sql_reference/table_functions/numbers.md sql-reference/table-functions/numbers.md -sql_reference/table_functions/odbc.md sql-reference/table-functions/odbc.md -sql_reference/table_functions/remote.md sql-reference/table-functions/remote.md -sql_reference/table_functions/url.md sql-reference/table-functions/url.md -system_tables.md operations/system-tables.md -system_tables/system.asynchronous_metrics.md operations/system-tables.md -system_tables/system.clusters.md operations/system-tables.md -system_tables/system.columns.md operations/system-tables.md -system_tables/system.databases.md operations/system-tables.md -system_tables/system.dictionaries.md operations/system-tables.md -system_tables/system.events.md operations/system-tables.md -system_tables/system.functions.md operations/system-tables.md -system_tables/system.merges.md operations/system-tables.md -system_tables/system.metrics.md operations/system-tables.md -system_tables/system.numbers.md operations/system-tables.md -system_tables/system.numbers_mt.md operations/system-tables.md -system_tables/system.one.md operations/system-tables.md -system_tables/system.parts.md operations/system-tables.md -system_tables/system.processes.md operations/system-tables.md -system_tables/system.replicas.md operations/system-tables.md -system_tables/system.settings.md operations/system-tables.md -system_tables/system.tables.md operations/system-tables.md -system_tables/system.zookeeper.md operations/system-tables.md -table_engines.md operations/table-engines.md -table_engines/aggregatingmergetree.md operations/table-engines/aggregatingmergetree.md -table_engines/buffer.md operations/table-engines/buffer.md -table_engines/collapsingmergetree.md operations/table-engines/collapsingmergetree.md -table_engines/custom_partitioning_key.md operations/table-engines/custom-partitioning-key.md -table_engines/dictionary.md 
operations/table-engines/dictionary.md -table_engines/distributed.md operations/table-engines/distributed.md -table_engines/external_data.md operations/table-engines/external-data.md -table_engines/file.md operations/table-engines/file.md -table_engines/graphitemergetree.md operations/table-engines/graphitemergetree.md -table_engines/index.md operations/table-engines/index.md -table_engines/join.md operations/table-engines/join.md -table_engines/kafka.md operations/table-engines/kafka.md -table_engines/log.md operations/table-engines/log.md -table_engines/materializedview.md operations/table-engines/materializedview.md -table_engines/memory.md operations/table-engines/memory.md -table_engines/merge.md operations/table-engines/merge.md -table_engines/mergetree.md operations/table-engines/mergetree.md -table_engines/mysql.md operations/table-engines/mysql.md -table_engines/null.md operations/table-engines/null.md -table_engines/replacingmergetree.md operations/table-engines/replacingmergetree.md -table_engines/replication.md operations/table-engines/replication.md -table_engines/set.md operations/table-engines/set.md -table_engines/summingmergetree.md operations/table-engines/summingmergetree.md -table_engines/tinylog.md operations/table-engines/tinylog.md -table_engines/view.md operations/table-engines/view.md -table_functions/file.md query-language/table-functions/file.md -table_functions/index.md query-language/table-functions/index.md -table_functions/merge.md query-language/table-functions/merge.md -table_functions/numbers.md query-language/table-functions/numbers.md -table_functions/remote.md query-language/table-functions/remote.md -utils.md operations/utils.md -utils/clickhouse-copier.md operations/utils/clickhouse-copier.md -utils/clickhouse-local.md operations/utils/clickhouse-local.md -whats_new/changelog/2017.md whats-new/changelog/2017.md -whats_new/changelog/2018.md whats-new/changelog/2018.md -whats_new/changelog/2019.md whats-new/changelog/2019.md -whats_new/changelog/index.md whats-new/changelog/index.md -whats_new/index.md whats-new/index.md -whats_new/roadmap.md whats-new/roadmap.md -whats_new/security_changelog.md whats-new/security-changelog.md +The redirects from this file were moved to the Docusaurus configuration file. +If you need to add a redirect please either open a PR in +https://github.com/clickhouse/clickhouse-docs adding the redirect to +https://github.com/ClickHouse/clickhouse-docs/blob/main/docusaurus.config.js +or open an issue in the same repo and provide the old URL and new URL to have +the redirect added. diff --git a/docs/ru/operations/system-tables/query_log.md b/docs/ru/operations/system-tables/query_log.md index a55528bd829..8f858c14fb1 100644 --- a/docs/ru/operations/system-tables/query_log.md +++ b/docs/ru/operations/system-tables/query_log.md @@ -69,11 +69,11 @@ ClickHouse не удаляет данные из таблица автомати - 0 — запрос был инициирован другим запросом при выполнении распределенного запроса. - `user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший текущий запрос. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID запроса. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел запрос. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел запрос. 
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт, с которого клиент сделал запрос - `initial_user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший первоначальный запрос (для распределенных запросов). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID родительского запроса. -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел родительский запрос. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел родительский запрос. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт, с которого клиент сделал родительский запрос. - `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала обработки запроса (для распределенных запросов). - `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время начала обработки запроса с точностью до микросекунд (для распределенных запросов). diff --git a/docs/ru/operations/system-tables/query_thread_log.md b/docs/ru/operations/system-tables/query_thread_log.md index c9aabb02cad..1a256e1657a 100644 --- a/docs/ru/operations/system-tables/query_thread_log.md +++ b/docs/ru/operations/system-tables/query_thread_log.md @@ -39,11 +39,11 @@ ClickHouse не удаляет данные из таблицы автомати - 0 — запрос был инициирован другим запросом при распределенном запросе. - `user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший текущий запрос. - `query_id` ([String](../../sql-reference/data-types/string.md)) — ID запроса. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел запрос. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел запрос. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — порт, с которого пришел запрос. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший первоначальный запрос (для распределенных запросов). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID родительского запроса. -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел родительский запрос. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел родительский запрос. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — порт, пришел родительский запрос. - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — интерфейс, с которого ушёл запрос. Возможные значения: - 1 — TCP. diff --git a/docs/ru/operations/system-tables/session_log.md b/docs/ru/operations/system-tables/session_log.md index 1f313e7815a..5849cb51ab4 100644 --- a/docs/ru/operations/system-tables/session_log.md +++ b/docs/ru/operations/system-tables/session_log.md @@ -27,7 +27,7 @@ slug: /ru/operations/system-tables/session_log - `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — список профилей, установленных для всех ролей и (или) пользователей. - `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — список ролей, к которым применяется данный профиль. 
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — настройки, которые были изменены при входе или выходе клиента из системы. -- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP-адрес, который использовался для входа или выхода из системы. +- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP-адрес, который использовался для входа или выхода из системы. - `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт клиента, который использовался для входа или выхода из системы. - `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — интерфейс, с которого был инициирован вход в систему. Возможные значения: - `TCP` diff --git a/docs/ru/operations/system-tables/zookeeper_log.md b/docs/ru/operations/system-tables/zookeeper_log.md index ccbdd5110ad..9874cb3a269 100644 --- a/docs/ru/operations/system-tables/zookeeper_log.md +++ b/docs/ru/operations/system-tables/zookeeper_log.md @@ -15,7 +15,7 @@ slug: /ru/operations/system-tables/zookeeper_log - `Finalize` — соединение разорвано, ответ не получен. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата, когда произошло событие. - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — дата и время, когда произошло событие. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес сервера ZooKeeper, с которого был сделан запрос. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес сервера ZooKeeper, с которого был сделан запрос. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт сервера ZooKeeper, с которого был сделан запрос. - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — идентификатор сессии, который сервер ZooKeeper создает для каждого соединения. - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — идентификатор запроса внутри сессии. Обычно это последовательный номер запроса, одинаковый у строки запроса и у парной строки `response`/`finalize`. diff --git a/docs/ru/sql-reference/data-types/domains/ipv4.md b/docs/ru/sql-reference/data-types/ipv4.md similarity index 56% rename from docs/ru/sql-reference/data-types/domains/ipv4.md rename to docs/ru/sql-reference/data-types/ipv4.md index 57a19e282ae..5cb977c64c9 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv4.md +++ b/docs/ru/sql-reference/data-types/ipv4.md @@ -1,12 +1,12 @@ --- -slug: /ru/sql-reference/data-types/domains/ipv4 +slug: /ru/sql-reference/data-types/ipv4 sidebar_position: 59 sidebar_label: IPv4 --- ## IPv4 {#ipv4} -`IPv4` — это домен, базирующийся на типе данных `UInt32` предназначенный для хранения адресов IPv4. Он обеспечивает компактное хранение данных с удобным для человека форматом ввода-вывода, и явно отображаемым типом данных в структуре таблицы. +IPv4-адреса. Хранится в 4 байтах как UInt32. ### Применение {#primenenie} @@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1; └──────────────────┴───────────┘ ``` -Значения с доменным типом данных не преобразуются неявно в другие типы данных, кроме `UInt32`. -Если необходимо преобразовать значение типа `IPv4` в строку, то это необходимо делать явно с помощью функции `IPv4NumToString()`: +**См. 
также** -``` sql -SELECT toTypeName(s), IPv4NumToString(from) AS s FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐ -│ String │ 183.247.232.58 │ -└───────────────────────────────────┴────────────────┘ -``` - -Или приводить к типу данных `UInt32`: - -``` sql -SELECT toTypeName(i), CAST(from AS UInt32) AS i FROM hits LIMIT 1; -``` - -``` text -┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐ -│ UInt32 │ 3086477370 │ -└──────────────────────────────────┴────────────┘ -``` +- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md) diff --git a/docs/ru/sql-reference/data-types/domains/ipv6.md b/docs/ru/sql-reference/data-types/ipv6.md similarity index 98% rename from docs/ru/sql-reference/data-types/domains/ipv6.md rename to docs/ru/sql-reference/data-types/ipv6.md index fdfb26f68c1..808068ce90a 100644 --- a/docs/ru/sql-reference/data-types/domains/ipv6.md +++ b/docs/ru/sql-reference/data-types/ipv6.md @@ -1,5 +1,5 @@ --- -slug: /ru/sql-reference/data-types/domains/ipv6 +slug: /ru/sql-reference/data-types/ipv6 sidebar_position: 60 sidebar_label: IPv6 --- diff --git a/docs/ru/sql-reference/functions/ext-dict-functions.md b/docs/ru/sql-reference/functions/ext-dict-functions.md index e6cb878d1c7..d14f0ddf027 100644 --- a/docs/ru/sql-reference/functions/ext-dict-functions.md +++ b/docs/ru/sql-reference/functions/ext-dict-functions.md @@ -5,7 +5,7 @@ sidebar_label: "Функции для работы с внешними слов --- :::note "Внимание" - Для словарей, созданных с помощью [DDL-запросов](../../sql-reference/statements/create/dictionary.md), в параметре `dict_name` указывается полное имя словаря вместе с базой данных, например: `.`. Если база данных не указана, используется текущая. +Для словарей, созданных с помощью [DDL-запросов](../../sql-reference/statements/create/dictionary.md), в параметре `dict_name` указывается полное имя словаря вместе с базой данных, например: `.`. Если база данных не указана, используется текущая. ::: # Функции для работы с внешними словарями {#ext_dict_functions} diff --git a/docs/ru/sql-reference/functions/ip-address-functions.md b/docs/ru/sql-reference/functions/ip-address-functions.md index 96d4b737c88..d1a72b82b67 100644 --- a/docs/ru/sql-reference/functions/ip-address-functions.md +++ b/docs/ru/sql-reference/functions/ip-address-functions.md @@ -265,7 +265,7 @@ SELECT ## toIPv6 {#toipv6string} -Приводит строку с адресом в формате IPv6 к типу [IPv6](../../sql-reference/data-types/domains/ipv6.md). Возвращает пустое значение, если входящая строка не является корректным IP адресом. +Приводит строку с адресом в формате IPv6 к типу [IPv6](../../sql-reference/data-types/ipv6.md). Возвращает пустое значение, если входящая строка не является корректным IP адресом. Похоже на функцию [IPv6StringToNum](#ipv6stringtonums), которая представляет адрес IPv6 в двоичном виде. Если входящая строка содержит корректный IPv4 адрес, функция возвращает его IPv6 эквивалент. @@ -284,7 +284,7 @@ toIPv6(string) - IP адрес. -Тип: [IPv6](../../sql-reference/data-types/domains/ipv6.md). +Тип: [IPv6](../../sql-reference/data-types/ipv6.md). **Примеры** diff --git a/docs/zh/operations/system-tables/query_log.md b/docs/zh/operations/system-tables/query_log.md index 7149282dfcc..0ba669906cb 100644 --- a/docs/zh/operations/system-tables/query_log.md +++ b/docs/zh/operations/system-tables/query_log.md @@ -60,11 +60,11 @@ ClickHouse不会自动从表中删除数据。更多详情请看 [introduction]( - 0 — 由另一个查询发起的,作为分布式查询的一部分. 
- `user` ([String](../../sql-reference/data-types/string.md)) — 发起查询的用户. - `query_id` ([String](../../sql-reference/data-types/string.md)) — 查询ID. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的客户端IP地址. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起查询的客户端IP地址. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起查询的客户端端口. - `initial_user` ([String](../../sql-reference/data-types/string.md)) — 初始查询的用户名(用于分布式查询执行). - `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — 运行初始查询的ID(用于分布式查询执行). -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 运行父查询的IP地址. +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 运行父查询的IP地址. - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起父查询的客户端端口. - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — 发起查询的接口. 可能的值: - 1 — TCP. diff --git a/docs/zh/operations/system-tables/query_thread_log.md b/docs/zh/operations/system-tables/query_thread_log.md index 8a41c1501a6..c4b7e2f1043 100644 --- a/docs/zh/operations/system-tables/query_thread_log.md +++ b/docs/zh/operations/system-tables/query_thread_log.md @@ -36,11 +36,11 @@ ClickHouse不会自动从表中删除数据。 欲了解更多详情,请参照 - 0 — 由其他查询发起的分布式查询。 - `user` ([字符串](../../sql-reference/data-types/string.md)) — 发起查询的用户名。 - `query_id` ([字符串](../../sql-reference/data-types/string.md)) — 查询的ID。 -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的IP地址。 +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起查询的IP地址。 - `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的端口。 - `initial_user` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的用户名(对于分布式查询)。 - `initial_query_id` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的ID(对于分布式查询)。 -- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起该查询的父查询IP地址。 +- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起该查询的父查询IP地址。 - `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起该查询的父查询端口。 - `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的界面,可能的值: - 1 — TCP. diff --git a/docs/zh/operations/system-tables/zookeeper_log.md b/docs/zh/operations/system-tables/zookeeper_log.md index 59dcdaecdc1..ebc51a2e79d 100644 --- a/docs/zh/operations/system-tables/zookeeper_log.md +++ b/docs/zh/operations/system-tables/zookeeper_log.md @@ -15,7 +15,7 @@ slug: /zh/operations/system-tables/zookeeper_log - `Finalize` — 连接丢失, 未收到响应. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — 事件发生的日期. - `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 事件发生的日期和时间. -- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 用于发出请求的 ZooKeeper 服务器的 IP 地址. +- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 用于发出请求的 ZooKeeper 服务器的 IP 地址. - `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 用于发出请求的 ZooKeeper 服务器的端口. - `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — ZooKeeper 服务器为每个连接设置的会话 ID. - `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — 会话中请求的 ID. 这通常是一个连续的请求编号. 请求行和配对的 `response`/`finalize` 行相同. 
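The ru/zh documentation hunks above and below all repoint `IPv4`/`IPv6` links from the old `domains/` pages to the new top-level data-type pages, matching the rewritten ru page ("IPv4-адреса. Хранится в 4 байтах как UInt32." — IPv4 addresses, stored in 4 bytes as UInt32). As a minimal sketch of what the relocated pages describe — the table and column names here are illustrative, not taken from the diff:

```sql
-- IPv4 and IPv6 as first-class data types (formerly documented under domains/).
CREATE TABLE ip_demo
(
    v4 IPv4,
    v6 IPv6
)
ENGINE = MergeTree
ORDER BY tuple();

INSERT INTO ip_demo VALUES ('116.253.40.133', '2001:44c8:129:2632:33:0:252:2');

-- As the ru toIPv6 hunk notes, a valid IPv4 string converts to its IPv6 equivalent.
SELECT v4, v6, toIPv6('192.168.0.1') AS v4_mapped FROM ip_demo;
```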
diff --git a/docs/zh/sql-reference/data-types/domains/ipv4.md b/docs/zh/sql-reference/data-types/ipv4.md similarity index 98% rename from docs/zh/sql-reference/data-types/domains/ipv4.md rename to docs/zh/sql-reference/data-types/ipv4.md index 69e17b2f617..b89af974b87 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv4.md +++ b/docs/zh/sql-reference/data-types/ipv4.md @@ -1,5 +1,5 @@ --- -slug: /zh/sql-reference/data-types/domains/ipv4 +slug: /zh/sql-reference/data-types/ipv4 --- ## IPv4 {#ipv4} diff --git a/docs/zh/sql-reference/data-types/domains/ipv6.md b/docs/zh/sql-reference/data-types/ipv6.md similarity index 98% rename from docs/zh/sql-reference/data-types/domains/ipv6.md rename to docs/zh/sql-reference/data-types/ipv6.md index 9dd88692c37..3896bb873d8 100644 --- a/docs/zh/sql-reference/data-types/domains/ipv6.md +++ b/docs/zh/sql-reference/data-types/ipv6.md @@ -1,5 +1,5 @@ --- -slug: /zh/sql-reference/data-types/domains/ipv6 +slug: /zh/sql-reference/data-types/ipv6 --- ## IPv6 {#ipv6} diff --git a/programs/copier/ShardPartitionPiece.h b/programs/copier/ShardPartitionPiece.h index aba378d466d..453364c0fc8 100644 --- a/programs/copier/ShardPartitionPiece.h +++ b/programs/copier/ShardPartitionPiece.h @@ -2,6 +2,8 @@ #include +#include + namespace DB { diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp index cc32b46c6e7..57760b5ebbf 100644 --- a/programs/keeper/Keeper.cpp +++ b/programs/keeper/Keeper.cpp @@ -224,12 +224,12 @@ struct Keeper::KeeperHTTPContext : public IHTTPContext uint64_t getMaxFieldNameSize() const override { - return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 1048576); + return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 128 * 1024); } uint64_t getMaxFieldValueSize() const override { - return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 1048576); + return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 128 * 1024); } uint64_t getMaxChunkSize() const override diff --git a/programs/server/config.xml b/programs/server/config.xml index d18b4cb2ac9..2f69f23a718 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -1543,12 +1543,12 @@ --> - - - - - - + + 1073741824 + 1024 + 1048576 + 30000000 + + executable_not_found + CSV + + + + 86400 + 90000 + + + + + + + input + + + result + String + + + + + diff --git a/tests/integration/test_executable_dictionary/test.py b/tests/integration/test_executable_dictionary/test.py index 43e6ec0a800..22f3442bb95 100644 --- a/tests/integration/test_executable_dictionary/test.py +++ b/tests/integration/test_executable_dictionary/test.py @@ -217,33 +217,21 @@ def test_executable_implicit_input_argument_python(started_cluster): def test_executable_input_signalled_python(started_cluster): skip_test_msan(node) - assert ( - node.query( - "SELECT dictGet('executable_input_signalled_python', 'result', toUInt64(1))" - ) - == "Default result\n" + assert node.query_and_get_error( + "SELECT dictGet('executable_input_signalled_python', 'result', toUInt64(1))" ) - assert ( - node.query( - "SELECT dictGet('executable_input_signalled_pool_python', 'result', toUInt64(1))" - ) - == "Default result\n" + assert node.query_and_get_error( + "SELECT dictGet('executable_input_signalled_pool_python', 'result', toUInt64(1))" ) def test_executable_implicit_input_signalled_python(started_cluster): skip_test_msan(node) - assert ( - node.query( - "SELECT 
dictGet('executable_implicit_input_signalled_python', 'result', toUInt64(1))" - ) - == "Default result\n" + assert node.query_and_get_error( + "SELECT dictGet('executable_implicit_input_signalled_python', 'result', toUInt64(1))" ) - assert ( - node.query( - "SELECT dictGet('executable_implicit_input_signalled_pool_python', 'result', toUInt64(1))" - ) - == "Default result\n" + assert node.query_and_get_error( + "SELECT dictGet('executable_implicit_input_signalled_pool_python', 'result', toUInt64(1))" ) @@ -482,3 +470,23 @@ def test_executable_source_updated_python(started_cluster): ) == "Value 1 1\n" ) + + +def test_executable_source_exit_code_check(started_cluster): + skip_test_msan(node) + assert "DB::Exception" in node.query_and_get_error( + "SELECT * FROM dictionary(executable_input_missing_executable) ORDER BY input" + ) + assert "DB::Exception" in node.query_and_get_error( + "SELECT dictGet('executable_input_missing_executable', 'result', toUInt64(1))" + ) + + assert ( + node.query( + "SELECT status FROM system.dictionaries WHERE name='executable_input_missing_executable'" + ) + == "FAILED\n" + ) + assert "DB::Exception" in node.query( + "SELECT last_exception FROM system.dictionaries WHERE name='executable_input_missing_executable'" + ) diff --git a/tests/integration/test_jbod_balancer/test.py b/tests/integration/test_jbod_balancer/test.py index df34a075d5a..4797eec5381 100644 --- a/tests/integration/test_jbod_balancer/test.py +++ b/tests/integration/test_jbod_balancer/test.py @@ -134,6 +134,7 @@ def test_replicated_balanced_merge_fetch(start_cluster): old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 2, + cleanup_thread_preferred_points_per_iteration=0, min_bytes_to_rebalance_partition_over_jbod = 1024, max_bytes_to_merge_at_max_space_in_pool = 4096 """.format( diff --git a/tests/integration/test_jbod_ha/test.py b/tests/integration/test_jbod_ha/test.py index 5cbb5989ff3..033d751912a 100644 --- a/tests/integration/test_jbod_ha/test.py +++ b/tests/integration/test_jbod_ha/test.py @@ -58,6 +58,7 @@ def test_jbod_ha(start_cluster): old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 2, + cleanup_thread_preferred_points_per_iteration=0, max_bytes_to_merge_at_max_space_in_pool = 4096 """.format( i diff --git a/tests/integration/test_lost_part/test.py b/tests/integration/test_lost_part/test.py index dd4c2105d55..0bc24268040 100644 --- a/tests/integration/test_lost_part/test.py +++ b/tests/integration/test_lost_part/test.py @@ -42,7 +42,8 @@ def test_lost_part_same_replica(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{node.name}') ORDER BY tuple() PARTITION BY date " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt0") @@ -109,7 +110,8 @@ def test_lost_part_other_replica(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{node.name}') ORDER BY tuple() " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, 
max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt1") @@ -178,7 +180,8 @@ def test_lost_part_mutation(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{node.name}') ORDER BY tuple() " - "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt2") @@ -241,7 +244,8 @@ def test_lost_last_part(start_cluster): for node in [node1, node2]: node.query( f"CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{node.name}') " - "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0," + "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000" ) node1.query("SYSTEM STOP MERGES mt3") diff --git a/tests/integration/test_merge_tree_check_part_with_cache/__init__.py b/tests/integration/test_merge_tree_check_part_with_cache/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_merge_tree_check_part_with_cache/configs/storage_conf.xml b/tests/integration/test_merge_tree_check_part_with_cache/configs/storage_conf.xml new file mode 100644 index 00000000000..c5e5565e1a9 --- /dev/null +++ b/tests/integration/test_merge_tree_check_part_with_cache/configs/storage_conf.xml @@ -0,0 +1,29 @@ + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + 33554432 + + + cache + s3 + /s3_cache/ + 1000000000 + 1 + + + + + +
+ s3_cache +
+
+
+
+
+
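For orientation, the config above layers a filesystem cache (disk `s3_cache`) over the MinIO-backed `s3` disk and exposes the pair through a storage policy of the same name. A table opts in via its `storage_policy` setting; a minimal sketch follows, with an illustrative table name (the real test below creates `s3_test` the same way):

    CREATE TABLE cached_table
    (
        id Int64,
        data String
    )
    ENGINE = MergeTree()
    ORDER BY id
    SETTINGS storage_policy = 's3_cache';

Inserts made with `enable_filesystem_cache_on_write_operations = 1` populate the cache under `/s3_cache/`, which is exactly what the new test truncates on purpose to exercise `CHECK TABLE`.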
diff --git a/tests/integration/test_merge_tree_check_part_with_cache/test.py b/tests/integration/test_merge_tree_check_part_with_cache/test.py new file mode 100644 index 00000000000..1f50a5ab9de --- /dev/null +++ b/tests/integration/test_merge_tree_check_part_with_cache/test.py @@ -0,0 +1,106 @@ +import pytest +import os +import json +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) + +node = cluster.add_instance( + "node", + main_configs=["configs/storage_conf.xml"], + with_minio=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_check_part_with_cache(start_cluster): + if node.is_built_with_sanitizer() or node.is_debug_build(): + pytest.skip( + "Skip with debug build and sanitizers. \ + This test manually corrupts cache which triggers LOGICAL_ERROR \ + and leads to crash with those builds" + ) + + node.query( + """ + CREATE TABLE s3_test ( + id Int64, + data String + ) ENGINE=MergeTree() + ORDER BY id + SETTINGS storage_policy='s3_cache' + """ + ) + + node.query("SYSTEM STOP MERGES s3_test") + + node.query( + "INSERT INTO s3_test VALUES (0, 'data')", + settings={"enable_filesystem_cache_on_write_operations": 1}, + ) + + node.query( + "INSERT INTO s3_test VALUES (1, 'data')", + settings={"enable_filesystem_cache_on_write_operations": 1}, + ) + + def get_cache_path_of_data_file(part_name): + disk_path = node.query( + "SELECT path FROM system.disks WHERE name = 's3_cache'" + ).strip("\n") + + part_path = node.query( + f"SELECT path FROM system.parts WHERE table = 's3_test' AND name = '{part_name}'" + ).strip("\n") + + local_data_file_path = os.path.relpath(part_path, disk_path) + "/data.bin" + + return node.query( + f"SELECT cache_paths[1] FROM system.remote_data_paths WHERE disk_name = 's3_cache' AND local_path = '{local_data_file_path}'" + ).strip("\n") + + cache_path = get_cache_path_of_data_file("all_1_1_0") + assert len(cache_path) > 0 + + node.exec_in_container( + ["bash", "-c", f"truncate -s -1 {cache_path}"], privileged=True + ) + + assert ( + node.query( + "SELECT count() FROM s3_test WHERE NOT ignore(*)", + settings={"enable_filesystem_cache": 0}, + ) + == "2\n" + ) + + with pytest.raises(Exception): + node.query( + "SELECT count() FROM s3_test WHERE NOT ignore(*)", + settings={"enable_filesystem_cache": 1}, + ) + + assert node.query("CHECK TABLE s3_test") == "1\n" + + # Check that cache is removed only for one part after CHECK TABLE + cache_path = get_cache_path_of_data_file("all_1_1_0") + assert len(cache_path) == 0 + + cache_path = get_cache_path_of_data_file("all_2_2_0") + assert len(cache_path) > 0 + + assert ( + node.query( + "SELECT count() FROM s3_test WHERE NOT ignore(*)", + settings={"enable_filesystem_cache": 1}, + ) + == "2\n" + ) diff --git a/tests/integration/test_merge_tree_empty_parts/test.py b/tests/integration/test_merge_tree_empty_parts/test.py index 0f611408a67..c6a96f3ed1b 100644 --- a/tests/integration/test_merge_tree_empty_parts/test.py +++ b/tests/integration/test_merge_tree_empty_parts/test.py @@ -27,7 +27,7 @@ def test_empty_parts_alter_delete(started_cluster): "CREATE TABLE empty_parts_delete (d Date, key UInt64, value String) " "ENGINE = ReplicatedMergeTree('/clickhouse/tables/empty_parts_delete', 'r1') " "PARTITION BY toYYYYMM(d) ORDER BY key " - "SETTINGS old_parts_lifetime = 1" + "SETTINGS old_parts_lifetime = 1, cleanup_delay_period=0, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("INSERT INTO 
empty_parts_delete VALUES (toDate('2020-10-10'), 1, 'a')") @@ -48,7 +48,7 @@ def test_empty_parts_summing(started_cluster): "CREATE TABLE empty_parts_summing (d Date, key UInt64, value Int64) " "ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/empty_parts_summing', 'r1') " "PARTITION BY toYYYYMM(d) ORDER BY key " - "SETTINGS old_parts_lifetime = 1" + "SETTINGS old_parts_lifetime = 1, cleanup_delay_period=0, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("INSERT INTO empty_parts_summing VALUES (toDate('2020-10-10'), 1, 1)") diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 0e51df017b2..54e7f6dd8ee 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -1528,7 +1528,8 @@ def test_simple_replication_and_moves(start_cluster): s1 String ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_moves', '{}') ORDER BY tuple() - SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2 + SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, + cleanup_delay_period=1, cleanup_delay_period_random_add=2, cleanup_thread_preferred_points_per_iteration=0 """.format( i + 1 ) @@ -1609,7 +1610,8 @@ def test_download_appropriate_disk(start_cluster): s1 String ) ENGINE = ReplicatedMergeTree('/clickhouse/replicated_table_for_download', '{}') ORDER BY tuple() - SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=2 + SETTINGS storage_policy='moving_jbod_with_external', old_parts_lifetime=1, + cleanup_delay_period=1, cleanup_delay_period_random_add=2, cleanup_thread_preferred_points_per_iteration=0 """.format( i + 1 ) diff --git a/tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml b/tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml new file mode 100644 index 00000000000..2ad6a0f1eff --- /dev/null +++ b/tests/integration/test_non_default_compression/configs/deflateqpl_compression_by_default.xml @@ -0,0 +1,11 @@
+<clickhouse>
+    <compression>
+        <case>
+
+            <min_part_size>0</min_part_size>
+            <min_part_size_ratio>0</min_part_size_ratio>
+
+            <method>deflate_qpl</method>
+        </case>
+    </compression>
+</clickhouse>
diff --git a/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml new file mode 100644 index 00000000000..24e101e0e3f --- /dev/null +++ b/tests/integration/test_non_default_compression/configs/enable_deflateqpl_codec.xml @@ -0,0 +1,7 @@
+<clickhouse>
+    <profiles>
+        <default>
+            <enable_deflate_qpl_codec>1</enable_deflate_qpl_codec>
+        </default>
+    </profiles>
+</clickhouse>
diff --git a/tests/integration/test_non_default_compression/test.py b/tests/integration/test_non_default_compression/test.py index e0a67a5db95..18e2eb43813 100644 --- a/tests/integration/test_non_default_compression/test.py +++ b/tests/integration/test_non_default_compression/test.py @@ -38,6 +38,14 @@ node5 = cluster.add_instance( ) node6 = cluster.add_instance( "node6", + main_configs=["configs/deflateqpl_compression_by_default.xml"], + user_configs=[ + "configs/allow_suspicious_codecs.xml", + "configs/enable_deflateqpl_codec.xml", + ], +) +node7 = cluster.add_instance( + "node7", main_configs=["configs/allow_experimental_codecs.xml"], user_configs=["configs/allow_suspicious_codecs.xml"], ) @@ -244,3 +252,58 @@ def test_uncompressed_cache_plus_zstd_codec(start_cluster): ) == "10000\n" ) + + +def test_preconfigured_deflateqpl_codec(start_cluster): + node6.query( + """ + CREATE TABLE 
compression_codec_multiple_with_key ( + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), DEFLATE_QPL), + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, DEFLATE_QPL), + data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4, DEFLATE_QPL), + somecolumn Float64 + ) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2; + """ + ) + node6.query( + "INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello', 88.88), (toDate('2018-10-12'), 100002, 'world', 99.99), (toDate('2018-10-12'), 1111, '!', 777.777)" + ) + assert ( + node6.query( + "SELECT COUNT(*) FROM compression_codec_multiple_with_key WHERE id % 2 == 0" + ) + == "2\n" + ) + assert ( + node6.query( + "SELECT DISTINCT somecolumn FROM compression_codec_multiple_with_key ORDER BY id" + ) + == "777.777\n88.88\n99.99\n" + ) + assert ( + node6.query( + "SELECT data FROM compression_codec_multiple_with_key WHERE id >= 1112 AND somedate = toDate('2018-10-12') AND somecolumn <= 100" + ) + == "hello\nworld\n" + ) + + node6.query( + "INSERT INTO compression_codec_multiple_with_key SELECT toDate('2018-10-12'), number, toString(number), 1.0 FROM system.numbers LIMIT 10000" + ) + + assert ( + node6.query( + "SELECT COUNT(id) FROM compression_codec_multiple_with_key WHERE id % 10 == 0" + ) + == "1001\n" + ) + assert ( + node6.query("SELECT SUM(somecolumn) FROM compression_codec_multiple_with_key") + == str(777.777 + 88.88 + 99.99 + 1.0 * 10000) + "\n" + ) + assert ( + node6.query( + "SELECT count(*) FROM compression_codec_multiple_with_key GROUP BY somedate" + ) + == "10003\n" + ) diff --git a/tests/integration/test_old_parts_finally_removed/test.py b/tests/integration/test_old_parts_finally_removed/test.py index 5347d433419..cbd701588d5 100644 --- a/tests/integration/test_old_parts_finally_removed/test.py +++ b/tests/integration/test_old_parts_finally_removed/test.py @@ -27,7 +27,8 @@ def started_cluster(): def test_part_finally_removed(started_cluster): node1.query( - "CREATE TABLE drop_outdated_part (Key UInt64) ENGINE = ReplicatedMergeTree('/table/d', '1') ORDER BY tuple() SETTINGS old_parts_lifetime=10, cleanup_delay_period=10, cleanup_delay_period_random_add=1" + "CREATE TABLE drop_outdated_part (Key UInt64) ENGINE = ReplicatedMergeTree('/table/d', '1') ORDER BY tuple() " + "SETTINGS old_parts_lifetime=10, cleanup_delay_period=10, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) node1.query("INSERT INTO drop_outdated_part VALUES (1)") @@ -44,7 +45,7 @@ def test_part_finally_removed(started_cluster): ) node1.query( - "ALTER TABLE drop_outdated_part MODIFY SETTING old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1" + "ALTER TABLE drop_outdated_part MODIFY SETTING old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0" ) for i in range(60): diff --git a/tests/integration/test_parts_delete_zookeeper/test.py b/tests/integration/test_parts_delete_zookeeper/test.py index a78aefa4595..9fd07e7b65d 100644 --- a/tests/integration/test_parts_delete_zookeeper/test.py +++ b/tests/integration/test_parts_delete_zookeeper/test.py @@ -21,7 +21,7 @@ def start_cluster(): CREATE DATABASE test; CREATE TABLE test_table(date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated', 'node1') - ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS old_parts_lifetime=4, cleanup_delay_period=1; + ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS 
old_parts_lifetime=4, cleanup_delay_period=1, cleanup_thread_preferred_points_per_iteration=0; """ ) diff --git a/tests/integration/test_recovery_replica/test.py b/tests/integration/test_recovery_replica/test.py index 0a63da4db22..582e018f5d2 100644 --- a/tests/integration/test_recovery_replica/test.py +++ b/tests/integration/test_recovery_replica/test.py @@ -4,7 +4,7 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import assert_eq_with_retry -SETTINGS = "SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0" +SETTINGS = "SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0" def fill_nodes(nodes): diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py index 2ab2fe499ff..b3ba8d4737f 100644 --- a/tests/integration/test_replicated_database/test.py +++ b/tests/integration/test_replicated_database/test.py @@ -34,6 +34,7 @@ competing_node = cluster.add_instance( main_configs=["configs/config.xml"], user_configs=["configs/settings.xml"], with_zookeeper=True, + stay_alive=True, macros={"shard": 1, "replica": 3}, ) snapshotting_node = cluster.add_instance( @@ -1272,3 +1273,61 @@ def test_recover_digest_mismatch(started_cluster): dummy_node.query("DROP DATABASE IF EXISTS recover_digest_mismatch") print("Everything Okay") + + +def test_replicated_table_structure_alter(started_cluster): + main_node.query("DROP DATABASE IF EXISTS table_structure") + dummy_node.query("DROP DATABASE IF EXISTS table_structure") + + main_node.query( + "CREATE DATABASE table_structure ENGINE = Replicated('/clickhouse/databases/table_structure', 'shard1', 'replica1');" + ) + dummy_node.query( + "CREATE DATABASE table_structure ENGINE = Replicated('/clickhouse/databases/table_structure', 'shard1', 'replica2');" + ) + competing_node.query( + "CREATE DATABASE table_structure ENGINE = Replicated('/clickhouse/databases/table_structure', 'shard1', 'replica3');" + ) + + competing_node.query("CREATE TABLE table_structure.mem (n int) ENGINE=Memory") + dummy_node.query("DETACH DATABASE table_structure") + + settings = {"distributed_ddl_task_timeout": 0} + main_node.query( + "CREATE TABLE table_structure.rmt (n int, v UInt64) ENGINE=ReplicatedReplacingMergeTree(v) ORDER BY n", + settings=settings, + ) + + competing_node.query("SYSTEM SYNC DATABASE REPLICA table_structure") + competing_node.query("DETACH DATABASE table_structure") + + main_node.query( + "ALTER TABLE table_structure.rmt ADD COLUMN m int", settings=settings + ) + main_node.query( + "ALTER TABLE table_structure.rmt COMMENT COLUMN v 'version'", settings=settings + ) + main_node.query("INSERT INTO table_structure.rmt VALUES (1, 2, 3)") + + command = "rm -f /var/lib/clickhouse/metadata/table_structure/mem.sql" + competing_node.exec_in_container(["bash", "-c", command]) + competing_node.restart_clickhouse(kill=True) + + dummy_node.query("ATTACH DATABASE table_structure") + dummy_node.query("SYSTEM SYNC DATABASE REPLICA table_structure") + dummy_node.query("SYSTEM SYNC REPLICA table_structure.rmt") + assert "1\t2\t3\n" == dummy_node.query("SELECT * FROM table_structure.rmt") + + competing_node.query("SYSTEM SYNC DATABASE REPLICA table_structure") + competing_node.query("SYSTEM SYNC REPLICA table_structure.rmt") + # time.sleep(600) + assert "mem" in competing_node.query("SHOW TABLES 
FROM table_structure") + assert "1\t2\t3\n" == competing_node.query("SELECT * FROM table_structure.rmt") + + main_node.query("ALTER TABLE table_structure.rmt ADD COLUMN k int") + main_node.query("INSERT INTO table_structure.rmt VALUES (1, 2, 3, 4)") + dummy_node.query("SYSTEM SYNC DATABASE REPLICA table_structure") + dummy_node.query("SYSTEM SYNC REPLICA table_structure.rmt") + assert "1\t2\t3\t0\n1\t2\t3\t4\n" == dummy_node.query( + "SELECT * FROM table_structure.rmt ORDER BY k" + ) diff --git a/tests/integration/test_storage_nats/test.py b/tests/integration/test_storage_nats/test.py index 1d7e046864b..4d7e4cf813d 100644 --- a/tests/integration/test_storage_nats/test.py +++ b/tests/integration/test_storage_nats/test.py @@ -931,7 +931,8 @@ def test_nats_overloaded_insert(nats_cluster): CREATE TABLE test.view_overload (key UInt64, value UInt64) ENGINE = MergeTree ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; + SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3, + cleanup_thread_preferred_points_per_iteration=0; CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS SELECT * FROM test.nats_consume; """ diff --git a/tests/integration/test_storage_rabbitmq/test.py b/tests/integration/test_storage_rabbitmq/test.py index 4e1e28373e3..b4dcf86e0ba 100644 --- a/tests/integration/test_storage_rabbitmq/test.py +++ b/tests/integration/test_storage_rabbitmq/test.py @@ -642,7 +642,8 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster): CREATE TABLE test.view (key UInt64, value UInt64, channel_id String) ENGINE = MergeTree ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; + SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3, + cleanup_thread_preferred_points_per_iteration=0; CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT *, _channel_id AS channel_id FROM test.rabbitmq; """ @@ -1116,7 +1117,8 @@ def test_rabbitmq_direct_exchange(rabbitmq_cluster): CREATE TABLE test.destination(key UInt64, value UInt64) ENGINE = MergeTree() ORDER BY key - SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3; + SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3, + cleanup_thread_preferred_points_per_iteration=0; """ ) diff --git a/tests/integration/test_system_metrics/test.py b/tests/integration/test_system_metrics/test.py index 9ebe198a109..338622b824e 100644 --- a/tests/integration/test_system_metrics/test.py +++ b/tests/integration/test_system_metrics/test.py @@ -13,7 +13,9 @@ def fill_nodes(nodes, shard): CREATE DATABASE test; CREATE TABLE test.test_table(date Date, id UInt32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0; + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) + SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, + cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0; """.format( shard=shard, replica=node.name ) diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 
05eab7a952f..3b031569b8a 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -6,6 +6,7 @@ from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV, exec_query_with_retry from helpers.wait_for_helpers import wait_for_delete_inactive_parts from helpers.wait_for_helpers import wait_for_delete_empty_parts +from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=True) @@ -75,7 +76,8 @@ def test_ttl_columns(started_cluster): """ CREATE TABLE test_ttl(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_columns', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0; + ORDER BY id PARTITION BY toDayOfMonth(date) + SETTINGS merge_with_ttl_timeout=0, min_bytes_for_wide_part=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name ) @@ -108,7 +110,7 @@ def test_merge_with_ttl_timeout(started_cluster): CREATE TABLE {table}(date DateTime, id UInt32, a Int32 TTL date + INTERVAL 1 DAY, b Int32 TTL date + INTERVAL 1 MONTH) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/{table}', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - SETTINGS min_bytes_for_wide_part=0; + SETTINGS min_bytes_for_wide_part=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name, table=table ) @@ -143,13 +145,11 @@ def test_merge_with_ttl_timeout(started_cluster): ) ) - time.sleep(15) # TTL merges shall not happen. - - assert ( - node1.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + assert_eq_with_retry( + node1, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n" ) - assert ( - node2.query("SELECT countIf(a = 0) FROM {table}".format(table=table)) == "3\n" + assert_eq_with_retry( + node2, "SELECT countIf(a = 0) FROM {table}".format(table=table), "3\n" ) @@ -164,7 +164,7 @@ def test_ttl_many_columns(started_cluster): _offset Int32 TTL date, _partition Int32 TTL date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_2', '{replica}') - ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0; + ORDER BY id PARTITION BY toDayOfMonth(date) SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name ) @@ -222,7 +222,7 @@ def test_ttl_table(started_cluster, delete_suffix): CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0; + TTL date + INTERVAL 1 DAY {delete_suffix} SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000; """.format( replica=node.name, delete_suffix=delete_suffix ) @@ -313,7 +313,7 @@ def test_ttl_double_delete_rule_returns_error(started_cluster): CREATE TABLE test_ttl(date DateTime, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_double_delete', '{replica}') ORDER BY id PARTITION BY toDayOfMonth(date) - TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0 + TTL date + INTERVAL 1 DAY, date + INTERVAL 2 DAY SETTINGS merge_with_ttl_timeout=0, max_merge_selecting_sleep_ms=6000 """.format( replica=node1.name ) @@ -431,7 +431,8 @@ def test_ttl_empty_parts(started_cluster): 
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/test_ttl_empty_parts', '{replica}') ORDER BY id SETTINGS max_bytes_to_merge_at_min_space_in_pool = 1, max_bytes_to_merge_at_max_space_in_pool = 1, - cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1 + cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime = 1 """.format( replica=node.name diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh index a0a3416e406..399511db701 100755 --- a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh @@ -36,8 +36,12 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst_r1;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst_r2;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r1 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r2 (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '2') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r1 (p UInt64, k String, d UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '1') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst_r2 (p UInt64, k String, d UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst_1', '2') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" diff --git a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh index 1f5bcbdc0d0..d8b1bdec328 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh @@ -56,11 +56,13 @@ ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_cleaner_r2 SYNC" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations_cleaner', 'r1') ORDER BY x SETTINGS \ finished_mutations_to_keep = 2, cleanup_delay_period = 1, - cleanup_delay_period_random_add = 0" + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE mutations_cleaner_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/mutations_cleaner', 'r2') ORDER BY x SETTINGS \ finished_mutations_to_keep = 2, cleanup_delay_period = 1, - 
cleanup_delay_period_random_add = 0" + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0" # Insert some data ${CLICKHOUSE_CLIENT} --insert_keeper_fault_injection_probability=0 --query="INSERT INTO mutations_cleaner_r1(x) VALUES (1), (2), (3), (4), (5)" diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference new file mode 100644 index 00000000000..a2178f5eda7 --- /dev/null +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.reference @@ -0,0 +1,6 @@ +CREATE TABLE default.compression_codec\n(\n `id` UInt64 CODEC(DEFLATE_QPL),\n `data` String CODEC(DEFLATE_QPL),\n `ddd` Date CODEC(DEFLATE_QPL),\n `ddd32` Date32 CODEC(DEFLATE_QPL),\n `somenum` Float64 CODEC(DEFLATE_QPL),\n `somestr` FixedString(3) CODEC(DEFLATE_QPL),\n `othernum` Int64 CODEC(DEFLATE_QPL),\n `somearray` Array(UInt8) CODEC(DEFLATE_QPL),\n `somemap` Map(String, UInt32) CODEC(DEFLATE_QPL),\n `sometuple` Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +1 hello 2018-12-14 2018-12-14 1.1 aaa 5 [1,2,3] {'k1':1,'k2':2} (1,2) +2 world 2018-12-15 2018-12-15 2.2 bbb 6 [4,5,6] {'k3':3,'k4':4} (3,4) +3 ! 2018-12-16 2018-12-16 3.3 ccc 7 [7,8,9] {'k5':5,'k6':6} (5,6) +2 +10001 diff --git a/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql new file mode 100644 index 00000000000..8a256567e80 --- /dev/null +++ b/tests/queries/0_stateless/00804_test_deflate_qpl_codec_compression.sql @@ -0,0 +1,49 @@ +--Tags: no-fasttest, no-cpu-aarch64 +-- no-fasttest because DEFLATE_QPL isn't available in fasttest +-- no-cpu-aarch64 because DEFLATE_QPL is x86-only + +-- A bunch of random DDLs to test the DEFLATE_QPL codec. + +SET enable_deflate_qpl_codec = 1; + +-- Suppress test failures because stderr contains warning "Initialization of hardware-assisted DeflateQpl failed, falling +-- back to software DeflateQpl coded." 
+SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS compression_codec; + +CREATE TABLE compression_codec( + id UInt64 CODEC(DEFLATE_QPL), + data String CODEC(DEFLATE_QPL), + ddd Date CODEC(DEFLATE_QPL), + ddd32 Date32 CODEC(DEFLATE_QPL), + somenum Float64 CODEC(DEFLATE_QPL), + somestr FixedString(3) CODEC(DEFLATE_QPL), + othernum Int64 CODEC(DEFLATE_QPL), + somearray Array(UInt8) CODEC(DEFLATE_QPL), + somemap Map(String, UInt32) CODEC(DEFLATE_QPL), + sometuple Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL), +) ENGINE = MergeTree() ORDER BY tuple(); + +SHOW CREATE TABLE compression_codec; + +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5, [1,2,3], map('k1',1,'k2',2), tuple(1,2)); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6, [4,5,6], map('k3',3,'k4',4), tuple(3,4)); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6)); + +SELECT * FROM compression_codec ORDER BY id; + +OPTIMIZE TABLE compression_codec FINAL; + +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), toDate32('2018-12-13'), 4.4, 'ddd', 8, [10,11,12], map('k7',7,'k8',8), tuple(7,8)); + +DETACH TABLE compression_codec; +ATTACH TABLE compression_codec; + +SELECT count(*) FROM compression_codec WHERE id = 2 GROUP BY id; + +INSERT INTO compression_codec SELECT 3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6) FROM system.numbers LIMIT 10000; + +SELECT count(*) FROM compression_codec WHERE id = 3 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec; diff --git a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh index 5fc3fa460e6..bab2304cec2 100755 --- a/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh +++ b/tests/queries/0_stateless/00814_replicated_minimalistic_part_header_zookeeper.sh @@ -20,13 +20,15 @@ CREATE TABLE part_header_r1(x UInt32, y UInt32) SETTINGS use_minimalistic_part_header_in_zookeeper = 0, old_parts_lifetime = 1, cleanup_delay_period = 0, - cleanup_delay_period_random_add = 0; + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0; CREATE TABLE part_header_r2(x UInt32, y UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/test_00814/part_header/{shard}', '2{replica}') ORDER BY x SETTINGS use_minimalistic_part_header_in_zookeeper = 1, old_parts_lifetime = 1, cleanup_delay_period = 0, - cleanup_delay_period_random_add = 0; + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0; SELECT '*** Test fetches ***'; INSERT INTO part_header_r1 VALUES (1, 1); diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index c713c7c4926..ad0146b9d99 100755 --- a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -22,7 +22,7 @@ CREATE TABLE elog ( ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/elog/{shard}', '{replica}') PARTITION BY date ORDER BY (engine_id) -SETTINGS replicated_deduplication_window = 2, cleanup_delay_period=4, cleanup_delay_period_random_add=0;" 
+SETTINGS replicated_deduplication_window = 2, cleanup_delay_period=4, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 1, 'hello')" $CLICKHOUSE_CLIENT --query="INSERT INTO elog VALUES (toDate('2018-10-01'), 2, 'hello')" diff --git a/tests/queries/0_stateless/00988_parallel_parts_removal.sql b/tests/queries/0_stateless/00988_parallel_parts_removal.sql index 8f79276782b..65629f0df01 100644 --- a/tests/queries/0_stateless/00988_parallel_parts_removal.sql +++ b/tests/queries/0_stateless/00988_parallel_parts_removal.sql @@ -1,6 +1,8 @@ DROP TABLE IF EXISTS mt; -CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; +CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x + SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; SYSTEM STOP MERGES mt; diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index 862cc90fb1c..d45cc3a6871 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -13,8 +13,14 @@ $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; - CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 100)); - CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 200)); + CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 100)); + CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 200)); " function thread1() diff --git 
a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index f4f38ad9c83..811681794a5 100755 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -58,7 +58,8 @@ function thread6() $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS alter_table_$REPLICA; CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 - SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50));"; + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50));"; sleep 0.$RANDOM; done } diff --git a/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh index 5e9e69d999d..e0a84323dbd 100755 --- a/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/01034_move_partition_from_table_zookeeper.sh @@ -28,7 +28,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src1', '1') PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst1', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst1', '1') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" @@ -58,7 +59,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE dst;" $CLICKHOUSE_CLIENT --query="SELECT 'MOVE incompatible schema missing column';" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src2', '1') PARTITION BY p ORDER BY (d, p);" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst2', '1') PARTITION BY p ORDER BY (d, p) SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst2', '1') PARTITION BY p ORDER BY (d, p) +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (0, '0', 
1);" $CLICKHOUSE_CLIENT --query="INSERT INTO src VALUES (1, '0', 1);" diff --git a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh index 8ef03be02b6..06a460f3600 100755 --- a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh @@ -11,7 +11,8 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS src;" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS dst;" $CLICKHOUSE_CLIENT --query="CREATE TABLE src (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/src', '1') PARTITION BY p ORDER BY k;" -$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst', '1') PARTITION BY p ORDER BY k SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" +$CLICKHOUSE_CLIENT --query="CREATE TABLE dst (p UInt64, k String) ENGINE = ReplicatedMergeTree('/clickhouse/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/dst', '1') PARTITION BY p ORDER BY k +SETTINGS old_parts_lifetime=1, cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" function thread1() { diff --git a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh index 7f53bf2a627..5f69427c0cd 100755 --- a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh @@ -31,7 +31,8 @@ for i in $(seq $REPLICAS); do max_replicated_merges_in_queue = 1000, temporary_directories_lifetime = 10, cleanup_delay_period = 3, - cleanup_delay_period_random_add = 0" + cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_mutate_mt_1 SELECT number, number + 10, toString(number) from numbers(10)" diff --git a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh index aec27792603..e508b77a0c2 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh @@ -12,7 +12,10 @@ for i in $(seq $REPLICAS); do done for i in $(seq $REPLICAS); do - $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_detach_$i (key UInt64, value1 UInt8, value2 UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_detach', '$i') ORDER BY key SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000,temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0" + $CLICKHOUSE_CLIENT --query "CREATE TABLE concurrent_alter_detach_$i (key UInt64, value1 UInt8, value2 UInt8) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_alter_detach', '$i') ORDER BY key + SETTINGS max_replicated_mutations_in_queue=1000, number_of_free_entries_in_pool_to_execute_mutation=0,max_replicated_merges_in_queue=1000, + 
temporary_directories_lifetime=10,cleanup_delay_period=3,cleanup_delay_period_random_add=0,cleanup_thread_preferred_points_per_iteration=0" done $CLICKHOUSE_CLIENT --query "INSERT INTO concurrent_alter_detach_1 SELECT number, number + 10, number from numbers(10)" diff --git a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh index 95f8dfc0377..3461283b5ea 100755 --- a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh +++ b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh @@ -27,7 +27,9 @@ function thread3() { while true; do $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS concurrent_optimize_table; - CREATE TABLE concurrent_optimize_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_optimize_table', '1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; + CREATE TABLE concurrent_optimize_table (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_optimize_table', '1') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0;"; sleep 0.$RANDOM; sleep 0.$RANDOM; sleep 0.$RANDOM; diff --git a/tests/queries/0_stateless/01158_zookeeper_log_long.sql b/tests/queries/0_stateless/01158_zookeeper_log_long.sql index 45771494af6..9b5ae7ad7c6 100644 --- a/tests/queries/0_stateless/01158_zookeeper_log_long.sql +++ b/tests/queries/0_stateless/01158_zookeeper_log_long.sql @@ -6,7 +6,7 @@ SET insert_keeper_fault_injection_probability=0; -- disable fault injection; par drop table if exists rmt sync; -- cleanup code will perform extra Exists -- (so the .reference will not match) -create table rmt (n int) engine=ReplicatedMergeTree('/test/01158/{database}/rmt', '1') order by n settings cleanup_delay_period=86400, replicated_can_become_leader=0; +create table rmt (n int) engine=ReplicatedMergeTree('/test/01158/{database}/rmt', '1') order by n settings cleanup_delay_period=86400, max_cleanup_delay_period=86400, replicated_can_become_leader=0; system sync replica rmt; insert into rmt values (1); insert into rmt values (1); diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh index 411705e0469..2d761df998e 100755 --- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh +++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh @@ -13,8 +13,10 @@ SCALE=5000 $CLICKHOUSE_CLIENT -n --query " DROP TABLE IF EXISTS r1; DROP TABLE IF EXISTS r2; - CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '1{replica}') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; - CREATE TABLE r2 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '2{replica}') ORDER BY x SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add 
= 1, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; + CREATE TABLE r1 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '1{replica}') ORDER BY x + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; + CREATE TABLE r2 (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{shard}', '2{replica}') ORDER BY x + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, parts_to_throw_insert = 100000, max_replicated_logs_to_keep = 10; DETACH TABLE r2; " diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh index 80318ba67fb..c3c87eeaf8b 100755 --- a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh +++ b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper_long.sh @@ -8,7 +8,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames0" $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames50" -$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0" +$CLICKHOUSE_CLIENT --query "CREATE TABLE table_for_renames0 (value UInt64, data String) +ENGINE ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/concurrent_rename', '1') ORDER BY tuple() +SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0" $CLICKHOUSE_CLIENT --query "INSERT INTO table_for_renames0 SELECT number, toString(number) FROM numbers(1000)" diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh index 445706e35bf..bf88ad0e0b2 100755 --- a/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh +++ b/tests/queries/0_stateless/01509_parallel_quorum_and_merge_long.sh @@ -13,7 +13,8 @@ $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q1 SYNC" $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q2 SYNC" -$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r1') ORDER BY tuple() SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0" +$CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r1') ORDER BY tuple() +SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0" $CLICKHOUSE_CLIENT -q "CREATE TABLE parallel_q2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/parallel_q', 'r2') ORDER BY tuple() SETTINGS always_fetch_merged_part = 1" diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh 
b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh index a3682a3a74b..5e1600a0673 100755 --- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh +++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh @@ -24,7 +24,8 @@ for i in $(seq 1 $NUM_REPLICAS); do ENGINE ReplicatedMergeTree('/test/01921_concurrent_ttl_and_normal_merges/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/ttl_table', '$i') ORDER BY tuple() TTL key + INTERVAL 1 SECOND - SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100, cleanup_delay_period=1, cleanup_delay_period_random_add=0;" + SETTINGS merge_with_ttl_timeout=1, max_replicated_merges_with_ttl_in_queue=100, max_number_of_merges_with_ttl_in_pool=100, + cleanup_delay_period=1, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;" done function optimize_thread diff --git a/tests/queries/0_stateless/02067_lost_part_s3.sql b/tests/queries/0_stateless/02067_lost_part_s3.sql index 12afdcd4421..7df15ab33c4 100644 --- a/tests/queries/0_stateless/02067_lost_part_s3.sql +++ b/tests/queries/0_stateless/02067_lost_part_s3.sql @@ -4,11 +4,17 @@ DROP TABLE IF EXISTS partslost_0; DROP TABLE IF EXISTS partslost_1; DROP TABLE IF EXISTS partslost_2; -CREATE TABLE partslost_0 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '0') ORDER BY tuple() SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1; +CREATE TABLE partslost_0 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '0') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0; -CREATE TABLE partslost_1 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '1') ORDER BY tuple() SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1; +CREATE TABLE partslost_1 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '1') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0; -CREATE TABLE partslost_2 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '2') ORDER BY tuple() SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 1; +CREATE TABLE partslost_2 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '2') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0; INSERT INTO partslost_0 SELECT toString(number) AS x from system.numbers LIMIT 10000; diff --git a/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh b/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh index bc297cbb963..e34163d0502 100755 --- 
a/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh +++ b/tests/queries/0_stateless/02370_lost_part_intersecting_merges.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "drop table if exists rmt1 sync;" $CLICKHOUSE_CLIENT -q "drop table if exists rmt2 sync;" $CLICKHOUSE_CLIENT -q "create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{database}', '1') order by n - settings cleanup_delay_period=0, cleanup_delay_period_random_add=0, old_parts_lifetime=0" + settings cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0" $CLICKHOUSE_CLIENT -q "create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02369/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/{database}', '2') order by n" $CLICKHOUSE_CLIENT -q "system stop replicated sends rmt2" diff --git a/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh b/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh index 5df1a9ba095..e31a091ff45 100755 --- a/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh +++ b/tests/queries/0_stateless/02396_system_parts_race_condition_rm.sh @@ -15,8 +15,12 @@ $CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; - CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0; - CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0 + CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0; + CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0 " function thread1() diff --git a/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh b/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh index 548179b94c9..39e513f6be4 100755 --- a/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh +++ b/tests/queries/0_stateless/02397_system_parts_race_condition_drop_rm.sh @@ -58,7 +58,9 @@ function thread6() while true; do REPLICA=$(($RANDOM % 10)) $CLICKHOUSE_CLIENT -n -q "DROP TABLE IF EXISTS alter_table_$REPLICA; - CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0;"; + CREATE TABLE alter_table_$REPLICA (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r_$REPLICA') ORDER BY a PARTITION BY b % 10 + SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, cleanup_thread_preferred_points_per_iteration=0;"; sleep 0.$RANDOM; done } diff --git a/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql b/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql index 9b0a52b8dbd..e3c8583ccf4 100644 --- a/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql +++ b/tests/queries/0_stateless/02427_mutate_and_zero_copy_replication_zookeeper.sql @@ -9,7 +9,7 @@ CREATE TABLE mutate_and_zero_copy_replication1 ) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02427_mutate_and_zero_copy_replication/alter', '1') ORDER BY tuple() -SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, cleanup_delay_period_random_add=300, min_bytes_for_wide_part = 0; +SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, max_cleanup_delay_period=300, cleanup_delay_period_random_add=300, min_bytes_for_wide_part = 0; CREATE TABLE mutate_and_zero_copy_replication2 ( @@ -19,7 +19,7 @@ CREATE TABLE mutate_and_zero_copy_replication2 ) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02427_mutate_and_zero_copy_replication/alter', '2') ORDER BY tuple() -SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, cleanup_delay_period_random_add=300; +SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, max_cleanup_delay_period=300, cleanup_delay_period_random_add=300; INSERT INTO mutate_and_zero_copy_replication1 VALUES (1, '1', 1.0); diff --git a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql index 5b9342972f4..948ec9e9e8a 100644 --- a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql +++ b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql @@ -8,7 +8,7 @@ drop table if exists rmt2; -- Disable compact parts, because we need hardlinks in mutations. 
create table rmt (n int, m int, k int) engine=ReplicatedMergeTree('/test/02432/{database}', '1') order by tuple() settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication=1, - concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, + concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, max_replicated_merges_in_queue=0, max_replicated_mutations_in_queue=0, min_bytes_for_wide_part=0, min_rows_for_wide_part=0; insert into rmt(n, m) values (1, 42); @@ -38,7 +38,7 @@ select count(), sum(n), sum(m) from rmt; -- New table can assign merges/mutations and can remove old parts create table rmt2 (n int, m int, k String) engine=ReplicatedMergeTree('/test/02432/{database}', '2') order by tuple() settings storage_policy = 's3_cache', allow_remote_fs_zero_copy_replication=1, - concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, + concurrent_part_removal_threshold=1, cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0, min_bytes_for_wide_part=0, min_rows_for_wide_part=0, max_replicated_merges_in_queue=1, old_parts_lifetime=0; diff --git a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql index 4befe952a14..ec669ace620 100644 --- a/tests/queries/0_stateless/02448_clone_replica_lost_part.sql +++ b/tests/queries/0_stateless/02448_clone_replica_lost_part.sql @@ -5,9 +5,15 @@ SET insert_keeper_fault_injection_probability=0; -- disable fault injection; par drop table if exists rmt1; drop table if exists rmt2; create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '1') order by tuple() - settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, old_parts_lifetime=0, max_parts_to_merge_at_once=4; + settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, + max_cleanup_delay_period=1, cleanup_delay_period=0, cleanup_delay_period_random_add=1, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, + merge_selecting_sleep_ms=1000, max_merge_selecting_sleep_ms=2000; create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '2') order by tuple() - settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, cleanup_delay_period=0, cleanup_delay_period_random_add=1, old_parts_lifetime=0, max_parts_to_merge_at_once=4; + settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, + max_cleanup_delay_period=1, cleanup_delay_period=0, cleanup_delay_period_random_add=1, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, + merge_selecting_sleep_ms=1000, max_merge_selecting_sleep_ms=2000; -- insert part only on one replica system stop replicated sends rmt1; @@ -137,7 +143,10 @@ system sync replica rmt2; -- merge through gap optimize table rmt2; -- give it a chance to clean up the log -select sleep(2) format Null; -- increases probability of reproducing the issue + +select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( 'select value from system.zookeeper where path=''/test/02448/' || currentDatabase() || '/rmt/replicas/1'' and name=''is_lost'' and value=''0''' ), 'LineAsString', 's String') settings
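-- (About the sleepEachRow/url construction completed just below: the {1..10} brace glob makes
-- url() fan out to ten request URLs, each of which re-reads the is_lost flag from
-- system.zookeeper, and sleepEachRow(2) then costs two seconds per row a request returns, so the
-- check gets up to ten chances rather than the single fixed sleep(2) it replaces. A stripped-down
-- commented sketch of the same fan-out pattern, assuming only a local server listening on 8123:
--   select sleepEachRow(1)
--   from url('http://localhost:8123/?param_try={1..3}&query=select+1', 'TSV', 's String')
--   settings max_threads=1 format Null;
-- )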
max_threads=1 format Null; -- rmt1 will mimic rmt2, but will not be able to fetch parts for a while system stop replicated sends rmt2; diff --git a/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql b/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql index 98427874160..b4504a55643 100644 --- a/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql +++ b/tests/queries/0_stateless/02494_zero_copy_and_projection_and_mutation_work_together.sql @@ -24,7 +24,8 @@ CREATE TABLE wikistat1 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02494_zero_copy_and_projection', '1') ORDER BY (path, time) -SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; +SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; CREATE TABLE wikistat2 ( @@ -49,7 +50,8 @@ CREATE TABLE wikistat2 ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02494_zero_copy_and_projection', '2') ORDER BY (path, time) -SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; +SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 0, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, allow_remote_fs_zero_copy_replication=1, min_bytes_for_wide_part=0; INSERT INTO wikistat1 SELECT toDateTime('2020-10-01 00:00:00'), 'hello', 'world', '/data/path', 10 from numbers(100); diff --git a/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh b/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh index 458a5e95faa..bc6e7eeb214 100755 --- a/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh +++ b/tests/queries/0_stateless/02515_cleanup_async_insert_block_ids.sh @@ -13,7 +13,7 @@ $CLICKHOUSE_CLIENT -n --query " CREATE TABLE t_async_insert_cleanup ( KeyID UInt32 ) Engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup', '{replica}') - ORDER BY (KeyID) SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, replicated_deduplication_window_for_async_inserts=10 + ORDER BY (KeyID) SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, replicated_deduplication_window_for_async_inserts=10 " for i in {1..100}; do diff --git a/tests/queries/0_stateless/02735_system_zookeeper_connection.reference b/tests/queries/0_stateless/02735_system_zookeeper_connection.reference index 1deabd88b88..52ff7657b8d 100644 --- a/tests/queries/0_stateless/02735_system_zookeeper_connection.reference +++ b/tests/queries/0_stateless/02735_system_zookeeper_connection.reference @@ -1,2 +1,2 @@ -default ::1 9181 0 0 3 -zookeeper2 ::1 9181 0 0 0 +default ::1 9181 0 0 3 1 1 +zookeeper2 ::1 9181 0 0 0 1 diff --git a/tests/queries/0_stateless/02735_system_zookeeper_connection.sql b/tests/queries/0_stateless/02735_system_zookeeper_connection.sql index 8b37c428413..863d90e1654 100644 --- a/tests/queries/0_stateless/02735_system_zookeeper_connection.sql +++ b/tests/queries/0_stateless/02735_system_zookeeper_connection.sql @@ -8,8 +8,14 @@ CREATE TABLE test_zk_connection_table ( ENGINE 
ReplicatedMergeTree('zookeeper2:/clickhouse/{database}/02731_zk_connection/{shard}', '{replica}') ORDER BY tuple(); +select name, host, port, index, is_expired, keeper_api_version, (connected_time between yesterday() and now()), + (abs(session_uptime_elapsed_seconds - zookeeperSessionUptime()) < 10) +from system.zookeeper_connection where name='default'; + -- keeper_api_version will be 0 for auxiliary_zookeeper2, because we fail to get /api_version due to chroot -- I'm not sure if it's a bug or a useful trick to fall back to the basic API -select name, host, port, index, is_expired, keeper_api_version from system.zookeeper_connection order by name; +-- Also, auxiliary zookeeper is created lazily +select name, host, port, index, is_expired, keeper_api_version, (connected_time between yesterday() and now()) +from system.zookeeper_connection where name!='default'; DROP TABLE IF EXISTS test_zk_connection_table; diff --git a/tests/queries/0_stateless/02771_jit_functions_comparison_crash.reference b/tests/queries/0_stateless/02771_jit_functions_comparison_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02771_jit_functions_comparison_crash.sql b/tests/queries/0_stateless/02771_jit_functions_comparison_crash.sql new file mode 100644 index 00000000000..e02f1a3382d --- /dev/null +++ b/tests/queries/0_stateless/02771_jit_functions_comparison_crash.sql @@ -0,0 +1,36 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +DROP TABLE IF EXISTS test_table_1; +CREATE TABLE test_table_1 +( + pkey UInt32, + c8 UInt32, + c9 String, + c10 Float32, + c11 String +) ENGINE = MergeTree ORDER BY pkey; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2 +( + vkey UInt32, + pkey UInt32, + c15 UInt32 +) ENGINE = MergeTree ORDER BY vkey; + +WITH test_cte AS +( + SELECT + ref_10.c11 as c_2_c2350_1, + ref_9.c9 as c_2_c2351_2 + FROM + test_table_1 as ref_9 + RIGHT OUTER JOIN test_table_1 as ref_10 ON (ref_9.c11 = ref_10.c9) + INNER JOIN test_table_2 as ref_11 ON (ref_10.c8 = ref_11.vkey) + WHERE ((ref_10.pkey + ref_11.pkey) BETWEEN ref_11.vkey AND (CASE WHEN (-30.87 >= ref_9.c10) THEN ref_11.c15 ELSE ref_11.pkey END)) +) +SELECT ref_13.c_2_c2350_1 as c_2_c2357_3 FROM test_cte as ref_13 WHERE (ref_13.c_2_c2351_2) in (select ref_14.c_2_c2351_2 as c_5_c2352_0 FROM test_cte as ref_14); + +DROP TABLE test_table_1; +DROP TABLE test_table_2; diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference new file mode 100644 index 00000000000..f688db940d9 --- /dev/null +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.reference @@ -0,0 +1,12 @@ +-8888150036649430454 +-2788931093724180887 +-75175454385331084 +368066018677693974 +821735343441964030 +2804162938822577320 +4357435422797280898 +5935810273536892891 +7885388429666205427 +8124171311239967992 +1 1 -- Simple query with analyzer and pure parallel replicas\nSELECT number\nFROM join_inner_table__fuzz_146_replicated\n SETTINGS\n allow_experimental_analyzer = 1,\n max_parallel_replicas = 2,\n cluster_for_parallel_replicas = \'test_cluster_one_shard_three_replicas_localhost\',\n allow_experimental_parallel_reading_from_replicas = 1,\n use_hedged_requests = 0; +0 2 SELECT `join_inner_table__fuzz_146_replicated`.`number` AS `number` FROM `default`.`join_inner_table__fuzz_146_replicated` SETTINGS allow_experimental_analyzer = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = 
\'test_cluster_one_shard_three_replicas_localhost\', allow_experimental_parallel_reading_from_replicas = 1, use_hedged_requests = 0 diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql new file mode 100644 index 00000000000..35089c0cedb --- /dev/null +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -0,0 +1,52 @@ +-- Tags: zookeeper + +CREATE TABLE join_inner_table__fuzz_146_replicated +( + `id` UUID, + `key` String, + `number` Int64, + `value1` String, + `value2` String, + `time` Nullable(Int64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/join_inner_table__fuzz_146_replicated', '{replica}') +ORDER BY (id, number, key) +SETTINGS index_granularity = 8192; + +INSERT INTO join_inner_table__fuzz_146_replicated + SELECT CAST('833c9e22-c245-4eb5-8745-117a9a1f26b1', 'UUID') AS id, CAST(rowNumberInAllBlocks(), 'String') AS key, * + FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) LIMIT 10; + +-- Simple query with analyzer and pure parallel replicas +SELECT number +FROM join_inner_table__fuzz_146_replicated + SETTINGS + allow_experimental_analyzer = 1, + max_parallel_replicas = 2, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', + allow_experimental_parallel_reading_from_replicas = 1, + use_hedged_requests = 0; + +SYSTEM FLUSH LOGS; +-- There should be 2 different queries +-- The initial query +-- The query sent to each replica (which should appear 2 times as we are setting max_parallel_replicas to 2) +SELECT + is_initial_query, + count() as c, query, +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND initial_query_id = + ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Simple query with analyzer and pure parallel replicas%' + ) +GROUP BY is_initial_query, query +ORDER BY is_initial_query DESC, c, query; diff --git a/tests/queries/0_stateless/02772_jit_date_time_add.reference b/tests/queries/0_stateless/02772_jit_date_time_add.reference new file mode 100644 index 00000000000..dec7d2fabd2 --- /dev/null +++ b/tests/queries/0_stateless/02772_jit_date_time_add.reference @@ -0,0 +1 @@ +\N diff --git a/tests/queries/0_stateless/02772_jit_date_time_add.sql b/tests/queries/0_stateless/02772_jit_date_time_add.sql new file mode 100644 index 00000000000..61028ac4172 --- /dev/null +++ b/tests/queries/0_stateless/02772_jit_date_time_add.sql @@ -0,0 +1,6 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SELECT DISTINCT result FROM (SELECT toStartOfFifteenMinutes(toDateTime(toStartOfFifteenMinutes(toDateTime(1000.0001220703125) + (number * 65536))) + (number * 9223372036854775807)) AS result FROM system.numbers LIMIT 1048576) ORDER BY result DESC NULLS FIRST FORMAT Null; -- { serverError 407 } +SELECT DISTINCT result FROM (SELECT toStartOfFifteenMinutes(toDateTime(toStartOfFifteenMinutes(toDateTime(1000.0001220703125) + (number * 65536))) + toInt64(number * 9223372036854775807)) AS result FROM system.numbers LIMIT 1048576) ORDER BY result DESC NULLS FIRST FORMAT Null; +SELECT round(round(round(round(round(100)), round(round(round(round(NULL), round(65535)), toTypeName(now() + 9223372036854775807) LIKE 'DateTime%DateTime%DateTime%DateTime%', round(-2)), 255), round(NULL)))); diff --git 
a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference new file mode 100644 index 00000000000..68e7be9ae6f --- /dev/null +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.reference @@ -0,0 +1,366 @@ +Drop tables if they exist +Create tab table +Create pseudo-random database name +Create tab duplicate table +Run MySQL test +field type null key default extra +aggregate_function AggregateFunction(sum, Int32) 0 NULL +array_value Array(Int32) 0 NULL +boolean_value UInt8 0 NULL +date32_value Date32 0 NULL +date_value Date 0 NULL +datetime64_value DateTime64(3) 0 NULL +datetime_value DateTime 0 NULL +decimal_value Decimal(10, 2) 0 NULL +enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3) 0 NULL +fixed_string_value FixedString(10) 0 NULL +float32 Float32 0 NULL +float64 Float64 0 NULL +int128 Int128 0 NULL +int16 Int16 0 NULL +int256 Int256 0 NULL +int32 Int32 0 NULL +int64 Int64 0 NULL +int8 Int8 0 NULL +ipv4_value IPv4 0 NULL +ipv6_value IPv6 0 NULL +json_value Object('json') 0 NULL +low_cardinality LowCardinality(String) 0 NULL +low_cardinality_date LowCardinality(DateTime) 0 NULL +map_value Map(String, Int32) 0 NULL +nested.nested_int Array(Int32) 0 NULL +nested.nested_string Array(String) 0 NULL +nint32 Nullable(Int32) 1 NULL +nullable_value Nullable(Int32) 1 NULL +string_value String 0 NULL +tuple_value Tuple(Int32, String) 0 NULL +uint128 UInt128 0 NULL +uint16 UInt16 0 NULL +uint256 UInt256 0 NULL +uint32 UInt32 0 NULL +uint64 UInt64 0 PRI SOR NULL +uint8 UInt8 0 NULL +uuid_value UUID 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +uuid_value CHAR 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL +string_value 
BLOB 0 NULL +tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +uuid_value CHAR 0 NULL +field type null key default extra collation comment privileges +aggregate_function TEXT 0 NULL NULL +array_value TEXT 0 NULL NULL +boolean_value TINYINT UNSIGNED 0 NULL NULL +date32_value DATE 0 NULL NULL +date_value DATE 0 NULL NULL +datetime64_value DATETIME 0 NULL NULL +datetime_value DATETIME 0 NULL NULL +decimal_value DECIMAL(10, 2) 0 NULL NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL NULL +fixed_string_value TEXT 0 NULL NULL +float32 FLOAT 0 NULL NULL +float64 DOUBLE 0 NULL NULL +int128 TEXT 0 NULL NULL +int16 SMALLINT 0 NULL NULL +int256 TEXT 0 NULL NULL +int32 INTEGER 0 NULL NULL +int64 BIGINT 0 NULL NULL +int8 TINYINT 0 NULL NULL +ipv4_value TEXT 0 NULL NULL +ipv6_value TEXT 0 NULL NULL +json_value JSON 0 NULL NULL +low_cardinality BLOB 0 NULL NULL +low_cardinality_date DATETIME 0 NULL NULL +map_value JSON 0 NULL NULL +nested.nested_int TEXT 0 NULL NULL +nested.nested_string TEXT 0 NULL NULL +nint32 INTEGER 0 NULL NULL +nullable_value INTEGER 0 NULL NULL +string_value BLOB 0 NULL NULL +tuple_value JSON 0 NULL NULL +uint128 TEXT 0 NULL NULL +uint16 SMALLINT UNSIGNED 0 NULL NULL +uint256 TEXT 0 NULL NULL +uint32 INTEGER UNSIGNED 0 NULL NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL NULL +uint8 TINYINT UNSIGNED 0 NULL NULL +uuid_value CHAR 0 NULL NULL +field type null key default extra +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +nested.nested_int TEXT 0 NULL +nint32 INTEGER 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INTEGER 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uuid_value CHAR 0 NULL +field type null key default extra +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +nested.nested_int TEXT 0 NULL +nint32 INTEGER 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL 
+low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_string TEXT 0 NULL +nullable_value INTEGER 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uuid_value CHAR 0 NULL +field type null key default extra +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +nested.nested_int TEXT 0 NULL +nint32 INTEGER 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +uuid_value CHAR 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL +nested.nested_string TEXT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +uuid_value CHAR 0 NULL +field type null key default extra +aggregate_function TEXT 0 NULL +array_value TEXT 0 NULL +boolean_value TINYINT UNSIGNED 0 NULL +date32_value DATE 0 NULL +date_value DATE 0 NULL +datetime64_value DATETIME 0 NULL +datetime_value DATETIME 0 NULL +decimal_value DECIMAL(10, 2) 0 NULL +enum_value ENUM('apple', 'banana', 'orange') 0 NULL +fixed_string_value TEXT 0 NULL +float32 FLOAT 0 NULL +float64 DOUBLE 0 NULL +int128 TEXT 0 NULL +int16 SMALLINT 0 NULL +int256 TEXT 0 NULL +int32 INTEGER 0 NULL +int64 BIGINT 0 NULL +int8 TINYINT 0 NULL +ipv4_value TEXT 0 NULL +ipv6_value TEXT 0 NULL +json_value JSON 0 NULL +low_cardinality BLOB 0 NULL +low_cardinality_date DATETIME 0 NULL +map_value JSON 0 NULL +nested.nested_int TEXT 0 NULL 
+nested.nested_string TEXT 0 NULL +nint32 INTEGER 0 NULL +nullable_value INTEGER 0 NULL +string_value BLOB 0 NULL +tuple_value JSON 0 NULL +uint128 TEXT 0 NULL +uint16 SMALLINT UNSIGNED 0 NULL +uint256 TEXT 0 NULL +uint32 INTEGER UNSIGNED 0 NULL +uint64 BIGINT UNSIGNED 0 PRI SOR NULL +uint8 TINYINT UNSIGNED 0 NULL +uuid_value CHAR 0 NULL diff --git a/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh new file mode 100755 index 00000000000..c86519b6f5d --- /dev/null +++ b/tests/queries/0_stateless/02775_show_columns_mysql_compatibility.sh @@ -0,0 +1,153 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# This script tests the MySQL compatibility of the SHOW COLUMNS command in ClickHouse +USER="default" +PASSWORD="" +HOST="127.0.0.1" +PORT=9004 + +# First run the clickhouse test to create the ClickHouse Tables + +echo "Drop tables if they exist" +${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS tab" +${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS database_123456789abcdef" +${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS database_123456789abcdef.tab" + +echo "Create tab table " +${CLICKHOUSE_CLIENT} -n -q " + SET allow_suspicious_low_cardinality_types=1; + SET allow_experimental_object_type=1; + CREATE TABLE tab + ( + uint8 UInt8, + uint16 UInt16, + uint32 UInt32, + uint64 UInt64, + uint128 UInt128, + uint256 UInt256, + int8 Int8, + int16 Int16, + int32 Int32, + int64 Int64, + int128 Int128, + int256 Int256, + nint32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value JSON, + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + low_cardinality_date LowCardinality(DateTime), + aggregate_function AggregateFunction(sum, Int32), + array_value Array(Int32), + map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + + +echo "Create pseudo-random database name" +${CLICKHOUSE_CLIENT} --query "CREATE DATABASE database_123456789abcdef;" + +echo "Create tab duplicate table" +${CLICKHOUSE_CLIENT} -n -q " + SET allow_suspicious_low_cardinality_types=1; + SET allow_experimental_object_type =1; + CREATE TABLE database_123456789abcdef.tab + ( + uint8 UInt8, + uint16 UInt16, + uint32 UInt32, + uint64 UInt64, + uint128 UInt128, + uint256 UInt256, + int8 Int8, + int16 Int16, + int32 Int32, + int64 Int64, + int128 Int128, + int256 Int256, + nint32 Nullable(Int32), + float32 Float32, + float64 Float64, + decimal_value Decimal(10, 2), + boolean_value UInt8, + string_value String, + fixed_string_value FixedString(10), + date_value Date, + date32_value Date32, + datetime_value DateTime, + datetime64_value DateTime64(3), + json_value JSON, + uuid_value UUID, + enum_value Enum8('apple' = 1, 'banana' = 2, 'orange' = 3), + low_cardinality LowCardinality(String), + low_cardinality_date LowCardinality(DateTime), + aggregate_function AggregateFunction(sum, Int32), + array_value Array(Int32), + 
map_value Map(String, Int32), + tuple_value Tuple(Int32, String), + nullable_value Nullable(Int32), + ipv4_value IPv4, + ipv6_value IPv6, + nested Nested + ( + nested_int Int32, + nested_string String + ) + ) ENGINE = MergeTree + ORDER BY uint64; + " + +# Write sql to temp file +TEMP_FILE=$(mktemp) + +cat <<EOT > $TEMP_FILE +SHOW COLUMNS FROM tab; +SET use_mysql_types_in_show_columns=1; +SHOW COLUMNS FROM tab; +SHOW EXTENDED COLUMNS FROM tab; +SHOW FULL COLUMNS FROM tab; +SHOW COLUMNS FROM tab LIKE '%int%'; +SHOW COLUMNS FROM tab NOT LIKE '%int%'; +SHOW COLUMNS FROM tab ILIKE '%INT%'; +SHOW COLUMNS FROM tab NOT ILIKE '%INT%'; +SHOW COLUMNS FROM tab WHERE field LIKE '%int%'; +SHOW COLUMNS FROM tab LIMIT 1; +SHOW COLUMNS FROM tab; +SHOW COLUMNS FROM tab FROM database_123456789abcdef; +SHOW COLUMNS FROM database_123456789abcdef.tab; +DROP DATABASE database_123456789abcdef; +DROP TABLE tab; +EOT + +# Now run the MySQL test script on the ClickHouse DB +echo "Run MySQL test" +MYSQL_PWD=$PASSWORD ${MYSQL_CLIENT} --user="$USER" --host="$HOST" --port="$PORT" < $TEMP_FILE + +# Clean up the temp file +rm $TEMP_FILE + diff --git a/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference new file mode 100644 index 00000000000..d395c4d6a0f --- /dev/null +++ b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.reference @@ -0,0 +1 @@ +1 2023-05-01 23:55:55.100 15 diff --git a/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql new file mode 100644 index 00000000000..db00f189330 --- /dev/null +++ b/tests/queries/0_stateless/02785_summing_merge_tree_datetime64.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS summing_merge_tree_datetime64; + +CREATE TABLE summing_merge_tree_datetime64 ( `pk` UInt64, `timestamp` DateTime64(3), `value` UInt64 ) +ENGINE = SummingMergeTree() ORDER BY pk; + +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 1 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 2 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 3 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 4 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 5 value; + +SELECT * FROM summing_merge_tree_datetime64 FINAL; +DROP TABLE summing_merge_tree_datetime64; diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference new file mode 100644 index 00000000000..7764974255b --- /dev/null +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.reference @@ -0,0 +1 @@ +424242424242424242424242424242424242424242424242424242 diff --git a/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh new file mode 100755 index 00000000000..8865b2e7aab --- /dev/null +++ b/tests/queries/0_stateless/02786_parquet_big_integer_compatibility.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "select toString(424242424242424242424242424242424242424242424242424242::UInt256) as x format Parquet" | $CLICKHOUSE_LOCAL --input-format=Parquet --structure='x UInt256' -q "select * from table" + diff --git a/tests/queries/0_stateless/02789_table_functions_errors.reference b/tests/queries/0_stateless/02789_table_functions_errors.reference new file mode 100644 index 00000000000..af81158ecae --- /dev/null +++ b/tests/queries/0_stateless/02789_table_functions_errors.reference @@ -0,0 +1,2 @@ +10 +1 diff --git a/tests/queries/0_stateless/02789_table_functions_errors.sh b/tests/queries/0_stateless/02789_table_functions_errors.sh new file mode 100755 index 00000000000..081f7776ba0 --- /dev/null +++ b/tests/queries/0_stateless/02789_table_functions_errors.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Tags: no-parallel +# Tag no-parallel: It uses system.errors values which are global + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +# Simply calling a table function correctly should not increase system.errors +OLD_ERROR_QUANTITY=$(${CLICKHOUSE_CLIENT} --query "SELECT sum(value) FROM system.errors WHERE name = 'UNKNOWN_TABLE'") +${CLICKHOUSE_CLIENT} --query "SELECT count() FROM numbers(10)" +${CLICKHOUSE_CLIENT} --query "SELECT sum(value) = ${OLD_ERROR_QUANTITY} FROM system.errors WHERE name = 'UNKNOWN_TABLE'" diff --git a/tests/queries/0_stateless/02790_fix_coredump_when_compile_expression.reference b/tests/queries/0_stateless/02790_fix_coredump_when_compile_expression.reference new file mode 100644 index 00000000000..b8626c4cff2 --- /dev/null +++ b/tests/queries/0_stateless/02790_fix_coredump_when_compile_expression.reference @@ -0,0 +1 @@ +4 diff --git a/tests/queries/0_stateless/02790_fix_coredump_when_compile_expression.sql b/tests/queries/0_stateless/02790_fix_coredump_when_compile_expression.sql new file mode 100644 index 00000000000..90995da0cca --- /dev/null +++ b/tests/queries/0_stateless/02790_fix_coredump_when_compile_expression.sql @@ -0,0 +1,4 @@ +CREATE TABLE test (col1 Nullable(DOUBLE), col2 Nullable(DOUBLE), col3 DOUBLE) ENGINE=Memory; + +insert into test values(1.0 , 2.0, 3.0); +select multiIf(col1 > 2, col2/col3, 4.0) from test SETTINGS min_count_to_compile_expression=0; diff --git a/tests/queries/0_stateless/02790_url_multiple_tsv_files.reference b/tests/queries/0_stateless/02790_url_multiple_tsv_files.reference new file mode 100644 index 00000000000..927a33afecf --- /dev/null +++ b/tests/queries/0_stateless/02790_url_multiple_tsv_files.reference @@ -0,0 +1,5 @@ +136 +136 +136 +136 +136 diff --git a/tests/queries/0_stateless/02790_url_multiple_tsv_files.sql b/tests/queries/0_stateless/02790_url_multiple_tsv_files.sql new file mode 100644 index 00000000000..b71bd7c737a --- /dev/null +++ b/tests/queries/0_stateless/02790_url_multiple_tsv_files.sql @@ -0,0 +1,5 @@ +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+TSV', 'TSV') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+CSV', 'CSV') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+JSONEachRow', 'JSONEachRow') settings max_threads=1, max_download_threads=1); +select 
sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+TSKV', 'TSKV') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+Native', 'Native') settings max_threads=1, max_download_threads=1); diff --git a/tests/queries/0_stateless/02791_predicate_pushdown_different_types.reference b/tests/queries/0_stateless/02791_predicate_pushdown_different_types.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02791_predicate_pushdown_different_types.sql b/tests/queries/0_stateless/02791_predicate_pushdown_different_types.sql new file mode 100644 index 00000000000..121ffb27e9b --- /dev/null +++ b/tests/queries/0_stateless/02791_predicate_pushdown_different_types.sql @@ -0,0 +1,7 @@ +# These queries triggered a crash in old ClickHouse versions: + +CREATE TEMPORARY TABLE a (key UInt32, ID LowCardinality(String)); +CREATE TEMPORARY TABLE b (key UInt32); +SELECT * FROM b JOIN a USING (key) WHERE ID = '1' HAVING ID = '1'; + +# PS. Predicate pushdown does not work for LowCardinality(String), but it's another problem. diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index 73ec64e2f30..46bd0da4236 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1762,6 +1762,7 @@ misconfiguration mispredictions mmap mmapped +modularization moduloOrZero mongodb monthName diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 5c32ebb2683..dd21cc7e953 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -5,6 +5,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.5.9-lts 2023-06-22 v23.3.4.17-lts 2023-06-17 v23.3.3.52-lts 2023-06-12 v23.3.2.37-lts 2023-04-22