Merge remote-tracking branch 'upstream/master' into min-file-segment-size

This commit is contained in:
kssenii 2023-06-23 12:52:29 +02:00
commit fbc1a80ebe
360 changed files with 4813 additions and 2530 deletions

View File

@ -22,12 +22,13 @@ curl https://clickhouse.com/ | sh
## Upcoming Events
* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - Jun 8 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Bangalore**](https://www.meetup.com/clickhouse-bangalore-user-group/events/293740066/) - Jun 7
* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
## Recent Recordings
* **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Current featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments"

View File

@ -2,6 +2,7 @@
#include <cstdint>
#include <string>
#include <array>
#if defined(__SSE2__)
#include <emmintrin.h>

View File

@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio)
assert(min > 0 && ratio >= 0 && ratio <= 1);
return min * std::pow(max / min, ratio);
}
/// Linear interpolation between min and max for ratio in [0, 1]:
/// returns min when ratio == 0 and max when ratio == 1.
/// Delegates to std::lerp (C++20, <cmath>) for its exactness and
/// monotonicity guarantees rather than hand-rolling min + ratio * (max - min).
/// NOTE(review): unlike interpolateExponential above, no assert on the
/// argument ranges here — presumably callers guarantee 0 <= ratio <= 1; confirm.
constexpr double interpolateLinear(double min, double max, double ratio)
{
return std::lerp(min, max, ratio);
}

View File

@ -116,43 +116,79 @@ configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/A
# ARROW_ORC + adapters/orc/CMakefiles
# ARROW_ORC source list for the bundled Apache ORC library.
# Each file must appear exactly once: duplicate entries in a target's source
# list cause redundant compilation and can trigger duplicate-symbol errors.
# Kept sorted (generated/proto files first, then flat src dir, then subdirs)
# so future merges surface duplicates as adjacent lines.
set(ORC_SRCS
"${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h"
"${ORC_ADDITION_SOURCE_DIR}/orc_proto.pb.cc"
"${ORC_SOURCE_SRC_DIR}/Adaptor.cc"
"${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in"
"${ORC_SOURCE_SRC_DIR}/BlockBuffer.cc"
"${ORC_SOURCE_SRC_DIR}/BlockBuffer.hh"
"${ORC_SOURCE_SRC_DIR}/BloomFilter.cc"
"${ORC_SOURCE_SRC_DIR}/BloomFilter.hh"
"${ORC_SOURCE_SRC_DIR}/Bpacking.hh"
"${ORC_SOURCE_SRC_DIR}/BpackingDefault.cc"
"${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh"
"${ORC_SOURCE_SRC_DIR}/ByteRLE.cc"
"${ORC_SOURCE_SRC_DIR}/ByteRLE.hh"
"${ORC_SOURCE_SRC_DIR}/CMakeLists.txt"
"${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc"
"${ORC_SOURCE_SRC_DIR}/ColumnReader.cc"
"${ORC_SOURCE_SRC_DIR}/ColumnReader.hh"
"${ORC_SOURCE_SRC_DIR}/ColumnWriter.cc"
"${ORC_SOURCE_SRC_DIR}/ColumnWriter.hh"
"${ORC_SOURCE_SRC_DIR}/Common.cc"
"${ORC_SOURCE_SRC_DIR}/Compression.cc"
"${ORC_SOURCE_SRC_DIR}/Compression.hh"
"${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.cc"
"${ORC_SOURCE_SRC_DIR}/ConvertColumnReader.hh"
"${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.cc"
"${ORC_SOURCE_SRC_DIR}/CpuInfoUtil.hh"
"${ORC_SOURCE_SRC_DIR}/Dispatch.hh"
"${ORC_SOURCE_SRC_DIR}/Exceptions.cc"
"${ORC_SOURCE_SRC_DIR}/Int128.cc"
"${ORC_SOURCE_SRC_DIR}/LzoDecompressor.cc"
"${ORC_SOURCE_SRC_DIR}/LzoDecompressor.hh"
"${ORC_SOURCE_SRC_DIR}/MemoryPool.cc"
"${ORC_SOURCE_SRC_DIR}/Murmur3.cc"
"${ORC_SOURCE_SRC_DIR}/Murmur3.hh"
"${ORC_SOURCE_SRC_DIR}/Options.hh"
"${ORC_SOURCE_SRC_DIR}/OrcFile.cc"
"${ORC_SOURCE_SRC_DIR}/RLE.cc"
"${ORC_SOURCE_SRC_DIR}/RLE.hh"
"${ORC_SOURCE_SRC_DIR}/RLEV2Util.cc"
"${ORC_SOURCE_SRC_DIR}/RLEV2Util.hh"
"${ORC_SOURCE_SRC_DIR}/RLEv1.cc"
"${ORC_SOURCE_SRC_DIR}/RLEv1.hh"
"${ORC_SOURCE_SRC_DIR}/RLEv2.hh"
"${ORC_SOURCE_SRC_DIR}/Reader.cc"
"${ORC_SOURCE_SRC_DIR}/Reader.hh"
"${ORC_SOURCE_SRC_DIR}/RleDecoderV2.cc"
"${ORC_SOURCE_SRC_DIR}/RleEncoderV2.cc"
"${ORC_SOURCE_SRC_DIR}/SchemaEvolution.cc"
"${ORC_SOURCE_SRC_DIR}/SchemaEvolution.hh"
"${ORC_SOURCE_SRC_DIR}/Statistics.cc"
"${ORC_SOURCE_SRC_DIR}/Statistics.hh"
"${ORC_SOURCE_SRC_DIR}/StripeStream.cc"
"${ORC_SOURCE_SRC_DIR}/StripeStream.hh"
"${ORC_SOURCE_SRC_DIR}/Timezone.cc"
"${ORC_SOURCE_SRC_DIR}/Timezone.hh"
"${ORC_SOURCE_SRC_DIR}/TypeImpl.cc"
"${ORC_SOURCE_SRC_DIR}/TypeImpl.hh"
"${ORC_SOURCE_SRC_DIR}/Utils.hh"
"${ORC_SOURCE_SRC_DIR}/Vector.cc"
"${ORC_SOURCE_SRC_DIR}/Writer.cc"
"${ORC_SOURCE_SRC_DIR}/io/InputStream.cc"
"${ORC_SOURCE_SRC_DIR}/io/InputStream.hh"
"${ORC_SOURCE_SRC_DIR}/io/OutputStream.cc"
"${ORC_SOURCE_SRC_DIR}/io/OutputStream.hh"
"${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.cc"
"${ORC_SOURCE_SRC_DIR}/sargs/ExpressionTree.hh"
"${ORC_SOURCE_SRC_DIR}/sargs/Literal.cc"
"${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.cc"
"${ORC_SOURCE_SRC_DIR}/sargs/PredicateLeaf.hh"
"${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.cc"
"${ORC_SOURCE_SRC_DIR}/sargs/SargsApplier.hh"
"${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.cc"
"${ORC_SOURCE_SRC_DIR}/sargs/SearchArgument.hh"
"${ORC_SOURCE_SRC_DIR}/sargs/TruthValue.cc"
"${ORC_SOURCE_SRC_DIR}/wrap/orc-proto-wrapper.cc"
)
add_library(_orc ${ORC_SRCS})

2
contrib/orc vendored

@ -1 +1 @@
Subproject commit c5d7755ba0b9a95631c8daea4d094101f26ec761
Subproject commit 568d1d60c250af1890f226c182bc15bd8cc94cf1

View File

@ -59,6 +59,8 @@ install_packages previous_release_package_folder
# available for dump via clickhouse-local
configure
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
start
@ -85,6 +87,8 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
export ZOOKEEPER_FAULT_INJECTION=0
configure
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
start
@ -115,6 +119,13 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c
install_packages package_folder
export ZOOKEEPER_FAULT_INJECTION=1
configure
# Just in case previous version left some garbage in zk
sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
| sed "s|>1<|>0<|g" \
> /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml
start 500
clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
|| (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \

View File

@ -0,0 +1,19 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.3.5.9-lts (f5fbc2fd2b3) FIXME as compared to v23.3.4.17-lts (2c99b73ff40)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix broken index analysis when binary operator contains a null constant argument [#50177](https://github.com/ClickHouse/ClickHouse/pull/50177) ([Amos Bird](https://github.com/amosbird)).
* Cleanup moving parts [#50489](https://github.com/ClickHouse/ClickHouse/pull/50489) ([vdimir](https://github.com/vdimir)).
* Do not apply projection if read-in-order was enabled. [#50923](https://github.com/ClickHouse/ClickHouse/pull/50923) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Increase max array size in group bitmap [#50620](https://github.com/ClickHouse/ClickHouse/pull/50620) ([Kruglov Pavel](https://github.com/Avogar)).

View File

@ -1298,8 +1298,8 @@ For output it uses the following correspondence between ClickHouse types and BSO
| [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x04` array |
| [Named Tuple](/docs/en/sql-reference/data-types/tuple.md) | `\x03` document |
| [Map](/docs/en/sql-reference/data-types/map.md) | `\x03` document |
| [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `\x10` int32 |
| [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `\x05` binary, `\x00` binary subtype |
| [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `\x10` int32 |
| [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `\x05` binary, `\x00` binary subtype |
For input it uses the following correspondence between BSON types and ClickHouse types:
@ -1309,7 +1309,7 @@ For input it uses the following correspondence between BSON types and ClickHouse
| `\x02` string | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x03` document | [Map](/docs/en/sql-reference/data-types/map.md)/[Named Tuple](/docs/en/sql-reference/data-types/tuple.md) |
| `\x04` array | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) |
| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) |
| `\x05` binary, `\x00` binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md)/[IPv6](/docs/en/sql-reference/data-types/ipv6.md) |
| `\x05` binary, `\x02` old binary subtype | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x05` binary, `\x03` old uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
| `\x05` binary, `\x04` uuid subtype | [UUID](/docs/en/sql-reference/data-types/uuid.md) |
@ -1319,7 +1319,7 @@ For input it uses the following correspondence between BSON types and ClickHouse
| `\x0A` null value | [NULL](/docs/en/sql-reference/data-types/nullable.md) |
| `\x0D` JavaScript code | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x0E` symbol | [String](/docs/en/sql-reference/data-types/string.md)/[FixedString](/docs/en/sql-reference/data-types/fixedstring.md) |
| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) |
| `\x10` int32 | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal32](/docs/en/sql-reference/data-types/decimal.md)/[IPv4](/docs/en/sql-reference/data-types/ipv4.md)/[Enum8/Enum16](/docs/en/sql-reference/data-types/enum.md) |
| `\x12` int64 | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md)/[Decimal64](/docs/en/sql-reference/data-types/decimal.md)/[DateTime64](/docs/en/sql-reference/data-types/datetime64.md) |
Other BSON types are not supported. Also, it performs conversion between different integer types (for example, you can insert BSON int32 value into ClickHouse UInt8).
@ -1669,8 +1669,8 @@ The table below shows supported data types and how they match ClickHouse [data t
| `ENUM` | [Enum(8/16)](/docs/en/sql-reference/data-types/enum.md) | `ENUM` |
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `DATA` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `DATA` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` |
| `DATA` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `DATA` |
| `DATA` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `DATA` |
| `DATA` | [Decimal128/Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `DATA` |
| `STRUCT(entries LIST(STRUCT(key Key, value Value)))` | [Map](/docs/en/sql-reference/data-types/map.md) | `STRUCT(entries LIST(STRUCT(key Key, value Value)))` |
@ -1872,8 +1872,8 @@ The table below shows supported data types and how they match ClickHouse [data t
| `long (timestamp-millis)` \** | [DateTime64(3)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \** |
| `long (timestamp-micros)` \** | [DateTime64(6)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \** |
| `bytes (decimal)` \** | [DateTime64(N)](/docs/en/sql-reference/data-types/datetime.md) | `bytes (decimal)` \** |
| `int` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `int` |
| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `fixed(16)` |
| `int` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `int` |
| `fixed(16)` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `fixed(16)` |
| `bytes (decimal)` \** | [Decimal(P, S)](/docs/en/sql-reference/data-types/decimal.md) | `bytes (decimal)` \** |
| `string (uuid)` \** | [UUID](/docs/en/sql-reference/data-types/uuid.md) | `string (uuid)` \** |
| `fixed(16)` | [Int128/UInt128](/docs/en/sql-reference/data-types/int-uint.md) | `fixed(16)` |
@ -2026,9 +2026,9 @@ The table below shows supported data types and how they match ClickHouse [data t
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` |
| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` |
| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` |
| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` |
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
@ -2082,7 +2082,7 @@ Special format for reading Parquet file metadata (https://parquet.apache.org/doc
- logical_type - column logical type
- compression - compression used for this column
- total_uncompressed_size - total uncompressed bytes size of the column, calculated as the sum of total_uncompressed_size of the column from all row groups
- total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups
- total_compressed_size - total compressed bytes size of the column, calculated as the sum of total_compressed_size of the column from all row groups
- space_saved - percent of space saved by compression, calculated as (1 - total_compressed_size/total_uncompressed_size).
- encodings - the list of encodings used for this column
- row_groups - the list of row groups metadata with the next structure:
@ -2229,9 +2229,9 @@ The table below shows supported data types and how they match ClickHouse [data t
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_SIZE_BINARY` |
| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `UINT32` |
| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `FIXED_SIZE_BINARY` |
| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` |
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
@ -2297,7 +2297,7 @@ The table below shows supported data types and how they match ClickHouse [data t
| `Struct` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `Struct` |
| `Map` | [Map](/docs/en/sql-reference/data-types/map.md) | `Map` |
| `Int` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `Int` |
| `Binary` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `Binary` |
| `Binary` | [IPv6](/docs/en/sql-reference/data-types/ipv6.md) | `Binary` |
| `Binary` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `Binary` |
| `Binary` | [Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `Binary` |
@ -2510,7 +2510,7 @@ ClickHouse supports reading and writing [MessagePack](https://msgpack.org/) data
| `uint 64` | [DateTime64](/docs/en/sql-reference/data-types/datetime.md) | `uint 64` |
| `fixarray`, `array 16`, `array 32` | [Array](/docs/en/sql-reference/data-types/array.md)/[Tuple](/docs/en/sql-reference/data-types/tuple.md) | `fixarray`, `array 16`, `array 32` |
| `fixmap`, `map 16`, `map 32` | [Map](/docs/en/sql-reference/data-types/map.md) | `fixmap`, `map 16`, `map 32` |
| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `uint 32` |
| `uint 32` | [IPv4](/docs/en/sql-reference/data-types/ipv4.md) | `uint 32` |
| `bin 8` | [String](/docs/en/sql-reference/data-types/string.md) | `bin 8` |
| `int 8` | [Enum8](/docs/en/sql-reference/data-types/enum.md) | `int 8` |
| `bin 8` | [(U)Int128/(U)Int256](/docs/en/sql-reference/data-types/int-uint.md) | `bin 8` |

View File

@ -6,32 +6,43 @@ sidebar_label: Configuration Files
# Configuration Files
ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note, that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have main configs as `config.xml` and `users.xml` and write additional files in `config.d` and `users.d` directories in `.yaml`.
The ClickHouse server can be configured with configuration files in XML or YAML syntax. In most installation types, the ClickHouse server runs with `/etc/clickhouse-server/config.xml` as default configuration file but it is also possible to specify the location of the configuration file manually at server startup using command line option `--config-file=` or `-C`. Additional configuration files may be placed into directory `config.d/` relative to the main configuration file, for example into directory `/etc/clickhouse-server/config.d/`. Files in this directory and the main configuration are merged in a preprocessing step before the configuration is applied in ClickHouse server. Configuration files are merged in alphabetical order. To simplify updates and improve modularization, it is best practice to keep the default `config.xml` file unmodified and place additional customization into `config.d/`.
All XML files should have the same root element, usually `<clickhouse>`. As for YAML, `clickhouse:` should not be present, the parser will insert it automatically.
It is possible to mix XML and YAML configuration files, for example you could have a main configuration file `config.xml` and additional configuration files `config.d/network.xml`, `config.d/timezone.yaml` and `config.d/keeper.yaml`. Mixing XML and YAML within a single configuration file is not supported. XML configuration files should use `<clickhouse>...</clickhouse>` as top-level tag. In YAML configuration files, `clickhouse:` is optional, the parser inserts it implicitly if absent.
## Override {#override}
## Overriding Configuration {#override}
Some settings specified in the main configuration file can be overridden in other configuration files:
The merge of configuration files behaves as one intuitively expects: The contents of both files are combined recursively, children with the same name are replaced by the element of the more specific configuration file. The merge can be customized using attributes `replace` and `remove`.
- Attribute `replace` means that the element is replaced by the specified one.
- Attribute `remove` means that the element is deleted.
- The `replace` or `remove` attributes can be specified for the elements of these configuration files.
- If neither is specified, it combines the contents of elements recursively, replacing values of duplicate children.
- If `replace` is specified, it replaces the entire element with the specified one.
- If `remove` is specified, it deletes the element.
To specify that a value of an element should be replaced by the value of an environment variable, you can use attribute `from_env`.
You can also declare attributes as coming from environment variables by using `from_env="VARIABLE_NAME"`:
Example with `$MAX_QUERY_SIZE = 150000`:
```xml
<clickhouse>
<macros>
<replica from_env="REPLICA" />
<layer from_env="LAYER" />
<shard from_env="SHARD" />
</macros>
<profiles>
<default>
<max_query_size from_env="MAX_QUERY_SIZE"/>
</default>
</profiles>
</clickhouse>
```
## Substitution {#substitution}
which is equal to
``` xml
<clickhouse>
<profiles>
<default>
<max_query_size>150000</max_query_size>
</default>
</profiles>
</clickhouse>
```
## Substituting Configuration {#substitution}
The config can also define “substitutions”. If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-include_from) element in the server config. The substitution values are specified in `/clickhouse/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](../operations/server-configuration-parameters/settings.md#macros)).

View File

@ -50,7 +50,7 @@ To manage named collections with DDL a user must have the `named_control_collect
```
:::tip
In the above example the `passowrd_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` as in the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
In the above example the `password_sha256_hex` value is the hexadecimal representation of the SHA256 hash of the password. This configuration for the user `default` has the attribute `replace=true` as in the default configuration has a plain text `password` set, and it is not possible to have both plain text and sha256 hex passwords set for a user.
:::
## Storing named collections in configuration files

View File

@ -1,9 +0,0 @@
---
slug: /en/operations/optimizing-performance/
sidebar_label: Optimizing Performance
sidebar_position: 52
---
# Optimizing Performance
- [Sampling query profiler](../../operations/optimizing-performance/sampling-query-profiler.md)

View File

@ -1,16 +0,0 @@
---
slug: /en/operations/server-configuration-parameters/
sidebar_position: 54
sidebar_label: Server Configuration Parameters
pagination_next: en/operations/server-configuration-parameters/settings
---
# Server Configuration Parameters
This section contains descriptions of server settings that cannot be changed at the session or query level.
These settings are stored in the `config.xml` file on the ClickHouse server.
Other settings are described in the “[Settings](../../operations/settings/index.md#session-settings-intro)” section.
Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).

View File

@ -7,6 +7,14 @@ description: This section contains descriptions of server settings that cannot b
# Server Settings
This section contains descriptions of server settings that cannot be changed at the session or query level.
These settings are stored in the `config.xml` file on the ClickHouse server.
Other settings are described in the “[Settings](../../operations/settings/index.md#session-settings-intro)” section.
Before studying the settings, read the [Configuration files](../../operations/configuration-files.md#configuration_files) section and note the use of substitutions (the `incl` and `optional` attributes).
## allow_use_jemalloc_memory
Allows to use jemalloc memory.

View File

@ -2941,7 +2941,7 @@ Default value: `0`.
## mutations_sync {#mutations_sync}
Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
Allows to execute `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
Possible values:

View File

@ -71,11 +71,11 @@ Columns:
- 0 — Query was initiated by another query as part of distributed query execution.
- `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the query.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to make the parent query.
- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Initial query starting time (for distributed query execution).
- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Initial query starting time with microseconds precision (for distributed query execution).

View File

@ -40,11 +40,11 @@ Columns:
- 0 — Query was initiated by another query for distributed query execution.
- `user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who initiated the current query.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the query.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that was used to make the query.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that was used to make the query.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the query.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — Name of the user who ran the initial query (for distributed query execution).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID of the initial query (for distributed query execution).
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address that the parent query was launched from.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address that the parent query was launched from.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — The client port that was used to make the parent query.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Interface that the query was initiated from. Possible values:
- 1 — TCP.

View File

@ -28,7 +28,7 @@ Columns:
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of profiles set for all roles and/or users.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — The list of roles to which the profile is applied.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — Settings that were changed when the client logged in/out.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — The IP address that was used to log in/out.
- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — The IP address that was used to log in/out.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The client port that was used to log in/out.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — The interface from which the login was initiated. Possible values:
- `TCP`

View File

@ -11,7 +11,8 @@ Columns:
- `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
- `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
- `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established
- `connected_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the connection was established
- `session_uptime_elapsed_seconds` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Seconds elapsed since the connection was established
- `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
- `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
- `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.
@ -23,7 +24,7 @@ SELECT * FROM system.zookeeper_connection;
```
``` text
┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
│ default_zookeeper │ 127.0.0.1 │ 2181 │ 0 │ 2023-05-19 14:30:16 │ 0 │ 0 │ 216349144108826660
└─────────────────────────────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────────────┘
┌─name────┬─host──────┬─port─┬─index─┬──────connected_time─┬─session_uptime_elapsed_seconds─┬─is_expired─┬─keeper_api_version─┬─client_id─┐
│ default │ 127.0.0.1 │ 9181 │ 0 │ 2023-06-15 14:36:01 │ 3058 │ 0 │ 3 │ 5
└─────────┴───────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────┴────────────────────┴───────────┘
```

View File

@ -15,7 +15,7 @@ Columns with request parameters:
- `Finalize` — The connection is lost, no response was received.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the event happened.
- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — The date and time when the event happened.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP address of ZooKeeper server that was used to make the request.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP address of ZooKeeper server that was used to make the request.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — The port of ZooKeeper server that was used to make the request.
- `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — The session ID that the ZooKeeper server sets for each connection.
- `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — The ID of the request within the session. This is usually a sequential request number. It is the same for the request row and the paired `response`/`finalize` row.

View File

@ -28,6 +28,6 @@ ClickHouse data types include:
- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type)
- **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses
- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)

View File

@ -1,12 +1,12 @@
---
slug: /en/sql-reference/data-types/domains/ipv4
slug: /en/sql-reference/data-types/ipv4
sidebar_position: 59
sidebar_label: IPv4
---
## IPv4
`IPv4` is a domain based on `UInt32` type and serves as a typed replacement for storing IPv4 values. It provides compact storage with the human-friendly input-output format and column type information on inspection.
IPv4 addresses. Stored in 4 bytes as UInt32.
### Basic Usage
@ -57,25 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
└──────────────────┴───────────┘
```
Domain values are not implicitly convertible to types other than `UInt32`.
If you want to convert `IPv4` value to a string, you have to do that explicitly with `IPv4NumToString()` function:
**See Also**
``` sql
SELECT toTypeName(s), IPv4NumToString(from) as s FROM hits LIMIT 1;
```
┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐
│ String │ 183.247.232.58 │
└───────────────────────────────────┴────────────────┘
Or cast to a `UInt32` value:
``` sql
SELECT toTypeName(i), CAST(from as UInt32) as i FROM hits LIMIT 1;
```
``` text
┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐
│ UInt32 │ 3086477370 │
└──────────────────────────────────┴────────────┘
```
- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md)

View File

@ -1,12 +1,12 @@
---
slug: /en/sql-reference/data-types/domains/ipv6
slug: /en/sql-reference/data-types/ipv6
sidebar_position: 60
sidebar_label: IPv6
---
## IPv6
`IPv6` is a domain based on `FixedString(16)` type and serves as a typed replacement for storing IPv6 values. It provides compact storage with the human-friendly input-output format and column type information on inspection.
IPv6 addresses. Stored in 16 bytes as UInt128 big-endian.
### Basic Usage
@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
└──────────────────┴──────────────────────────────────┘
```
Domain values are not implicitly convertible to types other than `FixedString(16)`.
If you want to convert `IPv6` value to a string, you have to do that explicitly with `IPv6NumToString()` function:
**See Also**
``` sql
SELECT toTypeName(s), IPv6NumToString(from) as s FROM hits LIMIT 1;
```
``` text
┌─toTypeName(IPv6NumToString(from))─┬─s─────────────────────────────┐
│ String │ 2001:44c8:129:2632:33:0:252:2 │
└───────────────────────────────────┴───────────────────────────────┘
```
Or cast to a `FixedString(16)` value:
``` sql
SELECT toTypeName(i), CAST(from as FixedString(16)) as i FROM hits LIMIT 1;
```
``` text
┌─toTypeName(CAST(from, 'FixedString(16)'))─┬─i───────┐
│ FixedString(16) │ ��� │
└───────────────────────────────────────────┴─────────┘
```
- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md)

View File

@ -248,7 +248,7 @@ SELECT IPv6CIDRToRange(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32);
## toIPv4(string)
An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/domains/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`.
An alias to `IPv4StringToNum()` that takes a string form of IPv4 address and returns value of [IPv4](../../sql-reference/data-types/ipv4.md) type, which is binary equal to value returned by `IPv4StringToNum()`.
``` sql
WITH
@ -296,7 +296,7 @@ Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns null
## toIPv6
Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/domains/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value.
Converts a string form of IPv6 address to [IPv6](../../sql-reference/data-types/ipv6.md) type. If the IPv6 address has an invalid format, returns an empty value.
Similar to [IPv6StringToNum](#ipv6stringtonums) function, which converts IPv6 address to binary format.
If the input string contains a valid IPv4 address, then the IPv6 equivalent of the IPv4 address is returned.
@ -315,7 +315,7 @@ toIPv6(string)
- IP address.
Type: [IPv6](../../sql-reference/data-types/domains/ipv6.md).
Type: [IPv6](../../sql-reference/data-types/ipv6.md).
**Examples**

View File

@ -232,6 +232,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;
Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly during `SELECT` execution turns out to be expensive.
Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
Syntax:

View File

@ -60,7 +60,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a
For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time, specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
:::
For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
## Related content

View File

@ -142,19 +142,19 @@ The following operations with [projections](/docs/en/engines/table-engines/merge
## ADD PROJECTION
`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
`ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to the table's metadata.
## DROP PROJECTION
`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
`ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - Removes projection description from the table's metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
## MATERIALIZE PROJECTION
`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
`ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
## CLEAR PROJECTION
`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
`ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
The commands `ADD`, `DROP` and `CLEAR` are lightweight in a sense that they only change metadata or remove files.

View File

@ -10,15 +10,25 @@ sidebar_label: INDEX
The following operations are available:
- `ALTER TABLE [db].table_name [ON CLUSTER cluster] ADD INDEX name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
## ADD INDEX
- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
`ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to the table's metadata.
- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
## DROP INDEX
The first two commands are lightweight in a sense that they only change metadata or remove files.
`ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - Removes index description from the table's metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
Also, they are replicated, syncing indices metadata via ZooKeeper.
## MATERIALIZE INDEX
`ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
## CLEAR INDEX
`ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Deletes the secondary index files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
The commands `ADD`, `DROP`, and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
Also, they are replicated, syncing indices metadata via ClickHouse Keeper or ZooKeeper.
:::note
Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants).

View File

@ -82,6 +82,35 @@ LIFETIME(MIN 0 MAX 1000)
LAYOUT(FLAT())
```
:::note
When using the SQL console in [ClickHouse Cloud](https://clickhouse.com), you must specify a user (`default` or any other user with the role `default_role`) and password when creating a dictionary.
:::
```sql
CREATE USER IF NOT EXISTS clickhouse_admin
IDENTIFIED WITH sha256_password BY 'passworD43$x';
GRANT default_role TO clickhouse_admin;
CREATE DATABASE foo_db;
CREATE TABLE foo_db.source_table (
id UInt64,
value String
) ENGINE = MergeTree
PRIMARY KEY id;
CREATE DICTIONARY foo_db.id_value_dictionary
(
id UInt64,
value String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'source_table' USER 'clickhouse_admin' PASSWORD 'passworD43$x' DB 'foo_db' ))
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000);
```
### Create a dictionary from a table in a remote ClickHouse service
Input table (in the remote ClickHouse service) `source_table`:

View File

@ -380,11 +380,15 @@ High compression levels are useful for asymmetric scenarios, like compress once,
`DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply:
- DEFLATE_QPL is experimental and can only be used after setting configuration parameter `allow_experimental_codecs=1`.
- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`.
- DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
- DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.
:::note
DEFLATE_QPL is not available in ClickHouse Cloud.
:::
### Specialized Codecs
These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themself. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.

View File

@ -10,7 +10,7 @@ sidebar_label: SET
SET param = value
```
Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/index.md) this way.
Assigns `value` to the `param` [setting](../../operations/settings/index.md) for the current session. You cannot change [server settings](../../operations/server-configuration-parameters/settings.md) this way.
You can also set all the values from the specified settings profile in a single query.

View File

@ -114,9 +114,8 @@ This example uses one table from a sample dataset. The database is `imdb`, and
`first_name` String,
`last_name` String,
`gender` FixedString(1))
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
ORDER BY (id, first_name, last_name, gender)
SETTINGS index_granularity = 8192
ENGINE = MergeTree
ORDER BY (id, first_name, last_name, gender);
```
#### On the destination ClickHouse system:
@ -132,9 +131,8 @@ This example uses one table from a sample dataset. The database is `imdb`, and
`first_name` String,
`last_name` String,
`gender` FixedString(1))
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
ORDER BY (id, first_name, last_name, gender)
SETTINGS index_granularity = 8192
ENGINE = MergeTree
ORDER BY (id, first_name, last_name, gender);
```
#### Back on the source deployment:
@ -142,7 +140,7 @@ This example uses one table from a sample dataset. The database is `imdb`, and
Insert into the new database and table created on the remote system. You will need the host, port, username, password, destination database, and destination table.
```sql
INSERT INTO FUNCTION
remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD', rand())
remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD')
SELECT * from imdb.actors
```

View File

@ -1,453 +1,6 @@
agg_functions/combinators.md query-language/agg-functions/combinators.md
agg_functions/index.md query-language/agg-functions/index.md
agg_functions/parametric_functions.md query-language/agg-functions/parametric-functions.md
agg_functions/reference.md query-language/agg-functions/reference.md
changelog/2017.md whats-new/changelog/2017.md
changelog/2018.md whats-new/changelog/2018.md
changelog/2019.md whats-new/changelog/2019.md
changelog/index.md whats-new/changelog/index.md
commercial/cloud.md https://clickhouse.com/cloud/
data_types/array.md sql-reference/data-types/array.md
data_types/boolean.md sql-reference/data-types/boolean.md
data_types/date.md sql-reference/data-types/date.md
data_types/datetime.md sql-reference/data-types/datetime.md
data_types/datetime64.md sql-reference/data-types/datetime64.md
data_types/decimal.md sql-reference/data-types/decimal.md
data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md
data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md
data_types/domains/overview.md sql-reference/data-types/domains/overview.md
data_types/enum.md sql-reference/data-types/enum.md
data_types/fixedstring.md sql-reference/data-types/fixedstring.md
data_types/float.md sql-reference/data-types/float.md
data_types/index.md sql-reference/data-types/index.md
data_types/int_uint.md sql-reference/data-types/int-uint.md
data_types/nested_data_structures/aggregatefunction.md sql-reference/data-types/aggregatefunction.md
data_types/nested_data_structures/index.md sql-reference/data-types/nested-data-structures/index.md
data_types/nested_data_structures/nested.md sql-reference/data-types/nested-data-structures/nested.md
data_types/nullable.md sql-reference/data-types/nullable.md
data_types/special_data_types/expression.md sql-reference/data-types/special-data-types/expression.md
data_types/special_data_types/index.md sql-reference/data-types/special-data-types/index.md
data_types/special_data_types/interval.md sql-reference/data-types/special-data-types/interval.md
data_types/special_data_types/nothing.md sql-reference/data-types/special-data-types/nothing.md
data_types/special_data_types/set.md sql-reference/data-types/special-data-types/set.md
data_types/string.md sql-reference/data-types/string.md
data_types/tuple.md sql-reference/data-types/tuple.md
data_types/uuid.md sql-reference/data-types/uuid.md
database_engines/index.md engines/database-engines/index.md
database_engines/lazy.md engines/database-engines/lazy.md
database_engines/mysql.md engines/database-engines/mysql.md
development/browse_code.md development/browse-code.md
development/build_cross_arm.md development/build-cross-arm.md
development/build_cross_osx.md development/build-cross-osx.md
development/build_osx.md development/build-osx.md
development/developer_instruction.md development/developer-instruction.md
dicts/external_dicts.md query-language/dicts/external-dicts.md
dicts/external_dicts_dict.md query-language/dicts/external-dicts-dict.md
dicts/external_dicts_dict_layout.md query-language/dicts/external-dicts-dict-layout.md
dicts/external_dicts_dict_lifetime.md query-language/dicts/external-dicts-dict-lifetime.md
dicts/external_dicts_dict_sources.md query-language/dicts/external-dicts-dict-sources.md
dicts/external_dicts_dict_structure.md query-language/dicts/external-dicts-dict-structure.md
dicts/index.md query-language/dicts/index.md
dicts/internal_dicts.md query-language/dicts/internal-dicts.md
engines/database_engines/index.md engines/database-engines/index.md
engines/database_engines/lazy.md engines/database-engines/lazy.md
engines/database_engines/mysql.md engines/database-engines/mysql.md
engines/table-engines/log-family/log-family.md engines/table-engines/log-family/index.md
engines/table_engines/index.md engines/table-engines/index.md
engines/table_engines/integrations/hdfs.md engines/table-engines/integrations/hdfs.md
engines/table_engines/integrations/index.md engines/table-engines/integrations/index.md
engines/table_engines/integrations/jdbc.md engines/table-engines/integrations/jdbc.md
engines/table_engines/integrations/kafka.md engines/table-engines/integrations/kafka.md
engines/table_engines/integrations/mysql.md engines/table-engines/integrations/mysql.md
engines/table_engines/integrations/odbc.md engines/table-engines/integrations/odbc.md
engines/table_engines/log_family/index.md engines/table-engines/log-family/index.md
engines/table_engines/log_family/log.md engines/table-engines/log-family/log.md
engines/table_engines/log_family/log_family.md engines/table-engines/log-family/log-family.md
engines/table_engines/log_family/stripelog.md engines/table-engines/log-family/stripelog.md
engines/table_engines/log_family/tinylog.md engines/table-engines/log-family/tinylog.md
engines/table_engines/mergetree_family/aggregatingmergetree.md engines/table-engines/mergetree-family/aggregatingmergetree.md
engines/table_engines/mergetree_family/collapsingmergetree.md engines/table-engines/mergetree-family/collapsingmergetree.md
engines/table_engines/mergetree_family/custom_partitioning_key.md engines/table-engines/mergetree-family/custom-partitioning-key.md
engines/table_engines/mergetree_family/graphitemergetree.md engines/table-engines/mergetree-family/graphitemergetree.md
engines/table_engines/mergetree_family/index.md engines/table-engines/mergetree-family/index.md
engines/table_engines/mergetree_family/mergetree.md engines/table-engines/mergetree-family/mergetree.md
engines/table_engines/mergetree_family/replacingmergetree.md engines/table-engines/mergetree-family/replacingmergetree.md
engines/table_engines/mergetree_family/replication.md engines/table-engines/mergetree-family/replication.md
engines/table_engines/mergetree_family/summingmergetree.md engines/table-engines/mergetree-family/summingmergetree.md
engines/table_engines/mergetree_family/versionedcollapsingmergetree.md engines/table-engines/mergetree-family/versionedcollapsingmergetree.md
engines/table_engines/special/buffer.md engines/table-engines/special/buffer.md
engines/table_engines/special/dictionary.md engines/table-engines/special/dictionary.md
engines/table_engines/special/distributed.md engines/table-engines/special/distributed.md
engines/table_engines/special/external_data.md engines/table-engines/special/external-data.md
engines/table_engines/special/file.md engines/table-engines/special/file.md
engines/table_engines/special/generate.md engines/table-engines/special/generate.md
engines/table_engines/special/index.md engines/table-engines/special/index.md
engines/table_engines/special/join.md engines/table-engines/special/join.md
engines/table_engines/special/materializedview.md engines/table-engines/special/materializedview.md
engines/table_engines/special/memory.md engines/table-engines/special/memory.md
engines/table_engines/special/merge.md engines/table-engines/special/merge.md
engines/table_engines/special/null.md engines/table-engines/special/null.md
engines/table_engines/special/set.md engines/table-engines/special/set.md
engines/table_engines/special/url.md engines/table-engines/special/url.md
engines/table_engines/special/view.md engines/table-engines/special/view.md
extended_roadmap.md whats-new/extended-roadmap.md
formats.md interfaces/formats.md
formats/capnproto.md interfaces/formats.md
formats/csv.md interfaces/formats.md
formats/csvwithnames.md interfaces/formats.md
formats/json.md interfaces/formats.md
formats/jsoncompact.md interfaces/formats.md
formats/jsoneachrow.md interfaces/formats.md
formats/native.md interfaces/formats.md
formats/null.md interfaces/formats.md
formats/pretty.md interfaces/formats.md
formats/prettycompact.md interfaces/formats.md
formats/prettycompactmonoblock.md interfaces/formats.md
formats/prettynoescapes.md interfaces/formats.md
formats/prettyspace.md interfaces/formats.md
formats/rowbinary.md interfaces/formats.md
formats/tabseparated.md interfaces/formats.md
formats/tabseparatedraw.md interfaces/formats.md
formats/tabseparatedwithnames.md interfaces/formats.md
formats/tabseparatedwithnamesandtypes.md interfaces/formats.md
formats/tskv.md interfaces/formats.md
formats/values.md interfaces/formats.md
formats/vertical.md interfaces/formats.md
formats/verticalraw.md interfaces/formats.md
formats/xml.md interfaces/formats.md
functions/arithmetic_functions.md query-language/functions/arithmetic-functions.md
functions/array_functions.md query-language/functions/array-functions.md
functions/array_join.md query-language/functions/array-join.md
functions/bit_functions.md query-language/functions/bit-functions.md
functions/bitmap_functions.md query-language/functions/bitmap-functions.md
functions/comparison_functions.md query-language/functions/comparison-functions.md
functions/conditional_functions.md query-language/functions/conditional-functions.md
functions/date_time_functions.md query-language/functions/date-time-functions.md
functions/encoding_functions.md query-language/functions/encoding-functions.md
functions/ext_dict_functions.md query-language/functions/ext-dict-functions.md
functions/hash_functions.md query-language/functions/hash-functions.md
functions/higher_order_functions.md query-language/functions/higher-order-functions.md
functions/in_functions.md query-language/functions/in-functions.md
functions/index.md query-language/functions/index.md
functions/ip_address_functions.md query-language/functions/ip-address-functions.md
functions/json_functions.md query-language/functions/json-functions.md
functions/logical_functions.md query-language/functions/logical-functions.md
functions/math_functions.md query-language/functions/math-functions.md
functions/other_functions.md query-language/functions/other-functions.md
functions/random_functions.md query-language/functions/random-functions.md
functions/rounding_functions.md query-language/functions/rounding-functions.md
functions/splitting_merging_functions.md query-language/functions/splitting-merging-functions.md
functions/string_functions.md query-language/functions/string-functions.md
functions/string_replace_functions.md query-language/functions/string-replace-functions.md
functions/string_search_functions.md query-language/functions/string-search-functions.md
functions/type_conversion_functions.md query-language/functions/type-conversion-functions.md
functions/url_functions.md query-language/functions/url-functions.md
functions/ym_dict_functions.md query-language/functions/ym-dict-functions.md
getting_started/example_datasets/amplab_benchmark.md getting-started/example-datasets/amplab-benchmark.md
getting_started/example_datasets/criteo.md getting-started/example-datasets/criteo.md
getting_started/example_datasets/index.md getting-started/example-datasets/index.md
getting_started/example_datasets/metrica.md getting-started/example-datasets/metrica.md
getting_started/example_datasets/nyc_taxi.md getting-started/example-datasets/nyc-taxi.md
getting_started/example_datasets/ontime.md getting-started/example-datasets/ontime.md
getting_started/example_datasets/star_schema.md getting-started/example-datasets/star-schema.md
getting_started/example_datasets/wikistat.md getting-started/example-datasets/wikistat.md
getting_started/index.md getting-started/index.md
getting_started/install.md getting-started/install.md
getting_started/playground.md getting-started/playground.md
getting_started/tutorial.md getting-started/tutorial.md
images/column_oriented.gif images/column-oriented.gif
images/row_oriented.gif images/row-oriented.gif
interfaces/http_interface.md interfaces/http.md
interfaces/third-party/client_libraries.md interfaces/third-party/client-libraries.md
interfaces/third-party_client_libraries.md interfaces/third-party/client-libraries.md
interfaces/third-party_gui.md interfaces/third-party/gui.md
interfaces/third_party/index.md interfaces/third-party/index.md
introduction/index.md
introduction/distinctive_features.md introduction/distinctive-features.md
introduction/features_considered_disadvantages.md introduction/distinctive-features.md
introduction/possible_silly_questions.md faq/general.md
introduction/ya_metrika_task.md introduction/history.md
operations/access_rights.md operations/access-rights.md
operations/configuration_files.md operations/configuration-files.md
operations/optimizing_performance/index.md operations/optimizing-performance/index.md
operations/optimizing_performance/sampling_query_profiler.md operations/optimizing-performance/sampling-query-profiler.md
operations/performance/sampling_query_profiler.md operations/optimizing-performance/sampling-query-profiler.md
operations/performance_test.md operations/performance-test.md
operations/server_configuration_parameters/index.md operations/server-configuration-parameters/index.md
operations/server_configuration_parameters/settings.md operations/server-configuration-parameters/settings.md
operations/server_settings/index.md operations/server-configuration-parameters/index.md
operations/server_settings/settings.md operations/server-configuration-parameters/settings.md
operations/settings/constraints_on_settings.md operations/settings/constraints-on-settings.md
operations/settings/permissions_for_queries.md operations/settings/permissions-for-queries.md
operations/settings/query_complexity.md operations/settings/query-complexity.md
operations/settings/settings_profiles.md operations/settings/settings-profiles.md
operations/settings/settings_users.md operations/settings/settings-users.md
operations/system_tables.md operations/system-tables.md
operations/table_engines/aggregatingmergetree.md engines/table-engines/mergetree-family/aggregatingmergetree.md
operations/table_engines/buffer.md engines/table-engines/special/buffer.md
operations/table_engines/collapsingmergetree.md engines/table-engines/mergetree-family/collapsingmergetree.md
operations/table_engines/custom_partitioning_key.md engines/table-engines/mergetree-family/custom-partitioning-key.md
operations/table_engines/dictionary.md engines/table-engines/special/dictionary.md
operations/table_engines/distributed.md engines/table-engines/special/distributed.md
operations/table_engines/external_data.md engines/table-engines/special/external-data.md
operations/table_engines/file.md engines/table-engines/special/file.md
operations/table_engines/generate.md engines/table-engines/special/generate.md
operations/table_engines/graphitemergetree.md engines/table-engines/mergetree-family/graphitemergetree.md
operations/table_engines/hdfs.md engines/table-engines/integrations/hdfs.md
operations/table_engines/index.md engines/table-engines/index.md
operations/table_engines/jdbc.md engines/table-engines/integrations/jdbc.md
operations/table_engines/join.md engines/table-engines/special/join.md
operations/table_engines/kafka.md engines/table-engines/integrations/kafka.md
operations/table_engines/log.md engines/table-engines/log-family/log.md
operations/table_engines/log_family.md engines/table-engines/log-family/log-family.md
operations/table_engines/materializedview.md engines/table-engines/special/materializedview.md
operations/table_engines/memory.md engines/table-engines/special/memory.md
operations/table_engines/merge.md engines/table-engines/special/merge.md
operations/table_engines/mergetree.md engines/table-engines/mergetree-family/mergetree.md
operations/table_engines/mysql.md engines/table-engines/integrations/mysql.md
operations/table_engines/null.md engines/table-engines/special/null.md
operations/table_engines/odbc.md engines/table-engines/integrations/odbc.md
operations/table_engines/replacingmergetree.md engines/table-engines/mergetree-family/replacingmergetree.md
operations/table_engines/replication.md engines/table-engines/mergetree-family/replication.md
operations/table_engines/set.md engines/table-engines/special/set.md
operations/table_engines/stripelog.md engines/table-engines/log-family/stripelog.md
operations/table_engines/summingmergetree.md engines/table-engines/mergetree-family/summingmergetree.md
operations/table_engines/tinylog.md engines/table-engines/log-family/tinylog.md
operations/table_engines/url.md engines/table-engines/special/url.md
operations/table_engines/versionedcollapsingmergetree.md engines/table-engines/mergetree-family/versionedcollapsingmergetree.md
operations/table_engines/view.md engines/table-engines/special/view.md
operations/utils/clickhouse-benchmark.md operations/utilities/clickhouse-benchmark.md
operations/utils/clickhouse-copier.md operations/utilities/clickhouse-copier.md
operations/utils/clickhouse-local.md operations/utilities/clickhouse-local.md
operations/utils/index.md operations/utilities/index.md
query_language/agg_functions/combinators.md sql-reference/aggregate-functions/combinators.md
query_language/agg_functions/index.md sql-reference/aggregate-functions/index.md
query_language/agg_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md
query_language/agg_functions/reference.md sql-reference/aggregate-functions/reference.md
query_language/alter.md sql-reference/statements/alter.md
query_language/create.md sql-reference/statements/create.md
query_language/dicts/external_dicts.md sql-reference/dictionaries/external-dictionaries/external-dicts.md
query_language/dicts/external_dicts_dict.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md
query_language/dicts/external_dicts_dict_hierarchical.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md
query_language/dicts/external_dicts_dict_layout.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
query_language/dicts/external_dicts_dict_lifetime.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
query_language/dicts/external_dicts_dict_sources.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
query_language/dicts/external_dicts_dict_structure.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
query_language/dicts/index.md sql-reference/dictionaries/index.md
query_language/dicts/internal_dicts.md sql-reference/dictionaries/internal-dicts.md
query_language/functions/arithmetic_functions.md sql-reference/functions/arithmetic-functions.md
query_language/functions/array_functions.md sql-reference/functions/array-functions.md
query_language/functions/array_join.md sql-reference/functions/array-join.md
query_language/functions/bit_functions.md sql-reference/functions/bit-functions.md
query_language/functions/bitmap_functions.md sql-reference/functions/bitmap-functions.md
query_language/functions/comparison_functions.md sql-reference/functions/comparison-functions.md
query_language/functions/conditional_functions.md sql-reference/functions/conditional-functions.md
query_language/functions/date_time_functions.md sql-reference/functions/date-time-functions.md
query_language/functions/encoding_functions.md sql-reference/functions/encoding-functions.md
query_language/functions/ext_dict_functions.md sql-reference/functions/ext-dict-functions.md
query_language/functions/functions_for_nulls.md sql-reference/functions/functions-for-nulls.md
query_language/functions/geo.md sql-reference/functions/geo.md
query_language/functions/hash_functions.md sql-reference/functions/hash-functions.md
query_language/functions/higher_order_functions.md sql-reference/functions/higher-order-functions.md
query_language/functions/in_functions.md sql-reference/functions/in-functions.md
query_language/functions/index.md sql-reference/functions/index.md
query_language/functions/introspection.md sql-reference/functions/introspection.md
query_language/functions/ip_address_functions.md sql-reference/functions/ip-address-functions.md
query_language/functions/json_functions.md sql-reference/functions/json-functions.md
query_language/functions/logical_functions.md sql-reference/functions/logical-functions.md
query_language/functions/machine_learning_functions.md sql-reference/functions/machine-learning-functions.md
query_language/functions/math_functions.md sql-reference/functions/math-functions.md
query_language/functions/other_functions.md sql-reference/functions/other-functions.md
query_language/functions/random_functions.md sql-reference/functions/random-functions.md
query_language/functions/rounding_functions.md sql-reference/functions/rounding-functions.md
query_language/functions/splitting_merging_functions.md sql-reference/functions/splitting-merging-functions.md
query_language/functions/string_functions.md sql-reference/functions/string-functions.md
query_language/functions/string_replace_functions.md sql-reference/functions/string-replace-functions.md
query_language/functions/string_search_functions.md sql-reference/functions/string-search-functions.md
query_language/functions/type_conversion_functions.md sql-reference/functions/type-conversion-functions.md
query_language/functions/url_functions.md sql-reference/functions/url-functions.md
query_language/functions/uuid_functions.md sql-reference/functions/uuid-functions.md
query_language/functions/ym_dict_functions.md sql-reference/functions/ym-dict-functions.md
query_language/index.md sql-reference/index.md
query_language/insert_into.md sql-reference/statements/insert-into.md
query_language/misc.md sql-reference/statements/misc.md
query_language/operators.md sql-reference/operators.md
query_language/queries.md query-language.md
query_language/select.md sql-reference/statements/select.md
query_language/show.md sql-reference/statements/show.md
query_language/syntax.md sql-reference/syntax.md
query_language/system.md sql-reference/statements/system.md
query_language/table_functions/file.md sql-reference/table-functions/file.md
query_language/table_functions/generate.md sql-reference/table-functions/generate.md
query_language/table_functions/hdfs.md sql-reference/table-functions/hdfs.md
query_language/table_functions/index.md sql-reference/table-functions/index.md
query_language/table_functions/input.md sql-reference/table-functions/input.md
query_language/table_functions/jdbc.md sql-reference/table-functions/jdbc.md
query_language/table_functions/merge.md sql-reference/table-functions/merge.md
query_language/table_functions/mysql.md sql-reference/table-functions/mysql.md
query_language/table_functions/numbers.md sql-reference/table-functions/numbers.md
query_language/table_functions/odbc.md sql-reference/table-functions/odbc.md
query_language/table_functions/remote.md sql-reference/table-functions/remote.md
query_language/table_functions/url.md sql-reference/table-functions/url.md
roadmap.md whats-new/roadmap.md
security_changelog.md whats-new/security-changelog.md
sql-reference/data-types/domains/overview.md sql-reference/data-types/domains/index.md
sql_reference/aggregate_functions/combinators.md sql-reference/aggregate-functions/combinators.md
sql_reference/aggregate_functions/index.md sql-reference/aggregate-functions/index.md
sql_reference/aggregate_functions/parametric_functions.md sql-reference/aggregate-functions/parametric-functions.md
sql_reference/aggregate_functions/reference.md sql-reference/aggregate-functions/reference.md
sql_reference/ansi.md sql-reference/ansi.md
sql_reference/data_types/aggregatefunction.md sql-reference/data-types/aggregatefunction.md
sql_reference/data_types/array.md sql-reference/data-types/array.md
sql_reference/data_types/boolean.md sql-reference/data-types/boolean.md
sql_reference/data_types/date.md sql-reference/data-types/date.md
sql_reference/data_types/datetime.md sql-reference/data-types/datetime.md
sql_reference/data_types/datetime64.md sql-reference/data-types/datetime64.md
sql_reference/data_types/decimal.md sql-reference/data-types/decimal.md
sql_reference/data_types/domains/index.md sql-reference/data-types/domains/index.md
sql_reference/data_types/domains/ipv4.md sql-reference/data-types/domains/ipv4.md
sql_reference/data_types/domains/ipv6.md sql-reference/data-types/domains/ipv6.md
sql_reference/data_types/domains/overview.md sql-reference/data-types/domains/overview.md
sql_reference/data_types/enum.md sql-reference/data-types/enum.md
sql_reference/data_types/fixedstring.md sql-reference/data-types/fixedstring.md
sql_reference/data_types/float.md sql-reference/data-types/float.md
sql_reference/data_types/index.md sql-reference/data-types/index.md
sql_reference/data_types/int_uint.md sql-reference/data-types/int-uint.md
sql_reference/data_types/nested_data_structures/index.md sql-reference/data-types/nested-data-structures/index.md
sql_reference/data_types/nested_data_structures/nested.md sql-reference/data-types/nested-data-structures/nested.md
sql_reference/data_types/nullable.md sql-reference/data-types/nullable.md
sql_reference/data_types/simpleaggregatefunction.md sql-reference/data-types/simpleaggregatefunction.md
sql_reference/data_types/special_data_types/expression.md sql-reference/data-types/special-data-types/expression.md
sql_reference/data_types/special_data_types/index.md sql-reference/data-types/special-data-types/index.md
sql_reference/data_types/special_data_types/interval.md sql-reference/data-types/special-data-types/interval.md
sql_reference/data_types/special_data_types/nothing.md sql-reference/data-types/special-data-types/nothing.md
sql_reference/data_types/special_data_types/set.md sql-reference/data-types/special-data-types/set.md
sql_reference/data_types/string.md sql-reference/data-types/string.md
sql_reference/data_types/tuple.md sql-reference/data-types/tuple.md
sql_reference/data_types/uuid.md sql-reference/data-types/uuid.md
sql_reference/dictionaries/external_dictionaries/external_dicts.md sql-reference/dictionaries/external-dictionaries/external-dicts.md
sql_reference/dictionaries/external_dictionaries/external_dicts_dict.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_hierarchical.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-hierarchical.md
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_layout.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_lifetime.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_sources.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
sql_reference/dictionaries/external_dictionaries/external_dicts_dict_structure.md sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md
sql_reference/dictionaries/external_dictionaries/index.md sql-reference/dictionaries/external-dictionaries/index.md
sql_reference/dictionaries/index.md sql-reference/dictionaries/index.md
sql_reference/dictionaries/internal_dicts.md sql-reference/dictionaries/internal-dicts.md
sql_reference/functions/arithmetic_functions.md sql-reference/functions/arithmetic-functions.md
sql_reference/functions/array_functions.md sql-reference/functions/array-functions.md
sql_reference/functions/array_join.md sql-reference/functions/array-join.md
sql_reference/functions/bit_functions.md sql-reference/functions/bit-functions.md
sql_reference/functions/bitmap_functions.md sql-reference/functions/bitmap-functions.md
sql_reference/functions/comparison_functions.md sql-reference/functions/comparison-functions.md
sql_reference/functions/conditional_functions.md sql-reference/functions/conditional-functions.md
sql_reference/functions/date_time_functions.md sql-reference/functions/date-time-functions.md
sql_reference/functions/encoding_functions.md sql-reference/functions/encoding-functions.md
sql_reference/functions/ext_dict_functions.md sql-reference/functions/ext-dict-functions.md
sql_reference/functions/functions_for_nulls.md sql-reference/functions/functions-for-nulls.md
sql_reference/functions/geo.md sql-reference/functions/geo.md
sql_reference/functions/hash_functions.md sql-reference/functions/hash-functions.md
sql_reference/functions/higher_order_functions.md sql-reference/functions/higher-order-functions.md
sql_reference/functions/in_functions.md sql-reference/functions/in-functions.md
sql_reference/functions/index.md sql-reference/functions/index.md
sql_reference/functions/introspection.md sql-reference/functions/introspection.md
sql_reference/functions/ip_address_functions.md sql-reference/functions/ip-address-functions.md
sql_reference/functions/json_functions.md sql-reference/functions/json-functions.md
sql_reference/functions/logical_functions.md sql-reference/functions/logical-functions.md
sql_reference/functions/machine_learning_functions.md sql-reference/functions/machine-learning-functions.md
sql_reference/functions/math_functions.md sql-reference/functions/math-functions.md
sql_reference/functions/other_functions.md sql-reference/functions/other-functions.md
sql_reference/functions/random_functions.md sql-reference/functions/random-functions.md
sql_reference/functions/rounding_functions.md sql-reference/functions/rounding-functions.md
sql_reference/functions/splitting_merging_functions.md sql-reference/functions/splitting-merging-functions.md
sql_reference/functions/string_functions.md sql-reference/functions/string-functions.md
sql_reference/functions/string_replace_functions.md sql-reference/functions/string-replace-functions.md
sql_reference/functions/string_search_functions.md sql-reference/functions/string-search-functions.md
sql_reference/functions/type_conversion_functions.md sql-reference/functions/type-conversion-functions.md
sql_reference/functions/url_functions.md sql-reference/functions/url-functions.md
sql_reference/functions/uuid_functions.md sql-reference/functions/uuid-functions.md
sql_reference/functions/ym_dict_functions.md sql-reference/functions/ym-dict-functions.md
sql_reference/index.md sql-reference/index.md
sql_reference/operators.md sql-reference/operators.md
sql_reference/statements/alter.md sql-reference/statements/alter.md
sql_reference/statements/create.md sql-reference/statements/create.md
sql_reference/statements/index.md sql-reference/statements/index.md
sql_reference/statements/insert_into.md sql-reference/statements/insert-into.md
sql_reference/statements/misc.md sql-reference/statements/misc.md
sql_reference/statements/select.md sql-reference/statements/select.md
sql_reference/statements/show.md sql-reference/statements/show.md
sql_reference/statements/system.md sql-reference/statements/system.md
sql_reference/syntax.md sql-reference/syntax.md
sql_reference/table_functions/file.md sql-reference/table-functions/file.md
sql_reference/table_functions/generate.md sql-reference/table-functions/generate.md
sql_reference/table_functions/hdfs.md sql-reference/table-functions/hdfs.md
sql_reference/table_functions/index.md sql-reference/table-functions/index.md
sql_reference/table_functions/input.md sql-reference/table-functions/input.md
sql_reference/table_functions/jdbc.md sql-reference/table-functions/jdbc.md
sql_reference/table_functions/merge.md sql-reference/table-functions/merge.md
sql_reference/table_functions/mysql.md sql-reference/table-functions/mysql.md
sql_reference/table_functions/numbers.md sql-reference/table-functions/numbers.md
sql_reference/table_functions/odbc.md sql-reference/table-functions/odbc.md
sql_reference/table_functions/remote.md sql-reference/table-functions/remote.md
sql_reference/table_functions/url.md sql-reference/table-functions/url.md
system_tables.md operations/system-tables.md
system_tables/system.asynchronous_metrics.md operations/system-tables.md
system_tables/system.clusters.md operations/system-tables.md
system_tables/system.columns.md operations/system-tables.md
system_tables/system.databases.md operations/system-tables.md
system_tables/system.dictionaries.md operations/system-tables.md
system_tables/system.events.md operations/system-tables.md
system_tables/system.functions.md operations/system-tables.md
system_tables/system.merges.md operations/system-tables.md
system_tables/system.metrics.md operations/system-tables.md
system_tables/system.numbers.md operations/system-tables.md
system_tables/system.numbers_mt.md operations/system-tables.md
system_tables/system.one.md operations/system-tables.md
system_tables/system.parts.md operations/system-tables.md
system_tables/system.processes.md operations/system-tables.md
system_tables/system.replicas.md operations/system-tables.md
system_tables/system.settings.md operations/system-tables.md
system_tables/system.tables.md operations/system-tables.md
system_tables/system.zookeeper.md operations/system-tables.md
table_engines.md operations/table-engines.md
table_engines/aggregatingmergetree.md operations/table-engines/aggregatingmergetree.md
table_engines/buffer.md operations/table-engines/buffer.md
table_engines/collapsingmergetree.md operations/table-engines/collapsingmergetree.md
table_engines/custom_partitioning_key.md operations/table-engines/custom-partitioning-key.md
table_engines/dictionary.md operations/table-engines/dictionary.md
table_engines/distributed.md operations/table-engines/distributed.md
table_engines/external_data.md operations/table-engines/external-data.md
table_engines/file.md operations/table-engines/file.md
table_engines/graphitemergetree.md operations/table-engines/graphitemergetree.md
table_engines/index.md operations/table-engines/index.md
table_engines/join.md operations/table-engines/join.md
table_engines/kafka.md operations/table-engines/kafka.md
table_engines/log.md operations/table-engines/log.md
table_engines/materializedview.md operations/table-engines/materializedview.md
table_engines/memory.md operations/table-engines/memory.md
table_engines/merge.md operations/table-engines/merge.md
table_engines/mergetree.md operations/table-engines/mergetree.md
table_engines/mysql.md operations/table-engines/mysql.md
table_engines/null.md operations/table-engines/null.md
table_engines/replacingmergetree.md operations/table-engines/replacingmergetree.md
table_engines/replication.md operations/table-engines/replication.md
table_engines/set.md operations/table-engines/set.md
table_engines/summingmergetree.md operations/table-engines/summingmergetree.md
table_engines/tinylog.md operations/table-engines/tinylog.md
table_engines/view.md operations/table-engines/view.md
table_functions/file.md query-language/table-functions/file.md
table_functions/index.md query-language/table-functions/index.md
table_functions/merge.md query-language/table-functions/merge.md
table_functions/numbers.md query-language/table-functions/numbers.md
table_functions/remote.md query-language/table-functions/remote.md
utils.md operations/utils.md
utils/clickhouse-copier.md operations/utils/clickhouse-copier.md
utils/clickhouse-local.md operations/utils/clickhouse-local.md
whats_new/changelog/2017.md whats-new/changelog/2017.md
whats_new/changelog/2018.md whats-new/changelog/2018.md
whats_new/changelog/2019.md whats-new/changelog/2019.md
whats_new/changelog/index.md whats-new/changelog/index.md
whats_new/index.md whats-new/index.md
whats_new/roadmap.md whats-new/roadmap.md
whats_new/security_changelog.md whats-new/security-changelog.md
The redirects from this file were moved to the Docusaurus configuration file.
If you need to add a redirect, please either open a PR in
https://github.com/clickhouse/clickhouse-docs adding the redirect to
https://github.com/ClickHouse/clickhouse-docs/blob/main/docusaurus.config.js,
or open an issue in the same repo providing both the old URL and the new URL
so that the redirect can be added.

View File

@ -69,11 +69,11 @@ ClickHouse не удаляет данные из таблица автомати
- 0 — запрос был инициирован другим запросом при выполнении распределенного запроса.
- `user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший текущий запрос.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID запроса.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел запрос.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел запрос.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт, с которого клиент сделал запрос
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший первоначальный запрос (для распределенных запросов).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID родительского запроса.
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел родительский запрос.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел родительский запрос.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт, с которого клиент сделал родительский запрос.
- `initial_query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время начала обработки запроса (для распределенных запросов).
- `initial_query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время начала обработки запроса с точностью до микросекунд (для распределенных запросов).

View File

@ -39,11 +39,11 @@ ClickHouse не удаляет данные из таблицы автомати
- 0 — запрос был инициирован другим запросом при распределенном запросе.
- `user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший текущий запрос.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — ID запроса.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел запрос.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел запрос.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — порт, с которого пришел запрос.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — пользователь, запустивший первоначальный запрос (для распределенных запросов).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — ID родительского запроса.
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес, с которого пришел родительский запрос.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес, с которого пришел родительский запрос.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — порт, с которого пришел родительский запрос.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — интерфейс, с которого ушёл запрос. Возможные значения:
- 1 — TCP.

View File

@ -27,7 +27,7 @@ slug: /ru/operations/system-tables/session_log
- `profiles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — список профилей, установленных для всех ролей и (или) пользователей.
- `roles` ([Array](../../sql-reference/data-types/array.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md))) — список ролей, к которым применяется данный профиль.
- `settings` ([Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md), [String](../../sql-reference/data-types/string.md)))) — настройки, которые были изменены при входе или выходе клиента из системы.
- `client_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP-адрес, который использовался для входа или выхода из системы.
- `client_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP-адрес, который использовался для входа или выхода из системы.
- `client_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт клиента, который использовался для входа или выхода из системы.
- `interface` ([Enum8](../../sql-reference/data-types/enum.md)) — интерфейс, с которого был инициирован вход в систему. Возможные значения:
- `TCP`

View File

@ -15,7 +15,7 @@ slug: /ru/operations/system-tables/zookeeper_log
- `Finalize` — соединение разорвано, ответ не получен.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата, когда произошло событие.
- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — дата и время, когда произошло событие.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — IP адрес сервера ZooKeeper, с которого был сделан запрос.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — IP адрес сервера ZooKeeper, с которого был сделан запрос.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — порт сервера ZooKeeper, с которого был сделан запрос.
- `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — идентификатор сессии, который сервер ZooKeeper создает для каждого соединения.
- `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — идентификатор запроса внутри сессии. Обычно это последовательный номер запроса, одинаковый у строки запроса и у парной строки `response`/`finalize`.

View File

@ -1,12 +1,12 @@
---
slug: /ru/sql-reference/data-types/domains/ipv4
slug: /ru/sql-reference/data-types/ipv4
sidebar_position: 59
sidebar_label: IPv4
---
## IPv4 {#ipv4}
`IPv4` — это домен, базирующийся на типе данных `UInt32` предназначенный для хранения адресов IPv4. Он обеспечивает компактное хранение данных с удобным для человека форматом ввода-вывода, и явно отображаемым типом данных в структуре таблицы.
IPv4-адреса. Хранится в 4 байтах как UInt32.
### Применение {#primenenie}
@ -57,27 +57,6 @@ SELECT toTypeName(from), hex(from) FROM hits LIMIT 1;
└──────────────────┴───────────┘
```
Значения с доменным типом данных не преобразуются неявно в другие типы данных, кроме `UInt32`.
Если необходимо преобразовать значение типа `IPv4` в строку, то это необходимо делать явно с помощью функции `IPv4NumToString()`:
**См. также**
``` sql
SELECT toTypeName(s), IPv4NumToString(from) AS s FROM hits LIMIT 1;
```
``` text
┌─toTypeName(IPv4NumToString(from))─┬─s──────────────┐
│ String │ 183.247.232.58 │
└───────────────────────────────────┴────────────────┘
```
Или приводить к типу данных `UInt32`:
``` sql
SELECT toTypeName(i), CAST(from AS UInt32) AS i FROM hits LIMIT 1;
```
``` text
┌─toTypeName(CAST(from, 'UInt32'))─┬──────────i─┐
│ UInt32 │ 3086477370 │
└──────────────────────────────────┴────────────┘
```
- [Functions for Working with IPv4 and IPv6 Addresses](../functions/ip-address-functions.md)

View File

@ -1,5 +1,5 @@
---
slug: /ru/sql-reference/data-types/domains/ipv6
slug: /ru/sql-reference/data-types/ipv6
sidebar_position: 60
sidebar_label: IPv6
---

View File

@ -5,7 +5,7 @@ sidebar_label: "Функции для работы с внешними слов
---
:::note "Внимание"
Для словарей, созданных с помощью [DDL-запросов](../../sql-reference/statements/create/dictionary.md), в параметре `dict_name` указывается полное имя словаря вместе с базой данных, например: `<database>.<dict_name>`. Если база данных не указана, используется текущая.
Для словарей, созданных с помощью [DDL-запросов](../../sql-reference/statements/create/dictionary.md), в параметре `dict_name` указывается полное имя словаря вместе с базой данных, например: `<database>.<dict_name>`. Если база данных не указана, используется текущая.
:::
# Функции для работы с внешними словарями {#ext_dict_functions}

View File

@ -265,7 +265,7 @@ SELECT
## toIPv6 {#toipv6string}
Приводит строку с адресом в формате IPv6 к типу [IPv6](../../sql-reference/data-types/domains/ipv6.md). Возвращает пустое значение, если входящая строка не является корректным IP адресом.
Приводит строку с адресом в формате IPv6 к типу [IPv6](../../sql-reference/data-types/ipv6.md). Возвращает пустое значение, если входящая строка не является корректным IP адресом.
Похоже на функцию [IPv6StringToNum](#ipv6stringtonums), которая представляет адрес IPv6 в двоичном виде.
Если входящая строка содержит корректный IPv4 адрес, функция возвращает его IPv6 эквивалент.
@ -284,7 +284,7 @@ toIPv6(string)
- IP адрес.
Тип: [IPv6](../../sql-reference/data-types/domains/ipv6.md).
Тип: [IPv6](../../sql-reference/data-types/ipv6.md).
**Примеры**

View File

@ -60,11 +60,11 @@ ClickHouse不会自动从表中删除数据。更多详情请看 [introduction](
- 0 — 由另一个查询发起的,作为分布式查询的一部分.
- `user` ([String](../../sql-reference/data-types/string.md)) — 发起查询的用户.
- `query_id` ([String](../../sql-reference/data-types/string.md)) — 查询ID.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的客户端IP地址.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起查询的客户端IP地址.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起查询的客户端端口.
- `initial_user` ([String](../../sql-reference/data-types/string.md)) — 初始查询的用户名(用于分布式查询执行).
- `initial_query_id` ([String](../../sql-reference/data-types/string.md)) — 运行初始查询的ID用于分布式查询执行.
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 运行父查询的IP地址.
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 运行父查询的IP地址.
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 发起父查询的客户端端口.
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md)) — 发起查询的接口. 可能的值:
- 1 — TCP.

View File

@ -36,11 +36,11 @@ ClickHouse不会自动从表中删除数据。 欲了解更多详情,请参照
- 0 — 由其他查询发起的分布式查询。
- `user` ([字符串](../../sql-reference/data-types/string.md)) — 发起查询的用户名。
- `query_id` ([字符串](../../sql-reference/data-types/string.md)) — 查询的ID。
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起查询的IP地址。
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起查询的IP地址。
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的端口。
- `initial_user` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的用户名(对于分布式查询)。
- `initial_query_id` ([字符串](../../sql-reference/data-types/string.md)) — 首次发起查询的ID对于分布式查询
- `initial_address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 发起该查询的父查询IP地址。
- `initial_address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 发起该查询的父查询IP地址。
- `initial_port` ([UInt16](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起该查询的父查询端口。
- `interface` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — 发起查询的界面,可能的值:
- 1 — TCP.

View File

@ -15,7 +15,7 @@ slug: /zh/operations/system-tables/zookeeper_log
- `Finalize` — 连接丢失, 未收到响应.
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — 事件发生的日期.
- `event_time` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 事件发生的日期和时间.
- `address` ([IPv6](../../sql-reference/data-types/domains/ipv6.md)) — 用于发出请求的 ZooKeeper 服务器的 IP 地址.
- `address` ([IPv6](../../sql-reference/data-types/ipv6.md)) — 用于发出请求的 ZooKeeper 服务器的 IP 地址.
- `port` ([UInt16](../../sql-reference/data-types/int-uint.md)) — 用于发出请求的 ZooKeeper 服务器的端口.
- `session_id` ([Int64](../../sql-reference/data-types/int-uint.md)) — ZooKeeper 服务器为每个连接设置的会话 ID.
- `xid` ([Int32](../../sql-reference/data-types/int-uint.md)) — 会话中请求的 ID. 这通常是一个连续的请求编号. 请求行和配对的 `response`/`finalize` 行相同.

View File

@ -1,5 +1,5 @@
---
slug: /zh/sql-reference/data-types/domains/ipv4
slug: /zh/sql-reference/data-types/ipv4
---
## IPv4 {#ipv4}

View File

@ -1,5 +1,5 @@
---
slug: /zh/sql-reference/data-types/domains/ipv6
slug: /zh/sql-reference/data-types/ipv6
---
## IPv6 {#ipv6}

View File

@ -2,6 +2,8 @@
#include <base/types.h>
#include <vector>
namespace DB
{

View File

@ -224,12 +224,12 @@ struct Keeper::KeeperHTTPContext : public IHTTPContext
uint64_t getMaxFieldNameSize() const override
{
return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 1048576);
return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 128 * 1024);
}
uint64_t getMaxFieldValueSize() const override
{
return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 1048576);
return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 128 * 1024);
}
uint64_t getMaxChunkSize() const override

View File

@ -1543,12 +1543,12 @@
-->
<!-- Configuration for the query cache -->
<!-- <query_cache> -->
<!-- <max_size_in_bytes>1073741824</max_size_in_bytes> -->
<!-- <max_entries>1024</max_entries> -->
<!-- <max_entry_size_in_bytes>1048576</max_entry_size_in_bytes> -->
<!-- <max_entry_size_in_rows>30000000</max_entry_size_in_rows> -->
<!-- </query_cache> -->
<query_cache>
<max_size_in_bytes>1073741824</max_size_in_bytes>
<max_entries>1024</max_entries>
<max_entry_size_in_bytes>1048576</max_entry_size_in_bytes>
<max_entry_size_in_rows>30000000</max_entry_size_in_rows>
</query_cache>
<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>

View File

@ -4,6 +4,7 @@
#include <bitset>
#include <cstring>
#include <vector>
#include <unordered_map>
namespace DB

View File

@ -146,8 +146,8 @@ public:
for (const auto & argument : this->argument_types)
can_be_compiled &= canBeNativeType(*argument);
auto return_type = this->getResultType();
can_be_compiled &= canBeNativeType(*return_type);
const auto & result_type = this->getResultType();
can_be_compiled &= canBeNativeType(*result_type);
return can_be_compiled;
}
@ -198,8 +198,8 @@ public:
auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
auto * double_numerator = nativeCast<Numerator>(b, numerator_value, b.getDoubleTy());
auto * double_denominator = nativeCast<Denominator>(b, denominator_value, b.getDoubleTy());
auto * double_numerator = nativeCast<Numerator>(b, numerator_value, this->getResultType());
auto * double_denominator = nativeCast<Denominator>(b, denominator_value, this->getResultType());
return b.CreateFDiv(double_numerator, double_denominator);
}
@ -308,7 +308,7 @@ public:
#if USE_EMBEDDED_COMPILER
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -316,7 +316,7 @@ public:
auto * numerator_ptr = aggregate_data_ptr;
auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
auto * value_cast_to_numerator = nativeCast(b, arguments[0], toNativeDataType<Numerator>());
auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator);
b.CreateStore(numerator_result_value, numerator_ptr);

View File

@ -30,7 +30,7 @@ public:
using Numerator = typename Base::Numerator;
using Denominator = typename Base::Denominator;
using Fraction = typename Base::Fraction;
using Fraction = typename Base::Fraction;
void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
{
@ -55,7 +55,7 @@ public:
return can_be_compiled;
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -63,8 +63,9 @@ public:
auto * numerator_ptr = aggregate_data_ptr;
auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
auto * argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
auto * weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
auto numerator_data_type = toNativeDataType<Numerator>();
auto * argument = nativeCast(b, arguments[0], numerator_data_type);
auto * weight = nativeCast(b, arguments[1], numerator_data_type);
llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
@ -75,7 +76,7 @@ public:
static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
auto * denominator_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
auto * weight_cast_to_denominator = nativeCast(b, arguments[1], toNativeDataType<Denominator>());
auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator);

View File

@ -148,7 +148,7 @@ public:
Data::compileCreate(builder, value_ptr);
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -157,8 +157,7 @@ public:
auto * value_ptr = aggregate_data_ptr;
auto * value = b.CreateLoad(return_type, value_ptr);
const auto & argument_value = argument_values[0];
auto * result_value = Data::compileUpdate(builder, value, argument_value);
auto * result_value = Data::compileUpdate(builder, value, arguments[0].value);
b.CreateStore(result_value, value_ptr);
}

View File

@ -165,7 +165,7 @@ public:
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> &) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType &) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -309,13 +309,13 @@ public:
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
auto * return_type = toNativeType(b, this->getResultType());
auto * is_null_value = b.CreateExtractValue(values[0], {1});
auto * is_null_value = b.CreateExtractValue(arguments[0].value, {1});
auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));
auto * count_value_ptr = aggregate_data_ptr;

View File

@ -188,18 +188,18 @@ public:
return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
const auto & nullable_type = arguments_types[0];
const auto & nullable_value = argument_values[0];
const auto & nullable_type = arguments[0].type;
const auto & nullable_value = arguments[0].value;
auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
auto * is_null_value = b.CreateExtractValue(nullable_value, {1});
const auto & predicate_type = arguments_types[argument_values.size() - 1];
auto * predicate_value = argument_values[argument_values.size() - 1];
const auto & predicate_type = arguments.back().type;
auto * predicate_value = arguments.back().value;
auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);
auto * head = b.GetInsertBlock();
@ -219,7 +219,7 @@ public:
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
b.CreateBr(join_block);
b.SetInsertPoint(join_block);
@ -370,38 +370,31 @@ public:
return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
/// TODO: Check
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
size_t arguments_size = arguments_types.size();
size_t arguments_size = arguments.size();
ValuesWithType wrapped_arguments;
wrapped_arguments.reserve(arguments_size);
DataTypes non_nullable_types;
std::vector<llvm::Value * > wrapped_values;
std::vector<llvm::Value * > is_null_values;
non_nullable_types.resize(arguments_size);
wrapped_values.resize(arguments_size);
is_null_values.resize(arguments_size);
for (size_t i = 0; i < arguments_size; ++i)
{
const auto & argument_value = argument_values[i];
const auto & argument_value = arguments[i].value;
const auto & argument_type = arguments[i].type;
if (is_nullable[i])
{
auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
is_null_values[i] = b.CreateExtractValue(argument_value, {1});
wrapped_values[i] = wrapped_value;
non_nullable_types[i] = removeNullable(arguments_types[i]);
is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
}
else
{
wrapped_values[i] = argument_value;
non_nullable_types[i] = arguments_types[i];
wrapped_arguments.emplace_back(argument_value, argument_type);
}
}
@ -415,9 +408,6 @@ public:
for (auto * is_null_value : is_null_values)
{
if (!is_null_value)
continue;
auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
}
@ -426,8 +416,8 @@ public:
b.SetInsertPoint(join_block_after_null_checks);
const auto & predicate_type = arguments_types[argument_values.size() - 1];
auto * predicate_value = argument_values[argument_values.size() - 1];
const auto & predicate_type = arguments.back().type;
auto * predicate_value = arguments.back().value;
auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);
auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent());
@ -444,7 +434,7 @@ public:
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
b.CreateBr(join_block);
b.SetInsertPoint(join_block);

View File

@ -223,12 +223,12 @@ public:
nested_func->compileCreate(builder, aggregate_data_ptr);
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
const auto & predicate_type = arguments_types[argument_values.size() - 1];
auto * predicate_value = argument_values[argument_values.size() - 1];
const auto & predicate_type = arguments.back().type;
auto * predicate_value = arguments.back().value;
auto * head = b.GetInsertBlock();
@ -242,21 +242,9 @@ public:
b.SetInsertPoint(if_true);
size_t arguments_size_without_predicate = arguments_types.size() - 1;
DataTypes argument_types_without_predicate;
std::vector<llvm::Value *> argument_values_without_predicate;
argument_types_without_predicate.resize(arguments_size_without_predicate);
argument_values_without_predicate.resize(arguments_size_without_predicate);
for (size_t i = 0; i < arguments_size_without_predicate; ++i)
{
argument_types_without_predicate[i] = arguments_types[i];
argument_values_without_predicate[i] = argument_values[i];
}
nested_func->compileAdd(builder, aggregate_data_ptr, argument_types_without_predicate, argument_values_without_predicate);
ValuesWithType arguments_without_predicate = arguments;
arguments_without_predicate.pop_back();
nested_func->compileAdd(builder, aggregate_data_ptr, arguments_without_predicate);
b.CreateBr(join_block);

View File

@ -1459,11 +1459,11 @@ public:
b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), llvm::assumeAligned(this->alignOfData()));
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
if constexpr (Data::is_compilable)
{
Data::compileChangeIfBetter(builder, aggregate_data_ptr, argument_values[0]);
Data::compileChangeIfBetter(builder, aggregate_data_ptr, arguments[0].value);
}
else
{

View File

@ -378,12 +378,12 @@ public:
#if USE_EMBEDDED_COMPILER
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
const auto & nullable_type = arguments_types[0];
const auto & nullable_value = argument_values[0];
const auto & nullable_type = arguments[0].type;
const auto & nullable_value = arguments[0].value;
auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
auto * is_null_value = b.CreateExtractValue(nullable_value, {1});
@ -405,7 +405,7 @@ public:
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
b.CreateBr(join_block);
b.SetInsertPoint(join_block);
@ -568,36 +568,32 @@ public:
#if USE_EMBEDDED_COMPILER
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
size_t arguments_size = arguments_types.size();
size_t arguments_size = arguments.size();
DataTypes non_nullable_types;
std::vector<llvm::Value * > wrapped_values;
std::vector<llvm::Value * > is_null_values;
ValuesWithType wrapped_arguments;
wrapped_arguments.reserve(arguments_size);
non_nullable_types.resize(arguments_size);
wrapped_values.resize(arguments_size);
is_null_values.resize(arguments_size);
std::vector<llvm::Value *> is_null_values;
is_null_values.reserve(arguments_size);
for (size_t i = 0; i < arguments_size; ++i)
{
const auto & argument_value = argument_values[i];
const auto & argument_value = arguments[i].value;
const auto & argument_type = arguments[i].type;
if (is_nullable[i])
{
auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
is_null_values[i] = b.CreateExtractValue(argument_value, {1});
wrapped_values[i] = wrapped_value;
non_nullable_types[i] = removeNullable(arguments_types[i]);
is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
}
else
{
wrapped_values[i] = argument_value;
non_nullable_types[i] = arguments_types[i];
wrapped_arguments.emplace_back(argument_value, argument_type);
}
}
@ -612,9 +608,6 @@ public:
for (auto * is_null_value : is_null_values)
{
if (!is_null_value)
continue;
auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
}
@ -630,7 +623,7 @@ public:
b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);
auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values);
this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
b.CreateBr(join_block);
b.SetInsertPoint(join_block);

View File

@ -588,7 +588,7 @@ public:
b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr);
}
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
{
llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@ -597,10 +597,7 @@ public:
auto * sum_value_ptr = aggregate_data_ptr;
auto * sum_value = b.CreateLoad(return_type, sum_value_ptr);
const auto & argument_type = arguments_types[0];
const auto & argument_value = argument_values[0];
auto * value_cast_to_result = nativeCast(b, argument_type, argument_value, return_type);
auto * value_cast_to_result = nativeCast(b, arguments[0], this->getResultType());
auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result);
b.CreateStore(sum_result_value, sum_value_ptr);

View File

@ -6,6 +6,7 @@
#include <Core/Block.h>
#include <Core/ColumnNumbers.h>
#include <Core/Field.h>
#include <Core/ValuesWithType.h>
#include <Interpreters/Context_fwd.h>
#include <base/types.h>
#include <Common/Exception.h>
@ -389,7 +390,7 @@ public:
}
/// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr
virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector<llvm::Value *> & /*arguments_values*/) const
virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const ValuesWithType & /*arguments*/) const
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
}

View File

@ -78,14 +78,14 @@ namespace detail
void serialize(WriteBuffer & buf) const
{
writeBinary(count, buf);
writeBinaryLittleEndian(count, buf);
buf.write(reinterpret_cast<const char *>(elems), count * sizeof(elems[0]));
}
void deserialize(ReadBuffer & buf)
{
UInt16 new_count = 0;
readBinary(new_count, buf);
readBinaryLittleEndian(new_count, buf);
if (new_count > TINY_MAX_ELEMS)
throw Exception(ErrorCodes::INCORRECT_DATA, "The number of elements {} for the 'tiny' kind of quantileTiming is exceeding the maximum of {}", new_count, TINY_MAX_ELEMS);
buf.readStrict(reinterpret_cast<char *>(elems), new_count * sizeof(elems[0]));
@ -164,14 +164,14 @@ namespace detail
void serialize(WriteBuffer & buf) const
{
writeBinary(elems.size(), buf);
writeBinaryLittleEndian(elems.size(), buf);
buf.write(reinterpret_cast<const char *>(elems.data()), elems.size() * sizeof(elems[0]));
}
void deserialize(ReadBuffer & buf)
{
size_t size = 0;
readBinary(size, buf);
readBinaryLittleEndian(size, buf);
if (size > 10'000)
throw Exception(ErrorCodes::INCORRECT_DATA, "The number of elements {} for the 'medium' kind of quantileTiming is too large", size);
@ -341,7 +341,7 @@ namespace detail
void serialize(WriteBuffer & buf) const
{
writeBinary(count, buf);
writeBinaryLittleEndian(count, buf);
if (count * 2 > SMALL_THRESHOLD + BIG_SIZE)
{
@ -356,8 +356,8 @@ namespace detail
{
if (count_small[i])
{
writeBinary(UInt16(i), buf);
writeBinary(count_small[i], buf);
writeBinaryLittleEndian(UInt16(i), buf);
writeBinaryLittleEndian(count_small[i], buf);
}
}
@ -365,19 +365,19 @@ namespace detail
{
if (count_big[i])
{
writeBinary(UInt16(i + SMALL_THRESHOLD), buf);
writeBinary(count_big[i], buf);
writeBinaryLittleEndian(UInt16(i + SMALL_THRESHOLD), buf);
writeBinaryLittleEndian(count_big[i], buf);
}
}
/// Symbolizes end of data.
writeBinary(UInt16(BIG_THRESHOLD), buf);
writeBinaryLittleEndian(UInt16(BIG_THRESHOLD), buf);
}
}
void deserialize(ReadBuffer & buf)
{
readBinary(count, buf);
readBinaryLittleEndian(count, buf);
if (count * 2 > SMALL_THRESHOLD + BIG_SIZE)
{
@ -388,12 +388,12 @@ namespace detail
while (true)
{
UInt16 index = 0;
readBinary(index, buf);
readBinaryLittleEndian(index, buf);
if (index == BIG_THRESHOLD)
break;
UInt64 elem_count = 0;
readBinary(elem_count, buf);
readBinaryLittleEndian(elem_count, buf);
if (index < SMALL_THRESHOLD)
count_small[index] = elem_count;
@ -692,7 +692,7 @@ public:
void serialize(WriteBuffer & buf) const
{
auto kind = which();
DB::writePODBinary(kind, buf);
writeBinaryLittleEndian(kind, buf);
if (kind == Kind::Tiny)
tiny.serialize(buf);
@ -706,7 +706,7 @@ public:
void deserialize(ReadBuffer & buf)
{
Kind kind;
DB::readPODBinary(kind, buf);
readBinaryLittleEndian(kind, buf);
if (kind == Kind::Tiny)
{

View File

@ -721,7 +721,15 @@ void BackupCoordinationRemote::prepareFileInfos() const
bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
{
bool acquired_writing = false;
{
/// Check if this host is already writing this file.
std::lock_guard lock{writing_files_mutex};
if (writing_files.contains(data_file_index))
return false;
}
/// Store in Zookeeper that this host is the only host which is allowed to write this file.
bool host_is_assigned = false;
String full_path = zookeeper_path + "/writing_files/" + std::to_string(data_file_index);
String host_index_str = std::to_string(current_host_index);
@ -733,14 +741,23 @@ bool BackupCoordinationRemote::startWritingFile(size_t data_file_index)
auto code = zk->tryCreate(full_path, host_index_str, zkutil::CreateMode::Persistent);
if (code == Coordination::Error::ZOK)
acquired_writing = true; /// If we've just created this ZooKeeper's node, the writing is acquired, i.e. we should write this data file.
host_is_assigned = true; /// If we've just created this ZooKeeper's node, this host is assigned.
else if (code == Coordination::Error::ZNODEEXISTS)
acquired_writing = (zk->get(full_path) == host_index_str); /// The previous retry could write this ZooKeeper's node and then fail.
host_is_assigned = (zk->get(full_path) == host_index_str); /// The previous retry could write this ZooKeeper's node and then fail.
else
throw zkutil::KeeperException(code, full_path);
});
return acquired_writing;
if (!host_is_assigned)
return false; /// Other host is writing this file.
{
/// Check if this host is already writing this file,
/// and if it's not, mark that this host is writing this file.
/// We have to check that again because we were accessing ZooKeeper with the mutex unlocked.
std::lock_guard lock{writing_files_mutex};
return writing_files.emplace(data_file_index).second; /// Return false if this host is already writing this file.
}
}
bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic<size_t> &) const

View File

@ -106,12 +106,14 @@ private:
mutable std::optional<BackupCoordinationReplicatedAccess> TSA_GUARDED_BY(replicated_access_mutex) replicated_access;
mutable std::optional<BackupCoordinationReplicatedSQLObjects> TSA_GUARDED_BY(replicated_sql_objects_mutex) replicated_sql_objects;
mutable std::optional<BackupCoordinationFileInfos> TSA_GUARDED_BY(file_infos_mutex) file_infos;
std::unordered_set<size_t> TSA_GUARDED_BY(writing_files_mutex) writing_files;
mutable std::mutex zookeeper_mutex;
mutable std::mutex replicated_tables_mutex;
mutable std::mutex replicated_access_mutex;
mutable std::mutex replicated_sql_objects_mutex;
mutable std::mutex file_infos_mutex;
mutable std::mutex writing_files_mutex;
};
}

View File

@ -165,7 +165,7 @@ void IBridge::initialize(Application & self)
http_timeout = config().getUInt64("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
max_server_connections = config().getUInt("max-server-connections", 1024);
keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10);
http_max_field_value_size = config().getUInt64("http-max-field-value-size", 1048576);
http_max_field_value_size = config().getUInt64("http-max-field-value-size", 128 * 1024);
struct rlimit limit;
const UInt64 gb = 1024 * 1024 * 1024;

View File

@ -1165,11 +1165,20 @@ void ClientBase::onProfileEvents(Block & block)
/// Flush all buffers.
void ClientBase::resetOutput()
{
/// Order is important: format, compression, file
if (output_format)
output_format->finalize();
output_format.reset();
logs_out_stream.reset();
if (out_file_buf)
{
out_file_buf->finalize();
out_file_buf.reset();
}
if (pager_cmd)
{
pager_cmd->in.close();
@ -1177,15 +1186,9 @@ void ClientBase::resetOutput()
}
pager_cmd = nullptr;
if (out_file_buf)
{
out_file_buf->next();
out_file_buf.reset();
}
if (out_logs_buf)
{
out_logs_buf->next();
out_logs_buf->finalize();
out_logs_buf.reset();
}

View File

@ -588,7 +588,7 @@ void Connection::sendQuery(
if (method == "ZSTD")
level = settings->network_zstd_compression_level;
CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs);
CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec);
compression_codec = CompressionCodecFactory::instance().get(method, level);
}
else

View File

@ -528,6 +528,7 @@ StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & are
{
WriteBufferFromArena out(arena, begin);
func->serialize(data[n], out, version);
out.finalize();
return out.complete();
}

View File

@ -200,9 +200,11 @@ void AsyncLoader::start()
void AsyncLoader::wait()
{
// Because job can create new jobs in other pools we have to recheck in cycle
// Because job can create new jobs in other pools we have to recheck in cycle.
// Also wait for all workers to finish to avoid races on `pool.workers`,
// which can decrease even after all jobs are already finished.
std::unique_lock lock{mutex};
while (!scheduled_jobs.empty())
while (!scheduled_jobs.empty() || hasWorker(lock))
{
lock.unlock();
for (auto & p : pools)
@ -719,4 +721,14 @@ void AsyncLoader::worker(Pool & pool)
}
}
bool AsyncLoader::hasWorker(std::unique_lock<std::mutex> &) const
{
for (const Pool & pool : pools)
{
if (pool.workers > 0)
return true;
}
return false;
}
}

View File

@ -445,6 +445,7 @@ private:
void updateCurrentPriorityAndSpawn(std::unique_lock<std::mutex> &);
void spawn(Pool & pool, std::unique_lock<std::mutex> &);
void worker(Pool & pool);
bool hasWorker(std::unique_lock<std::mutex> &) const;
// Logging
const bool log_failures; // Worker should log all exceptions caught from job functions.

View File

@ -391,6 +391,7 @@ The server successfully detected this situation and will download merged part fr
M(FilesystemCacheLockMetadataMicroseconds, "Lock filesystem cache metadata time") \
M(FilesystemCacheLockCacheMicroseconds, "Lock filesystem cache time") \
M(FilesystemCacheReserveMicroseconds, "Filesystem cache space reservation time") \
M(FilesystemCacheEvictMicroseconds, "Filesystem cache eviction time") \
M(FilesystemCacheGetOrSetMicroseconds, "Filesystem cache getOrSet() time") \
M(FilesystemCacheGetMicroseconds, "Filesystem cache get() time") \
M(FileSegmentWaitMicroseconds, "Wait on DOWNLOADING state") \

View File

@ -260,7 +260,7 @@ std::unique_ptr<ShellCommand> ShellCommand::executeDirect(const ShellCommand::Co
std::vector<char *> argv(arguments.size() + 2);
std::vector<char> argv_data(argv_sum_size);
WriteBuffer writer(argv_data.data(), argv_sum_size);
WriteBufferFromPointer writer(argv_data.data(), argv_sum_size);
argv[0] = writer.position();
writer.write(path.data(), path.size() + 1);
@ -271,6 +271,8 @@ std::unique_ptr<ShellCommand> ShellCommand::executeDirect(const ShellCommand::Co
writer.write(arguments[i].data(), arguments[i].size() + 1);
}
writer.finalize();
argv[arguments.size() + 1] = nullptr;
return executeImpl(path.data(), argv.data(), config);

View File

@ -526,6 +526,7 @@ public:
String getConnectedZooKeeperHost() const { return connected_zk_host; }
UInt16 getConnectedZooKeeperPort() const { return connected_zk_port; }
size_t getConnectedZooKeeperIndex() const { return connected_zk_index; }
UInt64 getConnectedTime() const { return connected_time; }
private:
void init(ZooKeeperArgs args_);
@ -593,6 +594,7 @@ private:
String connected_zk_host;
UInt16 connected_zk_port;
size_t connected_zk_index;
UInt64 connected_time = timeInSeconds(std::chrono::system_clock::now());
std::mutex mutex;

View File

@ -1107,17 +1107,20 @@ void ZooKeeper::initApiVersion()
get(keeper_api_version_path, std::move(callback), {});
if (future.wait_for(std::chrono::milliseconds(args.operation_timeout_ms)) != std::future_status::ready)
{
LOG_TRACE(log, "Failed to get API version: timeout");
return;
throw Exception(Error::ZOPERATIONTIMEOUT, "Failed to get API version: timeout");
}
auto response = future.get();
if (response.error != Coordination::Error::ZOK)
if (response.error == Coordination::Error::ZNONODE)
{
LOG_TRACE(log, "Failed to get API version");
LOG_TRACE(log, "API version not found, assuming {}", keeper_api_version);
return;
}
else if (response.error != Coordination::Error::ZOK)
{
throw Exception(response.error, "Failed to get API version");
}
uint8_t keeper_version{0};
DB::ReadBufferFromOwnString buf(response.data);

View File

@ -1,5 +1,7 @@
#pragma once
#include <string>
#include <functional>
namespace DB
{

View File

@ -8,6 +8,7 @@
#include <Poco/Logger.h>
#include <Common/logger_useful.h>
#include "libaccel_config.h"
#include <Common/MemorySanitizer.h>
namespace DB
{
@ -382,6 +383,11 @@ UInt32 CompressionCodecDeflateQpl::getMaxCompressedDataSize(UInt32 uncompressed_
UInt32 CompressionCodecDeflateQpl::doCompressData(const char * source, UInt32 source_size, char * dest) const
{
/// QPL library is using AVX-512 with some shuffle operations.
/// Memory sanitizer don't understand if there was uninitialized memory in SIMD register but it was not used in the result of shuffle.
#if defined(MEMORY_SANITIZER)
__msan_unpoison(dest, getMaxCompressedDataSize(source_size));
#endif
Int32 res = HardwareCodecDeflateQpl::RET_ERROR;
if (DeflateQplJobHWPool::instance().isJobPoolReady())
res = hw_codec->doCompressData(source, source_size, dest, getMaxCompressedDataSize(source_size));
@ -392,6 +398,11 @@ UInt32 CompressionCodecDeflateQpl::doCompressData(const char * source, UInt32 so
void CompressionCodecDeflateQpl::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const
{
/// QPL library is using AVX-512 with some shuffle operations.
/// Memory sanitizer don't understand if there was uninitialized memory in SIMD register but it was not used in the result of shuffle.
#if defined(MEMORY_SANITIZER)
__msan_unpoison(dest, uncompressed_size);
#endif
switch (getDecompressMode())
{
case CodecMode::Synchronous:

View File

@ -98,7 +98,7 @@ public:
protected:
bool isCompression() const override { return true; }
bool isGenericCompression() const override { return true; }
bool isExperimental() const override { return true; }
bool isDeflateQpl() const override { return true; }
UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const override;
void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const override;

View File

@ -40,10 +40,10 @@ public:
CompressionCodecPtr getDefaultCodec() const;
/// Validate codecs AST specified by user and parses codecs description (substitute default parameters)
ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs) const;
ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const;
/// Validate codecs AST specified by user
void validateCodec(const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs) const;
void validateCodec(const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const;
/// Get codec by AST and possible column_type. Some codecs can use
/// information about type to improve inner settings, but every codec should

View File

@ -34,7 +34,7 @@ namespace ErrorCodes
void CompressionCodecFactory::validateCodec(
const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs) const
const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const
{
if (family_name.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Compression codec name cannot be empty");
@ -43,13 +43,13 @@ void CompressionCodecFactory::validateCodec(
{
auto literal = std::make_shared<ASTLiteral>(static_cast<UInt64>(*level));
validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)),
{}, sanity_check, allow_experimental_codecs);
{}, sanity_check, allow_experimental_codecs, enable_deflate_qpl_codec);
}
else
{
auto identifier = std::make_shared<ASTIdentifier>(Poco::toUpper(family_name));
validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier),
{}, sanity_check, allow_experimental_codecs);
{}, sanity_check, allow_experimental_codecs, enable_deflate_qpl_codec);
}
}
@ -77,7 +77,7 @@ bool innerDataTypeIsFloat(const DataTypePtr & type)
}
ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs) const
const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const
{
if (const auto * func = ast->as<ASTFunction>())
{
@ -159,6 +159,12 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
" You can enable it with the 'allow_experimental_codecs' setting.",
codec_family_name);
if (!enable_deflate_qpl_codec && result_codec->isDeflateQpl())
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Codec {} is disabled by default."
" You can enable it with the 'enable_deflate_qpl_codec' setting.",
codec_family_name);
codecs_descriptions->children.emplace_back(result_codec->getCodecDesc());
}

View File

@ -109,6 +109,9 @@ public:
/// It will not be allowed to use unless the user will turn off the safety switch.
virtual bool isExperimental() const { return false; }
/// Is this the DEFLATE_QPL codec?
virtual bool isDeflateQpl() const { return false; }
/// If it does nothing.
virtual bool isNone() const { return false; }

View File

@ -674,7 +674,7 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ
auto * buffer_start = reinterpret_cast<BufferBase::Position>(entry_buf->data_begin() + entry_buf->size() - write_buffer_header_size);
WriteBuffer write_buf(buffer_start, write_buffer_header_size);
WriteBufferFromPointer write_buf(buffer_start, write_buffer_header_size);
if (serialization_version < KeeperStateMachine::ZooKeeperLogSerializationVersion::WITH_TIME)
writeIntBinary(request_for_session->time, write_buf);
@ -684,6 +684,8 @@ nuraft::cb_func::ReturnCode KeeperServer::callbackFunc(nuraft::cb_func::Type typ
if (request_for_session->digest->version != KeeperStorage::NO_DIGEST)
writeIntBinary(request_for_session->digest->value, write_buf);
write_buf.finalize();
return nuraft::cb_func::ReturnCode::Ok;
}
case nuraft::cb_func::PreAppendLogFollower:

View File

@ -16,7 +16,7 @@ void IMySQLWritePacket::writePayload(WriteBuffer & buffer, uint8_t & sequence_id
{
MySQLPacketPayloadWriteBuffer buf(buffer, getPayloadSize(), sequence_id);
writePayloadImpl(buf);
buf.next();
buf.finalize();
if (buf.remainingPayloadSize())
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "Incomplete payload. Written {} bytes, expected {} bytes.",

View File

@ -53,7 +53,7 @@
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION,
/// later is just a number for server version (one number instead of commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
#define DBMS_TCP_PROTOCOL_VERSION 54462
#define DBMS_TCP_PROTOCOL_VERSION 54463
#define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449
@ -73,3 +73,5 @@
#define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461
#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 54462
#define DBMS_MIN_PROTOCOL_VERSION_WITH_TOTAL_BYTES_IN_PROGRESS 54463

View File

@ -127,7 +127,7 @@ namespace fmt
template <typename FormatContext>
auto format(const DB::QualifiedTableName & name, FormatContext & ctx)
{
return format_to(ctx.out(), "{}.{}", DB::backQuoteIfNeed(name.database), DB::backQuoteIfNeed(name.table));
return fmt::format_to(ctx.out(), "{}.{}", DB::backQuoteIfNeed(name.database), DB::backQuoteIfNeed(name.table));
}
};
}

View File

@ -199,6 +199,7 @@ class IColumn;
M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \
\
M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \
M(Bool, use_mysql_types_in_show_columns, false, "Use MySQL converted types when connected via MySQL compatibility for show columns query", 0) \
\
M(UInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... expr = xN` for optimization ", 0) \
\
@ -314,8 +315,8 @@ class IColumn;
M(Seconds, http_receive_timeout, DEFAULT_HTTP_READ_BUFFER_TIMEOUT, "HTTP receive timeout", 0) \
M(UInt64, http_max_uri_size, 1048576, "Maximum URI length of HTTP request", 0) \
M(UInt64, http_max_fields, 1000000, "Maximum number of fields in HTTP header", 0) \
M(UInt64, http_max_field_name_size, 1048576, "Maximum length of field name in HTTP header", 0) \
M(UInt64, http_max_field_value_size, 1048576, "Maximum length of field value in HTTP header", 0) \
M(UInt64, http_max_field_name_size, 128 * 1024, "Maximum length of field name in HTTP header", 0) \
M(UInt64, http_max_field_value_size, 128 * 1024, "Maximum length of field value in HTTP header", 0) \
M(UInt64, http_max_chunk_size, 100_GiB, "Maximum value of a chunk size in HTTP chunked transfer encoding", 0) \
M(Bool, http_skip_not_found_url_for_globs, true, "Skip url's for globs with HTTP_NOT_FOUND error", 0) \
M(Bool, optimize_throw_if_noop, false, "If setting is enabled and OPTIMIZE query didn't actually assign a merge then an explanatory exception is thrown", 0) \
@ -327,6 +328,7 @@ class IColumn;
M(Bool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to executed distributed DDL queries.", 0) \
M(Bool, allow_suspicious_codecs, false, "If it is set to true, allow to specify meaningless compression codecs.", 0) \
M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \
M(Bool, enable_deflate_qpl_codec, false, "Enable/disable the DEFLATE_QPL codec.", 0) \
M(UInt64, query_profiler_real_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off the real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(UInt64, query_profiler_cpu_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(Bool, metrics_perf_events_enabled, false, "If enabled, some of the perf events will be measured throughout queries' execution.", 0) \

View File

@ -1,8 +1,10 @@
#include <Core/SettingsEnums.h>
#include <magic_enum.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int UNKNOWN_LOAD_BALANCING;
@ -15,7 +17,6 @@ namespace ErrorCodes
extern const int UNKNOWN_UNION;
}
IMPLEMENT_SETTING_ENUM(LoadBalancing, ErrorCodes::UNKNOWN_LOAD_BALANCING,
{{"random", LoadBalancing::RANDOM},
{"nearest_hostname", LoadBalancing::NEAREST_HOSTNAME},
@ -54,7 +55,7 @@ IMPLEMENT_SETTING_ENUM(OverflowMode, ErrorCodes::UNKNOWN_OVERFLOW_MODE,
{"break", OverflowMode::BREAK}})
IMPLEMENT_SETTING_ENUM_WITH_RENAME(OverflowModeGroupBy, ErrorCodes::UNKNOWN_OVERFLOW_MODE,
IMPLEMENT_SETTING_ENUM(OverflowModeGroupBy, ErrorCodes::UNKNOWN_OVERFLOW_MODE,
{{"throw", OverflowMode::THROW},
{"break", OverflowMode::BREAK},
{"any", OverflowMode::ANY}})
@ -67,51 +68,26 @@ IMPLEMENT_SETTING_ENUM(DistributedProductMode, ErrorCodes::UNKNOWN_DISTRIBUTED_P
{"allow", DistributedProductMode::ALLOW}})
IMPLEMENT_SETTING_ENUM_WITH_RENAME(DateTimeInputFormat, ErrorCodes::BAD_ARGUMENTS,
IMPLEMENT_SETTING_ENUM(DateTimeInputFormat, ErrorCodes::BAD_ARGUMENTS,
{{"basic", FormatSettings::DateTimeInputFormat::Basic},
{"best_effort", FormatSettings::DateTimeInputFormat::BestEffort},
{"best_effort_us", FormatSettings::DateTimeInputFormat::BestEffortUS}})
IMPLEMENT_SETTING_ENUM_WITH_RENAME(DateTimeOutputFormat, ErrorCodes::BAD_ARGUMENTS,
IMPLEMENT_SETTING_ENUM(DateTimeOutputFormat, ErrorCodes::BAD_ARGUMENTS,
{{"simple", FormatSettings::DateTimeOutputFormat::Simple},
{"iso", FormatSettings::DateTimeOutputFormat::ISO},
{"unix_timestamp", FormatSettings::DateTimeOutputFormat::UnixTimestamp}})
IMPLEMENT_SETTING_ENUM(LogsLevel, ErrorCodes::BAD_ARGUMENTS,
{{"none", LogsLevel::none},
{"fatal", LogsLevel::fatal},
{"error", LogsLevel::error},
{"warning", LogsLevel::warning},
{"information", LogsLevel::information},
{"debug", LogsLevel::debug},
{"trace", LogsLevel::trace},
{"test", LogsLevel::test}})
IMPLEMENT_SETTING_AUTO_ENUM(LogsLevel, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_ENUM_WITH_RENAME(LogQueriesType, ErrorCodes::BAD_ARGUMENTS,
{{"QUERY_START", QUERY_START},
{"QUERY_FINISH", QUERY_FINISH},
{"EXCEPTION_BEFORE_START", EXCEPTION_BEFORE_START},
{"EXCEPTION_WHILE_PROCESSING", EXCEPTION_WHILE_PROCESSING}})
IMPLEMENT_SETTING_AUTO_ENUM(LogQueriesType, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_AUTO_ENUM(DefaultDatabaseEngine, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_ENUM_WITH_RENAME(DefaultDatabaseEngine, ErrorCodes::BAD_ARGUMENTS,
{{"Ordinary", DefaultDatabaseEngine::Ordinary},
{"Atomic", DefaultDatabaseEngine::Atomic}})
IMPLEMENT_SETTING_AUTO_ENUM(DefaultTableEngine, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_ENUM_WITH_RENAME(DefaultTableEngine, ErrorCodes::BAD_ARGUMENTS,
{{"None", DefaultTableEngine::None},
{"Log", DefaultTableEngine::Log},
{"StripeLog", DefaultTableEngine::StripeLog},
{"MergeTree", DefaultTableEngine::MergeTree},
{"ReplacingMergeTree", DefaultTableEngine::ReplacingMergeTree},
{"ReplicatedMergeTree", DefaultTableEngine::ReplicatedMergeTree},
{"ReplicatedReplacingMergeTree", DefaultTableEngine::ReplicatedReplacingMergeTree},
{"Memory", DefaultTableEngine::Memory}})
IMPLEMENT_SETTING_ENUM(CleanDeletedRows, ErrorCodes::BAD_ARGUMENTS,
{{"Never", CleanDeletedRows::Never},
{"Always", CleanDeletedRows::Always}})
IMPLEMENT_SETTING_AUTO_ENUM(CleanDeletedRows, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_MULTI_ENUM(MySQLDataTypesSupport, ErrorCodes::UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL,
{{"decimal", MySQLDataTypesSupport::DECIMAL},
@ -149,14 +125,7 @@ IMPLEMENT_SETTING_ENUM(CapnProtoEnumComparingMode, ErrorCodes::BAD_ARGUMENTS,
{"by_values", FormatSettings::CapnProtoEnumComparingMode::BY_VALUES},
{"by_names_case_insensitive", FormatSettings::CapnProtoEnumComparingMode::BY_NAMES_CASE_INSENSITIVE}})
IMPLEMENT_SETTING_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS,
{{"None", FormatSettings::EscapingRule::None},
{"Escaped", FormatSettings::EscapingRule::Escaped},
{"Quoted", FormatSettings::EscapingRule::Quoted},
{"CSV", FormatSettings::EscapingRule::CSV},
{"JSON", FormatSettings::EscapingRule::JSON},
{"XML", FormatSettings::EscapingRule::XML},
{"Raw", FormatSettings::EscapingRule::Raw}})
IMPLEMENT_SETTING_AUTO_ENUM(EscapingRule, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_ENUM(MsgPackUUIDRepresentation, ErrorCodes::BAD_ARGUMENTS,
{{"bin", FormatSettings::MsgPackUUIDRepresentation::BIN},
@ -166,17 +135,15 @@ IMPLEMENT_SETTING_ENUM(MsgPackUUIDRepresentation, ErrorCodes::BAD_ARGUMENTS,
IMPLEMENT_SETTING_ENUM(Dialect, ErrorCodes::BAD_ARGUMENTS,
{{"clickhouse", Dialect::clickhouse},
{"kusto", Dialect::kusto}})
// FIXME: do not add 'kusto_auto' to the list. Maybe remove it from code completely?
IMPLEMENT_SETTING_ENUM(ParallelReplicasCustomKeyFilterType, ErrorCodes::BAD_ARGUMENTS,
{{"default", ParallelReplicasCustomKeyFilterType::DEFAULT},
{"range", ParallelReplicasCustomKeyFilterType::RANGE}})
IMPLEMENT_SETTING_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS,
{{"mmap", LocalFSReadMethod::mmap},
{"pread", LocalFSReadMethod::pread},
{"read", LocalFSReadMethod::read}})
IMPLEMENT_SETTING_AUTO_ENUM(LocalFSReadMethod, ErrorCodes::BAD_ARGUMENTS)
IMPLEMENT_SETTING_ENUM_WITH_RENAME(ParquetVersion, ErrorCodes::BAD_ARGUMENTS,
IMPLEMENT_SETTING_ENUM(ParquetVersion, ErrorCodes::BAD_ARGUMENTS,
{{"1.0", FormatSettings::ParquetVersion::V1_0},
{"2.4", FormatSettings::ParquetVersion::V2_4},
{"2.6", FormatSettings::ParquetVersion::V2_6},

View File

@ -10,7 +10,6 @@
#include <unordered_map>
#include <string_view>
namespace DB
{
namespace ErrorCodes
@ -371,19 +370,26 @@ void SettingFieldEnum<EnumT, Traits>::readBinary(ReadBuffer & in)
*this = Traits::fromString(SettingFieldEnumHelpers::readBinary(in));
}
template <typename Type>
constexpr auto getEnumValues()
{
std::array<std::pair<std::string_view, Type>, magic_enum::enum_count<Type>()> enum_values{};
size_t index = 0;
for (auto value : magic_enum::enum_values<Type>())
enum_values[index++] = std::pair{magic_enum::enum_name(value), value};
return enum_values;
}
/// NOLINTNEXTLINE
#define DECLARE_SETTING_ENUM(ENUM_TYPE) \
DECLARE_SETTING_ENUM_WITH_RENAME(ENUM_TYPE, ENUM_TYPE)
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTING_ENUM(ENUM_TYPE, ERROR_CODE_FOR_UNEXPECTED_NAME, ...) \
IMPLEMENT_SETTING_ENUM_WITH_RENAME(ENUM_TYPE, ERROR_CODE_FOR_UNEXPECTED_NAME, __VA_ARGS__)
/// NOLINTNEXTLINE
#define DECLARE_SETTING_ENUM_WITH_RENAME(NEW_NAME, ENUM_TYPE) \
struct SettingField##NEW_NAME##Traits \
{ \
using EnumType = ENUM_TYPE; \
using EnumValuePairs = std::pair<const char *, EnumType>[]; \
static const String & toString(EnumType value); \
static EnumType fromString(std::string_view str); \
}; \
@ -391,13 +397,20 @@ void SettingFieldEnum<EnumT, Traits>::readBinary(ReadBuffer & in)
using SettingField##NEW_NAME = SettingFieldEnum<ENUM_TYPE, SettingField##NEW_NAME##Traits>;
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTING_ENUM_WITH_RENAME(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, ...) \
#define IMPLEMENT_SETTING_ENUM(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, ...) \
IMPLEMENT_SETTING_ENUM_IMPL(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, EnumValuePairs, __VA_ARGS__)
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTING_AUTO_ENUM(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME) \
IMPLEMENT_SETTING_ENUM_IMPL(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, , getEnumValues<EnumType>())
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTING_ENUM_IMPL(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, PAIRS_TYPE, ...) \
const String & SettingField##NEW_NAME##Traits::toString(typename SettingField##NEW_NAME::EnumType value) \
{ \
static const std::unordered_map<EnumType, String> map = [] { \
std::unordered_map<EnumType, String> res; \
constexpr std::pair<const char *, EnumType> pairs[] = __VA_ARGS__; \
for (const auto & [name, val] : pairs) \
for (const auto & [name, val] : PAIRS_TYPE __VA_ARGS__) \
res.emplace(val, name); \
return res; \
}(); \
@ -413,8 +426,7 @@ void SettingFieldEnum<EnumT, Traits>::readBinary(ReadBuffer & in)
{ \
static const std::unordered_map<std::string_view, EnumType> map = [] { \
std::unordered_map<std::string_view, EnumType> res; \
constexpr std::pair<const char *, EnumType> pairs[] = __VA_ARGS__; \
for (const auto & [name, val] : pairs) \
for (const auto & [name, val] : PAIRS_TYPE __VA_ARGS__) \
res.emplace(name, val); \
return res; \
}(); \
@ -527,6 +539,7 @@ void SettingFieldMultiEnum<EnumT, Traits>::readBinary(ReadBuffer & in)
struct SettingField##NEW_NAME##Traits \
{ \
using EnumType = ENUM_TYPE; \
using EnumValuePairs = std::pair<const char *, EnumType>[]; \
static size_t getEnumSize(); \
static const String & toString(EnumType value); \
static EnumType fromString(std::string_view str); \
@ -540,11 +553,18 @@ void SettingFieldMultiEnum<EnumT, Traits>::readBinary(ReadBuffer & in)
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTING_MULTI_ENUM_WITH_RENAME(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, ...) \
IMPLEMENT_SETTING_ENUM_WITH_RENAME(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, __VA_ARGS__)\
IMPLEMENT_SETTING_ENUM(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME, __VA_ARGS__)\
size_t SettingField##NEW_NAME##Traits::getEnumSize() {\
return std::initializer_list<std::pair<const char*, NEW_NAME>> __VA_ARGS__ .size();\
}
/// NOLINTNEXTLINE
#define IMPLEMENT_SETTING_MULTI_AUTO_ENUM(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME) \
IMPLEMENT_SETTING_AUTO_ENUM(NEW_NAME, ERROR_CODE_FOR_UNEXPECTED_NAME)\
size_t SettingField##NEW_NAME##Traits::getEnumSize() {\
return getEnumValues<EnumType>().size();\
}
/// Can keep a value of any type. Used for user-defined settings.
struct SettingFieldCustom
{

26
src/Core/ValueWithType.h Normal file
View File

@ -0,0 +1,26 @@
#pragma once
#include <DataTypes/IDataType.h>
namespace llvm
{
class Value;
}
namespace DB
{
/// LLVM value with its data type.
/// Pairs a generated llvm::Value with the DataTypePtr describing what it
/// represents — presumably used by the JIT expression compiler; confirm at call sites.
struct ValueWithType
{
    /// Non-owning pointer to the LLVM IR value; nullptr until assigned.
    llvm::Value * value = nullptr;
    /// Data type corresponding to `value`.
    DataTypePtr type;

    ValueWithType() = default;

    /// Takes the type by value and moves it in, so callers can pass a shared_ptr cheaply.
    ValueWithType(llvm::Value * value_, DataTypePtr type_)
        : value(value_)
        , type(std::move(type_))
    {}
};
}

13
src/Core/ValuesWithType.h Normal file
View File

@ -0,0 +1,13 @@
#pragma once
#include <vector>
#include <Core/ValueWithType.h>
namespace DB
{
/// Ordered collection of ValueWithType entries (LLVM values paired with their data types).
using ValuesWithType = std::vector<ValueWithType>;
}

View File

@ -45,6 +45,7 @@ public:
String doGetName() const override;
String getNameWithoutVersion() const;
const char * getFamilyName() const override { return "AggregateFunction"; }
String getSQLCompatibleName() const override { return "TEXT"; }
TypeIndex getTypeId() const override { return TypeIndex::AggregateFunction; }
Array getParameters() const { return parameters; }

View File

@ -30,6 +30,10 @@ public:
{
return "Array";
}
String getSQLCompatibleName() const override
{
return "TEXT";
}
bool canBeInsideNullable() const override
{

View File

@ -13,6 +13,7 @@ public:
TypeIndex getTypeId() const override { return TypeIndex::Date; }
const char * getFamilyName() const override { return family_name; }
String getSQLCompatibleName() const override { return "DATE"; }
bool canBeUsedAsVersion() const override { return true; }
bool canBeInsideNullable() const override { return true; }

View File

@ -13,6 +13,7 @@ public:
TypeIndex getTypeId() const override { return TypeIndex::Date32; }
const char * getFamilyName() const override { return family_name; }
String getSQLCompatibleName() const override { return "DATE"; }
Field getDefault() const override
{

View File

@ -36,6 +36,7 @@ public:
static constexpr auto family_name = "DateTime";
const char * getFamilyName() const override { return family_name; }
String getSQLCompatibleName() const override { return "DATETIME"; }
String doGetName() const override;
TypeIndex getTypeId() const override { return TypeIndex::DateTime; }

View File

@ -28,6 +28,7 @@ public:
DataTypeDateTime64(UInt32 scale_, const TimezoneMixin & time_zone_info);
const char * getFamilyName() const override { return family_name; }
String getSQLCompatibleName() const override { return "DATETIME"; }
std::string doGetName() const override;
TypeIndex getTypeId() const override { return type_id; }
@ -37,6 +38,8 @@ public:
bool canBeUsedAsVersion() const override { return true; }
bool isSummable() const override { return false; }
protected:
SerializationPtr doGetDefaultSerialization() const override;
};

View File

@ -36,6 +36,29 @@ const char * DataTypeEnum<Type>::getFamilyName() const
return EnumName<FieldType>::value;
}
template <typename Type>
std::string DataTypeEnum<Type>::generateMySQLName(const Values & values)
{
    /// Render the MySQL-compatible type name, e.g. ENUM('a', 'b', ...),
    /// listing only the enum element names (values are omitted, as in MySQL syntax).
    WriteBufferFromOwnString buf;
    writeString("ENUM", buf);
    writeChar('(', buf);

    bool is_first = true;
    for (const auto & name_and_value : values)
    {
        if (!is_first)
            writeString(", ", buf);
        is_first = false;
        /// Quote each element name so arbitrary characters survive round-tripping.
        writeQuotedString(name_and_value.first, buf);
    }

    writeChar(')', buf);
    return buf.str();
}
template <typename Type>
std::string DataTypeEnum<Type>::generateName(const Values & values)

View File

@ -46,12 +46,14 @@ public:
private:
std::string type_name;
static std::string generateName(const Values & values);
static std::string generateMySQLName(const Values & values);
public:
explicit DataTypeEnum(const Values & values_);
std::string doGetName() const override { return type_name; }
const char * getFamilyName() const override;
String getSQLCompatibleName() const override { return generateMySQLName(this->getValues()); }
TypeIndex getTypeId() const override { return type_id; }

View File

@ -42,6 +42,8 @@ public:
TypeIndex getTypeId() const override { return type_id; }
const char * getFamilyName() const override { return "FixedString"; }
/// Use TEXT for compatibility with MySQL to allow arbitrary bytes.
String getSQLCompatibleName() const override { return "TEXT"; }
size_t getN() const
{

View File

@ -24,6 +24,7 @@ public:
std::string doGetName() const override;
const char * getFamilyName() const override { return "Function"; }
String getSQLCompatibleName() const override { return "TEXT"; }
TypeIndex getTypeId() const override { return TypeIndex::Function; }
const DataTypes & getArgumentTypes() const

View File

@ -19,6 +19,8 @@ public:
static constexpr auto type_id = TypeToTypeIndex<IPv4>;
const char * getFamilyName() const override { return TypeName<IPv4>.data(); }
String getSQLCompatibleName() const override { return "TEXT"; }
TypeIndex getTypeId() const override { return type_id; }
Field getDefault() const override { return IPv4{}; }
@ -59,6 +61,8 @@ public:
static constexpr auto type_id = TypeToTypeIndex<IPv6>;
const char * getFamilyName() const override { return TypeName<IPv6>.data(); }
String getSQLCompatibleName() const override { return "TEXT"; }
TypeIndex getTypeId() const override { return type_id; }
Field getDefault() const override { return IPv6{}; }

View File

@ -26,6 +26,7 @@ public:
std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); }
const char * getFamilyName() const override { return "Interval"; }
String getSQLCompatibleName() const override { return "TEXT"; }
TypeIndex getTypeId() const override { return TypeIndex::Interval; }
bool equals(const IDataType & rhs) const override;

View File

@ -12,6 +12,7 @@ class DataTypeLowCardinality : public IDataType
private:
DataTypePtr dictionary_type;
public:
explicit DataTypeLowCardinality(DataTypePtr dictionary_type_);
@ -22,6 +23,8 @@ public:
return "LowCardinality(" + dictionary_type->getName() + ")";
}
const char * getFamilyName() const override { return "LowCardinality"; }
String getSQLCompatibleName() const override { return dictionary_type->getSQLCompatibleName(); }
TypeIndex getTypeId() const override { return TypeIndex::LowCardinality; }
MutableColumnPtr createColumn() const override;

Some files were not shown because too many files have changed in this diff Show More