merge master and fix conflict

taiyang-li, 2022-03-21 15:05:43 +08:00
commit 49b6f3dfc5
1883 changed files with 32646 additions and 16385 deletions

.clang-tidy

@@ -33,6 +33,8 @@ Checks: '-*,
     performance-no-automatic-move,
     performance-trivially-destructible,
     performance-unnecessary-copy-initialization,
+    performance-noexcept-move-constructor,
+    performance-move-const-arg,
     readability-avoid-const-params-in-decls,
     readability-const-return-type,
@@ -206,3 +208,5 @@ CheckOptions:
     value: CamelCase
   - key: modernize-loop-convert.UseCxx20ReverseRanges
     value: false
+  - key: performance-move-const-arg.CheckTriviallyCopyableMove
+    value: false

.gitattributes
@@ -1,2 +1,3 @@
 contrib/* linguist-vendored
 *.h linguist-language=C++
+tests/queries/0_stateless/data_json/* binary

.github/codecov.yml (deleted)
@@ -1,17 +0,0 @@
-codecov:
-  max_report_age: "off"
-  strict_yaml_branch: "master"
-
-ignore:
-  - "contrib"
-  - "docs"
-  - "benchmark"
-  - "tests"
-  - "docker"
-  - "debian"
-  - "cmake"
-
-comment: false
-
-github_checks:
-  annotations: false

.github/workflows/anchore-analysis.yml (deleted)
@@ -1,43 +0,0 @@
-# This workflow checks out code, performs an Anchore container image
-# vulnerability and compliance scan, and integrates the results with
-# GitHub Advanced Security code scanning feature. For more information on
-# the Anchore scan action usage and parameters, see
-# https://github.com/anchore/scan-action. For more information on
-# Anchore container image scanning in general, see
-# https://docs.anchore.com.
-name: Docker Container Scan (clickhouse-server)
-env:
-  # Force the stdout and stderr streams to be unbuffered
-  PYTHONUNBUFFERED: 1
-"on":
-  pull_request:
-    paths:
-      - docker/server/Dockerfile
-      - .github/workflows/anchore-analysis.yml
-  schedule:
-    - cron: '0 21 * * *'
-jobs:
-  Anchore-Build-Scan:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout the code
-        uses: actions/checkout@v2
-      - name: Build the Docker image
-        run: |
-          cd docker/server
-          perl -pi -e 's|=\$version||g' Dockerfile
-          docker build . --file Dockerfile --tag localbuild/testimage:latest
-      - name: Run the local Anchore scan action itself with GitHub Advanced Security code scanning integration enabled
-        uses: anchore/scan-action@v2
-        id: scan
-        with:
-          image: "localbuild/testimage:latest"
-          acs-report-enable: true
-      - name: Upload Anchore Scan Report
-        uses: github/codeql-action/upload-sarif@v1
-        with:
-          sarif_file: ${{ steps.scan.outputs.sarif }}

.github/workflows/release.yml
@@ -32,7 +32,7 @@ jobs:
       uses: svenstaro/upload-release-action@v2
       with:
         repo_token: ${{ secrets.GITHUB_TOKEN }}
-        file: ${{runner.temp}}/release_packages/*
+        file: ${{runner.temp}}/push_to_artifactory/*
         overwrite: true
         tag: ${{ github.ref }}
         file_glob: true

.gitmodules
@@ -1,6 +1,6 @@
 [submodule "contrib/poco"]
     path = contrib/poco
-    url = https://github.com/ClickHouse-Extras/poco.git
+    url = https://github.com/ClickHouse/poco.git
     branch = clickhouse
 [submodule "contrib/zstd"]
     path = contrib/zstd
@@ -10,13 +10,13 @@
     url = https://github.com/lz4/lz4.git
 [submodule "contrib/librdkafka"]
     path = contrib/librdkafka
-    url = https://github.com/ClickHouse-Extras/librdkafka.git
+    url = https://github.com/ClickHouse/librdkafka.git
 [submodule "contrib/cctz"]
     path = contrib/cctz
-    url = https://github.com/ClickHouse-Extras/cctz.git
+    url = https://github.com/ClickHouse/cctz.git
 [submodule "contrib/zlib-ng"]
     path = contrib/zlib-ng
-    url = https://github.com/ClickHouse-Extras/zlib-ng.git
+    url = https://github.com/ClickHouse/zlib-ng.git
     branch = clickhouse-2.0.x
 [submodule "contrib/googletest"]
     path = contrib/googletest
@@ -32,51 +32,51 @@
     url = https://github.com/google/re2.git
 [submodule "contrib/llvm"]
     path = contrib/llvm
-    url = https://github.com/ClickHouse-Extras/llvm
+    url = https://github.com/ClickHouse/llvm
 [submodule "contrib/mariadb-connector-c"]
     path = contrib/mariadb-connector-c
-    url = https://github.com/ClickHouse-Extras/mariadb-connector-c.git
+    url = https://github.com/ClickHouse/mariadb-connector-c.git
 [submodule "contrib/jemalloc"]
     path = contrib/jemalloc
     url = https://github.com/jemalloc/jemalloc.git
 [submodule "contrib/unixodbc"]
     path = contrib/unixodbc
-    url = https://github.com/ClickHouse-Extras/UnixODBC.git
+    url = https://github.com/ClickHouse/UnixODBC.git
 [submodule "contrib/protobuf"]
     path = contrib/protobuf
-    url = https://github.com/ClickHouse-Extras/protobuf.git
+    url = https://github.com/ClickHouse/protobuf.git
     branch = v3.13.0.1
 [submodule "contrib/boost"]
     path = contrib/boost
-    url = https://github.com/ClickHouse-Extras/boost.git
+    url = https://github.com/ClickHouse/boost.git
 [submodule "contrib/base64"]
     path = contrib/base64
-    url = https://github.com/ClickHouse-Extras/Turbo-Base64.git
+    url = https://github.com/ClickHouse/Turbo-Base64.git
 [submodule "contrib/arrow"]
     path = contrib/arrow
-    url = https://github.com/ClickHouse-Extras/arrow.git
+    url = https://github.com/ClickHouse/arrow.git
     branch = blessed/release-6.0.1
 [submodule "contrib/thrift"]
     path = contrib/thrift
     url = https://github.com/apache/thrift.git
 [submodule "contrib/libhdfs3"]
     path = contrib/libhdfs3
-    url = https://github.com/ClickHouse-Extras/libhdfs3.git
+    url = https://github.com/ClickHouse/libhdfs3.git
 [submodule "contrib/libxml2"]
     path = contrib/libxml2
     url = https://github.com/GNOME/libxml2.git
 [submodule "contrib/libgsasl"]
     path = contrib/libgsasl
-    url = https://github.com/ClickHouse-Extras/libgsasl.git
+    url = https://github.com/ClickHouse/libgsasl.git
 [submodule "contrib/libcxx"]
     path = contrib/libcxx
-    url = https://github.com/ClickHouse-Extras/libcxx.git
+    url = https://github.com/ClickHouse/libcxx.git
 [submodule "contrib/libcxxabi"]
     path = contrib/libcxxabi
-    url = https://github.com/ClickHouse-Extras/libcxxabi.git
+    url = https://github.com/ClickHouse/libcxxabi.git
 [submodule "contrib/snappy"]
     path = contrib/snappy
-    url = https://github.com/ClickHouse-Extras/snappy.git
+    url = https://github.com/ClickHouse/snappy.git
 [submodule "contrib/cppkafka"]
     path = contrib/cppkafka
     url = https://github.com/mfontanini/cppkafka.git
@@ -85,95 +85,95 @@
     url = https://github.com/google/brotli.git
 [submodule "contrib/h3"]
     path = contrib/h3
-    url = https://github.com/ClickHouse-Extras/h3
+    url = https://github.com/ClickHouse/h3
 [submodule "contrib/hyperscan"]
     path = contrib/hyperscan
-    url = https://github.com/ClickHouse-Extras/hyperscan.git
+    url = https://github.com/ClickHouse/hyperscan.git
 [submodule "contrib/libunwind"]
     path = contrib/libunwind
-    url = https://github.com/ClickHouse-Extras/libunwind.git
+    url = https://github.com/ClickHouse/libunwind.git
 [submodule "contrib/simdjson"]
     path = contrib/simdjson
     url = https://github.com/simdjson/simdjson.git
 [submodule "contrib/rapidjson"]
     path = contrib/rapidjson
-    url = https://github.com/ClickHouse-Extras/rapidjson
+    url = https://github.com/ClickHouse/rapidjson
 [submodule "contrib/fastops"]
     path = contrib/fastops
-    url = https://github.com/ClickHouse-Extras/fastops
+    url = https://github.com/ClickHouse/fastops
 [submodule "contrib/orc"]
     path = contrib/orc
-    url = https://github.com/ClickHouse-Extras/orc
+    url = https://github.com/ClickHouse/orc
 [submodule "contrib/sparsehash-c11"]
     path = contrib/sparsehash-c11
     url = https://github.com/sparsehash/sparsehash-c11.git
 [submodule "contrib/grpc"]
     path = contrib/grpc
-    url = https://github.com/ClickHouse-Extras/grpc.git
+    url = https://github.com/ClickHouse/grpc.git
     branch = v1.33.2
 [submodule "contrib/aws"]
     path = contrib/aws
-    url = https://github.com/ClickHouse-Extras/aws-sdk-cpp.git
+    url = https://github.com/ClickHouse/aws-sdk-cpp.git
 [submodule "aws-c-event-stream"]
     path = contrib/aws-c-event-stream
-    url = https://github.com/ClickHouse-Extras/aws-c-event-stream.git
+    url = https://github.com/ClickHouse/aws-c-event-stream.git
 [submodule "aws-c-common"]
     path = contrib/aws-c-common
-    url = https://github.com/ClickHouse-Extras/aws-c-common.git
+    url = https://github.com/ClickHouse/aws-c-common.git
 [submodule "aws-checksums"]
     path = contrib/aws-checksums
-    url = https://github.com/ClickHouse-Extras/aws-checksums.git
+    url = https://github.com/ClickHouse/aws-checksums.git
 [submodule "contrib/curl"]
     path = contrib/curl
     url = https://github.com/curl/curl.git
 [submodule "contrib/icudata"]
     path = contrib/icudata
-    url = https://github.com/ClickHouse-Extras/icudata.git
+    url = https://github.com/ClickHouse/icudata.git
 [submodule "contrib/icu"]
     path = contrib/icu
     url = https://github.com/unicode-org/icu.git
 [submodule "contrib/flatbuffers"]
     path = contrib/flatbuffers
-    url = https://github.com/ClickHouse-Extras/flatbuffers.git
+    url = https://github.com/ClickHouse/flatbuffers.git
 [submodule "contrib/replxx"]
     path = contrib/replxx
-    url = https://github.com/ClickHouse-Extras/replxx.git
+    url = https://github.com/ClickHouse/replxx.git
 [submodule "contrib/avro"]
     path = contrib/avro
-    url = https://github.com/ClickHouse-Extras/avro.git
+    url = https://github.com/ClickHouse/avro.git
     ignore = untracked
 [submodule "contrib/msgpack-c"]
     path = contrib/msgpack-c
     url = https://github.com/msgpack/msgpack-c
 [submodule "contrib/libcpuid"]
     path = contrib/libcpuid
-    url = https://github.com/ClickHouse-Extras/libcpuid.git
+    url = https://github.com/ClickHouse/libcpuid.git
 [submodule "contrib/openldap"]
     path = contrib/openldap
-    url = https://github.com/ClickHouse-Extras/openldap.git
+    url = https://github.com/ClickHouse/openldap.git
 [submodule "contrib/AMQP-CPP"]
     path = contrib/AMQP-CPP
-    url = https://github.com/ClickHouse-Extras/AMQP-CPP.git
+    url = https://github.com/ClickHouse/AMQP-CPP.git
 [submodule "contrib/cassandra"]
     path = contrib/cassandra
-    url = https://github.com/ClickHouse-Extras/cpp-driver.git
+    url = https://github.com/ClickHouse/cpp-driver.git
     branch = clickhouse
 [submodule "contrib/libuv"]
     path = contrib/libuv
-    url = https://github.com/ClickHouse-Extras/libuv.git
+    url = https://github.com/ClickHouse/libuv.git
     branch = clickhouse
 [submodule "contrib/fmtlib"]
     path = contrib/fmtlib
     url = https://github.com/fmtlib/fmt.git
 [submodule "contrib/sentry-native"]
     path = contrib/sentry-native
-    url = https://github.com/ClickHouse-Extras/sentry-native.git
+    url = https://github.com/ClickHouse/sentry-native.git
 [submodule "contrib/krb5"]
     path = contrib/krb5
-    url = https://github.com/ClickHouse-Extras/krb5
+    url = https://github.com/ClickHouse/krb5
 [submodule "contrib/cyrus-sasl"]
     path = contrib/cyrus-sasl
-    url = https://github.com/ClickHouse-Extras/cyrus-sasl
+    url = https://github.com/ClickHouse/cyrus-sasl
     branch = cyrus-sasl-2.1
 [submodule "contrib/croaring"]
     path = contrib/croaring
@@ -184,7 +184,7 @@
     url = https://github.com/danlark1/miniselect
 [submodule "contrib/rocksdb"]
     path = contrib/rocksdb
-    url = https://github.com/ClickHouse-Extras/rocksdb.git
+    url = https://github.com/ClickHouse/rocksdb.git
 [submodule "contrib/xz"]
     path = contrib/xz
     url = https://github.com/xz-mirror/xz
@@ -194,53 +194,53 @@
     branch = lts_2021_11_02
 [submodule "contrib/dragonbox"]
     path = contrib/dragonbox
-    url = https://github.com/ClickHouse-Extras/dragonbox.git
+    url = https://github.com/ClickHouse/dragonbox.git
 [submodule "contrib/fast_float"]
     path = contrib/fast_float
     url = https://github.com/fastfloat/fast_float
 [submodule "contrib/libpq"]
     path = contrib/libpq
-    url = https://github.com/ClickHouse-Extras/libpq
+    url = https://github.com/ClickHouse/libpq
 [submodule "contrib/boringssl"]
     path = contrib/boringssl
-    url = https://github.com/ClickHouse-Extras/boringssl.git
+    url = https://github.com/ClickHouse/boringssl.git
     branch = MergeWithUpstream
 [submodule "contrib/NuRaft"]
     path = contrib/NuRaft
-    url = https://github.com/ClickHouse-Extras/NuRaft.git
+    url = https://github.com/ClickHouse/NuRaft.git
 [submodule "contrib/nanodbc"]
     path = contrib/nanodbc
-    url = https://github.com/ClickHouse-Extras/nanodbc.git
+    url = https://github.com/ClickHouse/nanodbc.git
 [submodule "contrib/datasketches-cpp"]
     path = contrib/datasketches-cpp
-    url = https://github.com/ClickHouse-Extras/datasketches-cpp.git
+    url = https://github.com/ClickHouse/datasketches-cpp.git
 [submodule "contrib/yaml-cpp"]
     path = contrib/yaml-cpp
-    url = https://github.com/ClickHouse-Extras/yaml-cpp.git
+    url = https://github.com/ClickHouse/yaml-cpp.git
 [submodule "contrib/cld2"]
     path = contrib/cld2
-    url = https://github.com/ClickHouse-Extras/cld2.git
+    url = https://github.com/ClickHouse/cld2.git
 [submodule "contrib/libstemmer_c"]
     path = contrib/libstemmer_c
-    url = https://github.com/ClickHouse-Extras/libstemmer_c.git
+    url = https://github.com/ClickHouse/libstemmer_c.git
 [submodule "contrib/wordnet-blast"]
     path = contrib/wordnet-blast
-    url = https://github.com/ClickHouse-Extras/wordnet-blast.git
+    url = https://github.com/ClickHouse/wordnet-blast.git
 [submodule "contrib/lemmagen-c"]
     path = contrib/lemmagen-c
-    url = https://github.com/ClickHouse-Extras/lemmagen-c.git
+    url = https://github.com/ClickHouse/lemmagen-c.git
 [submodule "contrib/libpqxx"]
     path = contrib/libpqxx
-    url = https://github.com/ClickHouse-Extras/libpqxx.git
+    url = https://github.com/ClickHouse/libpqxx.git
 [submodule "contrib/sqlite-amalgamation"]
     path = contrib/sqlite-amalgamation
     url = https://github.com/azadkuh/sqlite-amalgamation
 [submodule "contrib/s2geometry"]
     path = contrib/s2geometry
-    url = https://github.com/ClickHouse-Extras/s2geometry.git
+    url = https://github.com/ClickHouse/s2geometry.git
 [submodule "contrib/bzip2"]
     path = contrib/bzip2
-    url = https://github.com/ClickHouse-Extras/bzip2.git
+    url = https://github.com/ClickHouse/bzip2.git
 [submodule "contrib/magic_enum"]
     path = contrib/magic_enum
     url = https://github.com/Neargye/magic_enum
@@ -249,16 +249,16 @@
     url = https://github.com/google/libprotobuf-mutator
 [submodule "contrib/sysroot"]
     path = contrib/sysroot
-    url = https://github.com/ClickHouse-Extras/sysroot.git
+    url = https://github.com/ClickHouse/sysroot.git
 [submodule "contrib/nlp-data"]
     path = contrib/nlp-data
-    url = https://github.com/ClickHouse-Extras/nlp-data.git
+    url = https://github.com/ClickHouse/nlp-data.git
 [submodule "contrib/hive-metastore"]
     path = contrib/hive-metastore
-    url = https://github.com/ClickHouse-Extras/hive-metastore
+    url = https://github.com/ClickHouse/hive-metastore
 [submodule "contrib/azure"]
     path = contrib/azure
-    url = https://github.com/ClickHouse-Extras/azure-sdk-for-cpp.git
+    url = https://github.com/ClickHouse/azure-sdk-for-cpp.git
 [submodule "contrib/minizip-ng"]
     path = contrib/minizip-ng
     url = https://github.com/zlib-ng/minizip-ng

CHANGELOG.md

@@ -1,4 +1,138 @@
-### ClickHouse release v22.2, 2022-02-17
+### Table of Contents
+**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br>
+**[ClickHouse release v22.2, 2022-02-17](#222)**<br>
+**[ClickHouse release v22.1, 2022-01-18](#221)**<br>
+**[Changelog for 2021](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/whats-new/changelog/2021.md)**<br>
+
+## <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17
+
+#### Backward Incompatible Change
+
+* Make the `arrayCompact` function behave like other higher-order functions: perform compaction not on the results of the lambda function but on the original array. If you use nontrivial lambda functions in `arrayCompact`, you may restore the old behaviour by wrapping the `arrayCompact` arguments in `arrayMap`. Closes [#34010](https://github.com/ClickHouse/ClickHouse/issues/34010) [#18535](https://github.com/ClickHouse/ClickHouse/issues/18535) [#14778](https://github.com/ClickHouse/ClickHouse/issues/14778). [#34795](https://github.com/ClickHouse/ClickHouse/pull/34795) ([Alexandre Snarskii](https://github.com/snar)).
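
  For example (an illustrative sketch, not part of this commit; the values and the `% 2` key function are hypothetical):

  ```sql
  -- New behaviour: consecutive duplicates are removed from the original array.
  SELECT arrayCompact([1, 1, 2, 2, 3]);                      -- [1, 2, 3]
  -- Old behaviour (compaction of the lambda results) via the arrayMap wrapper:
  SELECT arrayCompact(arrayMap(x -> x % 2, [1, 3, 2, 4]));   -- [1, 0]
  ```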
+* Change implementation-specific behavior on overflow of the function `toDateTime`: the result is now saturated to the nearest min/max supported instant of `DateTime` instead of wrapping around. This change is highlighted as "backward incompatible" because someone may unintentionally rely on the old behavior. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)).
+
+#### New Feature
+
+* Support for caching data locally for remote filesystems. It can be enabled for `s3` disks. Closes [#28961](https://github.com/ClickHouse/ClickHouse/issues/28961). [#33717](https://github.com/ClickHouse/ClickHouse/pull/33717) ([Kseniia Sumarokova](https://github.com/kssenii)). In the meantime, we enabled the test suite on the s3 filesystem, and no known issues remain, so the feature is on its way to being production ready.
+* Add the new table function `hive`. It can be used as follows: `hive('<hive metastore url>', '<hive database>', '<hive table name>', '<columns definition>', '<partition columns>')`, for example: `SELECT * FROM hive('thrift://hivetest:9083', 'test', 'demo', 'id Nullable(String), score Nullable(Int32), day Nullable(String)', 'day')`. [#34946](https://github.com/ClickHouse/ClickHouse/pull/34946) ([lgbo](https://github.com/lgbo-ustc)).
+* Support authentication of users connected via SSL by their X.509 certificate. [#31484](https://github.com/ClickHouse/ClickHouse/pull/31484) ([eungenue](https://github.com/eungenue)).
+* Support schema inference for inserting into the table functions `file`/`hdfs`/`s3`/`url`. [#34732](https://github.com/ClickHouse/ClickHouse/pull/34732) ([Kruglov Pavel](https://github.com/Avogar)).
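
  For example (an illustrative sketch, not part of this commit; the file name and columns are hypothetical, and it assumes the explicit structure argument can now be omitted on insert):

  ```sql
  -- No explicit structure argument: the columns are inferred from the SELECT.
  INSERT INTO FUNCTION file('data.parquet', 'Parquet')
  SELECT number AS id, toString(number) AS s
  FROM numbers(3);
  ```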
+* Now you can read the `system.zookeeper` table without restrictions on the path, or by using a `LIKE` expression. Such reads can generate quite a heavy load on ZooKeeper, so to enable this ability you have to enable the setting `allow_unrestricted_reads_from_keeper`. [#34609](https://github.com/ClickHouse/ClickHouse/pull/34609) ([Sergei Trifonov](https://github.com/serxa)).
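
  For example (an illustrative sketch, not part of this commit; the path is hypothetical):

  ```sql
  SELECT name, path
  FROM system.zookeeper
  WHERE path LIKE '/clickhouse/%'
  SETTINGS allow_unrestricted_reads_from_keeper = 1;
  ```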
+* Display CPU and memory metrics in clickhouse-local. Closes [#34545](https://github.com/ClickHouse/ClickHouse/issues/34545). [#34605](https://github.com/ClickHouse/ClickHouse/pull/34605) ([李扬](https://github.com/taiyang-li)).
+* Implement the `startsWith` and `endsWith` functions for arrays. Closes [#33982](https://github.com/ClickHouse/ClickHouse/issues/33982). [#34368](https://github.com/ClickHouse/ClickHouse/pull/34368) ([usurai](https://github.com/usurai)).
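
  For example (an illustrative sketch, not part of this commit):

  ```sql
  SELECT
      startsWith([1, 2, 3, 4], [1, 2]) AS is_prefix,  -- 1
      endsWith([1, 2, 3, 4], [3, 4]) AS is_suffix;    -- 1
  ```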
+* Add three functions for the Map data type: 1. `mapReplace(map1, map2)` replaces values for keys in map1 with the values of the corresponding keys in map2, and adds keys from map2 that don't exist in map1. 2. `mapFilter`. 3. `mapMap`. `mapFilter` and `mapMap` are higher-order functions accepting two arguments: the first argument is a lambda function with a (k, v) pair as its arguments, and the second argument is a column of type Map. [#33698](https://github.com/ClickHouse/ClickHouse/pull/33698) ([hexiaoting](https://github.com/hexiaoting)).
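
  For example, `mapFilter` as described above (an illustrative sketch, not part of this commit; the values are hypothetical, and `mapMap` follows the same two-argument pattern):

  ```sql
  -- Keep only the entries whose value is greater than 1.
  SELECT mapFilter((k, v) -> v > 1, map('a', 1, 'b', 2, 'c', 3));
  -- {'b':2,'c':3}
  ```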
+* Allow getting the default user and password for clickhouse-client from the `CLICKHOUSE_USER` and `CLICKHOUSE_PASSWORD` environment variables. Closes [#34538](https://github.com/ClickHouse/ClickHouse/issues/34538). [#34947](https://github.com/ClickHouse/ClickHouse/pull/34947) ([DR](https://github.com/freedomDR)).
+
+#### Experimental Feature
+
+* New data type `Object(<schema_format>)`, which supports storing semi-structured data (for now, JSON only). Data is written to such a type as a string; then all paths are extracted according to the format of the semi-structured data and written as separate columns in the most optimal types that can store all their values. Those columns can be queried by names that match paths in the source data, e.g. `data.key1.key2`, or with the cast operator `data.key1.key2::Int64`.
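
  For example (an illustrative sketch, not part of this commit; it assumes the type is gated behind the `allow_experimental_object_type` setting and that `Object('json')` accepts JSON strings on insert):

  ```sql
  SET allow_experimental_object_type = 1;

  CREATE TABLE json_demo (data Object('json'))
  ENGINE = MergeTree ORDER BY tuple();

  -- Semi-structured data is inserted as a string; paths become typed subcolumns.
  INSERT INTO json_demo VALUES ('{"key1": {"key2": 42}}');

  SELECT data.key1.key2, data.key1.key2::Int64 FROM json_demo;
  ```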
+* Add the `database_replicated_allow_only_replicated_engine` setting. When it is enabled, only `Replicated` tables or tables with stateless engines can be created in `Replicated` databases. [#35214](https://github.com/ClickHouse/ClickHouse/pull/35214) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). Note that the `Replicated` database is still an experimental feature.
+
+#### Performance Improvement
+
+* Improve performance of insertion into `MergeTree` tables by optimizing sorting. Up to 2x improvement is observed on realistic benchmarks. [#34750](https://github.com/ClickHouse/ClickHouse/pull/34750) ([Maksim Kita](https://github.com/kitaisreal)).
+* Column pruning when reading Parquet, ORC and Arrow files from URL and S3. Closes [#34163](https://github.com/ClickHouse/ClickHouse/issues/34163). [#34849](https://github.com/ClickHouse/ClickHouse/pull/34849) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Column pruning when reading Parquet, ORC and Arrow files from Hive. [#34954](https://github.com/ClickHouse/ClickHouse/pull/34954) ([lgbo](https://github.com/lgbo-ustc)).
+* A bunch of performance optimizations from a performance superhero: improve performance of processing queries with a large `IN` section; improve performance of the `direct` dictionary if its source is `ClickHouse`; improve performance of the `detectCharset` and `detectLanguageUnknown` functions. [#34888](https://github.com/ClickHouse/ClickHouse/pull/34888) ([Maksim Kita](https://github.com/kitaisreal)).
+* Improve performance of the `any` aggregate function by using more batching. [#34760](https://github.com/ClickHouse/ClickHouse/pull/34760) ([Raúl Marín](https://github.com/Algunenano)).
+* Multiple performance improvements for `clickhouse-keeper`: less locking [#35010](https://github.com/ClickHouse/ClickHouse/pull/35010) ([zhanglistar](https://github.com/zhanglistar)); lower memory usage by streaming reading and writing of snapshots instead of a full copy [#34584](https://github.com/ClickHouse/ClickHouse/pull/34584) ([zhanglistar](https://github.com/zhanglistar)); optimized compaction of the log store in the RAFT implementation [#34534](https://github.com/ClickHouse/ClickHouse/pull/34534) ([zhanglistar](https://github.com/zhanglistar)); versioning of the internal data structure [#34486](https://github.com/ClickHouse/ClickHouse/pull/34486) ([zhanglistar](https://github.com/zhanglistar)).
+
+#### Improvement
+
+* Allow asynchronous inserts to table functions. Fixes [#34864](https://github.com/ClickHouse/ClickHouse/issues/34864). [#34866](https://github.com/ClickHouse/ClickHouse/pull/34866) ([Anton Popov](https://github.com/CurtizJ)).
+* Implicit type casting of the key argument for the functions `dictGetHierarchy`, `dictIsIn`, `dictGetChildren`, `dictGetDescendants`. Closes [#34970](https://github.com/ClickHouse/ClickHouse/issues/34970). [#35027](https://github.com/ClickHouse/ClickHouse/pull/35027) ([Maksim Kita](https://github.com/kitaisreal)).
+* The `EXPLAIN AST` query can output the AST in the form of a graph in Graphviz format: `EXPLAIN AST graph = 1 SELECT * FROM system.parts`. [#35173](https://github.com/ClickHouse/ClickHouse/pull/35173) ([李扬](https://github.com/taiyang-li)).
+* When large files were written with the `s3` table function or table engine, the content type of the files was mistakenly set to `application/xml` due to a bug in the AWS SDK. This closes [#33964](https://github.com/ClickHouse/ClickHouse/issues/33964). [#34433](https://github.com/ClickHouse/ClickHouse/pull/34433) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Change restrictive row policies a bit to make them an easier alternative to permissive policies in simple cases. If only restrictive policies exist for a particular table (without permissive policies), users will be able to see some rows. Also, `SHOW CREATE ROW POLICY` will always show `AS permissive` or `AS restrictive` in the row policy's definition. [#34596](https://github.com/ClickHouse/ClickHouse/pull/34596) ([Vitaly Baranov](https://github.com/vitlibar)).
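
  For example (an illustrative sketch, not part of this commit; the policy, table and column names are hypothetical):

  ```sql
  CREATE ROW POLICY pol ON mydb.mytable AS restrictive
      FOR SELECT USING user_id = 42 TO ALL;

  -- The definition now always spells out the kind of the policy:
  SHOW CREATE ROW POLICY pol ON mydb.mytable;
  -- CREATE ROW POLICY pol ON mydb.mytable AS restrictive FOR SELECT USING user_id = 42 TO ALL
  ```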
+* Improve schema inference with globs in the File/S3/HDFS/URL engines: try to use the next path for schema inference in case of an error. [#34465](https://github.com/ClickHouse/ClickHouse/pull/34465) ([Kruglov Pavel](https://github.com/Avogar)).
+* Play UI now correctly detects the preferred light/dark theme from the OS. [#35068](https://github.com/ClickHouse/ClickHouse/pull/35068) ([peledni](https://github.com/peledni)).
+* Added `date_time_input_format = 'best_effort_us'`. Closes [#34799](https://github.com/ClickHouse/ClickHouse/issues/34799). [#34982](https://github.com/ClickHouse/ClickHouse/pull/34982) ([WenYao](https://github.com/Cai-Yao)).
+* New settings called `allow_plaintext_password` and `allow_no_password` were added to the server configuration; they turn on/off authentication types that can be potentially insecure in some environments. They are allowed by default. [#34738](https://github.com/ClickHouse/ClickHouse/pull/34738) ([Heena Bansal](https://github.com/HeenaBansal2009)).
+* Support for the `DateTime64` data type in the `Arrow` format. Closes [#8280](https://github.com/ClickHouse/ClickHouse/issues/8280) and closes [#28574](https://github.com/ClickHouse/ClickHouse/issues/28574). [#34561](https://github.com/ClickHouse/ClickHouse/pull/34561) ([李扬](https://github.com/taiyang-li)).
+* Reload `remote_url_allow_hosts` (filtering of outgoing connections) on config update. [#35294](https://github.com/ClickHouse/ClickHouse/pull/35294) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Support the `--testmode` parameter for `clickhouse-local`. This parameter enables interpretation of the test hints that we use in functional tests. [#35264](https://github.com/ClickHouse/ClickHouse/pull/35264) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add `distributed_depth` to the query log. It is like a more detailed variant of `is_initial_query`. [#35207](https://github.com/ClickHouse/ClickHouse/pull/35207) ([李扬](https://github.com/taiyang-li)).
+* Respect `remote_url_allow_hosts` for the `MySQL` and `PostgreSQL` table functions. [#35191](https://github.com/ClickHouse/ClickHouse/pull/35191) ([Heena Bansal](https://github.com/HeenaBansal2009)).
+* Added a `disk_name` field to `system.part_log`. [#35178](https://github.com/ClickHouse/ClickHouse/pull/35178) ([Artyom Yurkov](https://github.com/Varinara)).
+* Do not retry non-retriable errors when querying remote URLs. Closes [#35161](https://github.com/ClickHouse/ClickHouse/issues/35161). [#35172](https://github.com/ClickHouse/ClickHouse/pull/35172) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support distributed INSERT SELECT queries (the setting `parallel_distributed_insert_select`) with the table function `view()`. [#35132](https://github.com/ClickHouse/ClickHouse/pull/35132) ([Azat Khuzhin](https://github.com/azat)).
+* More precise memory tracking during `INSERT` into `Buffer` with `AggregateFunction`. [#35072](https://github.com/ClickHouse/ClickHouse/pull/35072) ([Azat Khuzhin](https://github.com/azat)).
+* Avoid division by zero in the Query Profiler if the Linux kernel has a bug. Closes [#34787](https://github.com/ClickHouse/ClickHouse/issues/34787). [#35032](https://github.com/ClickHouse/ClickHouse/pull/35032) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Add more sanity checks for Keeper configuration: mixing localhost and non-local servers is no longer allowed; also add a check against using the same value for the internal Raft port and the Keeper client port. [#35004](https://github.com/ClickHouse/ClickHouse/pull/35004) ([alesapin](https://github.com/alesapin)).
+* Previously, if the user changed the settings of the system tables, there would be tons of log messages, and ClickHouse would rename the tables every minute. This fixes [#34929](https://github.com/ClickHouse/ClickHouse/issues/34929). [#34949](https://github.com/ClickHouse/ClickHouse/pull/34949) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Use a connection pool for the Hive metastore client. [#34940](https://github.com/ClickHouse/ClickHouse/pull/34940) ([lgbo](https://github.com/lgbo-ustc)).
+* Ignore per-column `TTL` in `CREATE TABLE AS` if the new table engine does not support it (i.e. if the engine is not of the `MergeTree` family). [#34938](https://github.com/ClickHouse/ClickHouse/pull/34938) ([Azat Khuzhin](https://github.com/azat)).
+* Allow `LowCardinality` strings for the `ngrambf_v1`/`tokenbf_v1` indexes. Closes [#21865](https://github.com/ClickHouse/ClickHouse/issues/21865). [#34911](https://github.com/ClickHouse/ClickHouse/pull/34911) ([Lars Hiller Eidnes](https://github.com/larspars)).
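
  For example (an illustrative sketch, not part of this commit; the table is hypothetical, and the `tokenbf_v1` arguments are the usual bloom-filter size/hash-count/seed triple):

  ```sql
  CREATE TABLE lc_indexed
  (
      s LowCardinality(String),
      INDEX idx_s s TYPE tokenbf_v1(512, 3, 0) GRANULARITY 4
  )
  ENGINE = MergeTree
  ORDER BY s;
  ```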
+* Allow opening an empty SQLite database if the file doesn't exist. Closes [#33367](https://github.com/ClickHouse/ClickHouse/issues/33367). [#34907](https://github.com/ClickHouse/ClickHouse/pull/34907) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Implement memory statistics for FreeBSD; this is required for `max_server_memory_usage` to work correctly. [#34902](https://github.com/ClickHouse/ClickHouse/pull/34902) ([Alexandre Snarskii](https://github.com/snar)).
+* In previous versions, the progress bar in clickhouse-client could jump forward to near 50% for no reason. This closes [#34324](https://github.com/ClickHouse/ClickHouse/issues/34324). [#34801](https://github.com/ClickHouse/ClickHouse/pull/34801) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Now `ALTER TABLE DROP COLUMN columnX` queries for `MergeTree` table engines will work instantly when `columnX` is an `ALIAS` column. Fixes [#34660](https://github.com/ClickHouse/ClickHouse/issues/34660). [#34786](https://github.com/ClickHouse/ClickHouse/pull/34786) ([alesapin](https://github.com/alesapin)).
+* Show hints when the user mistypes the name of a data skipping index. Closes [#29698](https://github.com/ClickHouse/ClickHouse/issues/29698). [#34764](https://github.com/ClickHouse/ClickHouse/pull/34764) ([flynn](https://github.com/ucasfl)).
+* Support the `remote()`/`cluster()` table functions for `parallel_distributed_insert_select`. [#34728](https://github.com/ClickHouse/ClickHouse/pull/34728) ([Azat Khuzhin](https://github.com/azat)).
+* Do not reset logging that is configured via the `--log-file`/`--errorlog-file` command line options in case of an empty configuration in the config file. [#34718](https://github.com/ClickHouse/ClickHouse/pull/34718) ([Amos Bird](https://github.com/amosbird)).
+* Extract the schema only once on table creation, and prevent reading from local files/external sources to extract the schema on each server startup. [#34684](https://github.com/ClickHouse/ClickHouse/pull/34684) ([Kruglov Pavel](https://github.com/Avogar)).
+* Allow specifying argument names for executable UDFs. This is necessary for formats where the argument name is part of the serialization, like `Native` or `JSONEachRow`. Closes [#34604](https://github.com/ClickHouse/ClickHouse/issues/34604). [#34653](https://github.com/ClickHouse/ClickHouse/pull/34653) ([Maksim Kita](https://github.com/kitaisreal)).
+* `MaterializedMySQL` (experimental feature) now supports `materialized_mysql_tables_list`, a comma-separated list of MySQL database tables which will be replicated by the MaterializedMySQL database engine (default value: empty list, which means all the tables will be replicated), as mentioned at [#32977](https://github.com/ClickHouse/ClickHouse/issues/32977). [#34487](https://github.com/ClickHouse/ClickHouse/pull/34487) ([zzsmdfj](https://github.com/zzsmdfj)).
+* Improve OpenTelemetry span logs for the INSERT operation on distributed tables. [#34480](https://github.com/ClickHouse/ClickHouse/pull/34480) ([Frank Chen](https://github.com/FrankChen021)).
+* Make the znode `ctime` and `mtime` consistent between servers in ClickHouse Keeper. [#33441](https://github.com/ClickHouse/ClickHouse/pull/33441) ([小路](https://github.com/nicelulu)).
+
+#### Build/Testing/Packaging Improvement
+
+* The package repository is migrated to JFrog Artifactory (**Mikhail f. Shiryaev**).
+* Randomize some settings in functional tests, so more possible combinations of settings will be tested. This is yet another fuzzing method to ensure better test coverage. This closes [#32268](https://github.com/ClickHouse/ClickHouse/issues/32268). [#34092](https://github.com/ClickHouse/ClickHouse/pull/34092) ([Kruglov Pavel](https://github.com/Avogar)).
+* Drop PVS-Studio from our CI. [#34680](https://github.com/ClickHouse/ClickHouse/pull/34680) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Add the ability to build stripped binaries with CMake. In previous versions this was performed by dh-tools. [#35196](https://github.com/ClickHouse/ClickHouse/pull/35196) ([alesapin](https://github.com/alesapin)).
+* Smaller "fat-free" `clickhouse-keeper` build. [#35031](https://github.com/ClickHouse/ClickHouse/pull/35031) ([alesapin](https://github.com/alesapin)).
+* Use @robot-clickhouse as the author and committer for PRs like https://github.com/ClickHouse/ClickHouse/pull/34685. [#34793](https://github.com/ClickHouse/ClickHouse/pull/34793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Limit the DWARF version for debug info to 4 at most, because our internal stack symbolizer cannot parse DWARF version 5. This makes sense if you compile ClickHouse with clang-15. [#34777](https://github.com/ClickHouse/ClickHouse/pull/34777) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Remove the `clickhouse-test` debian package as an unneeded complication. CI uses tests from the repository, and standalone testing via the deb package is no longer supported. [#34606](https://github.com/ClickHouse/ClickHouse/pull/34606) ([Ilya Yatsishin](https://github.com/qoega)).
+
+#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
+
+* A fix for the HDFS integration: when the inner buffer size was too small, `NEED_MORE_INPUT` in `HadoopSnappyDecoder` would run multiple times (>= 3) for one compressed block, which caused the input data to be copied into the wrong place in `HadoopSnappyDecoder::buffer`. [#35116](https://github.com/ClickHouse/ClickHouse/pull/35116) ([lgbo](https://github.com/lgbo-ustc)).
+* Ignore obsolete grants in ATTACH GRANT statements. This PR fixes [#34815](https://github.com/ClickHouse/ClickHouse/issues/34815). [#34855](https://github.com/ClickHouse/ClickHouse/pull/34855) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix a segfault in the Postgres database engine when getting the create table query if the database was created using named collections. Closes [#35312](https://github.com/ClickHouse/ClickHouse/issues/35312). [#35313](https://github.com/ClickHouse/ClickHouse/pull/35313) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix a partial merge join duplicate-rows bug. Closes [#31009](https://github.com/ClickHouse/ClickHouse/issues/31009). [#35311](https://github.com/ClickHouse/ClickHouse/pull/35311) ([Vladimir C](https://github.com/vdimir)).
+* Fix possible `Assertion 'position() != working_buffer.end()' failed` while using bzip2 [#35300](https://github.com/ClickHouse/ClickHouse/pull/35300), lz4 [#35296](https://github.com/ClickHouse/ClickHouse/pull/35296), lzma [#35295](https://github.com/ClickHouse/ClickHouse/pull/35295) or brotli [#35281](https://github.com/ClickHouse/ClickHouse/pull/35281) compression with a small `max_read_buffer_size` setting value (all by [Kruglov Pavel](https://github.com/Avogar)). The bug was found in https://github.com/ClickHouse/ClickHouse/pull/35047.
+* Fix a possible segfault in `JSONEachRow` schema inference. [#35291](https://github.com/ClickHouse/ClickHouse/pull/35291) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix the `CHECK TABLE` query in the case when sparse columns are enabled in the table. [#35274](https://github.com/ClickHouse/ClickHouse/pull/35274) ([Anton Popov](https://github.com/CurtizJ)).
+* Avoid `std::terminate` in case of an exception while reading from a remote VFS. [#35257](https://github.com/ClickHouse/ClickHouse/pull/35257) ([Azat Khuzhin](https://github.com/azat)).
+* Fix reading the port from config. Closes [#34776](https://github.com/ClickHouse/ClickHouse/issues/34776). [#35193](https://github.com/ClickHouse/ClickHouse/pull/35193) ([Vladimir C](https://github.com/vdimir)).
+* Fix an error in queries with `WITH TOTALS` in case `HAVING` returned an empty result. This fixes [#33711](https://github.com/ClickHouse/ClickHouse/issues/33711). [#35186](https://github.com/ClickHouse/ClickHouse/pull/35186) ([Amos Bird](https://github.com/amosbird)).
+* Fix a corner case of `replaceRegexpAll`. Closes [#35117](https://github.com/ClickHouse/ClickHouse/issues/35117). [#35182](https://github.com/ClickHouse/ClickHouse/pull/35182) ([Vladimir C](https://github.com/vdimir)).
+* Schema inference didn't work properly in the case of `INSERT INTO FUNCTION s3(...) FROM ...`: it tried to read the schema from the s3 file instead of from the select query. [#35176](https://github.com/ClickHouse/ClickHouse/pull/35176) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix MaterializedPostgreSQL (experimental feature) `table overrides` for partition by, etc. Closes [#35048](https://github.com/ClickHouse/ClickHouse/issues/35048). [#35162](https://github.com/ClickHouse/ClickHouse/pull/35162) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix MaterializedPostgreSQL (experimental feature) adding a new table to replication (ATTACH TABLE) after manually removing it (DETACH TABLE). Closes [#33800](https://github.com/ClickHouse/ClickHouse/issues/33800). Closes [#34922](https://github.com/ClickHouse/ClickHouse/issues/34922). Closes [#34315](https://github.com/ClickHouse/ClickHouse/issues/34315). [#35158](https://github.com/ClickHouse/ClickHouse/pull/35158) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix a partition pruning error when a non-monotonic function is used with the IN operator. This fixes [#35136](https://github.com/ClickHouse/ClickHouse/issues/35136). [#35146](https://github.com/ClickHouse/ClickHouse/pull/35146) ([Amos Bird](https://github.com/amosbird)).
+* Fixed a slightly incorrect translation of YAML configs to XML. [#35135](https://github.com/ClickHouse/ClickHouse/pull/35135) ([Miel Donkers](https://github.com/mdonkers)).
+* Fix `optimize_skip_unused_shards_rewrite_in` for signed columns and negative values. [#35134](https://github.com/ClickHouse/ClickHouse/pull/35134) ([Azat Khuzhin](https://github.com/azat)).
+* The `update_lag` external dictionary configuration option was unusable, showing the error message ``Unexpected key `update_lag` in dictionary source configuration``. [#35089](https://github.com/ClickHouse/ClickHouse/pull/35089) ([Jason Chu](https://github.com/1lann)).
+* Avoid a possible deadlock on server shutdown. [#35081](https://github.com/ClickHouse/ClickHouse/pull/35081) ([Azat Khuzhin](https://github.com/azat)).
+* Fix a missing alias after a function is optimized to a subcolumn when the setting `optimize_functions_to_subcolumns` is enabled. Closes [#33798](https://github.com/ClickHouse/ClickHouse/issues/33798). [#35079](https://github.com/ClickHouse/ClickHouse/pull/35079) ([qieqieplus](https://github.com/qieqieplus)).
+* Fix reading from the `system.asynchronous_inserts` table if there exists an asynchronous insert into a table function. [#35050](https://github.com/ClickHouse/ClickHouse/pull/35050) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix the possible exception `Reading for MergeTree family tables must be done with last position boundary` (relevant to operation on remote VFS). Closes [#34979](https://github.com/ClickHouse/ClickHouse/issues/34979). [#35001](https://github.com/ClickHouse/ClickHouse/pull/35001) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix an unexpected result when using a `-State`-type aggregate function in a window frame. [#34999](https://github.com/ClickHouse/ClickHouse/pull/34999) ([metahys](https://github.com/metahys)).
+* Fix a possible segfault in FileLog (experimental feature). Closes [#30749](https://github.com/ClickHouse/ClickHouse/issues/30749). [#34996](https://github.com/ClickHouse/ClickHouse/pull/34996) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix the possible rare error `Cannot push block to port which already has data`. [#34993](https://github.com/ClickHouse/ClickHouse/pull/34993) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix wrong schema inference for unquoted dates in CSV. Closes [#34768](https://github.com/ClickHouse/ClickHouse/issues/34768). [#34961](https://github.com/ClickHouse/ClickHouse/pull/34961) ([Kruglov Pavel](https://github.com/Avogar)).
+* Integration with Hive: fix an unexpected result when using `IN` in `WHERE` in a Hive query. [#34945](https://github.com/ClickHouse/ClickHouse/pull/34945) ([lgbo](https://github.com/lgbo-ustc)).
+* Avoid busy polling in ClickHouse Keeper while searching for changelog files to delete. [#34931](https://github.com/ClickHouse/ClickHouse/pull/34931) ([Azat Khuzhin](https://github.com/azat)).
+* Fix DateTime64 conversion from PostgreSQL. Closes [#33364](https://github.com/ClickHouse/ClickHouse/issues/33364). [#34910](https://github.com/ClickHouse/ClickHouse/pull/34910) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix possible "Part directory doesn't exist" errors during `INSERT` into a MergeTree table backed by VFS over s3. [#34876](https://github.com/ClickHouse/ClickHouse/pull/34876) ([Azat Khuzhin](https://github.com/azat)).
+* Support DDL queries like CREATE USER being executed on a cross-replicated cluster. [#34860](https://github.com/ClickHouse/ClickHouse/pull/34860) ([Jianmei Zhang](https://github.com/zhangjmruc)).
+* Fix bugs with multiple-column GROUP BY in `WindowView` (experimental feature). [#34859](https://github.com/ClickHouse/ClickHouse/pull/34859) ([vxider](https://github.com/Vxider)).
+* Fix possible failures in S2 functions when queries contain const columns. [#34745](https://github.com/ClickHouse/ClickHouse/pull/34745) ([Bharat Nallan](https://github.com/bharatnc)).
+* Fix a bug with H3 functions containing const columns, which caused queries to fail. [#34743](https://github.com/ClickHouse/ClickHouse/pull/34743) ([Bharat Nallan](https://github.com/bharatnc)).
+* Fix `No such file or directory` with enabled `fsync_part_directory` and vertical merge. [#34739](https://github.com/ClickHouse/ClickHouse/pull/34739) ([Azat Khuzhin](https://github.com/azat)).
+* Fix serialization/printing for the system queries `RELOAD MODEL`, `RELOAD FUNCTION`, `RESTART DISK` when used with `ON CLUSTER`. Closes [#34514](https://github.com/ClickHouse/ClickHouse/issues/34514). [#34696](https://github.com/ClickHouse/ClickHouse/pull/34696) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fix `allow_experimental_projection_optimization` with `enable_global_with_statement` (before, it could lead to a `Stack size too large` error in the case of multiple expressions in the `WITH` clause, and it also executed scalar subqueries again and again, so now it will be more optimal). [#34650](https://github.com/ClickHouse/ClickHouse/pull/34650) ([Azat Khuzhin](https://github.com/azat)).
+* Stop selecting a part for mutation when the other replica has already updated the transaction log for the `ReplicatedMergeTree` engine. [#34633](https://github.com/ClickHouse/ClickHouse/pull/34633) ([Jianmei Zhang](https://github.com/zhangjmruc)).
+* Fix an incorrect result of a trivial count query when the part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)).
+* Fix an inconsistency of the `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)).
+### <a id="222"></a> ClickHouse release v22.2, 2022-02-17
 
 #### Upgrade Notes
@@ -174,7 +308,7 @@
 * This PR allows using multiple LDAP storages in the same list of user directories. It worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)).
 
-### ClickHouse release v22.1, 2022-01-18
+### <a id="221"></a> ClickHouse release v22.1, 2022-01-18
 
 #### Upgrade Notes

CMakeLists.txt

@@ -248,7 +248,9 @@ endif()
 if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
     set(USE_DEBUG_HELPERS ON)
 endif()
 
 option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
+option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
 
 # Create BuildID when using lld. For other linkers it is created by default.
 if (LINKER_NAME MATCHES "lld$")
@@ -263,6 +265,11 @@ if (OBJCOPY_PATH AND YANDEX_OFFICIAL_BUILD AND (NOT CMAKE_TOOLCHAIN_FILE))
     set (USE_BINARY_HASH 1)
 endif ()
 
+# Allows to build stripped binary in a separate directory
+if (OBJCOPY_PATH AND READELF_PATH)
+    set(BUILD_STRIPPED_BINARIES_PREFIX "" CACHE STRING "Build stripped binaries with debug info in separate directory")
+endif()
+
 cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd
@@ -285,8 +292,13 @@ include(cmake/cpu_features.cmake)
 set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
 
 # Reproducible builds
-set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
-set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
+# If turned `ON`, remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE().
+option(ENABLE_BUILD_PATH_MAPPING "Enable remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE(). It's to generate reproducible builds. See https://reproducible-builds.org/docs/build-path" ON)
+if (ENABLE_BUILD_PATH_MAPPING)
+    set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
+    set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
+endif()
 
 if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
     # CMake < 3.12 doesn't support setting 20 as a C++ standard version.

README.md
@@ -10,6 +10,6 @@ ClickHouse® is an open-source column-oriented database management system that a
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
 * [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
 * [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
-* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
+* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
 * [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
 * [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.

SECURITY.md
@@ -15,7 +15,7 @@ The following versions of ClickHouse server are currently being supported with s
 | 20.x   | :x: |
 | 21.1   | :x: |
 | 21.2   | :x: |
-| 21.3   | ✅ |
+| 21.3   | :x: |
 | 21.4   | :x: |
 | 21.5   | :x: |
 | 21.6   | :x: |
@@ -23,9 +23,11 @@ The following versions of ClickHouse server are currently being supported with s
 | 21.8   | ✅ |
 | 21.9   | :x: |
 | 21.10  | :x: |
-| 21.11  | ✅ |
-| 21.12  | ✅ |
+| 21.11  | :x: |
+| 21.12  | :x: |
 | 22.1   | ✅ |
+| 22.2   | ✅ |
+| 22.3   | ✅ |
 
 ## Reporting a Vulnerability

base/base/CMakeLists.txt
@@ -17,6 +17,7 @@ set (SRCS
     terminalColors.cpp
     errnoToString.cpp
    StringRef.cpp
+    safeExit.cpp
 )
 
 if (ENABLE_REPLXX)

base/base/StringRef.h
@@ -19,6 +19,12 @@
 #if defined(__SSE4_2__)
 #include <smmintrin.h>
 #include <nmmintrin.h>
+#define CRC_INT _mm_crc32_u64
+#endif
+
+#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
+#include <arm_acle.h>
+#define CRC_INT __crc32cd
 #endif
@@ -40,9 +46,9 @@ struct StringRef
     constexpr StringRef(const char * data_, size_t size_) : data(data_), size(size_) {}
 
-    StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
+    StringRef(const std::string & s) : data(s.data()), size(s.size()) {} /// NOLINT
     constexpr explicit StringRef(std::string_view s) : data(s.data()), size(s.size()) {}
-    constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {}
+    constexpr StringRef(const char * data_) : StringRef(std::string_view{data_}) {} /// NOLINT
     constexpr StringRef() = default;
 
     std::string toString() const { return std::string(data, size); }
@@ -205,7 +211,7 @@ struct StringRefHash64
     }
 };
 
-#if defined(__SSE4_2__)
+#if defined(CRC_INT)
 
 /// Parts are taken from CityHash.
@@ -281,13 +287,13 @@ struct CRC32Hash
         do
         {
            UInt64 word = unalignedLoad<UInt64>(pos);
-            res = _mm_crc32_u64(res, word);
+            res = CRC_INT(res, word);
 
            pos += 8;
         } while (pos + 8 < end);
 
        UInt64 word = unalignedLoad<UInt64>(end - 8); /// I'm not sure if this is normal.
-        res = _mm_crc32_u64(res, word);
+        res = CRC_INT(res, word);
 
        return res;
     }

View File

@@ -26,3 +26,27 @@ void insertAtEnd(std::vector<T> & dest, std::vector<T> && src)
     dest.insert(dest.end(), std::make_move_iterator(src.begin()), std::make_move_iterator(src.end()));
     src.clear();
 }
+
+template <typename Container>
+void insertAtEnd(Container & dest, const Container & src)
+{
+    if (src.empty())
+        return;
+
+    dest.insert(dest.end(), src.begin(), src.end());
+}
+
+template <typename Container>
+void insertAtEnd(Container & dest, Container && src)
+{
+    if (src.empty())
+        return;
+    if (dest.empty())
+    {
+        dest.swap(src);
+        return;
+    }
+
+    dest.insert(dest.end(), std::make_move_iterator(src.begin()), std::make_move_iterator(src.end()));
+    src.clear();
+}
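
The point of the new rvalue `insertAtEnd` overload is the empty-destination fast path: it degenerates to an O(1) buffer swap instead of per-element moves. A minimal usage sketch (illustrative, assuming the overloads above are in scope):

```cpp
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> dest;
    std::vector<std::string> src{"a", "b", "c"};

    insertAtEnd(dest, std::move(src)); /// dest was empty: buffers are swapped, src ends up empty
    std::vector<std::string> more{"d", "e"};
    insertAtEnd(dest, more);           /// const-ref overload copies; `more` is left intact
    /// dest now holds {"a", "b", "c", "d", "e"}
}
```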

18 base/base/safeExit.cpp Normal file
View File

@@ -0,0 +1,18 @@
+#if defined(OS_LINUX)
+#    include <sys/syscall.h>
+#endif
+#include <unistd.h>
+#include <base/safeExit.h>
+#include <base/defines.h>
+
+[[noreturn]] void safeExit(int code)
+{
+#if defined(THREAD_SANITIZER) && defined(OS_LINUX)
+    /// Thread sanitizer tries to do something on exit that we don't need if we want to exit immediately,
+    /// while connection handling threads are still run.
+    (void)syscall(SYS_exit_group, code);
+    __builtin_unreachable();
+#else
+    _exit(code);
+#endif
+}

4 base/base/safeExit.h Normal file
View File

@@ -0,0 +1,4 @@
+#pragma once
+
+/// _exit() with a workaround for TSan.
+[[noreturn]] void safeExit(int code);
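
A hypothetical caller, to show the intent: in an abrupt-shutdown path, `exit()` would run atexit hooks and TSan's shutdown machinery while connection threads may still be running, whereas `safeExit()` terminates the whole process immediately (the handler and signal choice below are illustrative, not part of the commit):

```cpp
#include <csignal>
#include <base/safeExit.h>

static void onTerminate(int sig)
{
    /// _exit()/SYS_exit_group are async-signal-safe; exit() is not,
    /// and under TSan it can hang while worker threads are still alive.
    safeExit(128 + sig);
}

int main()
{
    std::signal(SIGTERM, onTerminate);
    /// ... run the server loop ...
}
```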

View File

@@ -18,7 +18,7 @@
 #include "Common/config_version.h"
 #include <Common/config.h>
 
-#if USE_SENTRY
+#if USE_SENTRY && !defined(KEEPER_STANDALONE_BUILD)
 
 #    include <sentry.h>
 #    include <stdio.h>

View File

@@ -1,5 +1,13 @@
 include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
 add_headers_and_sources(loggers .)
+
+# Standard version depends on DBMS and works with text log
 add_library(loggers ${loggers_sources} ${loggers_headers})
+target_compile_definitions(loggers PUBLIC WITH_TEXT_LOG=1)
 target_link_libraries(loggers PRIVATE dbms clickhouse_common_io)
 target_include_directories(loggers PUBLIC ..)
+
+# Lightweight version doesn't work with textlog and also doesn't depend on DBMS
+add_library(loggers_no_text_log ${loggers_sources} ${loggers_headers})
+target_link_libraries(loggers_no_text_log PRIVATE clickhouse_common_io)
+target_include_directories(loggers PUBLIC ..)

View File

@@ -9,7 +9,11 @@
 #include <Poco/ConsoleChannel.h>
 #include <Poco/Logger.h>
 #include <Poco/Net/RemoteSyslogChannel.h>
-#include <Interpreters/TextLog.h>
+
+#ifdef WITH_TEXT_LOG
+#include <Interpreters/TextLog.h>
+#endif
 
 #include <filesystem>
 
 namespace fs = std::filesystem;
 
@@ -30,17 +34,21 @@ static std::string createDirectory(const std::string & file)
     return path;
 };
 
+#ifdef WITH_TEXT_LOG
 void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
 {
     text_log = log;
     text_log_max_priority = max_priority;
 }
+#endif
 
 void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Logger & logger /*_root*/, const std::string & cmd_name)
 {
+#ifdef WITH_TEXT_LOG
     if (split)
         if (auto log = text_log.lock())
            split->addTextLog(log, text_log_max_priority);
+#endif
 
     auto current_logger = config.getString("logger", "");
     if (config_logger == current_logger) //-V1051

View File

@@ -7,10 +7,12 @@
 #include <Poco/Util/Application.h>
 #include "OwnSplitChannel.h"
 
+#ifdef WITH_TEXT_LOG
 namespace DB
 {
     class TextLog;
 }
+#endif
 
 namespace Poco::Util
 {
@@ -27,7 +29,9 @@ public:
     /// Close log files. On next log write files will be reopened.
     void closeLogs(Poco::Logger & logger);
 
+#ifdef WITH_TEXT_LOG
     void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
+#endif
 
 private:
     Poco::AutoPtr<Poco::FileChannel> log_file;
@@ -37,8 +41,10 @@ private:
     /// Previous value of logger element in config. It is used to reinitialize loggers whenever the value changed.
     std::string config_logger;
 
+#ifdef WITH_TEXT_LOG
     std::weak_ptr<DB::TextLog> text_log;
     int text_log_max_priority = -1;
+#endif
 
     Poco::AutoPtr<DB::OwnSplitChannel> split;
 };

View File

@@ -20,10 +20,13 @@ namespace DB
 {
 void OwnSplitChannel::log(const Poco::Message & msg)
 {
+#ifdef WITH_TEXT_LOG
     auto logs_queue = CurrentThread::getInternalTextLogsQueue();
 
     if (channels.empty() && (logs_queue == nullptr || msg.getPriority() > logs_queue->max_priority))
         return;
+#endif
 
     if (auto * masker = SensitiveDataMasker::getInstance())
     {
@@ -86,6 +89,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
             channel.first->log(msg); // ordinary child
     }
 
+#ifdef WITH_TEXT_LOG
     auto logs_queue = CurrentThread::getInternalTextLogsQueue();
 
     /// Log to "TCP queue" if message is not too noisy
@@ -137,6 +141,7 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
         if (text_log_locked)
             text_log_locked->add(elem);
     }
+#endif
 }
 
@@ -145,12 +150,14 @@ void OwnSplitChannel::addChannel(Poco::AutoPtr<Poco::Channel> channel, const std
     channels.emplace(name, ExtendedChannelPtrPair(std::move(channel), dynamic_cast<ExtendedLogChannel *>(channel.get())));
 }
 
+#ifdef WITH_TEXT_LOG
 void OwnSplitChannel::addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
 {
     std::lock_guard<std::mutex> lock(text_log_mutex);
     text_log = log;
     text_log_max_priority.store(max_priority, std::memory_order_relaxed);
 }
+#endif
 
 void OwnSplitChannel::setLevel(const std::string & name, int level)
 {

View File

@@ -7,10 +7,12 @@
 #include <Poco/Channel.h>
 #include "ExtendedLogChannel.h"
 
+#ifdef WITH_TEXT_LOG
 namespace DB
 {
     class TextLog;
 }
+#endif
 
 namespace DB
 {
@@ -25,7 +27,9 @@ public:
     /// Adds a child channel
     void addChannel(Poco::AutoPtr<Poco::Channel> channel, const std::string & name);
 
+#ifdef WITH_TEXT_LOG
     void addTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
+#endif
 
     void setLevel(const std::string & name, int level);
 
@@ -40,8 +44,10 @@ private:
     std::mutex text_log_mutex;
 
+#ifdef WITH_TEXT_LOG
     std::weak_ptr<DB::TextLog> text_log;
     std::atomic<int> text_log_max_priority = -1;
+#endif
 };
 
 }

View File

@@ -11,10 +11,6 @@ DATASET="${TABLE}_v1.tar.xz"
 QUERIES_FILE="queries.sql"
 TRIES=3
 
-AMD64_BIN_URL="https://builds.clickhouse.com/master/amd64/clickhouse"
-AARCH64_BIN_URL="https://builds.clickhouse.com/master/aarch64/clickhouse"
-POWERPC64_BIN_URL="https://builds.clickhouse.com/master/ppc64le/clickhouse"
-
 # Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.
 
 FASTER_DOWNLOAD=wget
@@ -33,20 +29,60 @@ fi
 mkdir -p clickhouse-benchmark-$SCALE
 pushd clickhouse-benchmark-$SCALE
 
-if [[ ! -f clickhouse ]]; then
-    CPU=$(uname -m)
-    if [[ ($CPU == x86_64) || ($CPU == amd64) ]]; then
-        $FASTER_DOWNLOAD "$AMD64_BIN_URL"
-    elif [[ $CPU == aarch64 ]]; then
-        $FASTER_DOWNLOAD "$AARCH64_BIN_URL"
-    elif [[ $CPU == powerpc64le ]]; then
-        $FASTER_DOWNLOAD "$POWERPC64_BIN_URL"
-    else
-        echo "Unsupported CPU type: $CPU"
-        exit 1
-    fi
-fi
+OS=$(uname -s)
+ARCH=$(uname -m)
+
+DIR=
+
+if [ "${OS}" = "Linux" ]
+then
+    if [ "${ARCH}" = "x86_64" ]
+    then
+        DIR="amd64"
+    elif [ "${ARCH}" = "aarch64" ]
+    then
+        DIR="aarch64"
+    elif [ "${ARCH}" = "powerpc64le" ]
+    then
+        DIR="powerpc64le"
+    fi
+elif [ "${OS}" = "FreeBSD" ]
+then
+    if [ "${ARCH}" = "x86_64" ]
+    then
+        DIR="freebsd"
+    elif [ "${ARCH}" = "aarch64" ]
+    then
+        DIR="freebsd-aarch64"
+    elif [ "${ARCH}" = "powerpc64le" ]
+    then
+        DIR="freebsd-powerpc64le"
+    fi
+elif [ "${OS}" = "Darwin" ]
+then
+    if [ "${ARCH}" = "x86_64" ]
+    then
+        DIR="macos"
+    elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
+    then
+        DIR="macos-aarch64"
+    fi
+fi
+
+if [ -z "${DIR}" ]
+then
+    echo "The '${OS}' operating system with the '${ARCH}' architecture is not supported."
+    exit 1
+fi
+
+URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
+echo
+echo "Will download ${URL}"
+echo
+curl -O "${URL}" && chmod a+x clickhouse || exit 1
+echo
+echo "Successfully downloaded the ClickHouse binary"
 
 chmod a+x clickhouse
 
 if [[ ! -f $QUERIES_FILE ]]; then
@@ -88,7 +124,12 @@ echo
 cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
     sync
-    echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
+    if [ "${OS}" = "Darwin" ]
+    then
+        sudo purge > /dev/null
+    else
+        echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
+    fi
 
     echo -n "["
     for i in $(seq 1 $TRIES); do
@@ -104,27 +145,45 @@ echo
 echo "Benchmark complete. System info:"
 echo
 
-echo '----Version, build id-----------'
-./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
-./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
-./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
-echo '----CPU-------------------------'
-cat /proc/cpuinfo | grep -i -F 'model name' | uniq
-lscpu
-echo '----Block Devices---------------'
-lsblk
-echo '----Disk Free and Total--------'
-df -h .
-echo '----Memory Free and Total-------'
-free -h
-echo '----Physical Memory Amount------'
-cat /proc/meminfo | grep MemTotal
-echo '----RAID Info-------------------'
-cat /proc/mdstat
-#echo '----PCI-------------------------'
-#lspci
-#echo '----All Hardware Info-----------'
-#lshw
-echo '--------------------------------'
+if [ "${OS}" = "Darwin" ]
+then
+    echo '----Version, build id-----------'
+    ./clickhouse local --query "SELECT format('Version: {}', version())"
+    sw_vers | grep BuildVersion
+    ./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
+    ./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
+    echo '----CPU-------------------------'
+    sysctl hw.model
+    sysctl -a | grep -E 'hw.activecpu|hw.memsize|hw.byteorder|cachesize'
+    echo '----Disk Free and Total--------'
+    df -h .
+    echo '----Memory Free and Total-------'
+    vm_stat
+    echo '----Physical Memory Amount------'
+    ls -l /var/vm
+    echo '--------------------------------'
+else
+    echo '----Version, build id-----------'
+    ./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
+    ./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
+    ./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
+    echo '----CPU-------------------------'
+    cat /proc/cpuinfo | grep -i -F 'model name' | uniq
+    lscpu
+    echo '----Block Devices---------------'
+    lsblk
+    echo '----Disk Free and Total--------'
+    df -h .
+    echo '----Memory Free and Total-------'
+    free -h
+    echo '----Physical Memory Amount------'
+    cat /proc/meminfo | grep MemTotal
+    echo '----RAID Info-------------------'
+    cat /proc/mdstat
+    #echo '----PCI-------------------------'
+    #lspci
+    #echo '----All Hardware Info-----------'
+    #lshw
+    echo '--------------------------------'
+fi
 echo

View File

@@ -2,11 +2,11 @@
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54460)
+SET(VERSION_REVISION 54461)
 SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 3)
+SET(VERSION_MINOR 4)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 75366fc95e510b7ac76759ef670702ae5f488a51)
-SET(VERSION_DESCRIBE v22.3.1.1-testing)
-SET(VERSION_STRING 22.3.1.1)
+SET(VERSION_GITHASH 92ab33f560e638d1989c5ca543021ab53d110f5c)
+SET(VERSION_DESCRIBE v22.4.1.1-testing)
+SET(VERSION_STRING 22.4.1.1)
 # end of autochange

View File

@@ -55,5 +55,5 @@ endif ()
 if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
     message(STATUS
         "${CMAKE_CURRENT_SOURCE_DIR}: Have ${AVAILABLE_PHYSICAL_MEMORY} megabytes of memory.
-        Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS}")
+        Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
 endif ()

25 cmake/strip.sh Executable file
View File

@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+BINARY_PATH=$1
+BINARY_NAME=$(basename $BINARY_PATH)
+DESTINATION_STRIPPED_DIR=$2
+OBJCOPY_PATH=${3:objcopy}
+READELF_PATH=${4:readelf}
+
+BUILD_ID=$($READELF_PATH -n $1 | sed -n '/Build ID/ { s/.*: //p; q; }')
+BUILD_ID_PREFIX=${BUILD_ID:0:2}
+BUILD_ID_SUFFIX=${BUILD_ID:2}
+TEMP_BINARY_PATH="${BINARY_PATH}_temp"
+
+DESTINATION_DEBUG_INFO_DIR="$DESTINATION_STRIPPED_DIR/lib/debug/.build-id"
+DESTINATION_STRIP_BINARY_DIR="$DESTINATION_STRIPPED_DIR/bin"
+
+mkdir -p "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX"
+mkdir -p "$DESTINATION_STRIP_BINARY_DIR"
+
+$OBJCOPY_PATH --only-keep-debug "$BINARY_PATH" "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug"
+
+touch "$TEMP_BINARY_PATH"
+$OBJCOPY_PATH --add-gnu-debuglink "$DESTINATION_DEBUG_INFO_DIR/$BUILD_ID_PREFIX/$BUILD_ID_SUFFIX.debug" "$BINARY_PATH" "$TEMP_BINARY_PATH"
+$OBJCOPY_PATH --strip-all "$TEMP_BINARY_PATH" "$DESTINATION_STRIP_BINARY_DIR/$BINARY_NAME"
+rm -f "$TEMP_BINARY_PATH"
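
For context, a sketch of how the resulting layout can be consumed (paths illustrative, not part of the commit): the stripped binary carries a `.gnu_debuglink`, and the detached `.debug` file sits under `lib/debug/.build-id/<xx>/<rest>.debug`, so a debugger only needs to be pointed at that root:

```bash
# Illustrative only: point GDB at the stripped tree; it resolves the detached
# debug info through the GNU build-id link written by objcopy above.
gdb -ex 'set debug-file-directory /path/to/stripped/lib/debug' /path/to/stripped/bin/clickhouse
```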

26 cmake/strip_binary.cmake Normal file
View File

@@ -0,0 +1,26 @@
+macro(clickhouse_strip_binary)
+    set(oneValueArgs TARGET DESTINATION_DIR BINARY_PATH)
+
+    cmake_parse_arguments(STRIP "" "${oneValueArgs}" "" ${ARGN})
+
+    if (NOT DEFINED STRIP_TARGET)
+        message(FATAL_ERROR "A target name must be provided for stripping binary")
+    endif()
+
+    if (NOT DEFINED STRIP_BINARY_PATH)
+        message(FATAL_ERROR "A binary path name must be provided for stripping binary")
+    endif()
+
+    if (NOT DEFINED STRIP_DESTINATION_DIR)
+        message(FATAL_ERROR "Destination directory for stripped binary must be provided")
+    endif()
+
+    add_custom_command(TARGET ${STRIP_TARGET} POST_BUILD
+        COMMAND bash ${ClickHouse_SOURCE_DIR}/cmake/strip.sh ${STRIP_BINARY_PATH} ${STRIP_DESTINATION_DIR} ${OBJCOPY_PATH} ${READELF_PATH}
+        COMMENT "Stripping clickhouse binary" VERBATIM
+    )
+
+    install(PROGRAMS ${STRIP_DESTINATION_DIR}/bin/${STRIP_TARGET} DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+    install(DIRECTORY ${STRIP_DESTINATION_DIR}/lib/debug DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
+endmacro()
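
A hypothetical invocation of the macro (the target name and paths are illustrative, not taken from this commit):

```cmake
clickhouse_strip_binary(TARGET clickhouse
    DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/stripped
    BINARY_PATH ${CMAKE_CURRENT_BINARY_DIR}/clickhouse)
```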

View File

@@ -169,3 +169,33 @@ if (OBJCOPY_PATH)
 else ()
     message (FATAL_ERROR "Cannot find objcopy.")
 endif ()
+
+# Readelf (FIXME copypaste)
+if (COMPILER_GCC)
+    find_program (READELF_PATH NAMES "llvm-readelf" "llvm-readelf-13" "llvm-readelf-12" "llvm-readelf-11" "readelf")
+else ()
+    find_program (READELF_PATH NAMES "llvm-readelf-${COMPILER_VERSION_MAJOR}" "llvm-readelf" "readelf")
+endif ()
+
+if (NOT READELF_PATH AND OS_DARWIN)
+    find_program (BREW_PATH NAMES "brew")
+    if (BREW_PATH)
+        execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
+        if (LLVM_PREFIX)
+            find_program (READELF_PATH NAMES "llvm-readelf" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
+        endif ()
+        if (NOT READELF_PATH)
+            execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
+            if (BINUTILS_PREFIX)
+                find_program (READELF_PATH NAMES "readelf" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
+            endif ()
+        endif ()
+    endif ()
+endif ()
+
+if (READELF_PATH)
+    message (STATUS "Using readelf: ${READELF_PATH}")
+else ()
+    message (FATAL_ERROR "Cannot find readelf.")
+endif ()

2 contrib/icu vendored

@@ -1 +1 @@
-Subproject commit faa2f9f9e1fe74c5ed00eba371d2830134cdbea1
+Subproject commit a56dde820dc35665a66f2e9ee8ba58e75049b668

View File

@@ -212,7 +212,9 @@ set(ICUUC_SOURCES
 "${ICU_SOURCE_DIR}/common/ubiditransform.cpp"
 "${ICU_SOURCE_DIR}/common/pluralmap.cpp"
 "${ICU_SOURCE_DIR}/common/static_unicode_sets.cpp"
-"${ICU_SOURCE_DIR}/common/restrace.cpp")
+"${ICU_SOURCE_DIR}/common/restrace.cpp"
+"${ICU_SOURCE_DIR}/common/emojiprops.cpp"
+"${ICU_SOURCE_DIR}/common/lstmbe.cpp")
 
 set(ICUI18N_SOURCES
 "${ICU_SOURCE_DIR}/i18n/ucln_in.cpp"
@@ -398,7 +400,6 @@ set(ICUI18N_SOURCES
 "${ICU_SOURCE_DIR}/i18n/sharedbreakiterator.cpp"
 "${ICU_SOURCE_DIR}/i18n/scientificnumberformatter.cpp"
 "${ICU_SOURCE_DIR}/i18n/dayperiodrules.cpp"
-"${ICU_SOURCE_DIR}/i18n/nounit.cpp"
 "${ICU_SOURCE_DIR}/i18n/number_affixutils.cpp"
 "${ICU_SOURCE_DIR}/i18n/number_compact.cpp"
 "${ICU_SOURCE_DIR}/i18n/number_decimalquantity.cpp"
@@ -446,12 +447,21 @@ set(ICUI18N_SOURCES
 "${ICU_SOURCE_DIR}/i18n/formattedvalue.cpp"
 "${ICU_SOURCE_DIR}/i18n/formattedval_iterimpl.cpp"
 "${ICU_SOURCE_DIR}/i18n/formattedval_sbimpl.cpp"
-"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp")
+"${ICU_SOURCE_DIR}/i18n/formatted_string_builder.cpp"
+"${ICU_SOURCE_DIR}/i18n/measunit_extra.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_symbolswrapper.cpp"
+"${ICU_SOURCE_DIR}/i18n/number_usageprefs.cpp"
+"${ICU_SOURCE_DIR}/i18n/numrange_capi.cpp"
+"${ICU_SOURCE_DIR}/i18n/pluralranges.cpp"
+"${ICU_SOURCE_DIR}/i18n/units_complexconverter.cpp"
+"${ICU_SOURCE_DIR}/i18n/units_converter.cpp"
+"${ICU_SOURCE_DIR}/i18n/units_data.cpp"
+"${ICU_SOURCE_DIR}/i18n/units_router.cpp")
 
 file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" CONTENT " ")
 enable_language(ASM)
 set(ICUDATA_SOURCES
-    "${ICUDATA_SOURCE_DIR}/icudt66l_dat.S"
+    "${ICUDATA_SOURCE_DIR}/icudt70l_dat.S"
     "${CMAKE_CURRENT_BINARY_DIR}/empty.cpp" # Without this cmake can incorrectly detects library type (OBJECT) instead of SHARED/STATIC
 )

2 contrib/icudata vendored

@@ -1 +1 @@
-Subproject commit f020820388e3faafb44cc643574a2d563dfde572
+Subproject commit 72d9a4a7febc904e2b0a534ccb25ae40fac5f1e5

2 contrib/jemalloc vendored

@@ -1 +1 @@
-Subproject commit ca709c3139f77f4c00a903cdee46d71e9028f6c6
+Subproject commit 78b58379c854a639df79beb3289351129d863d4b

View File

@@ -4,12 +4,21 @@
 extern "C" {
 #endif
 
+#if !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wredundant-decls"
+#endif
+
 #include <jemalloc/jemalloc_defs.h>
 #include <jemalloc/jemalloc_rename.h>
 #include <jemalloc/jemalloc_macros.h>
 #include <jemalloc/jemalloc_protos.h>
 #include <jemalloc/jemalloc_typedefs.h>
 
+#if !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
 #ifdef __cplusplus
 }
 #endif

View File

@@ -1,4 +1,8 @@
-if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
+# During cross-compilation in our CI we have to use llvm-tblgen and other building tools
+# tools to be build for host architecture and everything else for target architecture (e.g. AArch64)
+# Possible workaround is to use llvm-tblgen from some package...
+# But lets just enable LLVM for native builds
+if (CMAKE_CROSSCOMPILING OR SANITIZE STREQUAL "undefined")
     set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
 else()
     set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
@@ -22,9 +26,6 @@ set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm")
 set (REQUIRED_LLVM_LIBRARIES
     LLVMExecutionEngine
     LLVMRuntimeDyld
-    LLVMX86CodeGen
-    LLVMX86Desc
-    LLVMX86Info
     LLVMAsmPrinter
     LLVMDebugInfoDWARF
     LLVMGlobalISel
@@ -56,6 +57,12 @@ set (REQUIRED_LLVM_LIBRARIES
     LLVMDemangle
 )
 
+if (ARCH_AMD64)
+    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
+elseif (ARCH_AARCH64)
+    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
+endif ()
+
 #function(llvm_libs_all REQUIRED_LLVM_LIBRARIES)
 #    llvm_map_components_to_libnames (result all)
 #    if (USE_STATIC_LIBRARIES OR NOT "LLVM" IN_LIST result)

View File

@@ -4,7 +4,7 @@ FROM ubuntu:20.04
 ARG apt_archive="http://archive.ubuntu.com"
 RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
 
-ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
+ARG repository="deb https://packages.clickhouse.com/deb stable main"
 ARG version=22.1.1.*
 
 # set non-empty deb_location_url url to create a docker image
@@ -58,7 +58,7 @@ RUN groupadd -r clickhouse --gid=101 \
         wget \
         tzdata \
     && mkdir -p /etc/apt/sources.list.d \
-    && apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
+    && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \
     && echo $repository > /etc/apt/sources.list.d/clickhouse.list \
     && if [ -n "$deb_location_url" ]; then \
            echo "installing from custom url with deb packages: $deb_location_url" \

View File

@@ -263,9 +263,20 @@ function run_tests
     if [[ $NPROC == 0 ]]; then
         NPROC=1
     fi
-    time clickhouse-test --hung-check -j "${NPROC}" --order=random \
-        --fast-tests-only --no-long --testname --shard --zookeeper --check-zookeeper-session \
-        -- "$FASTTEST_FOCUS" 2>&1 \
+
+    local test_opts=(
+        --hung-check
+        --fast-tests-only
+        --no-long
+        --testname
+        --shard
+        --zookeeper
+        --check-zookeeper-session
+        --order random
+        --print-time
+        --jobs "${NPROC}"
+    )
+    time clickhouse-test "${test_opts[@]}" -- "$FASTTEST_FOCUS" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee "$FASTTEST_OUTPUT/test_result.txt"
     set -e

View File

@@ -60,5 +60,5 @@ clientPort=2181 \n\
 maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg
 
 RUN mkdir /zookeeper && chmod -R 777 /zookeeper
-ENV TZ=Europe/Moscow
+ENV TZ=Etc/UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

View File

@@ -42,6 +42,9 @@ COPY prepare_hive_data.sh /
 COPY demo_data.txt /
 
 ENV PATH=/apache-hive-2.3.9-bin/bin:/hadoop-3.1.0/bin:/hadoop-3.1.0/sbin:$PATH
+RUN service ssh start && sed s/HOSTNAME/$HOSTNAME/ /hadoop-3.1.0/etc/hadoop/core-site.xml.template > /hadoop-3.1.0/etc/hadoop/core-site.xml && hdfs namenode -format
+RUN apt install -y python3 python3-pip
+RUN pip3 install flask requests
+COPY http_api_server.py /
 COPY start.sh /

View File

@@ -0,0 +1,70 @@
+import os
+import subprocess
+import datetime
+from flask import Flask, flash, request, redirect, url_for
+
+def run_command(command, wait=False):
+    print("{} - execute shell command:{}".format(datetime.datetime.now(), command))
+    lines = []
+    p = subprocess.Popen(command,
+                         stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT,
+                         shell=True)
+    if wait:
+        for l in iter(p.stdout.readline, b''):
+            lines.append(l)
+        p.poll()
+        return (lines, p.returncode)
+    else:
+        return(iter(p.stdout.readline, b''), 0)
+
+
+UPLOAD_FOLDER = './'
+ALLOWED_EXTENSIONS = {'txt', 'sh'}
+app = Flask(__name__)
+app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
+
+@app.route('/')
+def hello_world():
+    return 'Hello World'
+
+
+def allowed_file(filename):
+    return '.' in filename and \
+           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
+
+
+@app.route('/upload', methods=['GET', 'POST'])
+def upload_file():
+    if request.method == 'POST':
+        # check if the post request has the file part
+        if 'file' not in request.files:
+            flash('No file part')
+            return redirect(request.url)
+        file = request.files['file']
+        # If the user does not select a file, the browser submits an
+        # empty file without a filename.
+        if file.filename == '':
+            flash('No selected file')
+            return redirect(request.url)
+        if file and allowed_file(file.filename):
+            filename = file.filename
+            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
+            return redirect(url_for('upload_file', name=filename))
+    return '''
+    <!doctype html>
+    <title>Upload new File</title>
+    <h1>Upload new File</h1>
+    <form method=post enctype=multipart/form-data>
+      <input type=file name=file>
+      <input type=submit value=Upload>
+    </form>
+    '''
+
+@app.route('/run', methods=['GET', 'POST'])
+def parse_request():
+    data = request.data  # data is empty
+    run_command(data, wait=True)
+    return 'Ok'
+
+if __name__ == '__main__':
+    app.run(port=5011)

View File

@@ -2,5 +2,9 @@
 hive -e "create database test"
 
 hive -e "create table test.demo(id string, score int) PARTITIONED BY(day string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'; create table test.demo_orc(id string, score int) PARTITIONED BY(day string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat'; "
+hive -e "create table test.parquet_demo(id string, score int) PARTITIONED BY(day string, hour string) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'"
 hive -e "create table test.demo_text(id string, score int, day string)row format delimited fields terminated by ','; load data local inpath '/demo_data.txt' into table test.demo_text "
 hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.demo partition(day) select * from test.demo_text; insert into test.demo_orc partition(day) select * from test.demo_text"
+hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.parquet_demo partition(day, hour) select id, score, day, '00' as hour from test.demo;"
+hive -e "set hive.exec.dynamic.partition.mode=nonstrict;insert into test.parquet_demo partition(day, hour) select id, score, day, '01' as hour from test.demo;"

View File

@@ -1,6 +1,5 @@
 service ssh start
 sed s/HOSTNAME/$HOSTNAME/ /hadoop-3.1.0/etc/hadoop/core-site.xml.template > /hadoop-3.1.0/etc/hadoop/core-site.xml
-hadoop namenode -format
 start-all.sh
 service mysql start
 mysql -u root -e "CREATE USER \"test\"@\"localhost\" IDENTIFIED BY \"test\""
@@ -9,4 +8,4 @@ schematool -initSchema -dbType mysql
 #nohup hiveserver2 &
 nohup hive --service metastore &
 bash /prepare_hive_data.sh
-while true; do sleep 1000; done
+python3 http_api_server.py

View File

@@ -40,7 +40,7 @@ RUN apt-get update \
         /tmp/* \
     && apt-get clean
 
-ENV TZ=Europe/Moscow
+ENV TZ=Etc/UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
 ENV DOCKER_CHANNEL stable

View File

@@ -13,6 +13,17 @@ COPY s3downloader /s3downloader
 
 ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
 ENV DATASETS="hits visits"
+ENV EXPORT_S3_STORAGE_POLICIES=1
+
+# Download Minio-related binaries
+RUN arch=${TARGETARCH:-amd64} \
+    && wget "https://dl.min.io/server/minio/release/linux-${arch}/minio" \
+    && chmod +x ./minio \
+    && wget "https://dl.min.io/client/mc/release/linux-${arch}/mc" \
+    && chmod +x ./mc
+
+ENV MINIO_ROOT_USER="clickhouse"
+ENV MINIO_ROOT_PASSWORD="clickhouse"
+COPY setup_minio.sh /
 
 COPY run.sh /
 CMD ["/bin/bash", "/run.sh"]

View File

@@ -17,6 +17,8 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
 # install test configs
 /usr/share/clickhouse-test/config/install.sh
 
+./setup_minio.sh
+
 function start()
 {
     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@@ -93,6 +95,8 @@ else
     clickhouse-client --query "SHOW TABLES FROM test"
     clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
     clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
+    clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+    clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits"
 fi
 
 clickhouse-client --query "SHOW TABLES FROM test"

View File

@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# TODO: Make this file shared with stateless tests
+#
+# Usage for local run:
+#
+# ./docker/test/stateful/setup_minio.sh ./tests/
+#
+
+set -e -x -a -u
+
+ls -lha
+
+mkdir -p ./minio_data
+
+if [ ! -f ./minio ]; then
+    echo 'MinIO binary not found, downloading...'
+
+    BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]')
+
+    wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-amd64/minio" \
+        && chmod +x ./minio \
+        && wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-amd64/mc" \
+        && chmod +x ./mc
+fi
+
+MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
+MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
+
+./minio server --address ":11111" ./minio_data &
+
+while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied
+do
+    echo "Trying to connect to minio"
+    sleep 1
+done
+
+lsof -i :11111
+
+sleep 5
+
+./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
+./mc admin user add clickminio test testtest
+./mc admin policy set clickminio readwrite user=test
+./mc mb clickminio/test
+
+# Upload data to Minio. By default after unpacking all tests will in
+# /usr/share/clickhouse-test/queries
+
+TEST_PATH=${1:-/usr/share/clickhouse-test}
+MINIO_DATA_PATH=${TEST_PATH}/queries/1_stateful/data_minio
+
+# Iterating over globs will cause redudant FILE variale to be a path to a file, not a filename
+# shellcheck disable=SC2045
+for FILE in $(ls "${MINIO_DATA_PATH}"); do
+    echo "$FILE";
+    ./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE";
+done
+
+mkdir -p ~/.aws
+cat <<EOT >> ~/.aws/credentials
+[default]
+aws_access_key_id=clickhouse
+aws_secret_access_key=clickhouse
+EOT

View File

@@ -60,6 +60,7 @@ RUN arch=${TARGETARCH:-amd64} \
 
 ENV MINIO_ROOT_USER="clickhouse"
 ENV MINIO_ROOT_PASSWORD="clickhouse"
+ENV EXPORT_S3_STORAGE_POLICIES=1
 
 COPY run.sh /
 COPY setup_minio.sh /

View File

@@ -29,5 +29,6 @@ COPY run.sh /
 
 ENV DATASETS="hits visits"
 ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
+ENV EXPORT_S3_STORAGE_POLICIES=1
 
 CMD ["/bin/bash", "/run.sh"]

View File

@@ -173,6 +173,8 @@ quit
 
 configure
 
+./setup_minio.sh
+
 start
 
 # shellcheck disable=SC2086 # No quotes because I want to split it into words.
@@ -188,6 +190,8 @@ clickhouse-client --query "SHOW TABLES FROM datasets"
 clickhouse-client --query "SHOW TABLES FROM test"
 clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
 clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
+clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits"
 clickhouse-client --query "SHOW TABLES FROM test"
 
 ./stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" \

View File

@@ -38,7 +38,7 @@ Writing the docs is extremely useful for project's users and developers, and gro
 
 The documentation contains information about all the aspects of the ClickHouse lifecycle: developing, testing, installing, operating, and using. The base language of the documentation is English. The English version is the most actual. All other languages are supported as much as they can by contributors from different countries.
 
-At the moment, [documentation](https://clickhouse.com/docs) exists in English, Russian, Chinese, Japanese, and Farsi. We store the documentation besides the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs).
+At the moment, [documentation](https://clickhouse.com/docs) exists in English, Russian, Chinese, Japanese. We store the documentation besides the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs).
 
 Each language lays in the corresponding folder. Files that are not translated from English are the symbolic links to the English ones.

View File

@@ -1,11 +1,11 @@
-sudo apt-get install apt-transport-https ca-certificates dirmngr
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
-echo "deb https://repo.clickhouse.com/deb/stable/ main/" | sudo tee \
+sudo apt-get install -y apt-transport-https ca-certificates dirmngr
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
+echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \
     /etc/apt/sources.list.d/clickhouse.list
 sudo apt-get update
 
 sudo apt-get install -y clickhouse-server clickhouse-client
 
 sudo service clickhouse-server start
-clickhouse-client # or "clickhouse-client --password" if you set up a password.
+clickhouse-client # or "clickhouse-client --password" if you've set up a password.

View File

@@ -0,0 +1,11 @@
+sudo apt-get install apt-transport-https ca-certificates dirmngr
+sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv E0C56BD4
+
+echo "deb https://repo.clickhouse.com/deb/stable/ main/" | sudo tee \
+    /etc/apt/sources.list.d/clickhouse.list
+sudo apt-get update
+
+sudo apt-get install -y clickhouse-server clickhouse-client
+
+sudo service clickhouse-server start
+clickhouse-client # or "clickhouse-client --password" if you set up a password.

View File

@@ -1,7 +1,6 @@
-sudo yum install yum-utils
-sudo rpm --import https://repo.clickhouse.com/CLICKHOUSE-KEY.GPG
-sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/clickhouse.repo
-sudo yum install clickhouse-server clickhouse-client
+sudo yum install -y yum-utils
+sudo yum-config-manager --add-repo https://packages.clickhouse.com/rpm/clickhouse.repo
+sudo yum install -y clickhouse-server clickhouse-client
 
 sudo /etc/init.d/clickhouse-server start
 clickhouse-client # or "clickhouse-client --password" if you set up a password.

View File

@@ -0,0 +1,7 @@
+sudo yum install yum-utils
+sudo rpm --import https://repo.clickhouse.com/CLICKHOUSE-KEY.GPG
+sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/clickhouse.repo
+sudo yum install clickhouse-server clickhouse-client
+
+sudo /etc/init.d/clickhouse-server start
+clickhouse-client # or "clickhouse-client --password" if you set up a password.

View File

@@ -1,19 +1,20 @@
-export LATEST_VERSION=$(curl -s https://repo.clickhouse.com/tgz/stable/ | \
+LATEST_VERSION=$(curl -s https://packages.clickhouse.com/tgz/stable/ | \
     grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
-curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-server-$LATEST_VERSION.tgz
-curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-client-$LATEST_VERSION.tgz
+export LATEST_VERSION
+curl -O "https://packages.clickhouse.com/tgz/stable/clickhouse-common-static-$LATEST_VERSION.tgz"
+curl -O "https://packages.clickhouse.com/tgz/stable/clickhouse-common-static-dbg-$LATEST_VERSION.tgz"
+curl -O "https://packages.clickhouse.com/tgz/stable/clickhouse-server-$LATEST_VERSION.tgz"
+curl -O "https://packages.clickhouse.com/tgz/stable/clickhouse-client-$LATEST_VERSION.tgz"
 
-tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
-sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh
+tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz"
+sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh"
 
-tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz
-sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh
+tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION.tgz"
+sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh"
 
-tar -xzvf clickhouse-server-$LATEST_VERSION.tgz
-sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh
+tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz"
+sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh"
 sudo /etc/init.d/clickhouse-server start
 
-tar -xzvf clickhouse-client-$LATEST_VERSION.tgz
-sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
+tar -xzvf "clickhouse-client-$LATEST_VERSION.tgz"
+sudo "clickhouse-client-$LATEST_VERSION/install/doinst.sh"

View File

@@ -0,0 +1,19 @@
+export LATEST_VERSION=$(curl -s https://repo.clickhouse.com/tgz/stable/ | \
+    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-server-$LATEST_VERSION.tgz
+curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-client-$LATEST_VERSION.tgz
+
+tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
+sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh
+
+tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz
+sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh
+
+tar -xzvf clickhouse-server-$LATEST_VERSION.tgz
+sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh
+sudo /etc/init.d/clickhouse-server start
+
+tar -xzvf clickhouse-client-$LATEST_VERSION.tgz
+sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh

View File

@@ -5,7 +5,7 @@ toc_title: Source Code Browser
 
 # Browse ClickHouse Source Code {#browse-clickhouse-source-code}
 
-You can use **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/html_report/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
+You can use **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
 
 Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.

View File

@@ -156,14 +156,6 @@ $ cd ClickHouse
 $ ./release
 ```
 
-## Faster builds for development
-
-Normally all tools of the ClickHouse bundle, such as `clickhouse-server`, `clickhouse-client` etc., are linked into a single static executable, `clickhouse`. This executable must be re-linked on every change, which might be slow. One common way to improve build time is to use the 'split' build configuration, which builds a separate binary for every tool, and further splits the code into several shared libraries. To enable this tweak, pass the following flags to `cmake`:
-
-```
--DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1
-```
-
 ## You Don’t Have to Build ClickHouse {#you-dont-have-to-build-clickhouse}
 
 ClickHouse is available in pre-built binaries and packages. Binaries are portable and can be run on any Linux flavour.
@@ -172,9 +164,9 @@ They are built for stable, prestable and testing releases as long as for every c
 
 To find the freshest build from `master`, go to [commits page](https://github.com/ClickHouse/ClickHouse/commits/master), click on the first green checkmark or red cross near commit, and click to the “Details” link right after “ClickHouse Build Check”.
 
-## Split build configuration {#split-build}
+## Faster builds for development: Split build configuration {#split-build}
 
-Normally ClickHouse is statically linked into a single static `clickhouse` binary with minimal dependencies. This is convenient for distribution, but it means that on every change the entire binary is linked again, which is slow and may be inconvenient for development. There is an alternative configuration which creates dynamically loaded shared libraries instead, allowing faster incremental builds. To use it, add the following flags to your `cmake` invocation:
+Normally, ClickHouse is statically linked into a single static `clickhouse` binary with minimal dependencies. This is convenient for distribution, but it means that on every change the entire binary needs to be linked, which is slow and may be inconvenient for development. There is an alternative configuration which instead creates dynamically loaded shared libraries and separate binaries `clickhouse-server`, `clickhouse-client` etc., allowing for faster incremental builds. To use it, add the following flags to your `cmake` invocation:
 ```
 -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 -DCLICKHOUSE_SPLIT_BINARY=1
 ```

View File

@@ -190,15 +190,3 @@ Runs randomly generated queries to catch program errors. If it fails, ask a main
 ## Performance Tests
 
 Measure changes in query performance. This is the longest check that takes just below 6 hours to run. The performance test report is described in detail [here](https://github.com/ClickHouse/ClickHouse/tree/master/docker/test/performance-comparison#how-to-read-the-report).
-
-# QA
-
-> What is a `Task (private network)` item on status pages?
-
-It's a link to the Yandex's internal job system. Yandex employees can see the check's start time and its more verbose status.
-
-> Where the tests are run
-
-Somewhere on Yandex internal infrastructure.

View File

@@ -40,8 +40,8 @@ The list of third-party libraries:
 | grpc | [Apache](https://github.com/ClickHouse-Extras/grpc/blob/60c986e15cae70aade721d26badabab1f822fdd6/LICENSE) |
 | h3 | [Apache](https://github.com/ClickHouse-Extras/h3/blob/c7f46cfd71fb60e2fefc90e28abe81657deff735/LICENSE) |
 | hyperscan | [Boost](https://github.com/ClickHouse-Extras/hyperscan/blob/e9f08df0213fc637aac0a5bbde9beeaeba2fe9fa/LICENSE) |
-| icu | [Public Domain](https://github.com/unicode-org/icu/blob/faa2f9f9e1fe74c5ed00eba371d2830134cdbea1/icu4c/LICENSE) |
-| icudata | [Public Domain](https://github.com/ClickHouse-Extras/icudata/blob/f020820388e3faafb44cc643574a2d563dfde572/LICENSE) |
+| icu | [Public Domain](https://github.com/unicode-org/icu/blob/a56dde820dc35665a66f2e9ee8ba58e75049b668/icu4c/LICENSE) |
+| icudata | [Public Domain](https://github.com/ClickHouse-Extras/icudata/blob/72d9a4a7febc904e2b0a534ccb25ae40fac5f1e5/LICENSE) |
 | jemalloc | [BSD 2-clause](https://github.com/ClickHouse-Extras/jemalloc/blob/e6891d9746143bf2cf617493d880ba5a0b9a3efd/COPYING) |
 | krb5 | [MIT](https://github.com/ClickHouse-Extras/krb5/blob/5149dea4e2be0f67707383d2682b897c14631374/src/lib/gssapi/LICENSE) |
 | libc-headers | [LGPL](https://github.com/ClickHouse-Extras/libc-headers/blob/a720b7105a610acbd7427eea475a5b6810c151eb/LICENSE) |


@ -229,6 +229,25 @@ As simple code editors, you can use Sublime Text or Visual Studio Code, or Kate
Just in case, it is worth mentioning that CLion creates the `build` path on its own, selects `debug` as the build type, uses the version of CMake that is defined in CLion rather than the one installed by you, and finally uses `make` to run build tasks instead of `ninja`. This is normal behaviour; just keep it in mind to avoid confusion.
## Debugging
Many graphical IDEs come with an integrated debugger, but you can also use a standalone debugger.
### GDB
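As a minimal sketch (the binary path, the source paths, and the query are assumptions), a comparable debugging session under GDB could be started like this:

```bash
# a minimal sketch: map build paths to source paths and run
# clickhouse-client under GDB (paths and query are assumptions)
gdb -ex 'set substitute-path /path/to/build/dir /path/to/source/dir' \
    -ex run \
    --args ./clickhouse-client --query="SELECT * FROM TAB"
```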
### LLDB
```
# tell LLDB where to find the source code
settings set target.source-map /path/to/build/dir /path/to/source/dir

# configure LLDB to display code before/after currently executing line
settings set stop-line-count-before 10
settings set stop-line-count-after 10

target create ./clickhouse-client
# <set breakpoints here>
process launch -- --query="SELECT * FROM TAB"
```
## Writing Code {#writing-code}

The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/
@ -243,7 +262,7 @@ List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3A
## Test Data {#test-data}

Developing ClickHouse often requires loading realistic datasets. This is particularly important for performance testing. We have a specially prepared set of anonymized web analytics data. It additionally requires some 3 GB of free disk space. Note that this data is not required to accomplish most development tasks.

``` bash
sudo apt install wget xz-utils
```
@ -270,7 +289,7 @@ Navigate to your fork repository in GitHubs UI. If you have been developing i
A pull request can be created even if the work is not completed yet. In this case, please put the word “WIP” (work in progress) at the beginning of the title; it can be changed later. This is useful for cooperative reviewing and discussion of changes as well as for running all of the available tests. It is important that you provide a brief description of your changes; it will later be used for generating release changelogs.

Testing will commence as soon as ClickHouse employees label your PR with a tag “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.

The system will prepare ClickHouse binary builds for your pull request individually. To retrieve these builds, click the “Details” link next to the “ClickHouse build check” entry in the list of checks. There you will find direct links to the built .deb packages of ClickHouse which you can deploy even on your production servers (if you have no fear).


@ -404,9 +404,9 @@ enum class CompressionMethod
};
```
**15.** All names must be in English. Transliteration of Hebrew words is not allowed.

    not T_PAAMAYIM_NEKUDOTAYIM

**16.** Abbreviations are acceptable if they are well known (when you can easily find the meaning of the abbreviation in Wikipedia or in a search engine).


@ -11,7 +11,7 @@ Functional tests are the most simple and convenient to use. Most of ClickHouse f
Each functional test sends one or multiple queries to the running ClickHouse server and compares the result with reference.

Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from ClickHouse, and it is available to the general public.

Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery --testmode`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`.
@ -133,44 +133,6 @@ If the system clickhouse-server is already running and you do not want to stop i
The `clickhouse` binary has almost no dependencies and works across a wide range of Linux distributions. For a quick-and-dirty test of your changes on a server, you can simply `scp` your freshly built `clickhouse` binary to your server and then run it as in the examples above.
## Testing Environment {#testing-environment}
Before publishing a release as stable, we deploy it on a testing environment. The testing environment is a cluster that processes 1/39 of the [Yandex.Metrica](https://metrica.yandex.com/) data; we share it with the Yandex.Metrica team. ClickHouse is upgraded without downtime on top of existing data. We first check that data is processed successfully without lagging behind realtime, that replication continues to work, and that no issues are visible to the Yandex.Metrica team. The first check can be done in the following way:
``` sql
SELECT hostName() AS h, any(version()), any(uptime()), max(UTCEventTime), count() FROM remote('example01-01-{1..3}t', merge, hits) WHERE EventDate >= today() - 2 GROUP BY h ORDER BY h;
```
In some cases, we also deploy to the testing environments of friendly teams in Yandex: Market, Cloud, etc. We also have some hardware servers that are used for development purposes.
## Load Testing {#load-testing}
After deploying to the testing environment, we run load testing with queries from the production cluster. This is done manually.

Make sure you have enabled `query_log` on your production cluster.

Collect the query log for a day or more:
``` bash
$ clickhouse-client --query="SELECT DISTINCT query FROM system.query_log WHERE event_date = today() AND query LIKE '%ym:%' AND query NOT LIKE '%system.query_log%' AND type = 2 AND is_initial_query" > queries.tsv
```
This is a somewhat complicated example. `type = 2` filters queries that were executed successfully. `query LIKE '%ym:%'` selects the relevant queries from Yandex.Metrica. `is_initial_query` selects only queries that were initiated by the client, not by ClickHouse itself (as parts of distributed query processing).

`scp` this log to your testing cluster and run it as follows:
``` bash
$ clickhouse benchmark --concurrency 16 < queries.tsv
```
(You probably also want to specify `--user`.)

Then leave it for a night or weekend and go take a rest.

You should check that `clickhouse-server` does not crash, that the memory footprint is bounded, and that performance does not degrade over time.

Precise query execution timings are not recorded or compared due to the high variability of queries and the environment.
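One way to keep an eye on the memory footprint while the benchmark runs is to sample it periodically. A minimal sketch; the `MemoryTracking` metric name is an assumption and may vary between ClickHouse versions:

```bash
# a sketch: sample the server's tracked memory usage once a minute
# while the benchmark runs (metric name is an assumption)
while true; do
    clickhouse-client --query="SELECT value FROM system.metrics WHERE metric = 'MemoryTracking'"
    sleep 60
done
```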
## Build Tests {#build-tests}

Build tests allow checking that the build is not broken on various alternative configurations and on some foreign systems. These tests are automated as well.
@ -259,13 +221,13 @@ Thread Fuzzer (please don't mix up with Thread Sanitizer) is another kind of fuz
## Security Audit

Our security team did a basic review of ClickHouse capabilities from the security standpoint.
## Static Analyzers {#static-analyzers}

We run `clang-tidy` on a per-commit basis. `clang-static-analyzer` checks are also enabled. `clang-tidy` is also used for some style checks.

We have evaluated `clang-tidy`, `Coverity`, `cppcheck`, `PVS-Studio`, `tscancode`, `CodeQL`. You will find instructions for usage in the `tests/instructions/` directory.
If you use `CLion` as an IDE, you can leverage some `clang-tidy` checks out of the box.
@ -310,12 +272,6 @@ Alternatively you can try `uncrustify` tool to reformat your code. Configuration
We also use `codespell` to find typos in code. It is automated as well.
## Metrica B2B Tests {#metrica-b2b-tests}
Each ClickHouse release is tested with the Yandex Metrica and AppMetrica engines. Testing and stable versions of ClickHouse are deployed on VMs and run with a small copy of the Metrica engine that processes a fixed sample of input data. Then the results of the two instances of the Metrica engine are compared.

These tests are automated by a separate team. Due to the high number of moving parts, the tests fail most of the time for completely unrelated reasons that are very difficult to figure out. Most likely, these tests have negative value for us. Nevertheless, they have proved useful in roughly one or two cases out of hundreds.
## Test Coverage {#test-coverage}

We also track test coverage, but only for functional tests and only for clickhouse-server. It is performed on a daily basis.


@ -36,6 +36,7 @@ ENGINE = MaterializedMySQL('host:port', ['database' | database], 'user', 'passwo
- `max_flush_data_time` — Maximum number of milliseconds that data is allowed to be cached in memory (for the database and for cache data that cannot be queried). When this time is exceeded, the data will be materialized. Default: `1000`.
- `max_wait_time_when_mysql_unavailable` — Retry interval when MySQL is not available (milliseconds). A negative value disables retries. Default: `1000`.
- `allows_query_when_mysql_lost` — Allows querying a materialized table when the MySQL connection is lost. Default: `0` (`false`).
- `materialized_mysql_tables_list` — A comma-separated list of MySQL database tables to be replicated by the MaterializedMySQL database engine. Default value: empty list, which means all tables will be replicated. (A usage sketch follows the example below.)
```sql
CREATE DATABASE mysql ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***')
```
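For illustration, the new `materialized_mysql_tables_list` setting can be attached to the engine definition. A sketch in which the host, credentials, and table names are assumptions:

```bash
# a sketch: replicate only the 'orders' and 'customers' tables
# (host, credentials and table names are assumptions)
clickhouse-client --allow_experimental_database_materialized_mysql=1 --query "
    CREATE DATABASE mysql_filtered
    ENGINE = MaterializedMySQL('localhost:3306', 'db', 'user', '***')
    SETTINGS materialized_mysql_tables_list = 'orders,customers'"
```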
@ -75,7 +76,7 @@ When working with the `MaterializedMySQL` database engine, [ReplacingMergeTree](
| FLOAT | [Float32](../../sql-reference/data-types/float.md) |
| DOUBLE | [Float64](../../sql-reference/data-types/float.md) |
| DECIMAL, NEWDECIMAL | [Decimal](../../sql-reference/data-types/decimal.md) |
| DATE, NEWDATE | [Date32](../../sql-reference/data-types/date32.md) |
| DATETIME, TIMESTAMP | [DateTime](../../sql-reference/data-types/datetime.md) |
| DATETIME2, TIMESTAMP2 | [DateTime64](../../sql-reference/data-types/datetime64.md) |
| YEAR | [UInt16](../../sql-reference/data-types/int-uint.md) |


@ -49,6 +49,8 @@ ENGINE = MySQL('host:port', ['database' | database], 'user', 'password')
All other MySQL data types are converted into [String](../../sql-reference/data-types/string.md).
Because the ClickHouse `Date` type has a different range from the MySQL date range, you can use the setting `mysql_datatypes_support_level` to modify the mapping from the MySQL date type to the ClickHouse date type when MySQL dates fall outside the ClickHouse `Date` range: `date2Date32` (convert MySQL's date type to ClickHouse `Date32`), `date2String` (convert MySQL's date type to ClickHouse `String`; this is usually used when your MySQL data contains dates before 1925), or `default` (convert MySQL's date type to ClickHouse `Date`).
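A sketch of how the setting can be applied when creating such a database; the host, credentials, and database name are assumptions:

```bash
# a sketch: map out-of-range MySQL dates to ClickHouse Date32
# (host, credentials and database name are assumptions)
clickhouse-client --mysql_datatypes_support_level='date2Date32' --query "
    CREATE DATABASE mysql_db
    ENGINE = MySQL('localhost:3306', 'db', 'user', '***')"
```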
[Nullable](../../sql-reference/data-types/nullable.md) is supported.

## Global Variables Support {#global-variables-support}


@ -26,7 +26,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE=S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
SETTINGS input_format_with_names_use_header = 0;

INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
```
@ -75,19 +75,19 @@ Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.cs
``` sql
CREATE TABLE big_table (name String, value UInt32)
ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
```
**Example with wildcards 2**

Suppose we have several files in CSV format with the following URIs on S3:

- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/some_folder/some_file_1.csv'
- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/some_folder/some_file_2.csv'
- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/some_folder/some_file_3.csv'
- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/another_folder/some_file_1.csv'
- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/another_folder/some_file_2.csv'
- 'https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/another_folder/some_file_3.csv'

There are several ways to make a table consisting of all six files:
@ -96,21 +96,21 @@ There are several ways to make a table consisting of all six files:
``` sql
CREATE TABLE table_with_range (name String, value UInt32)
ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
```

2. Take all files with `some_file_` prefix (there should be no extra files with such prefix in both folders):

``` sql
CREATE TABLE table_with_question_mark (name String, value UInt32)
ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/some_file_?', 'CSV');
```

3. Take all the files in both folders (all files should satisfy format and schema described in query):

``` sql
CREATE TABLE table_with_asterisk (name String, value UInt32)
ENGINE = S3('https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/{some,another}_folder/*', 'CSV');
```

## S3-related Settings {#settings}
@ -142,7 +142,7 @@ The following settings can be specified in configuration file for given endpoint
``` xml
<s3>
    <endpoint-name>
        <endpoint>https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/</endpoint>
        <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
        <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
        <!-- <region>us-west-1</region> -->
    </endpoint-name>
</s3>
```


@ -55,27 +55,28 @@ WHERE table = 'visits'
```

``` text
┌─partition─┬─name──────────────┬─active─┐
│ 201901    │ 201901_1_3_1      │      0 │
│ 201901    │ 201901_1_9_2_11   │      1 │
│ 201901    │ 201901_8_8_0      │      0 │
│ 201901    │ 201901_9_9_0      │      0 │
│ 201902    │ 201902_4_6_1_11   │      1 │
│ 201902    │ 201902_10_10_0_11 │      1 │
│ 201902    │ 201902_11_11_0_11 │      1 │
└───────────┴───────────────────┴────────┘
```
The `partition` column contains the names of the partitions. There are two partitions in this example: `201901` and `201902`. You can use this column value to specify the partition name in [ALTER … PARTITION](../../../sql-reference/statements/alter/partition.md) queries.

The `name` column contains the names of the partition data parts. You can use this column to specify the name of the part in the [ALTER ATTACH PART](../../../sql-reference/statements/alter/partition.md#alter_attach-partition) query.
Let’s break down the name of the part: `201901_1_9_2_11` (a query sketch for reading these fields directly follows this list):

- `201901` is the partition name.
- `1` is the minimum number of the data block.
- `9` is the maximum number of the data block.
- `2` is the chunk level (the depth of the merge tree it is formed from).
- `11` is the mutation version (if the part has mutated).
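The same components can be read from the `system.parts` table without parsing part names. A minimal sketch, reusing the `visits` table from the example above:

```bash
# a sketch: inspect partition, block numbers and level for parts of 'visits'
clickhouse-client --query "
    SELECT partition, name, min_block_number, max_block_number, level, active
    FROM system.parts
    WHERE table = 'visits'"
```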
!!! info "Info"
    The parts of old-type tables have the name: `20190117_20190123_2_2_0` (minimum date - maximum date - minimum block number - maximum block number - level).
@ -89,16 +90,16 @@ OPTIMIZE TABLE visits PARTITION 201902;
```

``` text
┌─partition─┬─name─────────────┬─active─┐
│ 201901    │ 201901_1_3_1     │      0 │
│ 201901    │ 201901_1_9_2_11  │      1 │
│ 201901    │ 201901_8_8_0     │      0 │
│ 201901    │ 201901_9_9_0     │      0 │
│ 201902    │ 201902_4_6_1     │      0 │
│ 201902    │ 201902_4_11_2_11 │      1 │
│ 201902    │ 201902_10_10_0   │      0 │
│ 201902    │ 201902_11_11_0   │      0 │
└───────────┴──────────────────┴────────┘
```
Inactive parts will be deleted approximately 10 minutes after merging.
@ -109,12 +110,12 @@ Another way to view a set of parts and partitions is to go into the directory of
``` bash
/var/lib/clickhouse/data/default/visits$ ls -l
total 40
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 201901_1_3_1
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201901_1_9_2_11
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_8_8_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 15:52 201901_9_9_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_10_10_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:17 201902_11_11_0
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 16:19 201902_4_11_2_11
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 5 12:09 201902_4_6_1
drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached
```


@ -802,7 +802,7 @@ Configuration markup:
<disks>
    <s3>
        <type>s3</type>
        <endpoint>https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/root-path/</endpoint>
        <access_key_id>your_access_key_id</access_key_id>
        <secret_access_key>your_secret_access_key</secret_access_key>
        <region></region>
@ -856,7 +856,7 @@ S3 disk can be configured as `main` or `cold` storage:
<disks>
    <s3>
        <type>s3</type>
        <endpoint>https://clickhouse-public-datasets.s3.amazonaws.com/my-bucket/root-path/</endpoint>
        <access_key_id>your_access_key_id</access_key_id>
        <secret_access_key>your_secret_access_key</secret_access_key>
    </s3>


@ -97,7 +97,7 @@ ZooKeeper is not used in `SELECT` queries because replication does not affect th
For each `INSERT` query, approximately ten entries are added to ZooKeeper through several transactions. (To be more precise, this is for each inserted block of data; an INSERT query contains one block or one block per `max_insert_block_size = 1048576` rows.) This leads to slightly longer latencies for `INSERT` compared to non-replicated tables. But if you follow the recommendations to insert data in batches of no more than one `INSERT` per second, it does not create any problems. The entire ClickHouse cluster used for coordinating one ZooKeeper cluster has a total of several hundred `INSERTs` per second. The throughput on data inserts (the number of rows per second) is just as high as for non-replicated data.

For very large clusters, you can use different ZooKeeper clusters for different shards. However, from our experience this has not proven necessary based on production clusters with approximately 300 servers.

Replication is asynchronous and multi-master. `INSERT` queries (as well as `ALTER`) can be sent to any available server. Data is inserted on the server where the query is run, and then it is copied to the other servers. Because it is asynchronous, recently inserted data appears on the other replicas with some latency. If part of the replicas are not available, the data is written when they become available. If a replica is available, the latency is the amount of time it takes to transfer the block of compressed data over the network. The number of threads performing background tasks for replicated tables can be set by [background_schedule_pool_size](../../../operations/settings/settings.md#background_schedule_pool_size) setting.
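Batching on the client side is the easiest way to follow the one-`INSERT`-per-second recommendation. A minimal sketch; the table and file names are assumptions:

```bash
# a sketch: accumulate rows in a file and load them as one INSERT
# instead of issuing one INSERT per row
clickhouse-client --query="INSERT INTO visits FORMAT TSV" < batch.tsv
```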
@ -111,7 +111,7 @@ Data blocks are deduplicated. For multiple writes of the same data block (data b
During replication, only the source data to insert is transferred over the network. Further data transformation (merging) is coordinated and performed on all the replicas in the same way. This minimizes network usage, which means that replication works well when replicas reside in different datacenters. (Note that duplicating data in different datacenters is the main goal of replication.)

You can have any number of replicas of the same data. Based on our experience, a relatively reliable and convenient solution could use double replication in production, with each server using RAID-5 or RAID-6 (and RAID-10 in some cases).

The system monitors data synchronicity on replicas and is able to recover after a failure. Failover is automatic (for small differences in data) or semi-automatic (when data differs too much, which may indicate a configuration error).
@ -163,7 +163,7 @@ Example:
<macros>
    <layer>05</layer>
    <shard>02</shard>
    <replica>example05-02-1</replica>
</macros>
```
@ -172,7 +172,7 @@ In this case, the path consists of the following parts:
`/clickhouse/tables/` is the common prefix. We recommend using exactly this one.

`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the example cluster uses bi-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.

`table_name` is the name of the node for the table in ZooKeeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it does not change after a RENAME query.
*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name`
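Putting the macros and the path together, a table definition could look like the following sketch; the column set, partition key, and sorting key are assumptions:

```bash
# a sketch: the {layer}, {shard} and {replica} macros from the config above
# are expanded inside the ZooKeeper path and the replica name
clickhouse-client --query "
    CREATE TABLE visits (EventDate Date, CounterID UInt32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/visits', '{replica}')
    PARTITION BY toYYYYMM(EventDate)
    ORDER BY CounterID"
```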


@ -197,7 +197,7 @@ A simple remainder from the division is a limited solution for sharding and isn
You should be concerned about the sharding scheme in the following cases:

- Queries are used that require joining data (`IN` or `JOIN`) by a specific key. If data is sharded by this key, you can use local `IN` or `JOIN` instead of `GLOBAL IN` or `GLOBAL JOIN`, which is much more efficient.
- A large number of servers is used (hundreds or more) with a large number of small queries, for example, queries for data of individual clients (e.g. websites, advertisers, or partners). In order for the small queries to not affect the entire cluster, it makes sense to locate data for a single client on a single shard. Alternatively, you can set up bi-level sharding: divide the entire cluster into “layers”, where a layer may consist of multiple shards. Data for a single client is located on a single layer, but shards can be added to a layer as necessary, and data is randomly distributed within them. `Distributed` tables are created for each layer, and a single shared distributed table is created for global queries.

Data is written asynchronously. When inserted in the table, the data block is just written to the local file system. The data is sent to the remote servers in the background as soon as possible. The periodicity for sending data is managed by the [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms) and [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms) settings. The `Distributed` engine sends each file with inserted data separately, but you can enable batch sending of files with the [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts) setting. This setting improves cluster performance by better utilizing local server and network resources. You should check whether data is sent successfully by checking the list of files (data waiting to be sent) in the table directory: `/var/lib/clickhouse/data/database/table/`. The number of threads performing background tasks can be set by [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) setting.
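The pending-files check mentioned above can be scripted. A minimal sketch; the database and table names are assumptions:

```bash
# a sketch: list data blocks still waiting to be sent to remote shards
# for a Distributed table named 'hits_all' in database 'default'
ls -l /var/lib/clickhouse/data/default/hits_all/
```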


@ -22,4 +22,4 @@ Here is the illustration of the difference between traditional row-oriented syst
**Columnar**

![Columnar](https://clickhouse.com/docs/en/images/column-oriented.gif#)

A columnar database is a preferred choice for analytical applications because it allows having many columns in a table just in case, without paying the cost for unused columns at read query execution time. Column-oriented databases are designed for big data processing and data warehousing; they often natively scale out using distributed clusters of low-cost hardware to increase throughput. ClickHouse does this with a combination of [distributed](../../engines/table-engines/special/distributed.md) and [replicated](../../engines/table-engines/mergetree-family/replication.md) tables.


@ -6,7 +6,7 @@ toc_priority: 110
# Why Not Use Something Like MapReduce? {#why-not-use-something-like-mapreduce}

We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on distributed sorting. The most common open-source solution in this class is [Apache Hadoop](http://hadoop.apache.org). Large IT companies often have proprietary in-house solutions.

These systems aren’t appropriate for online queries due to their high latency. In other words, they can’t be used as the back-end for a web interface. These types of systems aren’t useful for real-time data updates. Distributed sorting isn’t the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is an optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks.


@ -9,7 +9,7 @@ toc_priority: 11
This question usually arises when people see official ClickHouse t-shirts. They have large words **“ClickHouse не тормозит”** on the front.

Before ClickHouse became open-source, it had been developed as an in-house storage system by the largest Russian IT company, Yandex. That’s why it initially got its slogan in Russian, which is “не тормозит” (pronounced as “ne tormozit”). After the open-source release we first produced some of those t-shirts for events in Russia and it was a no-brainer to use the slogan as-is.

One of the following batches of those t-shirts was supposed to be given away at events outside of Russia and we tried to make an English version of the slogan. Unfortunately, the Russian language is kind of elegant in terms of expressing stuff and there was a restriction of limited space on a t-shirt, so we failed to come up with a good enough translation (most options appeared to be either long or inaccurate) and decided to keep the slogan in Russian even on t-shirts produced for international events. It appeared to be a great decision because people all over the world get positively surprised and curious when they see it.


@ -11,7 +11,7 @@ This section describes how to obtain example datasets and import them into Click
The list of documented datasets:

- [GitHub Events](../../getting-started/example-datasets/github-events.md)
- [Anonymized Web Analytics Dataset](../../getting-started/example-datasets/metrica.md)
- [Recipes](../../getting-started/example-datasets/recipes.md)
- [Star Schema Benchmark](../../getting-started/example-datasets/star-schema.md)
- [WikiStat](../../getting-started/example-datasets/wikistat.md)


@ -1,11 +1,11 @@
---
toc_priority: 15
toc_title: Web Analytics Data
---

# Anonymized Web Analytics Data {#anonymized-web-analytics-data}

The dataset consists of two tables containing anonymized web analytics data with hits (`hits_v1`) and visits (`visits_v1`). Either table can be downloaded as a compressed `tsv.xz` file or as prepared partitions. In addition to that, an extended version of the `hits` table containing 100 million rows is available as TSV at https://datasets.clickhouse.com/hits/tsv/hits_100m_obfuscated_v1.tsv.xz and as prepared partitions at https://datasets.clickhouse.com/hits/partitions/hits_100m_obfuscated_v1.tar.xz.
@ -73,6 +73,6 @@ clickhouse-client --query "SELECT COUNT(*) FROM datasets.visits_v1"
## Example Queries {#example-queries}

[The ClickHouse tutorial](../../getting-started/tutorial.md) is based on this web analytics dataset, and the recommended way to get started with this dataset is to go through the tutorial.

Additional examples of queries to these tables can be found among [stateful tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/queries/1_stateful) of ClickHouse (they are named `test.hits` and `test.visits` there).


@ -375,7 +375,7 @@ Q3: 0.051 sec.
Q4: 0.072 sec.

In this case, the query processing time is determined above all by network latency.
We ran queries using a client located in a different datacenter than where the cluster was located, which added about 20 ms of latency.

## Summary {#summary}


@ -27,9 +27,17 @@ It is recommended to use official pre-compiled `deb` packages for Debian or Ubun
{% include 'install/deb.sh' %}
```
<details markdown="1">
<summary>Deprecated Method for installing deb-packages</summary>
``` bash
{% include 'install/deb_repo.sh' %}
```
</details>
You can replace `stable` with `lts` or `testing` to use different [release trains](../faq/operations/production.md) based on your needs.

You can also download and install packages manually from [here](https://packages.clickhouse.com/deb/pool/stable).

#### Packages {#packages}
@ -49,11 +57,17 @@ It is recommended to use official pre-compiled `rpm` packages for CentOS, RedHat
First, you need to add the official repository:

``` bash
{% include 'install/rpm.sh' %}
```
<details markdown="1">
<summary>Deprecated Method for installing rpm-packages</summary>
``` bash
{% include 'install/rpm_repo.sh' %}
```
</details>
If you want to use the most recent version, replace `stable` with `testing` (this is recommended for your testing environments). `prestable` is sometimes also available.

Then run these commands to install packages:
@ -62,36 +76,27 @@ Then run these commands to install packages:
sudo yum install clickhouse-server clickhouse-client
```
You can also download and install packages manually from [here](https://packages.clickhouse.com/rpm/stable).

### From Tgz Archives {#from-tgz-archives}

It is recommended to use official pre-compiled `tgz` archives for all Linux distributions where installation of `deb` or `rpm` packages is not possible.

The required version can be downloaded with `curl` or `wget` from the repository https://packages.clickhouse.com/tgz/.

After that, the downloaded archives should be unpacked and installed with installation scripts. Example for the latest stable version:
``` bash
{% include 'install/tgz.sh' %}
```
<details markdown="1">
<summary>Deprecated Method for installing tgz archives</summary>
``` bash
{% include 'install/tgz_repo.sh' %}
```
</details>
For production environments, it’s recommended to use the latest `stable` version. You can find its number on the GitHub page https://github.com/ClickHouse/ClickHouse/tags with the postfix `-stable`.

### From Docker Image {#from-docker-image}
@ -215,6 +220,6 @@ SELECT 1
**Congratulations, the system works!**

To continue experimenting, you can download one of the test data sets or go through the [tutorial](./tutorial.md).

[Original article](https://clickhouse.com/docs/en/getting_started/install/) <!--hide-->


@ -5,29 +5,19 @@ toc_title: Playground
# ClickHouse Playground {#clickhouse-playground}

[ClickHouse Playground](https://play.clickhouse.com/play?user=play) allows people to experiment with ClickHouse by running queries instantly, without setting up their server or cluster.

Several example datasets are available in Playground.

You can make queries to Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) drivers. More information about software products that support ClickHouse is available [here](../interfaces/index.md).

## Credentials {#credentials}

| Parameter           | Value                              |
|:--------------------|:-----------------------------------|
| HTTPS endpoint      | `https://play.clickhouse.com:443/` |
| Native TCP endpoint | `play.clickhouse.com:9440`         |
| User                | `explorer` or `play`               |
| Password            | (empty)                            |
## Limitations {#limitations} ## Limitations {#limitations}
@ -36,31 +26,18 @@ The queries are executed as a read-only user. It implies some limitations:
- DDL queries are not allowed
- INSERT queries are not allowed

The service also has quotas on its usage.

## Examples {#examples}
HTTPS endpoint example with `curl`:

``` bash
curl "https://play.clickhouse.com/?user=explorer" --data-binary "SELECT 'Play ClickHouse'"
```

TCP endpoint example with [CLI](../interfaces/cli.md):

``` bash
clickhouse client --secure --host play.clickhouse.com --user explorer
```
@ -80,7 +80,7 @@ clickhouse-client --query='INSERT INTO table FORMAT TabSeparated' < data.tsv
## Import Sample Dataset {#import-sample-dataset}

Now it's time to fill our ClickHouse server with some sample data. In this tutorial, we'll use some anonymized web analytics data. There are [multiple ways to import the dataset](../getting-started/example-datasets/metrica.md), and for the sake of the tutorial, we'll go with the most realistic one.

### Download and Extract Table Data {#download-and-extract-table-data}
@ -105,7 +105,7 @@ Syntax for creating tables is way more complicated compared to databases (see [r
2. Table schema, i.e. list of columns and their [data types](../sql-reference/data-types/index.md).
3. [Table engine](../engines/table-engines/index.md) and its settings, which determines all the details on how queries to this table will be physically executed.

There are two tables to create:

- `hits` is a table with each action done by all users on all websites covered by the service.
- `visits` is a table that contains pre-built sessions instead of individual actions.
@ -533,19 +533,19 @@ Example config for a cluster with three shards, one replica each:
<perftest_3shards_1replicas>
    <shard>
        <replica>
            <host>example-perftest01j</host>
            <port>9000</port>
        </replica>
    </shard>
    <shard>
        <replica>
            <host>example-perftest02j</host>
            <port>9000</port>
        </replica>
    </shard>
    <shard>
        <replica>
            <host>example-perftest03j</host>
            <port>9000</port>
        </replica>
    </shard>
@ -591,15 +591,15 @@ Example config for a cluster of one shard containing three replicas:
<perftest_1shards_3replicas>
    <shard>
        <replica>
            <host>example-perftest01j</host>
            <port>9000</port>
        </replica>
        <replica>
            <host>example-perftest02j</host>
            <port>9000</port>
        </replica>
        <replica>
            <host>example-perftest03j</host>
            <port>9000</port>
        </replica>
    </shard>
@ -617,15 +617,15 @@ ZooKeeper locations are specified in the configuration file:
``` xml
<zookeeper>
    <node>
        <host>zoo01</host>
        <port>2181</port>
    </node>
    <node>
        <host>zoo02</host>
        <port>2181</port>
    </node>
    <node>
        <host>zoo03</host>
        <port>2181</port>
    </node>
</zookeeper>
@ -5,7 +5,7 @@ toc_title: Applying CatBoost Models
# Applying a Catboost Model in ClickHouse {#applying-catboost-model-in-clickhouse}

[CatBoost](https://catboost.ai) is a free and open-source gradient boosting library developed at Yandex for machine learning.

With this instruction, you will learn to apply pre-trained models in ClickHouse by running model inference from SQL.
@ -51,6 +51,7 @@ The supported formats are:
| [PrettySpace](#prettyspace) | ✗ | ✔ |
| [Protobuf](#protobuf) | ✔ | ✔ |
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
| [ProtobufList](#protobuflist) | ✔ | ✔ |
| [Avro](#data-format-avro) | ✔ | ✔ |
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
| [Parquet](#data-format-parquet) | ✔ | ✔ |
@ -64,7 +65,7 @@ The supported formats are:
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✔ |
| [LineAsString](#lineasstring) | ✔ | ✗ |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |
@ -300,7 +301,7 @@ Result:
<tr> <th>Search phrase</th> <th>Count</th> </tr>
<tr> <td></td> <td>8267016</td> </tr>
<tr> <td>bathroom interior design</td> <td>2166</td> </tr>
<tr> <td>clickhouse</td> <td>1655</td> </tr>
<tr> <td>spring 2014 fashion</td> <td>1549</td> </tr>
<tr> <td>freeform photos</td> <td>1480</td> </tr>
</table>
@ -371,7 +372,7 @@ Similar to TabSeparated, but outputs a value in name=value format. Names are esc
``` text
SearchPhrase= count()=8267016
SearchPhrase=bathroom interior design count()=2166
SearchPhrase=clickhouse count()=1655
SearchPhrase=2014 spring fashion count()=1549
SearchPhrase=freeform photos count()=1480
SearchPhrase=angelina jolie count()=1245
@ -1060,7 +1061,7 @@ XML format is suitable only for output, not for parsing. Example:
        <field>2166</field>
    </row>
    <row>
        <SearchPhrase>clickhouse</SearchPhrase>
        <field>1655</field>
    </row>
    <row>
@ -1230,7 +1231,38 @@ See also [how to read/write length-delimited protobuf messages in popular langua
## ProtobufSingle {#protobufsingle}

Same as [Protobuf](#protobuf) but for storing/parsing a single Protobuf message without a length delimiter.
As a result, only a single table row can be written/read.
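Because ProtobufSingle carries no length delimiter, it is handy for dumping exactly one row into a file. A minimal sketch, reusing the `schemafile:MessageType` schema notation from the examples below (the table and file names here are hypothetical):

``` bash
# Export one row as a single, length-unprefixed Protobuf message
clickhouse-client --query "SELECT * FROM test.table LIMIT 1 FORMAT ProtobufSingle SETTINGS format_schema = 'schemafile:MessageType'" > message.bin
```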
## ProtobufList {#protobuflist}
Similar to Protobuf, but rows are represented as a sequence of sub-messages contained in a message with the fixed name "Envelope".
Usage example:
``` sql
SELECT * FROM test.table FORMAT ProtobufList SETTINGS format_schema = 'schemafile:MessageType'
```
``` bash
cat protobuflist_messages.bin | clickhouse-client --query "INSERT INTO test.table FORMAT ProtobufList SETTINGS format_schema='schemafile:MessageType'"
```
where the file `schemafile.proto` looks like this:
``` protobuf
syntax = "proto3";
message Envelope {
    message MessageType {
        string name = 1;
        string surname = 2;
        uint32 birthDate = 3;
        repeated string phoneNumbers = 4;
    };
    MessageType row = 1;
};
```
## Avro {#data-format-avro}
@ -1364,7 +1396,8 @@ The table below shows supported data types and how they match ClickHouse [data t
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) | `FLOAT` |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `TIMESTAMP` | [DateTime64](../sql-reference/data-types/datetime64.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| — | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
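A quick way to see the new `DateTime64` ↔ `TIMESTAMP` mapping in action is to round-trip a value through a Parquet file with `clickhouse-local`; a sketch (the file name is arbitrary):

``` bash
# Write a DateTime64 column to a Parquet file; it is stored as a Parquet TIMESTAMP
clickhouse-client --query "SELECT now64(3) AS ts FORMAT Parquet" > ts.parquet
# Read it back with an explicitly specified structure
clickhouse-local --structure "ts DateTime64(3)" --input-format Parquet --query "SELECT ts FROM table" < ts.parquet
```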
@ -1421,7 +1454,8 @@ The table below shows supported data types and how they match ClickHouse [data t
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) | `FLOAT32` |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `UINT16` |
| `DATE64` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `TIMESTAMP` | [DateTime64](../sql-reference/data-types/datetime64.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `STRING`, `BINARY` | [FixedString](../sql-reference/data-types/fixedstring.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
@ -1483,7 +1517,8 @@ The table below shows supported data types and how they match ClickHouse [data t
| `FLOAT`, `HALF_FLOAT` | [Float32](../sql-reference/data-types/float.md) | `FLOAT` |
| `DOUBLE` | [Float64](../sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE32` | [Date](../sql-reference/data-types/date.md) | `DATE32` |
| `DATE64` | [DateTime](../sql-reference/data-types/datetime.md) | `UINT32` |
| `TIMESTAMP` | [DateTime64](../sql-reference/data-types/datetime64.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](../sql-reference/data-types/string.md) | `BINARY` |
| `DECIMAL` | [Decimal](../sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](../sql-reference/data-types/array.md) | `LIST` |
@ -12,7 +12,7 @@ ClickHouse provides three network interfaces (they can be optionally wrapped in
- [Native TCP](../interfaces/tcp.md), which has less overhead.
- [gRPC](grpc.md).

In most cases it is recommended to use an appropriate tool or library instead of interacting with those directly. The following are officially supported by ClickHouse:

- [Command-line client](../interfaces/cli.md)
- [JDBC driver](../interfaces/jdbc.md)
@ -6,7 +6,7 @@ toc_title: Integrations
# Integration Libraries from Third-party Developers {#integration-libraries-from-third-party-developers}

!!! warning "Disclaimer"
    ClickHouse, Inc. does **not** maintain the tools and libraries listed below and hasn't done extensive testing to ensure their quality.

## Infrastructure Products {#infrastructure-products}
@ -5,7 +5,7 @@ toc_title: Performance
# Performance {#performance}

ClickHouse shows the best performance (both the highest throughput for long queries and the lowest latency on short queries) for comparable operating scenarios among systems of its class that were available for testing. You can view the test results on a [separate page](https://clickhouse.com/benchmark/dbms/).

Numerous independent benchmarks came to similar conclusions. They are not difficult to find using an internet search, or you can see [our small collection of related links](https://clickhouse.com/#independent-benchmarks).
@ -55,7 +55,7 @@ Internal coordination settings are located in `<keeper_server>.<coordination_set
- `auto_forwarding` — Allow to forward write requests from followers to the leader (default: true).
- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000).
- `startup_timeout` — If the server doesn't connect to other quorum participants in the specified timeout it will terminate (ms) (default: 30000).
- `four_letter_word_allow_list` — Allow list of 4lw commands (default: "conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro").

Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers.
@ -121,7 +121,7 @@ clickhouse keeper --config /etc/your_path_to_config/config.xml
ClickHouse Keeper also provides 4lw commands that are almost the same as in ZooKeeper. Each command is composed of four letters such as `mntr`, `stat` etc. There are some more interesting commands: `stat` gives some general information about the server and connected clients, while `srvr` and `cons` give extended details on server and connections respectively.

The 4lw commands have an allow list configuration `four_letter_word_allow_list`, which has the default value "conf,cons,crst,envi,ruok,srst,srvr,stat,wchs,dirs,mntr,isro".

You can issue the commands to ClickHouse Keeper via telnet or nc, at the client port.
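For example, assuming Keeper listens on the client port shown in the configuration output below (adjust `2181` to your `tcp_port`), a minimal health check might look like this:

``` bash
echo ruok | nc localhost 2181
# imok
echo mntr | nc localhost 2181
```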
@ -201,7 +201,7 @@ Server stats reset.
```
server_id=1
tcp_port=2181
four_letter_word_allow_list=*
log_storage_path=./coordination/logs
snapshot_storage_path=./coordination/snapshots
max_requests_batch_size=100
@ -0,0 +1,229 @@
---
toc_priority: 69
toc_title: "Named connections"
---
# Storing details for connecting to external sources in configuration files {#named-collections}
Details for connecting to external sources (dictionaries, tables, table functions) can be saved
in configuration files and thus simplify the creation of objects and hide credentials
from users with only SQL access.
Parameters can be set in XML (`<format>CSV</format>`) and overridden in SQL (`, format = 'TSV'`).
Parameters in SQL are overridden using the format `key` = `value`: `compression_method = 'gzip'`.
Named connections are stored in the `config.xml` file of the ClickHouse server in the `<named_collections>` section and are applied when ClickHouse starts.
Example of configuration:
```xml
$ cat /etc/clickhouse-server/config.d/named_collections.xml
<clickhouse>
    <named_collections>
        ...
    </named_collections>
</clickhouse>
```
## Named connections for accessing S3

For a description of the parameters, see [s3 Table Function](../sql-reference/table-functions/s3.md).
Example of configuration:
```xml
<clickhouse>
    <named_collections>
        <s3_mydata>
            <access_key_id>AKIAIOSFODNN7EXAMPLE</access_key_id>
            <secret_access_key>wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY</secret_access_key>
            <format>CSV</format>
        </s3_mydata>
    </named_collections>
</clickhouse>
```
### Example of using named connections with the s3 function
```sql
INSERT INTO FUNCTION s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz',
format = 'TSV', structure = 'number UInt64', compression_method = 'gzip')
SELECT * FROM numbers(10000);
SELECT count()
FROM s3(s3_mydata, url = 'https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz')
┌─count()─┐
│ 10000 │
└─────────┘
1 rows in set. Elapsed: 0.279 sec. Processed 10.00 thousand rows, 90.00 KB (35.78 thousand rows/s., 322.02 KB/s.)
```
### Example of using named connections with an S3 table
```sql
CREATE TABLE s3_engine_table (number Int64)
ENGINE=S3(s3_mydata, url='https://s3.us-east-1.amazonaws.com/yourbucket/mydata/test_file.tsv.gz', format = 'TSV')
SETTINGS input_format_with_names_use_header = 0;
SELECT * FROM s3_engine_table LIMIT 3;
┌─number─┐
│ 0 │
│ 1 │
│ 2 │
└────────┘
```
## Named connections for accessing MySQL database

For a description of the parameters, see [mysql](../sql-reference/table-functions/mysql.md).
Example of configuration:
```xml
<clickhouse>
    <named_collections>
        <mymysql>
            <user>myuser</user>
            <password>mypass</password>
            <host>127.0.0.1</host>
            <port>3306</port>
            <database>test</database>
            <connection_pool_size>8</connection_pool_size>
            <on_duplicate_clause>1</on_duplicate_clause>
            <replace_query>1</replace_query>
        </mymysql>
    </named_collections>
</clickhouse>
```
### Example of using named connections with the mysql function
```sql
SELECT count() FROM mysql(mymysql, table = 'test');
┌─count()─┐
│ 3 │
└─────────┘
```
### Example of using named connections with a MySQL table
```sql
CREATE TABLE mytable(A Int64) ENGINE = MySQL(mymysql, table = 'test', connection_pool_size=3, replace_query=0);
SELECT count() FROM mytable;
┌─count()─┐
│ 3 │
└─────────┘
```
### Example of using named connections with a database with engine MySQL
```sql
CREATE DATABASE mydatabase ENGINE = MySQL(mymysql);
SHOW TABLES FROM mydatabase;
┌─name───┐
│ source │
│ test │
└────────┘
```
### Example of using named connections with an external dictionary with source MySQL
```sql
CREATE DICTIONARY dict (A Int64, B String)
PRIMARY KEY A
SOURCE(MYSQL(NAME mymysql TABLE 'source'))
LIFETIME(MIN 1 MAX 2)
LAYOUT(HASHED());
SELECT dictGet('dict', 'B', 2);
┌─dictGet('dict', 'B', 2)─┐
│ two │
└─────────────────────────┘
```
## Named connections for accessing PostgreSQL database

For a description of the parameters, see [postgresql](../sql-reference/table-functions/postgresql.md).
Example of configuration:
```xml
<clickhouse>
    <named_collections>
        <mypg>
            <user>pguser</user>
            <password>jw8s0F4</password>
            <host>127.0.0.1</host>
            <port>5432</port>
            <database>test</database>
            <schema>test_schema</schema>
            <connection_pool_size>8</connection_pool_size>
        </mypg>
    </named_collections>
</clickhouse>
```
### Example of using named connections with the postgresql function
```sql
SELECT * FROM postgresql(mypg, table = 'test');
┌─a─┬─b───┐
│ 2 │ two │
│ 1 │ one │
└───┴─────┘
SELECT * FROM postgresql(mypg, table = 'test', schema = 'public');
┌─a─┐
│ 1 │
│ 2 │
│ 3 │
└───┘
```
### Example of using named connections with a table with engine PostgreSQL
```sql
CREATE TABLE mypgtable (a Int64) ENGINE = PostgreSQL(mypg, table = 'test', schema = 'public');
SELECT * FROM mypgtable;
┌─a─┐
│ 1 │
│ 2 │
│ 3 │
└───┘
```
### Example of using named connections with a database with engine PostgreSQL
```sql
CREATE DATABASE mydatabase ENGINE = PostgreSQL(mypg);
SHOW TABLES FROM mydatabase
┌─name─┐
│ test │
└──────┘
```
### Example of using named connections with an external dictionary with source PostgreSQL
```sql
CREATE DICTIONARY dict (a Int64, b String)
PRIMARY KEY a
SOURCE(POSTGRESQL(NAME mypg TABLE test))
LIFETIME(MIN 1 MAX 2)
LAYOUT(HASHED());
SELECT dictGet('dict', 'b', 2);
┌─dictGet('dict', 'b', 2)─┐
│ two │
└─────────────────────────┘
```
@ -38,6 +38,18 @@ Alternatively you can perform benchmark in the following steps.
wget https://builds.clickhouse.com/master/amd64/clickhouse
# For aarch64:
wget https://builds.clickhouse.com/master/aarch64/clickhouse
# For powerpc64le:
wget https://builds.clickhouse.com/master/powerpc64le/clickhouse
# For freebsd:
wget https://builds.clickhouse.com/master/freebsd/clickhouse
# For freebsd-aarch64:
wget https://builds.clickhouse.com/master/freebsd-aarch64/clickhouse
# For freebsd-powerpc64le:
wget https://builds.clickhouse.com/master/freebsd-powerpc64le/clickhouse
# For macos:
wget https://builds.clickhouse.com/master/macos/clickhouse
# For macos-aarch64:
wget https://builds.clickhouse.com/master/macos-aarch64/clickhouse
# Then do:
chmod a+x clickhouse
```
@ -47,7 +59,7 @@ wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/cl
chmod a+x benchmark-new.sh
wget https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/queries.sql
```

3. Download the [web analytics dataset](../getting-started/example-datasets/metrica.md) (“hits” table containing 100 million rows).

```bash
wget https://datasets.clickhouse.com/hits/partitions/hits_100m_obfuscated_v1.tar.xz
tar xvf hits_100m_obfuscated_v1.tar.xz -C .
@ -66,6 +78,6 @@ mv hits_100m_obfuscated_v1/* .
```bash
./benchmark-new.sh hits_100m_obfuscated
```

7. Send the numbers and the info about your hardware configuration to feedback@clickhouse.com

All the results are published here: https://clickhouse.com/benchmark/hardware/
@ -101,7 +101,7 @@ Quotas can use the “quota key” feature to report on resources for multiple k
<web_global>
    <!-- keyed The quota_key "key" is passed in the query parameter,
         and the quota is tracked separately for each key value.
         For example, you can pass a username of your service as the key,
         so the quota will be counted separately for each username.
         Using keys makes sense only if quota_key is transmitted by the program, not by a user.
@ -410,7 +410,7 @@ Useful for breaking away from a specific network interface.
**Example**

``` xml
<interserver_http_host>example.clickhouse.com</interserver_http_host>
```

## interserver_https_port {#interserver-https-port}
@ -430,7 +430,7 @@ Similar to `interserver_http_host`, except that this hostname can be used by oth
**Example**

``` xml
<interserver_https_host>example.clickhouse.com</interserver_https_host>
```

## interserver_http_credentials {#server-settings-interserver-http-credentials}
@ -1247,7 +1247,7 @@ The time zone is necessary for conversions between String and DateTime formats w
**Example**

``` xml
<timezone>Asia/Istanbul</timezone>
```

## tcp_port {#server_configuration_parameters-tcp_port}
@ -256,7 +256,7 @@ Possible values:
Default value: 161061273600 (150 GB).

The merge scheduler periodically analyzes the sizes and number of parts in partitions, and if there are enough free resources in the pool, it starts background merges. Merges occur until the total size of the source parts is larger than `max_bytes_to_merge_at_max_space_in_pool`.

Merges initiated by [OPTIMIZE FINAL](../../sql-reference/statements/optimize.md) ignore `max_bytes_to_merge_at_max_space_in_pool` and merge parts only taking into account available resources (free disk space) until one part remains in the partition.
@ -346,7 +346,7 @@ Default value: `0`.
**Usage**

The value of the `min_bytes_to_rebalance_partition_over_jbod` setting should not be less than the value of the [max_bytes_to_merge_at_max_space_in_pool](../../operations/settings/merge-tree-settings.md#max-bytes-to-merge-at-max-space-in-pool) setting / 1024. Otherwise, ClickHouse throws an exception.
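Both values can be checked on a running server via the `system.merge_tree_settings` table; a small sketch:

``` bash
clickhouse-client --query "
    SELECT name, value
    FROM system.merge_tree_settings
    WHERE name IN ('min_bytes_to_rebalance_partition_over_jbod',
                   'max_bytes_to_merge_at_max_space_in_pool')"
```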
## detach_not_byte_identical_parts {#detach_not_byte_identical_parts}
@ -1326,7 +1326,7 @@ If a query from the same user with the same query_id already exists at thi
`1` – Cancel the old query and start running the new one.

Set this parameter to 1 for implementing suggestions for segmentation conditions. After entering the next character, if the old query hasn't finished yet, it should be cancelled.
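Over the HTTP interface both `query_id` and `replace_running_query` can be passed as URL parameters, so a sketch of the suggestion scenario might look like this (host, port, and the table and column names are placeholders):

``` bash
# First keystroke: run a query tagged with a fixed query_id
curl 'http://localhost:8123/?query_id=suggest&replace_running_query=1' \
    --data-binary "SELECT count() FROM hits WHERE SearchPhrase LIKE 'cli%'"
# Next keystroke: the same query_id cancels the unfinished query and starts the new one
curl 'http://localhost:8123/?query_id=suggest&replace_running_query=1' \
    --data-binary "SELECT count() FROM hits WHERE SearchPhrase LIKE 'clic%'"
```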
## replace_running_query_max_wait_ms {#replace-running-query-max-wait-ms}
@ -1380,7 +1380,7 @@ load_balancing = nearest_hostname
The number of errors is counted for each replica. Every 5 minutes, the number of errors is integrally divided by 2. Thus, the number of errors is calculated for a recent time with exponential smoothing. If there is one replica with a minimal number of errors (i.e. errors occurred recently on the other replicas), the query is sent to it. If there are multiple replicas with the same minimal number of errors, the query is sent to the replica with a hostname that is most similar to the server's hostname in the config file (for the number of different characters in identical positions, up to the minimum length of both hostnames).

For instance, example01-01-1 and example01-01-2 are different in one position, while example01-01-1 and example01-02-2 differ in two places.
This method might seem primitive, but it does not require external data about network topology, and it does not compare IP addresses, which would be complicated for our IPv6 addresses.

Thus, if there are equivalent replicas, the closest one by name is preferred.
@ -3290,6 +3290,19 @@ Possible values:
Default value: `16`.

## max_insert_delayed_streams_for_parallel_write {#max-insert-delayed-streams-for-parallel-write}

The maximum number of streams (columns) for which the final part flush is delayed.

It makes a difference only if the underlying storage supports parallel writes (e.g. S3); otherwise it gives no benefit.

Possible values:

- Positive integer.
- 0 or 1 — Disabled.

Default value: `1000` for S3 and `0` otherwise.
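The setting can also be overridden per query; a sketch, assuming a hypothetical S3-backed table:

``` bash
clickhouse-client --query "
    INSERT INTO s3_backed_table
    SELECT * FROM numbers(1000000)
    SETTINGS max_insert_delayed_streams_for_parallel_write = 100"
```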
## opentelemetry_start_trace_probability {#opentelemetry-start-trace-probability}

Sets the probability that ClickHouse can start a trace for executed queries (if no parent [trace context](https://www.w3.org/TR/trace-context/) is supplied).
@ -33,7 +33,7 @@ Client section in `config.xml` will look like:
Add ZooKeeper to ClickHouse config with some cluster and macros:

``` xml
<clickhouse>
    <zookeeper>
        <node>
            <host>localhost</host>
@ -41,7 +41,7 @@ Add Zookeeper to ClickHouse config with some cluster and macros:
            <secure>1</secure>
        </node>
    </zookeeper>
</clickhouse>
```

Start `clickhouse-server`. In logs you should see:
@ -14,18 +14,18 @@ Columns:
```

```text
┌─name──────────────────┬─is_aggregate─┬─case_insensitive─┬─alias_to─┬─create_query─┬─origin─┐
│ logTrace              │            0 │                0 │          │              │ System │
│ aes_decrypt_mysql     │            0 │                0 │          │              │ System │
│ aes_encrypt_mysql     │            0 │                0 │          │              │ System │
│ decrypt               │            0 │                0 │          │              │ System │
│ encrypt               │            0 │                0 │          │              │ System │
│ toBool                │            0 │                0 │          │              │ System │
│ windowID              │            0 │                0 │          │              │ System │
│ hopStart              │            0 │                0 │          │              │ System │
│ hop                   │            0 │                0 │          │              │ System │
│ snowflakeToDateTime64 │            0 │                0 │          │              │ System │
└───────────────────────┴──────────────┴──────────────────┴──────────┴──────────────┴────────┘

10 rows in set. Elapsed: 0.002 sec.
```
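The new `origin` column makes it easy to separate built-in functions from user-defined ones; for example:

``` bash
clickhouse-client --query "SELECT name, is_aggregate FROM system.functions WHERE origin != 'System' LIMIT 10"
```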
@ -82,7 +82,7 @@ Columns:
- `path` ([String](../../sql-reference/data-types/string.md)) – Absolute path to the folder with data part files.

- `disk_name` ([String](../../sql-reference/data-types/string.md)) – Name of a disk that stores the data part.

- `hash_of_all_files` ([String](../../sql-reference/data-types/string.md)) – [sipHash128](../../sql-reference/functions/hash-functions.md#hash_functions-siphash128) of compressed files.
@ -65,13 +65,13 @@ Row 1:
──────
database: merge
table: visits_v2
replica_name: mtgiga001-1t
position: 15
node_name: queue-0009325559
type: MERGE_PARTS
create_time: 2020-12-07 14:04:21
required_quorum: 0
source_replica: mtgiga001-1t
new_part_name: 20201130_121373_121384_2
parts_to_merge: ['20201130_121373_121378_1','20201130_121379_121379_0','20201130_121380_121380_0','20201130_121381_121381_0','20201130_121382_121382_0','20201130_121383_121383_0','20201130_121384_121384_0']
is_detach: 0
@ -40,7 +40,7 @@ FORMAT Vertical
``` text
Row 1:
──────
name: example01-08-1
value:
czxid: 932998691229
mzxid: 932998691229
@ -57,7 +57,7 @@ path: /clickhouse/tables/01-08/visits/replicas
Row 2:
──────
name: example01-08-2
value:
czxid: 933002738135
mzxid: 933002738135
@ -139,7 +139,7 @@ With the default settings, ZooKeeper is a time bomb:
This bomb must be defused.

The ZooKeeper (3.5.1) configuration below is used in a large production environment:

zoo.cfg:
@ -13,7 +13,7 @@ The suffix -If can be appended to the name of any aggregate function. In this ca
Examples: `sumIf(column, cond)`, `countIf(cond)`, `avgIf(x, cond)`, `quantilesTimingIf(level1, level2)(x, cond)`, `argMinIf(arg, val, cond)` and so on.

With conditional aggregate functions, you can calculate aggregates for several conditions at once, without using subqueries and `JOIN`s. For example, conditional aggregate functions can be used to implement the segment comparison functionality, as in the sketch below.
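A single pass over a table can compare two segments without subqueries; a sketch with a hypothetical `hits` table and columns:

``` bash
clickhouse-client --query "
    SELECT
        countIf(Referer LIKE '%google%') AS google_visits,
        countIf(Referer LIKE '%bing%') AS bing_visits,
        avgIf(Duration, Age < 30) AS avg_duration_under_30
    FROM hits"
```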
## -Array {#agg-functions-combinator-array}
@ -40,7 +40,7 @@ When inserting data into ClickHouse, you can use different formats of date and t
``` sql
CREATE TABLE dt
(
    `timestamp` DateTime('Asia/Istanbul'),
    `event_id` UInt8
)
ENGINE = TinyLog;
@ -61,13 +61,13 @@ SELECT * FROM dt;
└─────────────────────┴──────────┘
```

- When inserting datetime as an integer, it is treated as Unix Timestamp (UTC). `1546300800` represents `'2019-01-01 00:00:00'` UTC. However, as the `timestamp` column has the `Asia/Istanbul` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'`.
- When inserting a string value as datetime, it is treated as being in the column timezone. `'2019-01-01 00:00:00'` will be treated as being in the `Asia/Istanbul` timezone and saved as `1546290000`.

**2.** Filtering on `DateTime` values

``` sql
SELECT * FROM dt WHERE timestamp = toDateTime('2019-01-01 00:00:00', 'Asia/Istanbul')
```

``` text
@ -91,12 +91,12 @@ SELECT * FROM dt WHERE timestamp = '2019-01-01 00:00:00'
**3.** Getting a time zone for a `DateTime`-type column:

``` sql
SELECT toDateTime(now(), 'Asia/Istanbul') AS column, toTypeName(column) AS x
```

``` text
┌──────────────column─┬─x─────────────────────────┐
│ 2019-10-16 04:12:04 │ DateTime('Asia/Istanbul') │
└─────────────────────┴───────────────────────────┘
```
@ -105,7 +105,7 @@ SELECT toDateTime(now(), 'Europe/Moscow') AS column, toTypeName(column) AS x
``` sql
SELECT
    toDateTime(timestamp, 'Europe/London') as lon_time,
    toDateTime(timestamp, 'Asia/Istanbul') as mos_time
FROM dt
```
@ -27,7 +27,7 @@ Supported range of values: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\]
``` sql
CREATE TABLE dt
(
    `timestamp` DateTime64(3, 'Asia/Istanbul'),
    `event_id` UInt8
)
ENGINE = TinyLog;
@ -48,13 +48,13 @@ SELECT * FROM dt;
└─────────────────────────┴──────────┘
```

- When inserting datetime as an integer, it is treated as an appropriately scaled Unix Timestamp (UTC). `1546300800000` (with precision 3) represents `'2019-01-01 00:00:00'` UTC. However, as the `timestamp` column has the `Asia/Istanbul` (UTC+3) timezone specified, when outputting as a string the value will be shown as `'2019-01-01 03:00:00'`.
- When inserting a string value as datetime, it is treated as being in the column timezone. `'2019-01-01 00:00:00'` will be treated as being in the `Asia/Istanbul` timezone and stored as `1546290000000`.

2. Filtering on `DateTime64` values

``` sql
SELECT * FROM dt WHERE timestamp = toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul');
```

``` text
@ -68,12 +68,12 @@ Unlike `DateTime`, `DateTime64` values are not converted from `String` automatic
3. Getting a time zone for a `DateTime64`-type value:

``` sql
SELECT toDateTime64(now(), 3, 'Asia/Istanbul') AS column, toTypeName(column) AS x;
```

``` text
┌──────────────────column─┬─x──────────────────────────────┐
│ 2019-10-16 04:12:04.000 │ DateTime64(3, 'Asia/Istanbul') │
└─────────────────────────┴────────────────────────────────┘
```
@ -82,7 +82,7 @@ SELECT toDateTime64(now(), 3, 'Europe/Moscow') AS column, toTypeName(column) AS
``` sql
SELECT
    toDateTime64(timestamp, 3, 'Europe/London') as lon_time,
    toDateTime64(timestamp, 3, 'Asia/Istanbul') as mos_time
FROM dt;
```
@ -225,15 +225,15 @@ This storage method works the same way as hashed and allows using date/time (arb
Example: The table contains discounts for each advertiser in the format:

``` text
+---------------|---------------------|-------------------|--------+
| advertiser id | discount start date | discount end date | amount |
+===============+=====================+===================+========+
| 123           | 2015-01-01          | 2015-01-15        | 0.15   |
+---------------|---------------------|-------------------|--------+
| 123           | 2015-01-16          | 2015-01-31        | 0.25   |
+---------------|---------------------|-------------------|--------+
| 456           | 2015-01-01          | 2015-01-15        | 0.05   |
+---------------|---------------------|-------------------|--------+
```

To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others).
@ -272,10 +272,10 @@ LAYOUT(RANGE_HASHED())
RANGE(MIN first MAX last)
```

To work with these dictionaries, you need to pass an additional argument to the `dictGet*` function, for which a range is selected:

``` sql
dictGet*('dict_name', 'attr_name', id, date)
```

This function returns the value for the specified `id`s and the date range that includes the passed date.
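For example, assuming a dictionary named `discounts_dict` built from the table above, a lookup of the discount for advertiser 123 on 2015-01-10 could look like this:

``` bash
clickhouse-client --query "SELECT dictGetFloat64('discounts_dict', 'amount', toUInt64(123), toDate('2015-01-10'))"
# 0.15
```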
@ -479,17 +479,17 @@ This type of storage is for mapping network prefixes (IP addresses) to metadata
Example: The table contains network prefixes and their corresponding AS number and country code:

``` text
+-----------------|-------|--------+
| prefix          | asn   | cca2   |
+=================+=======+========+
| 202.79.32.0/20  | 17501 | NP     |
+-----------------|-------|--------+
| 2620:0:870::/48 | 3856  | US     |
+-----------------|-------|--------+
| 2a02:6b8:1::/48 | 13238 | RU     |
+-----------------|-------|--------+
| 2001:db8::/32   | 65536 | ZZ     |
+-----------------|-------|--------+
```

When using this type of layout, the structure must have a composite key.
@ -538,10 +538,10 @@ PRIMARY KEY prefix
The key must have only one String type attribute that contains an allowed IP prefix. Other types are not supported yet.

For queries, you must use the same functions (`dictGet*` with a tuple) as for dictionaries with composite keys:

``` sql
dictGet*('dict_name', 'attr_name', tuple(ip))
```

The function takes either `UInt32` for IPv4, or `FixedString(16)` for IPv6.
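A sketch of both lookup flavors against the example data, assuming the dictionary is named `my_ip_trie` (the name and the String-typed `cca2` attribute follow the table above):

``` bash
clickhouse-client --query "SELECT dictGetString('my_ip_trie', 'cca2', tuple(IPv4StringToNum('202.79.32.10')))"
clickhouse-client --query "SELECT dictGetString('my_ip_trie', 'cca2', tuple(IPv6StringToNum('2620:0:870::1')))"
```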
@ -14,7 +14,7 @@ This allows you to:
- Check whether a region is part of another region.
- Get a chain of parent regions.

All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with web analytics dictionaries”.

The internal dictionaries are disabled in the default package.
To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file.
@ -48,5 +48,3 @@ Dictionary updates (other than loading at first use) do not block queries. Durin
We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.