Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 00:22:29 +00:00)

Commit 1f8535c02b: Merge branch 'master' into null-deserialization

.github/workflows/cancel.yml (vendored, new file, +13 lines)
@@ -0,0 +1,13 @@
name: Cancel
on: # yamllint disable-line rule:truthy
  workflow_run:
    workflows: ["CIGithubActions"]
    types:
      - requested
jobs:
  cancel:
    runs-on: [self-hosted, style-checker]
    steps:
      - uses: styfle/cancel-workflow-action@0.9.1
        with:
          workflow_id: ${{ github.event.workflow.id }}
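The cancel workflow above reacts to `workflow_run` events with styfle/cancel-workflow-action. For comparison only, not part of this commit: the same auto-cancellation can be declared directly on the PR workflow with the `concurrency` key that release.yml further down already uses for its master-release group. The `ci-${{ github.ref }}` group name below is an illustrative assumption.

```yaml
# Hedged sketch, not part of this commit: concurrency-based auto-cancel
# declared on the PR workflow itself instead of a separate Cancel workflow.
name: CIGithubActions
concurrency:
  group: ci-${{ github.ref }}  # assumed group name; one group per branch/PR ref
  cancel-in-progress: true     # cancel the previous run of the same group
on: # yamllint disable-line rule:truthy
  pull_request:
```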

.github/workflows/main.yml (vendored, 238 lines changed)
@@ -1,4 +1,4 @@
name: Ligthweight GithubActions
name: CIGithubActions
on: # yamllint disable-line rule:truthy
  pull_request:
    types:
@@ -11,20 +11,24 @@ on: # yamllint disable-line rule:truthy
      - master
jobs:
  CheckLabels:
    runs-on: [self-hosted]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Labels check
        run: cd $GITHUB_WORKSPACE/tests/ci && python3 run_check.py
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 run_check.py
  DockerHubPush:
    needs: CheckLabels
    runs-on: [self-hosted]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: cd $GITHUB_WORKSPACE/tests/ci && python3 docker_images_check.py
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
@@ -32,7 +36,7 @@ jobs:
          path: ${{ runner.temp }}/docker_images_check/changed_images.json
  StyleCheck:
    needs: DockerHubPush
    runs-on: [self-hosted]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
@@ -42,12 +46,226 @@ jobs:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Style Check
        run: cd $GITHUB_WORKSPACE/tests/ci && python3 style_check.py
        env:
          TEMP_PATH: ${{ runner.temp }}/style_check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 style_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  DocsCheck:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docs_check
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Docs Check
        env:
          TEMP_PATH: ${{runner.temp}}/docs_check
          REPO_COPY: ${{runner.temp}}/docs_check/ClickHouse
        run: |
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 docs_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderDebDebug:
    needs: DockerHubPush
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
    runs-on: [self-hosted, builder]
    steps:
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/images_path
      - name: Check out repository code
        uses: actions/checkout@v2
        with:
          submodules: 'recursive'
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        env:
          TEMP_PATH: ${{runner.temp}}/build_check
          IMAGES_PATH: ${{runner.temp}}/images_path
          REPO_COPY: ${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
          CHECK_NAME: 'ClickHouse build check (actions)'
          BUILD_NUMBER: 7
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 build_check.py "$CHECK_NAME" $BUILD_NUMBER
      - name: Upload build URLs to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: ${{ env.BUILD_NAME }}
          path: ${{ runner.temp }}/build_check/${{ env.BUILD_NAME }}.json
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  BuilderReport:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Report Builder
        env:
          TEMP_PATH: ${{runner.temp}}/report_check
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'ClickHouse build check (actions)'
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cd $GITHUB_WORKSPACE/tests/ci
          python3 build_report_check.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatelessTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateless_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateless tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateless_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
          KILL_TIMEOUT: 10800
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FunctionalStatefulTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Functional test
        env:
          TEMP_PATH: ${{runner.temp}}/stateful_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stateful tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stateful_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
          KILL_TIMEOUT: 3600
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 functional_test_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER $KILL_TIMEOUT
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  StressTestDebug:
    needs: [BuilderDebDebug]
    runs-on: [self-hosted, stress-tester]
    steps:
      - name: Download json reports
        uses: actions/download-artifact@v2
        with:
          path: ${{runner.temp}}/reports_dir
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Stress test
        env:
          TEMP_PATH: ${{runner.temp}}/stress_debug
          REPORTS_PATH: ${{runner.temp}}/reports_dir
          CHECK_NAME: 'Stress tests (debug, actions)'
          REPO_COPY: ${{runner.temp}}/stress_debug/ClickHouse
          REQUIRED_BUILD_NUMBER: 7
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 stress_check.py "$CHECK_NAME" $REQUIRED_BUILD_NUMBER
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FastTest:
    needs: DockerHubPush
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
    runs-on: [self-hosted, builder]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Fast Test
        env:
          TEMP_PATH: ${{runner.temp}}/fasttest
          REPO_COPY: ${{runner.temp}}/fasttest/ClickHouse
          CACHES_PATH: ${{runner.temp}}/../ccaches
        run: |
          sudo rm -fr $TEMP_PATH
          mkdir -p $TEMP_PATH
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci && python3 fast_test_check.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH
  FinishCheck:
    needs: [StyleCheck, DockerHubPush, CheckLabels]
    runs-on: [self-hosted]
    needs: [StyleCheck, DockerHubPush, CheckLabels, BuilderReport, FastTest, FunctionalStatelessTestDebug, FunctionalStatefulTestDebug, DocsCheck, StressTestDebug]
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Finish label
        run: cd $GITHUB_WORKSPACE/tests/ci && python3 finish_check.py
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 finish_check.py
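Every job in the workflow above ends with the same three-line Cleanup step. As a hedged sketch only (not part of this commit, and the action path below is hypothetical), that cleanup could be written once as a local composite action and reused from each job:

```yaml
# .github/actions/cleanup/action.yml  (hypothetical local composite action)
name: 'Cleanup'
description: 'Kill leftover containers and remove the job temp dir'
runs:
  using: 'composite'
  steps:
    - shell: bash
      run: |
        # same commands the jobs above run inline
        docker kill $(docker ps -q) ||:
        docker rm -f $(docker ps -a -q) ||:
        sudo rm -fr "$TEMP_PATH"
```

Each job would then finish with a step like `- uses: ./.github/actions/cleanup` guarded by `if: always()`, with TEMP_PATH still supplied through the job's env block.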

.github/workflows/release.yml (vendored, new file, +55 lines)
@@ -0,0 +1,55 @@
name: DocsReleaseChecks
concurrency:
  group: master-release
  cancel-in-progress: true
on: # yamllint disable-line rule:truthy
  push:
    branches:
      - master
    paths:
      - 'docs/**'
      - 'website/**'
      - 'benchmark/**'
      - 'docker/**'
jobs:
  DockerHubPush:
    runs-on: [self-hosted, style-checker]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Images check
        run: |
          cd $GITHUB_WORKSPACE/tests/ci
          python3 docker_images_check.py
      - name: Upload images files to artifacts
        uses: actions/upload-artifact@v2
        with:
          name: changed_images
          path: ${{ runner.temp }}/docker_images_check/changed_images.json
  DocsRelease:
    needs: DockerHubPush
    runs-on: [self-hosted, func-tester]
    steps:
      - name: Check out repository code
        uses: actions/checkout@v2
      - name: Download changed images
        uses: actions/download-artifact@v2
        with:
          name: changed_images
          path: ${{runner.temp}}/docs_release
      - name: Docs Release
        env:
          TEMP_PATH: ${{runner.temp}}/docs_release
          REPO_COPY: ${{runner.temp}}/docs_release/ClickHouse
          CLOUDFLARE_TOKEN: ${{secrets.CLOUDFLARE}}
          ROBOT_CLICKHOUSE_SSH_KEY: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
        run: |
          cp -r $GITHUB_WORKSPACE $TEMP_PATH
          cd $REPO_COPY/tests/ci
          python3 docs_release.py
      - name: Cleanup
        if: always()
        run: |
          docker kill $(docker ps -q) ||:
          docker rm -f $(docker ps -a -q) ||:
          sudo rm -fr $TEMP_PATH

.gitmodules (vendored, 2 lines changed)
@@ -76,7 +76,7 @@
	url = https://github.com/ClickHouse-Extras/libcxxabi.git
[submodule "contrib/snappy"]
	path = contrib/snappy
	url = https://github.com/google/snappy
	url = https://github.com/ClickHouse-Extras/snappy.git
[submodule "contrib/cppkafka"]
	path = contrib/cppkafka
	url = https://github.com/mfontanini/cppkafka.git
@ -149,8 +149,6 @@ if (ENABLE_FUZZING)
|
||||
set (ENABLE_JEMALLOC 0)
|
||||
set (ENABLE_CHECK_HEAVY_BUILDS 1)
|
||||
set (GLIBC_COMPATIBILITY OFF)
|
||||
set (ENABLE_PROTOBUF ON)
|
||||
set (USE_INTERNAL_PROTOBUF_LIBRARY ON)
|
||||
endif()
|
||||
|
||||
# Global libraries
|
||||
@ -581,6 +579,7 @@ include (cmake/find/yaml-cpp.cmake)
|
||||
include (cmake/find/s2geometry.cmake)
|
||||
include (cmake/find/nlp.cmake)
|
||||
include (cmake/find/bzip2.cmake)
|
||||
include (cmake/find/filelog.cmake)
|
||||
|
||||
if(NOT USE_INTERNAL_PARQUET_LIBRARY)
|
||||
set (ENABLE_ORC OFF CACHE INTERNAL "")
|
||||
|
@ -177,8 +177,6 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
|
||||
}
|
||||
|
||||
|
||||
#if !defined(ARCADIA_BUILD) /// Arcadia's variant of CCTZ already has the same implementation.
|
||||
|
||||
/// Prefer to load timezones from blobs linked to the binary.
|
||||
/// The blobs are provided by "tzdata" library.
|
||||
/// This allows to avoid dependency on system tzdata.
|
||||
@ -234,5 +232,3 @@ namespace cctz_extension
|
||||
|
||||
ZoneInfoSourceFactory zone_info_source_factory = custom_factory;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -103,7 +103,6 @@ String LineReader::readLine(const String & first_prompt, const String & second_p
|
||||
continue;
|
||||
}
|
||||
|
||||
#if !defined(ARCADIA_BUILD) /// C++20
|
||||
const char * has_extender = nullptr;
|
||||
for (const auto * extender : extenders)
|
||||
{
|
||||
@ -133,7 +132,6 @@ String LineReader::readLine(const String & first_prompt, const String & second_p
|
||||
if (input.empty())
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
line += (line.empty() ? "" : "\n") + input;
|
||||
|
||||
|
@ -83,10 +83,6 @@
|
||||
# define BOOST_USE_UCONTEXT 1
|
||||
#endif
|
||||
|
||||
#if defined(ARCADIA_BUILD) && defined(BOOST_USE_UCONTEXT)
|
||||
# undef BOOST_USE_UCONTEXT
|
||||
#endif
|
||||
|
||||
/// TODO: Strange enough, there is no way to detect UB sanitizer.
|
||||
|
||||
/// Explicitly allow undefined behaviour for certain functions. Use it as a function attribute.
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <stdexcept>
|
||||
#include <fstream>
|
||||
#include <base/getMemoryAmount.h>
|
||||
#include <base/getPageSize.h>
|
||||
|
||||
@ -15,6 +16,17 @@
|
||||
*/
|
||||
uint64_t getMemoryAmountOrZero()
|
||||
{
|
||||
#if defined(OS_LINUX)
|
||||
// Try to lookup at the Cgroup limit
|
||||
std::ifstream cgroup_limit("/sys/fs/cgroup/memory/memory.limit_in_bytes");
|
||||
if (cgroup_limit.is_open())
|
||||
{
|
||||
uint64_t amount = 0; // in case of read error
|
||||
cgroup_limit >> amount;
|
||||
return amount;
|
||||
}
|
||||
#endif
|
||||
|
||||
int64_t num_pages = sysconf(_SC_PHYS_PAGES);
|
||||
if (num_pages <= 0)
|
||||
return 0;
|
||||
|
@ -49,12 +49,3 @@ namespace
|
||||
#define LOG_WARNING(logger, ...) LOG_IMPL(logger, DB::LogsLevel::warning, Poco::Message::PRIO_WARNING, __VA_ARGS__)
|
||||
#define LOG_ERROR(logger, ...) LOG_IMPL(logger, DB::LogsLevel::error, Poco::Message::PRIO_ERROR, __VA_ARGS__)
|
||||
#define LOG_FATAL(logger, ...) LOG_IMPL(logger, DB::LogsLevel::error, Poco::Message::PRIO_FATAL, __VA_ARGS__)
|
||||
|
||||
|
||||
/// Compatibility for external projects.
|
||||
#if defined(ARCADIA_BUILD)
|
||||
using Poco::Logger;
|
||||
using Poco::Message;
|
||||
using DB::LogsLevel;
|
||||
using DB::CurrentThread;
|
||||
#endif
|
||||
|
@ -3,41 +3,24 @@
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wold-style-cast"
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <miniselect/floyd_rivest_select.h> // Y_IGNORE
|
||||
#else
|
||||
# include <algorithm>
|
||||
#endif
|
||||
#include <miniselect/floyd_rivest_select.h>
|
||||
|
||||
template <class RandomIt>
|
||||
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
|
||||
{
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
::miniselect::floyd_rivest_select(first, nth, last);
|
||||
#else
|
||||
::std::nth_element(first, nth, last);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <class RandomIt>
|
||||
void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
|
||||
{
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
::miniselect::floyd_rivest_partial_sort(first, middle, last);
|
||||
#else
|
||||
::std::partial_sort(first, middle, last);
|
||||
#endif
|
||||
}
|
||||
|
||||
template <class RandomIt, class Compare>
|
||||
void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
|
||||
{
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
::miniselect::floyd_rivest_partial_sort(first, middle, last, compare);
|
||||
#else
|
||||
::std::partial_sort(first, middle, last, compare);
|
||||
#endif
|
||||
}
|
||||
|
||||
#pragma GCC diagnostic pop
|
||||
|
||||
}
|
||||
|
@ -63,9 +63,7 @@
|
||||
#include <Common/Elf.h>
|
||||
#include <filesystem>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include <Common/config_version.h>
|
||||
#endif
|
||||
#include <Common/config_version.h>
|
||||
|
||||
#if defined(OS_DARWIN)
|
||||
# pragma GCC diagnostic ignored "-Wunused-macros"
|
||||
|
@ -15,14 +15,12 @@
|
||||
#include <Core/ServerUUID.h>
|
||||
#include <Common/hex.h>
|
||||
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
# include "Common/config_version.h"
|
||||
# include <Common/config.h>
|
||||
#endif
|
||||
#include "Common/config_version.h"
|
||||
#include <Common/config.h>
|
||||
|
||||
#if USE_SENTRY
|
||||
|
||||
# include <sentry.h> // Y_IGNORE
|
||||
# include <sentry.h>
|
||||
# include <stdio.h>
|
||||
# include <filesystem>
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
#include <errmsg.h>
|
||||
#include <mysql.h>
|
||||
#else
|
||||
#include <mysql/errmsg.h> //Y_IGNORE
|
||||
#include <mysql/errmsg.h>
|
||||
#include <mysql/mysql.h>
|
||||
#endif
|
||||
|
||||
|

benchmark/duckdb/log (new file, 15906 lines)
File diff suppressed because one or more lines are too long

benchmark/duckdb/queries.sql (new file, +43 lines)
@@ -0,0 +1,43 @@
|
||||
SELECT count(*) FROM hits;
|
||||
SELECT count(*) FROM hits WHERE AdvEngineID != 0;
|
||||
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits;
|
||||
SELECT sum(UserID) FROM hits;
|
||||
SELECT COUNT(DISTINCT UserID) FROM hits;
|
||||
SELECT COUNT(DISTINCT SearchPhrase) FROM hits;
|
||||
SELECT min(EventDate), max(EventDate) FROM hits;
|
||||
SELECT AdvEngineID, count(*) FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
|
||||
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT UserID, count(*) FROM hits GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
|
||||
SELECT UserID, extract(minute FROM (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS m, SearchPhrase, count(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID FROM hits WHERE UserID = 12345678901234567890;
|
||||
SELECT count(*) FROM hits WHERE URL::TEXT LIKE '%metrika%';
|
||||
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits WHERE URL::TEXT LIKE '%metrika%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title::TEXT LIKE '%Яндекс%' AND URL::TEXT NOT LIKE '%.yandex.%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT * FROM hits WHERE URL::TEXT LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY SearchPhrase LIMIT 10;
|
||||
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime, SearchPhrase LIMIT 10;
|
||||
SELECT CounterID, avg(octet_length(URL)) AS l, count(*) AS c FROM hits WHERE octet_length(URL) > 0 GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT regexp_replace(Referer::TEXT, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(octet_length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits WHERE octet_length(Referer) > 0 GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits;
|
||||
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT 1, URL, count(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(URL) > 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT Title, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(Title) > 0 GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
|
||||
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS "Minute", count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) ORDER BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime)));
|

benchmark/duckdb/usability.md (new file, 762 lines)
File diff suppressed because one or more lines are too long

benchmark/postgresql/benchmark.sh (new executable file, +12 lines)
@@ -0,0 +1,12 @@
#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits_100m_pg/' | while read query; do

    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query";
    for i in {1..3}; do
        # For some reason JIT does not work on my machine
        sudo -u postgres psql tutorial -t -c 'set jit = off' -c '\timing' -c "$query" | grep 'Time' | tee --append log
    done;
done;

benchmark/postgresql/instructions.md (new file, +142 lines)
@@ -0,0 +1,142 @@
|
||||
Create a table in PostgreSQL:
|
||||
|
||||
```
|
||||
CREATE TABLE hits_100m_pg
|
||||
(
|
||||
WatchID BIGINT NOT NULL,
|
||||
JavaEnable SMALLINT NOT NULL,
|
||||
Title TEXT NOT NULL,
|
||||
GoodEvent SMALLINT NOT NULL,
|
||||
EventTime TIMESTAMP NOT NULL,
|
||||
EventDate Date NOT NULL,
|
||||
CounterID INTEGER NOT NULL,
|
||||
ClientIP INTEGER NOT NULL,
|
||||
RegionID INTEGER NOT NULL,
|
||||
UserID BIGINT NOT NULL,
|
||||
CounterClass SMALLINT NOT NULL,
|
||||
OS SMALLINT NOT NULL,
|
||||
UserAgent SMALLINT NOT NULL,
|
||||
URL TEXT NOT NULL,
|
||||
Referer TEXT NOT NULL,
|
||||
Refresh SMALLINT NOT NULL,
|
||||
RefererCategoryID SMALLINT NOT NULL,
|
||||
RefererRegionID INTEGER NOT NULL,
|
||||
URLCategoryID SMALLINT NOT NULL,
|
||||
URLRegionID INTEGER NOT NULL,
|
||||
ResolutionWidth SMALLINT NOT NULL,
|
||||
ResolutionHeight SMALLINT NOT NULL,
|
||||
ResolutionDepth SMALLINT NOT NULL,
|
||||
FlashMajor SMALLINT NOT NULL,
|
||||
FlashMinor SMALLINT NOT NULL,
|
||||
FlashMinor2 TEXT NOT NULL,
|
||||
NetMajor SMALLINT NOT NULL,
|
||||
NetMinor SMALLINT NOT NULL,
|
||||
UserAgentMajor SMALLINT NOT NULL,
|
||||
UserAgentMinor CHAR(2) NOT NULL,
|
||||
CookieEnable SMALLINT NOT NULL,
|
||||
JavascriptEnable SMALLINT NOT NULL,
|
||||
IsMobile SMALLINT NOT NULL,
|
||||
MobilePhone SMALLINT NOT NULL,
|
||||
MobilePhoneModel TEXT NOT NULL,
|
||||
Params TEXT NOT NULL,
|
||||
IPNetworkID INTEGER NOT NULL,
|
||||
TraficSourceID SMALLINT NOT NULL,
|
||||
SearchEngineID SMALLINT NOT NULL,
|
||||
SearchPhrase TEXT NOT NULL,
|
||||
AdvEngineID SMALLINT NOT NULL,
|
||||
IsArtifical SMALLINT NOT NULL,
|
||||
WindowClientWidth SMALLINT NOT NULL,
|
||||
WindowClientHeight SMALLINT NOT NULL,
|
||||
ClientTimeZone SMALLINT NOT NULL,
|
||||
ClientEventTime TIMESTAMP NOT NULL,
|
||||
SilverlightVersion1 SMALLINT NOT NULL,
|
||||
SilverlightVersion2 SMALLINT NOT NULL,
|
||||
SilverlightVersion3 INTEGER NOT NULL,
|
||||
SilverlightVersion4 SMALLINT NOT NULL,
|
||||
PageCharset TEXT NOT NULL,
|
||||
CodeVersion INTEGER NOT NULL,
|
||||
IsLink SMALLINT NOT NULL,
|
||||
IsDownload SMALLINT NOT NULL,
|
||||
IsNotBounce SMALLINT NOT NULL,
|
||||
FUniqID BIGINT NOT NULL,
|
||||
OriginalURL TEXT NOT NULL,
|
||||
HID INTEGER NOT NULL,
|
||||
IsOldCounter SMALLINT NOT NULL,
|
||||
IsEvent SMALLINT NOT NULL,
|
||||
IsParameter SMALLINT NOT NULL,
|
||||
DontCountHits SMALLINT NOT NULL,
|
||||
WithHash SMALLINT NOT NULL,
|
||||
HitColor CHAR NOT NULL,
|
||||
LocalEventTime TIMESTAMP NOT NULL,
|
||||
Age SMALLINT NOT NULL,
|
||||
Sex SMALLINT NOT NULL,
|
||||
Income SMALLINT NOT NULL,
|
||||
Interests SMALLINT NOT NULL,
|
||||
Robotness SMALLINT NOT NULL,
|
||||
RemoteIP INTEGER NOT NULL,
|
||||
WindowName INTEGER NOT NULL,
|
||||
OpenerName INTEGER NOT NULL,
|
||||
HistoryLength SMALLINT NOT NULL,
|
||||
BrowserLanguage TEXT NOT NULL,
|
||||
BrowserCountry TEXT NOT NULL,
|
||||
SocialNetwork TEXT NOT NULL,
|
||||
SocialAction TEXT NOT NULL,
|
||||
HTTPError SMALLINT NOT NULL,
|
||||
SendTiming INTEGER NOT NULL,
|
||||
DNSTiming INTEGER NOT NULL,
|
||||
ConnectTiming INTEGER NOT NULL,
|
||||
ResponseStartTiming INTEGER NOT NULL,
|
||||
ResponseEndTiming INTEGER NOT NULL,
|
||||
FetchTiming INTEGER NOT NULL,
|
||||
SocialSourceNetworkID SMALLINT NOT NULL,
|
||||
SocialSourcePage TEXT NOT NULL,
|
||||
ParamPrice BIGINT NOT NULL,
|
||||
ParamOrderID TEXT NOT NULL,
|
||||
ParamCurrency TEXT NOT NULL,
|
||||
ParamCurrencyID SMALLINT NOT NULL,
|
||||
OpenstatServiceName TEXT NOT NULL,
|
||||
OpenstatCampaignID TEXT NOT NULL,
|
||||
OpenstatAdID TEXT NOT NULL,
|
||||
OpenstatSourceID TEXT NOT NULL,
|
||||
UTMSource TEXT NOT NULL,
|
||||
UTMMedium TEXT NOT NULL,
|
||||
UTMCampaign TEXT NOT NULL,
|
||||
UTMContent TEXT NOT NULL,
|
||||
UTMTerm TEXT NOT NULL,
|
||||
FromTag TEXT NOT NULL,
|
||||
HasGCLID SMALLINT NOT NULL,
|
||||
RefererHash BIGINT NOT NULL,
|
||||
URLHash BIGINT NOT NULL,
|
||||
CLID INTEGER NOT NULL
|
||||
);
|
||||
```
|
||||
|
||||
Create a dump from ClickHouse:
|
||||
|
||||
```
|
||||
SELECT WatchID::Int64, JavaEnable, replaceAll(replaceAll(replaceAll(toValidUTF8(Title), '\0', ''), '"', ''), '\\', ''), GoodEvent, EventTime, EventDate, CounterID::Int32, ClientIP::Int32, RegionID::Int32,
|
||||
UserID::Int64, CounterClass, OS, UserAgent, replaceAll(replaceAll(replaceAll(toValidUTF8(URL), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(Referer), '\0', ''), '"', ''), '\\', ''), Refresh, RefererCategoryID::Int16, RefererRegionID::Int32,
|
||||
URLCategoryID::Int16, URLRegionID::Int32, ResolutionWidth::Int16, ResolutionHeight::Int16, ResolutionDepth, FlashMajor, FlashMinor,
|
||||
FlashMinor2, NetMajor, NetMinor, UserAgentMajor::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(UserAgentMinor::String), '\0', ''), '"', ''), '\\', ''), CookieEnable, JavascriptEnable, IsMobile, MobilePhone,
|
||||
replaceAll(replaceAll(replaceAll(toValidUTF8(MobilePhoneModel), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(Params), '\0', ''), '"', ''), '\\', ''), IPNetworkID::Int32, TraficSourceID, SearchEngineID::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(SearchPhrase), '\0', ''), '"', ''), '\\', ''),
|
||||
AdvEngineID, IsArtifical, WindowClientWidth::Int16, WindowClientHeight::Int16, ClientTimeZone, ClientEventTime,
|
||||
SilverlightVersion1, SilverlightVersion2, SilverlightVersion3::Int32, SilverlightVersion4::Int16, replaceAll(replaceAll(replaceAll(toValidUTF8(PageCharset), '\0', ''), '"', ''), '\\', ''),
|
||||
CodeVersion::Int32, IsLink, IsDownload, IsNotBounce, FUniqID::Int64, replaceAll(replaceAll(replaceAll(toValidUTF8(OriginalURL), '\0', ''), '"', ''), '\\', ''), HID::Int32, IsOldCounter, IsEvent,
|
||||
IsParameter, DontCountHits, WithHash, replaceAll(replaceAll(replaceAll(toValidUTF8(HitColor::String), '\0', ''), '"', ''), '\\', ''), LocalEventTime, Age, Sex, Income, Interests::Int16, Robotness, RemoteIP::Int32,
|
||||
WindowName, OpenerName, HistoryLength, replaceAll(replaceAll(replaceAll(toValidUTF8(BrowserLanguage::String), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(BrowserCountry::String), '\0', ''), '"', ''), '\\', ''),
|
||||
replaceAll(replaceAll(replaceAll(toValidUTF8(SocialNetwork), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(SocialAction), '\0', ''), '"', ''), '\\', ''),
|
||||
HTTPError, least(SendTiming, 30000), least(DNSTiming, 30000), least(ConnectTiming, 30000), least(ResponseStartTiming, 30000),
|
||||
least(ResponseEndTiming, 30000), least(FetchTiming, 30000), SocialSourceNetworkID,
|
||||
replaceAll(replaceAll(replaceAll(toValidUTF8(SocialSourcePage), '\0', ''), '"', ''), '\\', ''), ParamPrice, replaceAll(replaceAll(replaceAll(toValidUTF8(ParamOrderID), '\0', ''), '"', ''), '\\', ''), replaceAll(replaceAll(replaceAll(toValidUTF8(ParamCurrency::String), '\0', ''), '"', ''), '\\', ''),
|
||||
ParamCurrencyID::Int16, OpenstatServiceName, OpenstatCampaignID, OpenstatAdID, OpenstatSourceID,
|
||||
UTMSource, UTMMedium, UTMCampaign, UTMContent, UTMTerm, FromTag, HasGCLID, RefererHash::Int64, URLHash::Int64, CLID::Int32
|
||||
FROM hits_100m_obfuscated
|
||||
INTO OUTFILE 'dump.tsv'
|
||||
FORMAT TSV
|
||||
```
|
||||
|
||||
Insert data into PostgreSQL:
|
||||
|
||||
```
|
||||
\copy hits_100m_pg FROM 'dump.tsv';
|
||||
```
|

benchmark/postgresql/log (new file, +129 lines)
@@ -0,0 +1,129 @@
|
||||
Time: 122020.258 ms (02:02.020)
|
||||
Time: 5060.281 ms (00:05.060)
|
||||
Time: 5052.692 ms (00:05.053)
|
||||
Time: 129594.172 ms (02:09.594)
|
||||
Time: 8079.623 ms (00:08.080)
|
||||
Time: 7866.964 ms (00:07.867)
|
||||
Time: 129584.717 ms (02:09.585)
|
||||
Time: 8276.161 ms (00:08.276)
|
||||
Time: 8153.295 ms (00:08.153)
|
||||
Time: 123707.890 ms (02:03.708)
|
||||
Time: 6835.297 ms (00:06.835)
|
||||
Time: 6607.039 ms (00:06.607)
|
||||
Time: 166640.676 ms (02:46.641)
|
||||
Time: 75401.239 ms (01:15.401)
|
||||
Time: 73526.027 ms (01:13.526)
|
||||
Time: 272715.750 ms (04:32.716)
|
||||
Time: 182721.613 ms (03:02.722)
|
||||
Time: 182880.525 ms (03:02.881)
|
||||
Time: 127108.191 ms (02:07.108)
|
||||
Time: 6542.913 ms (00:06.543)
|
||||
Time: 6339.887 ms (00:06.340)
|
||||
Time: 127339.314 ms (02:07.339)
|
||||
Time: 8376.381 ms (00:08.376)
|
||||
Time: 7831.872 ms (00:07.832)
|
||||
Time: 179176.439 ms (02:59.176)
|
||||
Time: 58559.297 ms (00:58.559)
|
||||
Time: 58139.265 ms (00:58.139)
|
||||
Time: 182019.101 ms (03:02.019)
|
||||
Time: 58435.027 ms (00:58.435)
|
||||
Time: 58130.994 ms (00:58.131)
|
||||
Time: 132449.502 ms (02:12.450)
|
||||
Time: 11203.104 ms (00:11.203)
|
||||
Time: 11048.435 ms (00:11.048)
|
||||
Time: 128445.641 ms (02:08.446)
|
||||
Time: 11602.145 ms (00:11.602)
|
||||
Time: 11418.356 ms (00:11.418)
|
||||
Time: 162831.387 ms (02:42.831)
|
||||
Time: 41510.710 ms (00:41.511)
|
||||
Time: 41682.899 ms (00:41.683)
|
||||
Time: 171898.965 ms (02:51.899)
|
||||
Time: 47379.274 ms (00:47.379)
|
||||
Time: 47429.908 ms (00:47.430)
|
||||
Time: 161607.811 ms (02:41.608)
|
||||
Time: 41674.409 ms (00:41.674)
|
||||
Time: 40854.340 ms (00:40.854)
|
||||
Time: 175247.929 ms (02:55.248)
|
||||
Time: 46721.776 ms (00:46.722)
|
||||
Time: 46507.631 ms (00:46.508)
|
||||
Time: 335961.271 ms (05:35.961)
|
||||
Time: 248535.866 ms (04:08.536)
|
||||
Time: 247383.678 ms (04:07.384)
|
||||
Time: 132852.983 ms (02:12.853)
|
||||
Time: 14939.304 ms (00:14.939)
|
||||
Time: 14607.525 ms (00:14.608)
|
||||
Time: 243461.844 ms (04:03.462)
|
||||
Time: 157307.904 ms (02:37.308)
|
||||
Time: 155093.101 ms (02:35.093)
|
||||
Time: 122090.761 ms (02:02.091)
|
||||
Time: 6411.266 ms (00:06.411)
|
||||
Time: 6308.178 ms (00:06.308)
|
||||
Time: 126584.819 ms (02:06.585)
|
||||
Time: 8836.471 ms (00:08.836)
|
||||
Time: 8532.176 ms (00:08.532)
|
||||
Time: 125225.097 ms (02:05.225)
|
||||
Time: 10236.910 ms (00:10.237)
|
||||
Time: 9849.757 ms (00:09.850)
|
||||
Time: 139140.064 ms (02:19.140)
|
||||
Time: 21797.859 ms (00:21.798)
|
||||
Time: 21559.214 ms (00:21.559)
|
||||
Time: 124757.485 ms (02:04.757)
|
||||
Time: 8728.403 ms (00:08.728)
|
||||
Time: 8714.130 ms (00:08.714)
|
||||
Time: 120687.258 ms (02:00.687)
|
||||
Time: 8366.245 ms (00:08.366)
|
||||
Time: 8146.856 ms (00:08.147)
|
||||
Time: 122327.148 ms (02:02.327)
|
||||
Time: 8698.359 ms (00:08.698)
|
||||
Time: 8480.807 ms (00:08.481)
|
||||
Time: 123958.614 ms (02:03.959)
|
||||
Time: 8595.931 ms (00:08.596)
|
||||
Time: 8241.773 ms (00:08.242)
|
||||
Time: 128982.905 ms (02:08.983)
|
||||
Time: 11252.783 ms (00:11.253)
|
||||
Time: 10957.931 ms (00:10.958)
|
||||
Time: 208455.385 ms (03:28.455)
|
||||
Time: 102530.897 ms (01:42.531)
|
||||
Time: 102049.298 ms (01:42.049)
|
||||
Time: 131268.420 ms (02:11.268)
|
||||
Time: 21094.466 ms (00:21.094)
|
||||
Time: 20934.610 ms (00:20.935)
|
||||
Time: 164084.134 ms (02:44.084)
|
||||
Time: 77418.547 ms (01:17.419)
|
||||
Time: 75422.290 ms (01:15.422)
|
||||
Time: 174800.022 ms (02:54.800)
|
||||
Time: 87859.594 ms (01:27.860)
|
||||
Time: 85733.954 ms (01:25.734)
|
||||
Time: 419357.463 ms (06:59.357)
|
||||
Time: 339047.269 ms (05:39.047)
|
||||
Time: 334808.230 ms (05:34.808)
|
||||
Time: 475011.901 ms (07:55.012)
|
||||
Time: 344406.246 ms (05:44.406)
|
||||
Time: 347197.731 ms (05:47.198)
|
||||
Time: 464657.732 ms (07:44.658)
|
||||
Time: 332084.079 ms (05:32.084)
|
||||
Time: 330921.322 ms (05:30.921)
|
||||
Time: 152490.615 ms (02:32.491)
|
||||
Time: 30954.343 ms (00:30.954)
|
||||
Time: 31379.062 ms (00:31.379)
|
||||
Time: 128539.127 ms (02:08.539)
|
||||
Time: 12802.672 ms (00:12.803)
|
||||
Time: 12494.088 ms (00:12.494)
|
||||
Time: 125850.120 ms (02:05.850)
|
||||
Time: 10318.773 ms (00:10.319)
|
||||
Time: 9953.030 ms (00:09.953)
|
||||
Time: 126602.092 ms (02:06.602)
|
||||
Time: 8935.571 ms (00:08.936)
|
||||
Time: 8711.184 ms (00:08.711)
|
||||
Time: 133222.456 ms (02:13.222)
|
||||
Time: 11848.869 ms (00:11.849)
|
||||
Time: 11752.640 ms (00:11.753)
|
||||
Time: 126950.067 ms (02:06.950)
|
||||
Time: 11260.892 ms (00:11.261)
|
||||
Time: 10943.649 ms (00:10.944)
|
||||
Time: 128451.171 ms (02:08.451)
|
||||
Time: 10984.980 ms (00:10.985)
|
||||
Time: 10770.609 ms (00:10.771)
|
||||
Time: 124621.000 ms (02:04.621)
|
||||
Time: 8885.466 ms (00:08.885)
|
||||
Time: 8857.296 ms (00:08.857)
|

benchmark/postgresql/queries.sql (new file, +43 lines)
@@ -0,0 +1,43 @@
|
||||
SELECT count(*) FROM {table};
|
||||
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
|
||||
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
|
||||
SELECT sum(UserID) FROM {table};
|
||||
SELECT COUNT(DISTINCT UserID) FROM {table};
|
||||
SELECT COUNT(DISTINCT SearchPhrase) FROM {table};
|
||||
SELECT min(EventDate), max(EventDate) FROM {table};
|
||||
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
|
||||
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
|
||||
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
|
||||
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
|
||||
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
|
||||
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
|
||||
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
|
||||
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
|

benchmark/timescaledb/benchmark.sh (new executable file, +11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits_100m_obfuscated/' | while read query; do

    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query";
    for i in {1..3}; do
        sudo -u postgres psql tutorial -t -c 'set jit = off' -c '\timing' -c "$query" | grep 'Time' | tee --append log
    done;
done;

benchmark/timescaledb/log (new file, +215 lines)
@@ -0,0 +1,215 @@
|
||||
3
|
||||
SELECT count(*) FROM hits_100m_obfuscated;
|
||||
Time: 3259.733 ms (00:03.260)
|
||||
Time: 3135.484 ms (00:03.135)
|
||||
Time: 3135.579 ms (00:03.136)
|
||||
3
|
||||
SELECT count(*) FROM hits_100m_obfuscated WHERE AdvEngineID != 0;
|
||||
Time: 146854.557 ms (02:26.855)
|
||||
Time: 6921.736 ms (00:06.922)
|
||||
Time: 6619.892 ms (00:06.620)
|
||||
3
|
||||
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_100m_obfuscated;
|
||||
Time: 146568.297 ms (02:26.568)
|
||||
Time: 7481.610 ms (00:07.482)
|
||||
Time: 7258.209 ms (00:07.258)
|
||||
3
|
||||
SELECT sum(UserID) FROM hits_100m_obfuscated;
|
||||
Time: 146864.106 ms (02:26.864)
|
||||
Time: 5690.024 ms (00:05.690)
|
||||
Time: 5381.820 ms (00:05.382)
|
||||
3
|
||||
SELECT COUNT(DISTINCT UserID) FROM hits_100m_obfuscated;
|
||||
Time: 227507.331 ms (03:47.507)
|
||||
Time: 69165.471 ms (01:09.165)
|
||||
Time: 72216.950 ms (01:12.217)
|
||||
3
|
||||
SELECT COUNT(DISTINCT SearchPhrase) FROM hits_100m_obfuscated;
|
||||
Time: 323644.397 ms (05:23.644)
|
||||
Time: 177578.740 ms (02:57.579)
|
||||
Time: 175055.738 ms (02:55.056)
|
||||
3
|
||||
SELECT min(EventDate), max(EventDate) FROM hits_100m_obfuscated;
|
||||
Time: 146147.843 ms (02:26.148)
|
||||
Time: 5735.128 ms (00:05.735)
|
||||
Time: 5428.638 ms (00:05.429)
|
||||
3
|
||||
SELECT AdvEngineID, count(*) FROM hits_100m_obfuscated WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
|
||||
Time: 148658.450 ms (02:28.658)
|
||||
Time: 7014.882 ms (00:07.015)
|
||||
Time: 6599.736 ms (00:06.600)
|
||||
3
|
||||
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
Time: 202423.122 ms (03:22.423)
|
||||
Time: 54439.047 ms (00:54.439)
|
||||
Time: 54800.354 ms (00:54.800)
|
||||
3
|
||||
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits_100m_obfuscated GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
Time: 201152.491 ms (03:21.152)
|
||||
Time: 55875.854 ms (00:55.876)
|
||||
Time: 55200.330 ms (00:55.200)
|
||||
3
|
||||
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
Time: 146042.603 ms (02:26.043)
|
||||
Time: 9931.633 ms (00:09.932)
|
||||
Time: 10037.032 ms (00:10.037)
|
||||
3
|
||||
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
Time: 150811.952 ms (02:30.812)
|
||||
Time: 10320.230 ms (00:10.320)
|
||||
Time: 9993.232 ms (00:09.993)
|
||||
3
|
||||
SELECT SearchPhrase, count(*) AS c FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 173071.218 ms (02:53.071)
Time: 34314.835 ms (00:34.315)
Time: 34420.919 ms (00:34.421)
3
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
Time: 172874.155 ms (02:52.874)
Time: 43704.494 ms (00:43.704)
Time: 43918.380 ms (00:43.918)
3
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 178484.822 ms (02:58.485)
Time: 36850.436 ms (00:36.850)
Time: 35789.029 ms (00:35.789)
3
SELECT UserID, count(*) FROM hits_100m_obfuscated GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
Time: 169720.759 ms (02:49.721)
Time: 24125.730 ms (00:24.126)
Time: 23782.745 ms (00:23.783)
3
SELECT UserID, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
Time: 182335.631 ms (03:02.336)
Time: 37324.563 ms (00:37.325)
Time: 37124.250 ms (00:37.124)
3
SELECT UserID, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, SearchPhrase LIMIT 10;
Time: 163799.714 ms (02:43.800)
Time: 18514.031 ms (00:18.514)
Time: 18968.524 ms (00:18.969)
3
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM hits_100m_obfuscated GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
Time: 294799.480 ms (04:54.799)
Time: 149592.992 ms (02:29.593)
Time: 149466.291 ms (02:29.466)
3
SELECT UserID FROM hits_100m_obfuscated WHERE UserID = -6101065172474983726;
Time: 140797.496 ms (02:20.797)
Time: 5312.321 ms (00:05.312)
Time: 5020.502 ms (00:05.021)
3
SELECT count(*) FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%';
Time: 143092.287 ms (02:23.092)
Time: 7893.874 ms (00:07.894)
Time: 7661.326 ms (00:07.661)
3
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 143682.424 ms (02:23.682)
Time: 9249.962 ms (00:09.250)
Time: 9073.876 ms (00:09.074)
3
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits_100m_obfuscated WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
Time: 150965.884 ms (02:30.966)
Time: 20350.812 ms (00:20.351)
Time: 20074.939 ms (00:20.075)
3
SELECT * FROM hits_100m_obfuscated WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
Time: 4674.669 ms (00:04.675)
Time: 4532.389 ms (00:04.532)
Time: 4555.457 ms (00:04.555)
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
Time: 5.177 ms
Time: 5.031 ms
Time: 4.419 ms
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
Time: 141152.210 ms (02:21.152)
Time: 7492.968 ms (00:07.493)
Time: 7300.428 ms (00:07.300)
3
SELECT SearchPhrase FROM hits_100m_obfuscated WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
Time: 30.736 ms
Time: 5.018 ms
Time: 5.132 ms
3
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_100m_obfuscated WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
Time: 144034.016 ms (02:24.034)
Time: 10701.672 ms (00:10.702)
Time: 10348.565 ms (00:10.349)
3
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits_100m_obfuscated WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
Time: 191575.080 ms (03:11.575)
Time: 97836.706 ms (01:37.837)
Time: 97673.219 ms (01:37.673)
3
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_100m_obfuscated;
Time: 143652.317 ms (02:23.652)
Time: 22185.656 ms (00:22.186)
Time: 21887.411 ms (00:21.887)
3
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 153481.944 ms (02:33.482)
Time: 17748.628 ms (00:17.749)
Time: 17551.116 ms (00:17.551)
3
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 167448.684 ms (02:47.449)
Time: 25902.961 ms (00:25.903)
Time: 25592.018 ms (00:25.592)
3
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits_100m_obfuscated GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
Time: 299183.443 ms (04:59.183)
Time: 145349.772 ms (02:25.350)
Time: 143214.688 ms (02:23.215)
3
SELECT URL, count(*) AS c FROM hits_100m_obfuscated GROUP BY URL ORDER BY c DESC LIMIT 10;
Time: 389851.369 ms (06:29.851)
Time: 228158.639 ms (03:48.159)
Time: 231811.118 ms (03:51.811)
3
SELECT 1, URL, count(*) AS c FROM hits_100m_obfuscated GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
Time: 407458.343 ms (06:47.458)
Time: 230125.530 ms (03:50.126)
Time: 230764.511 ms (03:50.765)
3
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_100m_obfuscated GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
Time: 174098.556 ms (02:54.099)
Time: 23503.975 ms (00:23.504)
Time: 24322.856 ms (00:24.323)
3
SELECT URL, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
Time: 145906.025 ms (02:25.906)
Time: 10824.695 ms (00:10.825)
Time: 10484.885 ms (00:10.485)
3
SELECT Title, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
Time: 144063.711 ms (02:24.064)
Time: 8947.980 ms (00:08.948)
Time: 8608.434 ms (00:08.608)
3
SELECT URL, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
Time: 141883.596 ms (02:21.884)
Time: 7977.257 ms (00:07.977)
Time: 7673.547 ms (00:07.674)
3
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
Time: 147100.084 ms (02:27.100)
Time: 9527.812 ms (00:09.528)
Time: 9457.663 ms (00:09.458)
3
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
Time: 144585.669 ms (02:24.586)
Time: 10815.223 ms (00:10.815)
Time: 10594.707 ms (00:10.595)
3
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
Time: 145738.341 ms (02:25.738)
Time: 10592.979 ms (00:10.593)
Time: 10181.477 ms (00:10.181)
3
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM hits_100m_obfuscated WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
Time: 145023.796 ms (02:25.024)
Time: 8035.337 ms (00:08.035)
Time: 7865.698 ms (00:07.866)
129 benchmark/timescaledb/log_compressed Normal file
@@ -0,0 +1,129 @@
|
||||
Time: 1784.299 ms (00:01.784)
|
||||
Time: 1223.461 ms (00:01.223)
|
||||
Time: 1200.665 ms (00:01.201)
|
||||
Time: 22730.141 ms (00:22.730)
|
||||
Time: 1379.227 ms (00:01.379)
|
||||
Time: 1361.595 ms (00:01.362)
|
||||
Time: 29888.235 ms (00:29.888)
|
||||
Time: 3160.611 ms (00:03.161)
|
||||
Time: 3207.363 ms (00:03.207)
|
||||
Time: 53922.569 ms (00:53.923)
|
||||
Time: 2301.456 ms (00:02.301)
|
||||
Time: 2277.009 ms (00:02.277)
|
||||
Time: 45363.999 ms (00:45.364)
|
||||
Time: 43765.848 ms (00:43.766)
|
||||
Time: 44066.621 ms (00:44.067)
|
||||
Time: 172945.633 ms (02:52.946)
|
||||
Time: 136944.098 ms (02:16.944)
|
||||
Time: 138268.413 ms (02:18.268)
|
||||
Time: 16764.579 ms (00:16.765)
|
||||
Time: 2579.907 ms (00:02.580)
|
||||
Time: 2590.390 ms (00:02.590)
|
||||
Time: 1498.034 ms (00:01.498)
|
||||
Time: 1434.534 ms (00:01.435)
|
||||
Time: 1448.123 ms (00:01.448)
|
||||
Time: 113533.016 ms (01:53.533)
|
||||
Time: 78465.335 ms (01:18.465)
|
||||
Time: 80778.839 ms (01:20.779)
|
||||
Time: 90456.388 ms (01:30.456)
|
||||
Time: 87050.166 ms (01:27.050)
|
||||
Time: 88426.851 ms (01:28.427)
|
||||
Time: 45021.632 ms (00:45.022)
|
||||
Time: 12486.342 ms (00:12.486)
|
||||
Time: 12222.489 ms (00:12.222)
|
||||
Time: 44246.843 ms (00:44.247)
|
||||
Time: 15606.856 ms (00:15.607)
|
||||
Time: 15251.554 ms (00:15.252)
|
||||
Time: 29654.719 ms (00:29.655)
|
||||
Time: 29441.858 ms (00:29.442)
|
||||
Time: 29608.141 ms (00:29.608)
|
||||
Time: 103547.383 ms (01:43.547)
|
||||
Time: 104733.648 ms (01:44.734)
|
||||
Time: 105779.016 ms (01:45.779)
|
||||
Time: 29695.834 ms (00:29.696)
|
||||
Time: 15395.447 ms (00:15.395)
|
||||
Time: 15819.650 ms (00:15.820)
|
||||
Time: 27841.552 ms (00:27.842)
|
||||
Time: 29521.849 ms (00:29.522)
|
||||
Time: 27508.521 ms (00:27.509)
|
||||
Time: 56665.709 ms (00:56.666)
|
||||
Time: 56459.321 ms (00:56.459)
|
||||
Time: 56407.620 ms (00:56.408)
|
||||
Time: 27488.888 ms (00:27.489)
|
||||
Time: 25557.427 ms (00:25.557)
|
||||
Time: 25634.140 ms (00:25.634)
|
||||
Time: 97376.463 ms (01:37.376)
|
||||
Time: 96047.902 ms (01:36.048)
|
||||
Time: 99918.341 ms (01:39.918)
|
||||
Time: 6294.887 ms (00:06.295)
|
||||
Time: 6407.262 ms (00:06.407)
|
||||
Time: 6376.369 ms (00:06.376)
|
||||
Time: 40787.808 ms (00:40.788)
|
||||
Time: 11206.256 ms (00:11.206)
|
||||
Time: 11219.871 ms (00:11.220)
|
||||
Time: 12420.227 ms (00:12.420)
|
||||
Time: 12548.301 ms (00:12.548)
|
||||
Time: 12468.458 ms (00:12.468)
|
||||
Time: 57679.878 ms (00:57.680)
|
||||
Time: 35466.123 ms (00:35.466)
|
||||
Time: 35562.064 ms (00:35.562)
|
||||
Time: 13551.276 ms (00:13.551)
|
||||
Time: 13417.313 ms (00:13.417)
|
||||
Time: 13645.287 ms (00:13.645)
|
||||
Time: 150.297 ms
|
||||
Time: 55.995 ms
|
||||
Time: 55.796 ms
|
||||
Time: 3059.796 ms (00:03.060)
|
||||
Time: 3038.246 ms (00:03.038)
|
||||
Time: 3041.210 ms (00:03.041)
|
||||
Time: 4461.720 ms (00:04.462)
|
||||
Time: 4446.691 ms (00:04.447)
|
||||
Time: 4424.526 ms (00:04.425)
|
||||
Time: 29275.463 ms (00:29.275)
|
||||
Time: 17558.747 ms (00:17.559)
|
||||
Time: 17438.621 ms (00:17.439)
|
||||
Time: 203316.184 ms (03:23.316)
|
||||
Time: 190037.946 ms (03:10.038)
|
||||
Time: 189276.624 ms (03:09.277)
|
||||
Time: 36921.542 ms (00:36.922)
|
||||
Time: 36963.771 ms (00:36.964)
|
||||
Time: 36660.406 ms (00:36.660)
|
||||
Time: 38307.345 ms (00:38.307)
|
||||
Time: 17597.355 ms (00:17.597)
|
||||
Time: 17324.776 ms (00:17.325)
|
||||
Time: 39857.567 ms (00:39.858)
|
||||
Time: 26776.411 ms (00:26.776)
|
||||
Time: 26592.819 ms (00:26.593)
|
||||
Time: 162782.290 ms (02:42.782)
|
||||
Time: 160722.582 ms (02:40.723)
|
||||
Time: 162487.263 ms (02:42.487)
|
||||
Time: 261494.290 ms (04:21.494)
|
||||
Time: 263594.014 ms (04:23.594)
|
||||
Time: 260436.201 ms (04:20.436)
|
||||
Time: 265758.455 ms (04:25.758)
|
||||
Time: 270087.523 ms (04:30.088)
|
||||
Time: 266617.218 ms (04:26.617)
|
||||
Time: 30677.159 ms (00:30.677)
|
||||
Time: 28933.542 ms (00:28.934)
|
||||
Time: 29815.271 ms (00:29.815)
|
||||
Time: 19754.932 ms (00:19.755)
|
||||
Time: 16851.157 ms (00:16.851)
|
||||
Time: 16703.289 ms (00:16.703)
|
||||
Time: 10379.500 ms (00:10.379)
|
||||
Time: 10267.336 ms (00:10.267)
|
||||
Time: 10287.944 ms (00:10.288)
|
||||
Time: 17320.582 ms (00:17.321)
|
||||
Time: 9786.410 ms (00:09.786)
|
||||
Time: 9760.578 ms (00:09.761)
|
||||
Time: 33487.352 ms (00:33.487)
|
||||
Time: 26056.528 ms (00:26.057)
|
||||
Time: 25958.258 ms (00:25.958)
|
||||
Time: 28020.227 ms (00:28.020)
|
||||
Time: 5609.725 ms (00:05.610)
|
||||
Time: 5538.744 ms (00:05.539)
|
||||
Time: 15119.473 ms (00:15.119)
|
||||
Time: 5057.455 ms (00:05.057)
|
||||
Time: 5063.154 ms (00:05.063)
|
||||
Time: 3627.703 ms (00:03.628)
|
||||
Time: 3645.232 ms (00:03.645)
|
||||
Time: 3546.855 ms (00:03.547)
|
43 benchmark/timescaledb/queries.sql Normal file
@@ -0,0 +1,43 @@
|
||||
SELECT count(*) FROM {table};
|
||||
SELECT count(*) FROM {table} WHERE AdvEngineID != 0;
|
||||
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM {table};
|
||||
SELECT sum(UserID) FROM {table};
|
||||
SELECT COUNT(DISTINCT UserID) FROM {table};
|
||||
SELECT COUNT(DISTINCT SearchPhrase) FROM {table};
|
||||
SELECT min(EventDate), max(EventDate) FROM {table};
|
||||
SELECT AdvEngineID, count(*) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
|
||||
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT UserID, count(*) FROM {table} GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
|
||||
SELECT UserID, extract(minute FROM EventTime) AS m, SearchPhrase, count(*) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID FROM {table} WHERE UserID = -6101065172474983726;
|
||||
SELECT count(*) FROM {table} WHERE URL LIKE '%metrika%';
|
||||
SELECT SearchPhrase, min(URL), count(*) AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
|
||||
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT REGEXP_REPLACE(Referer, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(length(Referer)) AS l, count(*) AS c, min(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
|
||||
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT 1, URL, count(*) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM {table} GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT Title, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
|
||||
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT DATE_TRUNC('minute', EventTime) AS "Minute", count(*) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', EventTime) ORDER BY DATE_TRUNC('minute', EventTime);
|
1663 benchmark/timescaledb/usability.md Normal file
File diff suppressed because it is too large
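The timings in the benchmark logs above follow psql's `\timing` output. As an illustration only, here is a minimal sketch of how such a log could be collected against a TimescaleDB instance, assuming the `hits_100m_obfuscated` table is already loaded and substituting the table name into the `queries.sql` placeholder; the connection parameters and the three-run loop are assumptions, not part of this commit's tooling.

```bash
#!/usr/bin/env bash
# Illustrative sketch only: replay queries.sql against TimescaleDB and record psql timings.
# Assumptions: psql is installed, the server is reachable with the parameters below,
# and the hits_100m_obfuscated table is already loaded.
set -euo pipefail

PSQL="psql -h localhost -U postgres -d tsdb"   # connection parameters are assumptions
TABLE="hits_100m_obfuscated"
TRIES=3

while IFS= read -r query; do
    [ -z "$query" ] && continue
    q="${query//\{table\}/$TABLE}"
    echo "$TRIES"
    echo "$q"
    for _ in $(seq "$TRIES"); do
        # '\timing' makes psql print lines such as "Time: 34314.835 ms (00:34.315)"
        { echo '\timing'; echo "$q"; } | $PSQL | grep '^Time:' || true
    done
done < queries.sql
```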
@@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
SET(VERSION_REVISION 54456)
SET(VERSION_REVISION 54457)
SET(VERSION_MAJOR 21)
SET(VERSION_MINOR 11)
SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
SET(VERSION_GITHASH 7a4a0b0edef0ad6e0aa662cd3b90c3f4acf796e7)
SET(VERSION_DESCRIBE v21.11.1.1-prestable)
SET(VERSION_STRING 21.11.1.1)
SET(VERSION_GITHASH 503a418dedf0011e9040c3a1b6913e0b5488be4c)
SET(VERSION_DESCRIBE v21.12.1.1-prestable)
SET(VERSION_STRING 21.12.1.1)
# end of autochange
@@ -18,6 +18,10 @@ option (ENABLE_PCLMULQDQ "Use pclmulqdq instructions on x86_64" 1)
option (ENABLE_POPCNT "Use popcnt instructions on x86_64" 1)
option (ENABLE_AVX "Use AVX instructions on x86_64" 0)
option (ENABLE_AVX2 "Use AVX2 instructions on x86_64" 0)
option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0)
option (ENABLE_BMI "Use BMI instructions on x86_64" 0)
option (ENABLE_AVX2_FOR_SPEC_OP "Use avx2 instructions for specific operations on x86_64" 0)
option (ENABLE_AVX512_FOR_SPEC_OP "Use avx512 instructions for specific operations on x86_64" 0)

option (ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries non-portable but more performant code may be generated. This option overrides ENABLE_* options for specific instruction set. Highly not recommended to use." 0)

@@ -127,6 +131,57 @@ else ()
    if (HAVE_AVX2 AND ENABLE_AVX2)
        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
    endif ()

    set (TEST_FLAG "-mavx512f -mavx512bw")
    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
    check_cxx_source_compiles("
        #include <immintrin.h>
        int main() {
            auto a = _mm512_setzero_epi32();
            (void)a;
            auto b = _mm512_add_epi16(__m512i(), __m512i());
            (void)b;
            return 0;
        }
    " HAVE_AVX512)
    if (HAVE_AVX512 AND ENABLE_AVX512)
        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
    endif ()

    set (TEST_FLAG "-mbmi")
    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
    check_cxx_source_compiles("
        #include <immintrin.h>
        int main() {
            auto a = _blsr_u32(0);
            (void)a;
            return 0;
        }
    " HAVE_BMI)
    if (HAVE_BMI AND ENABLE_BMI)
        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
    endif ()

    # Limit avx2/avx512 flag for specific source build
    set (X86_INTRINSICS_FLAGS "")
    if (ENABLE_AVX2_FOR_SPEC_OP)
        if (HAVE_BMI)
            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
        endif ()
        if (HAVE_AVX AND HAVE_AVX2)
            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx -mavx2")
        endif ()
    endif ()

    if (ENABLE_AVX512_FOR_SPEC_OP)
        set (X86_INTRINSICS_FLAGS "")
        if (HAVE_BMI)
            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
        endif ()
        if (HAVE_AVX512)
            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mprefer-vector-width=256")
        endif ()
    endif ()
endif ()

cmake_pop_check_state ()
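The new `ENABLE_AVX*` and `ENABLE_BMI` options above are plain CMake cache options, so they would be toggled at configure time. A hedged sketch of such an invocation follows; the build directory layout is an assumption, and whether the flags actually take effect still depends on the `check_cxx_source_compiles` probes above succeeding.

```bash
# Sketch: opt in to AVX2/BMI code generation when configuring a build.
# Only meaningful if the compiler probes above set HAVE_AVX2/HAVE_BMI.
mkdir -p build && cd build
cmake .. -DENABLE_AVX=1 -DENABLE_AVX2=1 -DENABLE_BMI=1 \
         -DENABLE_AVX2_FOR_SPEC_OP=1
cmake --build .
```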
8 cmake/find/filelog.cmake Normal file
@@ -0,0 +1,8 @@
# StorageFileLog only support Linux platform
if (OS_LINUX)
    set (USE_FILELOG 1)
    message (STATUS "Using StorageFileLog = 1")
else()
    message(STATUS "StorageFileLog is only supported on Linux")
endif ()
@@ -8,7 +8,7 @@ endif ()

if (COMPILER_GCC)
    # Require minimum version of gcc
    set (GCC_MINIMUM_VERSION 10)
    set (GCC_MINIMUM_VERSION 11)
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION} AND NOT CMAKE_VERSION VERSION_LESS 2.8.9)
        message (FATAL_ERROR "GCC version must be at least ${GCC_MINIMUM_VERSION}. For example, if GCC ${GCC_MINIMUM_VERSION} is available under gcc-${GCC_MINIMUM_VERSION}, g++-${GCC_MINIMUM_VERSION} names, do the following: export CC=gcc-${GCC_MINIMUM_VERSION} CXX=g++-${GCC_MINIMUM_VERSION}; rm -rf CMakeCache.txt CMakeFiles; and re run cmake or ./release.")
    endif ()
@@ -18,6 +18,10 @@ if (COMPILER_GCC)
elseif (COMPILER_CLANG)
    # Require minimum version of clang/apple-clang
    if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
        # If you are developer you can figure out what exact versions of AppleClang are Ok,
        # simply remove the following line.
        message (FATAL_ERROR "AppleClang is not supported, you should install clang from brew. See the instruction: https://clickhouse.com/docs/en/development/build-osx/")

        # AppleClang 10.0.1 (Xcode 10.2) corresponds to LLVM/Clang upstream version 7.0.0
        # AppleClang 11.0.0 (Xcode 11.0) corresponds to LLVM/Clang upstream version 8.0.0
        set (XCODE_MINIMUM_VERSION 10.2)
@@ -31,7 +35,7 @@ elseif (COMPILER_CLANG)
        set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
    endif ()
else ()
    set (CLANG_MINIMUM_VERSION 9)
    set (CLANG_MINIMUM_VERSION 12)
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
        message (FATAL_ERROR "Clang version must be at least ${CLANG_MINIMUM_VERSION}.")
    endif ()
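The fatal-error message above already spells out the remedy; as a hedged illustration of what that looks like in practice (the `-11` suffixes are only an example of how such compilers are commonly named on a given system):

```bash
# Sketch: point CMake at a new-enough GCC before reconfiguring.
export CC=gcc-11 CXX=g++-11
rm -rf CMakeCache.txt CMakeFiles
cmake ..
```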
2 contrib/NuRaft vendored
@@ -1 +1 @@
Subproject commit 7ecb16844af6a9c283ad432d85ecc2e7d1544676
Subproject commit d10351f312c1ae1ca3fdda433693dfbef3acfece
2 contrib/aws vendored
@@ -1 +1 @@
Subproject commit 06aa8759d17f2032ffd5efa83969270ca9ac727b
Subproject commit 00b03604543367d7e310cb0993973fdcb723ea79
2 contrib/poco vendored
@@ -1 +1 @@
Subproject commit 46c80daf1b015aa10474ce82e3d24b578c6ae422
Subproject commit 39fd359765a3a77b46d94ec3c5def3c7802a920f
@@ -17,6 +17,16 @@ endif ()

add_subdirectory("${protobuf_SOURCE_DIR}/cmake" "${protobuf_BINARY_DIR}")

if (ENABLE_FUZZING)
    # `protoc` is built with the sanitizer enabled and can therefore fail during the ClickHouse build.
    # This is easily reproduced in the oss-fuzz build pipeline.
    # We could try to build `protoc` without any sanitizer via `-fno-sanitize=all`, but
    # in that case we would hit linker errors, because libcxx is still built with the sanitizer.
    # So we simply suppress all of these failures with the combination of this flag and an environment variable:
    # export MSAN_OPTIONS=exit_code=0
    target_compile_options(protoc PRIVATE "-fsanitize-recover=all")
endif()

# We don't want to stop compilation on warnings in protobuf's headers.
# The following line overrides the value assigned by the command target_include_directories() in libprotobuf.cmake
set_property(TARGET libprotobuf PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${protobuf_SOURCE_DIR}/src")
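As the comments note, the compile flag only makes sanitizer reports recoverable; the exit code still has to be neutralized in the environment of the build that runs `protoc`. A hedged sketch of how that pairing might look in a fuzzing build (the `-DSANITIZE=memory` value is an assumption beyond what this hunk itself sets):

```bash
# Sketch: let an MSan-instrumented protoc report findings without failing the build.
export MSAN_OPTIONS=exit_code=0
cmake .. -DENABLE_FUZZING=1 -DSANITIZE=memory
cmake --build . --target protoc
```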
2 contrib/snappy vendored
@@ -1 +1 @@
Subproject commit 3f194acb57e0487531c96b97af61dcbd025a78a3
Subproject commit fb057edfed820212076239fd32cb2ff23e9016bf
4 debian/changelog vendored
@@ -1,5 +1,5 @@
clickhouse (21.11.1.1) unstable; urgency=low
clickhouse (21.12.1.1) unstable; urgency=low

* Modified source code

-- clickhouse-release <clickhouse-release@yandex-team.ru> Thu, 09 Sep 2021 12:03:26 +0300
-- clickhouse-release <clickhouse-release@yandex-team.ru> Tue, 02 Nov 2021 00:56:42 +0300

@@ -1,7 +1,7 @@
FROM ubuntu:18.04

ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=21.11.1.*
ARG version=21.12.1.*

RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
43
docker/docs/builder/Dockerfile
Normal file
43
docker/docs/builder/Dockerfile
Normal file
@ -0,0 +1,43 @@
|
||||
# docker build -t clickhouse/docs-build .
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
|
||||
RUN sed -i 's|http://archive|http://ru.archive|g' /etc/apt/sources.list
|
||||
|
||||
RUN apt-get update \
|
||||
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
|
||||
python3-setuptools \
|
||||
virtualenv \
|
||||
wget \
|
||||
bash \
|
||||
python \
|
||||
curl \
|
||||
python3-requests \
|
||||
sudo \
|
||||
git \
|
||||
openssl \
|
||||
python3-pip \
|
||||
software-properties-common \
|
||||
language-pack-zh* \
|
||||
chinese* \
|
||||
fonts-arphic-ukai \
|
||||
fonts-arphic-uming \
|
||||
fonts-ipafont-mincho \
|
||||
fonts-ipafont-gothic \
|
||||
fonts-unfonts-core \
|
||||
xvfb \
|
||||
nodejs \
|
||||
npm \
|
||||
openjdk-11-jdk \
|
||||
ssh-client \
|
||||
&& pip --no-cache-dir install scipy \
|
||||
&& apt-get autoremove --yes \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN wget 'https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb'
|
||||
|
||||
RUN npm i -g purify-css
|
||||
|
||||
RUN pip3 install --ignore-installed --upgrade setuptools pip virtualenv
|
9
docker/docs/check/Dockerfile
Normal file
9
docker/docs/check/Dockerfile
Normal file
@ -0,0 +1,9 @@
|
||||
# docker build -t clickhouse/docs-check .
|
||||
FROM clickhouse/docs-builder
|
||||
|
||||
COPY run.sh /
|
||||
|
||||
ENV REPO_PATH=/repo_path
|
||||
ENV OUTPUT_PATH=/output_path
|
||||
|
||||
CMD ["/bin/bash", "/run.sh"]
|
9
docker/docs/check/run.sh
Normal file
9
docker/docs/check/run.sh
Normal file
@ -0,0 +1,9 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
cd $REPO_PATH/docs/tools
|
||||
mkdir venv
|
||||
virtualenv -p $(which python3) venv
|
||||
source venv/bin/activate
|
||||
python3 -m pip install --ignore-installed -r requirements.txt
|
||||
./build.py --skip-git-log 2>&1 | tee $OUTPUT_PATH/output.log
|
9
docker/docs/release/Dockerfile
Normal file
9
docker/docs/release/Dockerfile
Normal file
@ -0,0 +1,9 @@
|
||||
# docker build -t clickhouse/docs-release .
|
||||
FROM clickhouse/docs-builder
|
||||
|
||||
COPY run.sh /
|
||||
|
||||
ENV REPO_PATH=/repo_path
|
||||
ENV OUTPUT_PATH=/output_path
|
||||
|
||||
CMD ["/bin/bash", "/run.sh"]
|
10
docker/docs/release/run.sh
Normal file
10
docker/docs/release/run.sh
Normal file
@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
cd $REPO_PATH/docs/tools
|
||||
mkdir venv
|
||||
virtualenv -p $(which python3) venv
|
||||
source venv/bin/activate
|
||||
python3 -m pip install --ignore-installed -r requirements.txt
|
||||
mkdir -p ~/.ssh && ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts
|
||||
./release.sh 2>&1 | tee $OUTPUT_PATH/output.log
|
@ -166,5 +166,20 @@
|
||||
"docker/test/keeper-jepsen": {
|
||||
"name": "clickhouse/keeper-jepsen-test",
|
||||
"dependent": []
|
||||
},
|
||||
"docker/docs/builder": {
|
||||
"name": "clickhouse/docs-builder",
|
||||
"dependent": [
|
||||
"docker/docs/check",
|
||||
"docker/docs/release"
|
||||
]
|
||||
},
|
||||
"docker/docs/check": {
|
||||
"name": "clickhouse/docs-check",
|
||||
"dependent": []
|
||||
},
|
||||
"docker/docs/release": {
|
||||
"name": "clickhouse/docs-release",
|
||||
"dependent": []
|
||||
}
|
||||
}
|
||||
|
@ -12,19 +12,19 @@ printenv
|
||||
rm -f CMakeCache.txt
|
||||
read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}"
|
||||
# Hope, that the most part of files will be in cache, so we just link new executables
|
||||
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_C_COMPILER="$CC" -DCMAKE_CXX_COMPILER="$CXX" -DENABLE_CLICKHOUSE_ODBC_BRIDGE=OFF \
|
||||
-DENABLE_LIBRARIES=0 -DENABLE_SSL=1 -DUSE_INTERNAL_SSL_LIBRARY=1 -DUSE_UNWIND=ON -DENABLE_EMBEDDED_COMPILER=0 \
|
||||
-DENABLE_EXAMPLES=0 -DENABLE_UTILS=0 -DENABLE_THINLTO=0 "-DSANITIZE=$SANITIZER" \
|
||||
-DENABLE_FUZZING=1 -DFUZZER='libfuzzer' -DENABLE_TCMALLOC=0 -DENABLE_JEMALLOC=0 \
|
||||
-DENABLE_CHECK_HEAVY_BUILDS=1 -DGLIBC_COMPATIBILITY=OFF "${CMAKE_FLAGS[@]}" ..
|
||||
# Please, add or change flags directly in cmake
|
||||
cmake --debug-trycompile --verbose=1 -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_C_COMPILER="$CC" -DCMAKE_CXX_COMPILER="$CXX" \
|
||||
-DSANITIZE="$SANITIZER" -DENABLE_FUZZING=1 -DFUZZER='libfuzzer' -DENABLE_PROTOBUF=1 -DUSE_INTERNAL_PROTOBUF_LIBRARY=1 "${CMAKE_FLAGS[@]}" ..
|
||||
|
||||
FUZZER_TARGETS=$(find ../src -name '*_fuzzer.cpp' -execdir basename {} .cpp ';' | tr '\n' ' ')
|
||||
|
||||
NUM_JOBS=$(($(nproc || grep -c ^processor /proc/cpuinfo)))
|
||||
|
||||
mkdir -p /output/fuzzers
|
||||
for FUZZER_TARGET in $FUZZER_TARGETS
|
||||
do
|
||||
# shellcheck disable=SC2086 # No quotes because I want it to expand to nothing if empty.
|
||||
ninja $NINJA_FLAGS $FUZZER_TARGET
|
||||
ninja $NINJA_FLAGS $FUZZER_TARGET -j $NUM_JOBS
|
||||
# Find this binary in build directory and strip it
|
||||
FUZZER_PATH=$(find ./src -name "$FUZZER_TARGET")
|
||||
strip --strip-unneeded "$FUZZER_PATH"
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG version=21.11.1.*
|
||||
ARG version=21.12.1.*
|
||||
ARG gosu_ver=1.10
|
||||
|
||||
# set non-empty deb_location_url url to create a docker image
|
||||
|
@ -86,7 +86,7 @@ done
|
||||
if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CLICKHOUSE_PASSWORD" ]; then
|
||||
echo "$0: create new user '$CLICKHOUSE_USER' instead 'default'"
|
||||
cat <<EOT > /etc/clickhouse-server/users.d/default-user.xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- Docs: <https://clickhouse.com/docs/en/operations/settings/settings_users/> -->
|
||||
<users>
|
||||
<!-- Remove default user -->
|
||||
@ -103,7 +103,7 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
|
||||
<access_management>${CLICKHOUSE_ACCESS_MANAGEMENT}</access_management>
|
||||
</${CLICKHOUSE_USER}>
|
||||
</users>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
EOT
|
||||
fi
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM ubuntu:18.04
|
||||
|
||||
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
|
||||
ARG version=21.11.1.*
|
||||
ARG version=21.12.1.*
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y apt-transport-https dirmngr && \
|
||||
|
@ -264,7 +264,7 @@ function run_tests
|
||||
|
||||
set +e
|
||||
time clickhouse-test --hung-check -j 8 --order=random \
|
||||
--fast-tests-only --no-long --testname --shard --zookeeper \
|
||||
--fast-tests-only --no-long --testname --shard --zookeeper --check-zookeeper-session \
|
||||
-- "$FASTTEST_FOCUS" 2>&1 \
|
||||
| ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee "$FASTTEST_OUTPUT/test_result.txt"
|
||||
|
@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
# shellcheck disable=SC2086,SC2001
|
||||
# shellcheck disable=SC2086,SC2001,SC2046
|
||||
|
||||
set -eux
|
||||
set -o pipefail
|
||||
@ -13,24 +13,48 @@ script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||
echo "$script_dir"
|
||||
repo_dir=ch
|
||||
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-13_debug_none_bundled_unsplitted_disable_False_binary"}
|
||||
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
|
||||
|
||||
function clone
|
||||
{
|
||||
# The download() function is dependent on CI binaries anyway, so we can take
|
||||
# the repo from the CI as well. For local runs, start directly from the "fuzz"
|
||||
# stage.
|
||||
rm -rf ch ||:
|
||||
mkdir ch ||:
|
||||
wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/clickhouse_no_subs.tar.gz"
|
||||
tar -C ch --strip-components=1 -xf clickhouse_no_subs.tar.gz
|
||||
# For local runs, start directly from the "fuzz" stage.
|
||||
rm -rf "$repo_dir" ||:
|
||||
mkdir "$repo_dir" ||:
|
||||
|
||||
git clone --depth 1 https://github.com/ClickHouse/ClickHouse.git -- "$repo_dir" 2>&1 | ts '%Y-%m-%d %H:%M:%S'
|
||||
(
|
||||
cd "$repo_dir"
|
||||
if [ "$PR_TO_TEST" != "0" ]; then
|
||||
if git fetch --depth 1 origin "+refs/pull/$PR_TO_TEST/merge"; then
|
||||
git checkout FETCH_HEAD
|
||||
echo "Checked out pull/$PR_TO_TEST/merge ($(git rev-parse FETCH_HEAD))"
|
||||
else
|
||||
git fetch --depth 1 origin "+refs/pull/$PR_TO_TEST/head"
|
||||
git checkout "$SHA_TO_TEST"
|
||||
echo "Checked out nominal SHA $SHA_TO_TEST for PR $PR_TO_TEST"
|
||||
fi
|
||||
git diff --name-only master HEAD | tee ci-changed-files.txt
|
||||
else
|
||||
if [ -v COMMIT_SHA ]; then
|
||||
git fetch --depth 2 origin "$SHA_TO_TEST"
|
||||
git checkout "$SHA_TO_TEST"
|
||||
echo "Checked out nominal SHA $SHA_TO_TEST for master"
|
||||
else
|
||||
git fetch --depth 2 origin
|
||||
echo "Using default repository head $(git rev-parse HEAD)"
|
||||
fi
|
||||
git diff --name-only HEAD~1 HEAD | tee ci-changed-files.txt
|
||||
fi
|
||||
cd -
|
||||
)
|
||||
|
||||
ls -lath ||:
|
||||
|
||||
}
|
||||
|
||||
function download
|
||||
{
|
||||
wget -nv -nd -c "https://clickhouse-builds.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse" &
|
||||
wget -nv -nd -c "https://clickhouse-test-reports.s3.yandex.net/$PR_TO_TEST/$SHA_TO_TEST/repo/ci-changed-files.txt" &
|
||||
wait
|
||||
wget -nv -nd -c "$BINARY_URL_TO_DOWNLOAD"
|
||||
|
||||
chmod +x clickhouse
|
||||
ln -s ./clickhouse ./clickhouse-server
|
||||
@ -113,7 +137,7 @@ function fuzz
|
||||
|
||||
# Obtain the list of newly added tests. They will be fuzzed in more extreme way than other tests.
|
||||
# Don't overwrite the NEW_TESTS_OPT so that it can be set from the environment.
|
||||
NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\(\.j2\)\?\)$!ch/\1!p' ci-changed-files.txt | sort -R)"
|
||||
NEW_TESTS="$(sed -n 's!\(^tests/queries/0_stateless/.*\.sql\(\.j2\)\?\)$!ch/\1!p' $repo_dir/ci-changed-files.txt | sort -R)"
|
||||
# ci-changed-files.txt contains also files that has been deleted/renamed, filter them out.
|
||||
NEW_TESTS="$(filter_exists_and_template $NEW_TESTS)"
|
||||
if [[ -n "$NEW_TESTS" ]]
|
||||
|
@ -33,7 +33,7 @@ RUN apt-get update \
|
||||
tzdata \
|
||||
vim \
|
||||
wget \
|
||||
&& pip3 --no-cache-dir install 'git+https://github.com/mymarilyn/clickhouse-driver.git' scipy \
|
||||
&& pip3 --no-cache-dir install 'clickhouse-driver==0.2.1' scipy \
|
||||
&& apt-get purge --yes python3-dev g++ \
|
||||
&& apt-get autoremove --yes \
|
||||
&& apt-get clean \
|
||||
|
@ -196,7 +196,6 @@ function run_tests
|
||||
test_files=$(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}")
|
||||
elif [ "$PR_TO_TEST" -ne 0 ] \
|
||||
&& [ "$(wc -l < changed-test-definitions.txt)" -gt 0 ] \
|
||||
&& [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ] \
|
||||
&& [ "$(wc -l < other-changed-files.txt)" -eq 0 ]
|
||||
then
|
||||
# If only the perf tests were changed in the PR, we will run only these
|
||||
@ -208,15 +207,15 @@ function run_tests
|
||||
test_files=$(ls "$test_prefix"/*.xml)
|
||||
fi
|
||||
|
||||
# For PRs w/o changes in test definitons and scripts, test only a subset of
|
||||
# queries, and run them less times. If the corresponding environment variables
|
||||
# are already set, keep those values.
|
||||
if [ "$PR_TO_TEST" -ne 0 ] \
|
||||
&& [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ] \
|
||||
&& [ "$(wc -l < changed-test-scripts.txt)" -eq 0 ]
|
||||
# For PRs w/o changes in test definitons, test only a subset of queries,
|
||||
# and run them less times. If the corresponding environment variables are
|
||||
# already set, keep those values.
|
||||
#
|
||||
# NOTE: too high CHPC_RUNS/CHPC_MAX_QUERIES may hit internal CI timeout.
|
||||
if [ "$PR_TO_TEST" -ne 0 ] && [ "$(wc -l < changed-test-definitions.txt)" -eq 0 ]
|
||||
then
|
||||
CHPC_RUNS=${CHPC_RUNS:-7}
|
||||
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-20}
|
||||
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-10}
|
||||
else
|
||||
CHPC_RUNS=${CHPC_RUNS:-13}
|
||||
CHPC_MAX_QUERIES=${CHPC_MAX_QUERIES:-0}
|
||||
@ -319,14 +318,14 @@ function get_profiles
|
||||
|
||||
wait
|
||||
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
|
||||
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 'QueryFinish' format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
|
||||
@ -635,7 +634,7 @@ create view query_display_names as select * from
|
||||
|
||||
create view partial_query_times as select * from
|
||||
file('analyze/partial-query-times.tsv', TSVWithNamesAndTypes,
|
||||
'test text, query_index int, time_stddev float, time_median float')
|
||||
'test text, query_index int, time_stddev float, time_median double')
|
||||
;
|
||||
|
||||
-- Report for partial queries that we could only run on the new server (e.g.
|
||||
|
@ -24,6 +24,13 @@
|
||||
|
||||
<!-- Don't fail some prewarm queries too early -->
|
||||
<timeout_before_checking_execution_speed>60</timeout_before_checking_execution_speed>
|
||||
|
||||
<!-- Query profiler enabled only for prewarm queries explicitly (see perf.py)
|
||||
This is needed for flamegraphs. -->
|
||||
<query_profiler_real_time_period_ns>0</query_profiler_real_time_period_ns>
|
||||
<query_profiler_cpu_time_period_ns>0</query_profiler_cpu_time_period_ns>
|
||||
<!-- Disable memory profiler too, since due to max_untracked_memory some queries may add trace entry and some may not -->
|
||||
<memory_profiler_step>0</memory_profiler_step>
|
||||
</default>
|
||||
</profiles>
|
||||
<users>
|
||||
|
@ -102,7 +102,6 @@ then
|
||||
base=$(git -C right/ch merge-base pr origin/master)
|
||||
git -C right/ch diff --name-only "$base" pr -- . | tee all-changed-files.txt
|
||||
git -C right/ch diff --name-only "$base" pr -- tests/performance | tee changed-test-definitions.txt
|
||||
git -C right/ch diff --name-only "$base" pr -- docker/test/performance-comparison | tee changed-test-scripts.txt
|
||||
git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt
|
||||
fi
|
||||
|
||||
|
@ -283,8 +283,11 @@ for query_index in queries_to_run:
|
||||
# test coverage. We disable profiler for normal runs because
|
||||
# it makes the results unstable.
|
||||
res = c.execute(q, query_id = prewarm_id,
|
||||
settings = {'max_execution_time': args.max_query_seconds,
|
||||
'query_profiler_real_time_period_ns': 10000000})
|
||||
settings = {
|
||||
'max_execution_time': args.max_query_seconds,
|
||||
'query_profiler_real_time_period_ns': 10000000,
|
||||
'memory_profiler_step': '4Mi',
|
||||
})
|
||||
except clickhouse_driver.errors.Error as e:
|
||||
# Add query id to the exception to make debugging easier.
|
||||
e.args = (prewarm_id, *e.args)
|
||||
|
@ -9,6 +9,7 @@ RUN apt-get update -y \
|
||||
|
||||
COPY s3downloader /s3downloader
|
||||
|
||||
ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
|
||||
ENV DATASETS="hits visits"
|
||||
|
||||
COPY run.sh /
|
||||
|
@ -56,7 +56,7 @@ function start()
|
||||
|
||||
start
|
||||
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
||||
/s3downloader --dataset-names $DATASETS
|
||||
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
clickhouse-client --query "SHOW DATABASES"
|
||||
|
||||
@ -109,7 +109,7 @@ function run_tests()
|
||||
fi
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
|
||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --no-stateless --hung-check --print-time "${ADDITIONAL_OPTIONS[@]}" \
|
||||
"$SKIP_TESTS_OPTION" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee test_output/test_result.txt
|
||||
set -e
|
||||
}
|
||||
|
@ -97,7 +97,7 @@ function run_tests()
|
||||
fi
|
||||
|
||||
set +e
|
||||
clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
|
||||
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
|
||||
--test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
|
||||
| ts '%Y-%m-%d %H:%M:%S' \
|
||||
| tee -a test_output/test_result.txt
|
||||
@ -139,12 +139,17 @@ mv /var/log/clickhouse-server/stderr.log /test_output/ ||:
|
||||
if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
|
||||
tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
|
||||
fi
|
||||
tar -chf /test_output/text_log_dump.tar /var/lib/clickhouse/data/system/text_log ||:
|
||||
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
|
||||
tar -chf /test_output/zookeeper_log_dump.tar /var/lib/clickhouse/data/system/zookeeper_log ||:
|
||||
tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||:
|
||||
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
|
||||
# Replace the engine with Ordinary to avoid extra symlinks stuff in artifacts.
|
||||
# (so that clickhouse-local --path can read it w/o extra care).
|
||||
sed -i -e "s/ATTACH DATABASE _ UUID '[^']*'/ATTACH DATABASE system/" -e "s/Atomic/Ordinary/" /var/lib/clickhouse/metadata/system.sql
|
||||
for table in text_log query_log zookeeper_log trace_log; do
|
||||
sed -i "s/ATTACH TABLE _ UUID '[^']*'/ATTACH TABLE $table/" /var/lib/clickhouse/metadata/system/${table}.sql
|
||||
tar -chf /test_output/${table}_dump.tar /var/lib/clickhouse/metadata/system.sql /var/lib/clickhouse/metadata/system/${table}.sql /var/lib/clickhouse/data/system/${table} ||:
|
||||
done
|
||||
|
||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
|
||||
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
|
||||
|
@ -26,4 +26,6 @@ COPY ./stress /stress
|
||||
COPY run.sh /
|
||||
|
||||
ENV DATASETS="hits visits"
|
||||
ENV S3_URL="https://clickhouse-datasets.s3.yandex.net"
|
||||
|
||||
CMD ["/bin/bash", "/run.sh"]
|
||||
|
@ -46,11 +46,11 @@ function configure()
|
||||
sudo chown root: /var/lib/clickhouse
|
||||
|
||||
# Set more frequent update period of asynchronous metrics to more frequently update information about real memory usage (less chance of OOM).
|
||||
echo "<yandex><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></yandex>" \
|
||||
echo "<clickhouse><asynchronous_metrics_update_period_s>1</asynchronous_metrics_update_period_s></clickhouse>" \
|
||||
> /etc/clickhouse-server/config.d/asynchronous_metrics_update_period_s.xml
|
||||
|
||||
# Set maximum memory usage as half of total memory (less chance of OOM).
|
||||
echo "<yandex><max_server_memory_usage_to_ram_ratio>0.5</max_server_memory_usage_to_ram_ratio></yandex>" \
|
||||
echo "<clickhouse><max_server_memory_usage_to_ram_ratio>0.5</max_server_memory_usage_to_ram_ratio></clickhouse>" \
|
||||
> /etc/clickhouse-server/config.d/max_server_memory_usage_to_ram_ratio.xml
|
||||
}
|
||||
|
||||
@ -112,7 +112,7 @@ configure
|
||||
start
|
||||
|
||||
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
||||
/s3downloader --dataset-names $DATASETS
|
||||
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
|
||||
clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
|
||||
@ -183,8 +183,14 @@ done
|
||||
|
||||
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
|
||||
mv /var/log/clickhouse-server/stderr.log /test_output/
|
||||
tar -chf /test_output/query_log_dump.tar /var/lib/clickhouse/data/system/query_log ||:
|
||||
tar -chf /test_output/trace_log_dump.tar /var/lib/clickhouse/data/system/trace_log ||:
|
||||
|
||||
# Replace the engine with Ordinary to avoid extra symlinks stuff in artifacts.
|
||||
# (so that clickhouse-local --path can read it w/o extra care).
|
||||
sed -i -e "s/ATTACH DATABASE _ UUID '[^']*'/ATTACH DATABASE system/" -e "s/Atomic/Ordinary/" /var/lib/clickhouse/metadata/system.sql
|
||||
for table in query_log trace_log; do
|
||||
sed -i "s/ATTACH TABLE _ UUID '[^']*'/ATTACH TABLE $table/" /var/lib/clickhouse/metadata/system/${table}.sql
|
||||
tar -chf /test_output/${table}_dump.tar /var/lib/clickhouse/metadata/system.sql /var/lib/clickhouse/metadata/system/${table}.sql /var/lib/clickhouse/data/system/${table} ||:
|
||||
done
|
||||
|
||||
# Write check result into check_status.tsv
|
||||
clickhouse-local --structure "test String, res String" -q "SELECT 'failure', test FROM table WHERE res != 'OK' order by (lower(test) like '%hung%') LIMIT 1" < /test_output/test_results.tsv > /test_output/check_status.tsv
|
||||
|
@ -10,7 +10,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
||||
python3-pip \
|
||||
pylint \
|
||||
yamllint \
|
||||
&& pip3 install codespell
|
||||
&& pip3 install codespell PyGithub boto3 unidiff
|
||||
|
||||
COPY run.sh /
|
||||
COPY process_style_check_result.py /
|
||||
|
@ -50,7 +50,7 @@ URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
|
||||
echo
|
||||
echo "Will download ${URL}"
|
||||
echo
|
||||
curl -O "${URL}" && chmod a+x clickhouse &&
|
||||
curl -O "${URL}" && chmod a+x clickhouse || exit 1
|
||||
echo
|
||||
echo "Successfully downloaded the ClickHouse binary, you can run it as:
|
||||
./clickhouse"
|
||||
|
@@ -3,9 +3,15 @@ toc_priority: 65
toc_title: Build on Mac OS X
---

# You don't have to build ClickHouse

You can install a pre-built ClickHouse binary as described at https://clickhouse.com/#quick-start.
Choose the Mac x86 or M1 build.

# How to Build ClickHouse on Mac OS X {#how-to-build-clickhouse-on-mac-os-x}

Build should work on x86_64 (Intel) and arm64 (Apple Silicon) based macOS 10.15 (Catalina) and higher with recent Xcode's native AppleClang, or Homebrew's vanilla Clang or GCC compilers.
Build should work on x86_64 (Intel) and arm64 (Apple Silicon) based macOS 10.15 (Catalina) and higher with Homebrew's vanilla Clang.
Using the `clang` compiler is always recommended. It is possible to use Xcode's `AppleClang` or `gcc`, but this is strongly discouraged.

## Install Homebrew {#install-homebrew}

@@ -45,18 +51,6 @@ git clone --recursive git@github.com:ClickHouse/ClickHouse.git

## Build ClickHouse {#build-clickhouse}

To build using Xcode's native AppleClang compiler:

``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
cd ..
```

To build using Homebrew's vanilla Clang compiler:

``` bash
@@ -69,7 +63,19 @@ cmake --build . --config RelWithDebInfo
cd ..
```

To build using Homebrew's vanilla GCC compiler:
To build using Xcode's native AppleClang compiler (this option is strongly discouraged; prefer the option above):

``` bash
cd ClickHouse
rm -rf build
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo ..
cmake --build . --config RelWithDebInfo
cd ..
```

To build using Homebrew's vanilla GCC compiler (this option is strongly discouraged and kept only for completeness):

``` bash
cd ClickHouse
@ -35,6 +35,8 @@ The [system.clusters](../../operations/system-tables/clusters.md) system table c
|
||||
|
||||
When creating a new replica of the database, this replica creates tables by itself. If the replica has been unavailable for a long time and has lagged behind the replication log — it checks its local metadata with the current metadata in ZooKeeper, moves the extra tables with data to a separate non-replicated database (so as not to accidentally delete anything superfluous), creates the missing tables, updates the table names if they have been renamed. The data is replicated at the `ReplicatedMergeTree` level, i.e. if the table is not replicated, the data will not be replicated (the database is responsible only for metadata).
|
||||
|
||||
[`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) queries are allowed but not replicated. The database engine will only add/fetch/remove the partition/part to the current replica. However, if the table itself uses a Replicated table engine, then the data will be replicated after using `ATTACH`.
|
||||
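
A minimal sketch, assuming a hypothetical Replicated database `my_replicated_db` with a table `events` partitioned by month (both names are illustrative):

``` sql
-- Partition manipulation affects only the replica the query is run on.
ALTER TABLE my_replicated_db.events DETACH PARTITION 202110;   -- not replicated to other replicas
-- The re-attached data spreads to other replicas only if the table itself
-- uses a Replicated* MergeTree engine.
ALTER TABLE my_replicated_db.events ATTACH PARTITION 202110;
```
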
|
||||
## Usage Example {#usage-example}
|
||||
|
||||
Creating a cluster with three hosts:
|
||||
|
@ -36,7 +36,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name
|
||||
|
||||
Create a table in ClickHouse which allows reading data from the MongoDB collection:
|
||||
|
||||
``` text
|
||||
``` sql
|
||||
CREATE TABLE mongo_table
|
||||
(
|
||||
key UInt64,
|
||||
@ -46,7 +46,7 @@ CREATE TABLE mongo_table
|
||||
|
||||
To read from an SSL secured MongoDB server:
|
||||
|
||||
``` text
|
||||
``` sql
|
||||
CREATE TABLE mongo_table_ssl
|
||||
(
|
||||
key UInt64,
|
||||
|
@ -27,11 +27,10 @@ It is recommended to use official pre-compiled `deb` packages for Debian or Ubun
|
||||
{% include 'install/deb.sh' %}
|
||||
```
|
||||
|
||||
You can replace `stable` with `lts` or `testing` to use different [“release trains”](../faq/operations/production.md) based on your needs.
|
||||
You can replace `stable` with `lts` or `testing` to use different [release trains](../faq/operations/production.md) based on your needs.
|
||||
|
||||
You can also download and install packages manually from [here](https://repo.clickhouse.com/deb/stable/main/).
|
||||
|
||||
|
||||
#### Packages {#packages}
|
||||
|
||||
- `clickhouse-common-static` — Installs ClickHouse compiled binary files.
|
||||
|
@ -16,10 +16,13 @@ The supported formats are:
|
||||
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
|
||||
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
|
||||
| [Template](#format-template) | ✔ | ✔ |
|
||||
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
|
||||
| [CSV](#csv) | ✔ | ✔ |
|
||||
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
|
||||
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
|
||||
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
|
||||
| [Values](#data-format-values) | ✔ | ✔ |
|
||||
| [Vertical](#vertical) | ✗ | ✔ |
|
||||
@ -33,8 +36,10 @@ The supported formats are:
|
||||
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
|
||||
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
|
||||
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
|
||||
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
|
||||
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
|
||||
| [TSKV](#tskv) | ✔ | ✔ |
|
||||
| [Pretty](#pretty) | ✗ | ✔ |
|
||||
@ -51,6 +56,7 @@ The supported formats are:
|
||||
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
|
||||
| [ORC](#data-format-orc) | ✔ | ✔ |
|
||||
| [RowBinary](#rowbinary) | ✔ | ✔ |
|
||||
| [RowBinaryWithNames](#rowbinarywithnames) | ✔ | ✔ |
|
||||
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||
| [Native](#native) | ✔ | ✔ |
|
||||
| [Null](#null) | ✗ | ✔ |
|
||||
@ -127,6 +133,9 @@ Arrays are written as a list of comma-separated values in square brackets. Numbe
|
||||
[NULL](../sql-reference/syntax.md) is formatted according to setting [format_tsv_null_representation](../operations/settings/settings.md#settings-format_tsv_null_representation) (default value is `\N`).
|
||||
|
||||
|
||||
If setting [input_format_tsv_empty_as_default](../operations/settings/settings.md#settings-input_format_tsv_empty_as_default) is enabled,
|
||||
empty input fields are replaced with default values. For complex default expressions [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too.
|
||||
|
||||
Each element of [Nested](../sql-reference/data-types/nested-data-structures/nested.md) structures is represented as array.
|
||||
|
||||
For example:
|
||||
@ -165,18 +174,35 @@ This format is also available under the name `TSVRaw`.
|
||||
## TabSeparatedWithNames {#tabseparatedwithnames}
|
||||
|
||||
Differs from the `TabSeparated` format in that the column names are written in the first row.
|
||||
During parsing, the first row is completely ignored. You can’t use column names to determine their position or to check their correctness.
|
||||
(Support for parsing the header row may be added in the future.)
|
||||
If setting [input_format_with_names_use_header](../operations/settings/settings.md#settings-input_format_with_names_use_header) is set to 1,
|
||||
the columns from the input data will be mapped to the columns of the table by their names; columns with unknown names will be skipped if the setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
|
||||
This format is also available under the name `TSVWithNames`.
|
||||
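
A minimal output sketch (the column aliases are arbitrary) showing the header row produced by this format:

``` sql
SELECT number AS id, concat('value_', toString(number)) AS value
FROM numbers(2)
FORMAT TSVWithNames

-- Expected output (fields are tab-separated):
-- id      value
-- 0       value_0
-- 1       value_1
```
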
|
||||
## TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes}
|
||||
|
||||
Differs from the `TabSeparated` format in that the column names are written to the first row, while the column types are in the second row.
|
||||
During parsing, the first and second rows are completely ignored.
|
||||
The first row with names is processed the same way as in `TabSeparatedWithNames` format.
|
||||
If setting [input_format_with_types_use_header](../operations/settings/settings.md#settings-input_format_with_types_use_header) is set to 1,
|
||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||
|
||||
This format is also available under the name `TSVWithNamesAndTypes`.
|
||||
|
||||
## TabSeparatedRawWithNames {#tabseparatedrawwithnames}
|
||||
|
||||
Differs from `TabSeparatedWithNames` format in that the rows are written without escaping.
|
||||
When parsing with this format, tabs and line feeds are not allowed inside fields.
|
||||
|
||||
This format is also available under the name `TSVRawWithNames`.
|
||||
|
||||
## TabSeparatedRawWithNamesAndTypes {#tabseparatedrawwithnamesandtypes}
|
||||
|
||||
Differs from `TabSeparatedWithNamesAndTypes` format in that the rows are written without escaping.
|
||||
When parsing with this format, tabs and line feeds are not allowed inside fields.
|
||||
|
||||
This format is also available under the name `TSVRawWithNamesAndTypes`.
|
||||
|
||||
## Template {#format-template}
|
||||
|
||||
This format allows specifying a custom format string with placeholders for values with a specified escaping rule.
|
||||
@ -197,7 +223,7 @@ where `delimiter_i` is a delimiter between values (`$` symbol can be escaped as
|
||||
- `Raw` (without escaping, similarly to `TSVRaw`)
|
||||
- `None` (no escaping rule, see further)
|
||||
|
||||
If an escaping rule is omitted, then `None` will be used. `XML` and `Raw` are suitable only for output.
|
||||
If an escaping rule is omitted, then `None` will be used. `XML` is suitable only for output.
|
||||
|
||||
So, for the following format string:
|
||||
|
||||
@ -377,9 +403,8 @@ $ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FOR
|
||||
|
||||
When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, the leading and trailing spaces and tabs are ignored. For line breaks, Unix (LF), Windows (CR LF) and Mac OS Classic (CR) types are all supported.
|
||||
|
||||
Empty unquoted input values are replaced with default values for the respective columns, if
|
||||
[input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)
|
||||
is enabled.
|
||||
If setting [input_format_csv_empty_as_default](../operations/settings/settings.md#settings-input_format_csv_empty_as_default) is enabled,
|
||||
empty unquoted input values are replaced with default values. For complex default expressions [input_format_defaults_for_omitted_fields](../operations/settings/settings.md#settings-input_format_defaults_for_omitted_fields) must be enabled too.
|
||||
|
||||
`NULL` is formatted according to setting [format_csv_null_representation](../operations/settings/settings.md#settings-format_csv_null_representation) (default value is `\N`).
|
||||
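
A minimal sketch of overriding the `NULL` representation for CSV output using the setting referenced above (the column aliases are arbitrary):

``` sql
SELECT NULL AS x, 1 AS y
SETTINGS format_csv_null_representation = 'NULL'
FORMAT CSV

-- Expected output:
-- NULL,1
```
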
|
||||
@ -387,7 +412,11 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
|
||||
|
||||
## CSVWithNames {#csvwithnames}
|
||||
|
||||
Also prints the header row, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
Also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
|
||||
## CSVWithNamesAndTypes {#csvwithnamesandtypes}
|
||||
|
||||
Also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
## CustomSeparated {#format-customseparated}
|
||||
|
||||
@ -659,10 +688,21 @@ Differs from `JSONEachRow`/`JSONStringsEachRow` in that ClickHouse will also yie
|
||||
{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}}
|
||||
```
|
||||
|
||||
## JSONCompactEachRowWithNames {#jsoncompacteachrowwithnames}
|
||||
|
||||
Differs from `JSONCompactEachRow` format in that it also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
|
||||
## JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes}
|
||||
|
||||
Differs from `JSONCompactEachRow` format in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
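
A minimal sketch (the column aliases are arbitrary) showing the two header rows produced by this format:

``` sql
SELECT toUInt32(number) AS n, toString(number) AS s
FROM numbers(2)
FORMAT JSONCompactEachRowWithNamesAndTypes

-- Expected output:
-- ["n", "s"]
-- ["UInt32", "String"]
-- [0, "0"]
-- [1, "1"]
```
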
|
||||
## JSONCompactStringsEachRowWithNames {#jsoncompactstringseachrowwithnames}
|
||||
|
||||
Differs from `JSONCompactStringsEachRow` in that it also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
|
||||
## JSONCompactStringsEachRowWithNamesAndTypes {#jsoncompactstringseachrowwithnamesandtypes}
|
||||
|
||||
Differs from `JSONCompactEachRow`/`JSONCompactStringsEachRow` in that the column names and types are written as the first two rows.
|
||||
Differs from `JSONCompactStringsEachRow` in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
```json
|
||||
["'hello'", "multiply(42, number)", "range(5)"]
|
||||
@ -705,7 +745,7 @@ CREATE TABLE IF NOT EXISTS example_table
|
||||
- If `input_format_defaults_for_omitted_fields = 1`, then the default value for `x` equals `0`, but the default value of `a` equals `x * 2`.
|
||||
|
||||
!!! note "Warning"
|
||||
When inserting data with `insert_sample_with_metadata = 1`, ClickHouse consumes more computational resources, compared to insertion with `insert_sample_with_metadata = 0`.
|
||||
When inserting data with `input_format_defaults_for_omitted_fields = 1`, ClickHouse consumes more computational resources, compared to insertion with `input_format_defaults_for_omitted_fields = 0`.
|
||||
|
||||
### Selecting Data {#selecting-data}
|
||||
|
||||
@ -912,6 +952,13 @@ Array is represented as a varint length (unsigned [LEB128](https://en.wikipedia.
|
||||
|
||||
For [NULL](../sql-reference/syntax.md#null-literal) support, an additional byte containing 1 or 0 is added before each [Nullable](../sql-reference/data-types/nullable.md) value. If 1, then the value is `NULL` and this byte is interpreted as a separate value. If 0, the value after the byte is not `NULL`.
|
||||
|
||||
## RowBinaryWithNames {#rowbinarywithnames}
|
||||
|
||||
Similar to [RowBinary](#rowbinary), but with added header:
|
||||
|
||||
- [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
|
||||
- N `String`s specifying column names
|
||||
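
A minimal sketch (the column aliases are arbitrary); the output is binary, so it is normally consumed by a client program rather than read in a terminal:

``` sql
-- The binary output begins with the LEB128-encoded number of columns (2 here)
-- followed by the column names, then the rows in RowBinary encoding.
SELECT toUInt32(number) AS id, toString(number) AS value
FROM numbers(3)
FORMAT RowBinaryWithNames
```
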
|
||||
## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}
|
||||
|
||||
Similar to [RowBinary](#rowbinary), but with added header:
|
||||
|
14
docs/en/interfaces/third-party/gui.md
vendored
14
docs/en/interfaces/third-party/gui.md
vendored
@ -129,6 +129,20 @@ Features:
|
||||
- Support monitor (processor, connection, query)
|
||||
- Support migrate data
|
||||
|
||||
### Bytebase {#bytebase}
|
||||
|
||||
[Bytebase](https://bytebase.com) is a web-based, open source schema change and version control tool for teams. It supports various databases including ClickHouse.
|
||||
|
||||
Features:
|
||||
|
||||
- Schema review between developers and DBAs.
|
||||
- Database-as-Code: version-control the schema in a VCS such as GitLab and trigger the deployment upon code commit.
|
||||
- Streamlined deployment with per-environment policy.
|
||||
- Full migration history.
|
||||
- Schema drift detection.
|
||||
- Backup and restore.
|
||||
- RBAC.
|
||||
|
||||
## Commercial {#commercial}
|
||||
|
||||
### DataGrip {#datagrip}
|
||||
|
@ -22,17 +22,21 @@ toc_title: Adopters
|
||||
| <a href="https://apiroad.net/" class="favicon">ApiRoad</a> | API marketplace | Analytics | — | — | [Blog post, Nov 2018, Mar 2020](https://pixeljets.com/blog/clickhouse-vs-elasticsearch/) |
|
||||
| <a href="https://www.appsflyer.com" class="favicon">Appsflyer</a> | Mobile analytics | Main product | — | — | [Talk in Russian, July 2019](https://www.youtube.com/watch?v=M3wbRlcpBbY) |
|
||||
| <a href="https://arenadata.tech/" class="favicon">ArenaData</a> | Data Platform | Main product | — | — | [Slides in Russian, December 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup38/indexes.pdf) |
|
||||
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |
|
||||
| <a href="https://avito.ru/" class="favicon">Avito</a> | Classifieds | Monitoring | — | — | [Meetup, April 2020](https://www.youtube.com/watch?v=n1tm4j4W8ZQ) |
|
||||
| <a href="https://badoo.com" class="favicon">Badoo</a> | Dating | Timeseries | — | — | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/forecast.pdf) |
|
||||
| <a href="https://badoo.com" class="favicon">Badoo</a> | Dating | Timeseries | — | 1.6 mln events/sec (2018) | [Slides in Russian, December 2019](https://presentations.clickhouse.com/meetup38/forecast.pdf) |
|
||||
| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |
|
||||
| <a href="https://www.benocs.com/" class="favicon">Benocs</a> | Network Telemetry and Analytics | Main Product | — | — | [Slides in English, October 2017](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup9/lpm.pdf) |
|
||||
| <a href="https://www.bigo.sg/" class="favicon">BIGO</a> | Video | Computing Platform | — | — | [Blog Article, August 2020](https://www.programmersought.com/article/44544895251/) |
|
||||
| <a href="https://www.bloomberg.com/">Bloomberg</a> | Finance, Media | Monitoring | — | — | [Slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
|
||||
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
|
||||
| <a href="https://www.bloomberg.com/">Bloomberg</a> | Finance, Media | Monitoring | — | — | [Job opening, September 2021](https://careers.bloomberg.com/job/detail/94913), [slides, May 2018](https://www.slideshare.net/Altinity/http-analytics-for-6m-requests-per-second-using-clickhouse-by-alexander-bocharov) |
|
||||
| <a href="https://bloxy.info" class="favicon">Bloxy</a> | Blockchain | Analytics | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/4_bloxy.pptx) |
|
||||
| <a href="https://www.bytedance.com" class="favicon">Bytedance</a> | Social platforms | — | — | — | [The ClickHouse Meetup East, October 2020](https://www.youtube.com/watch?v=ckChUkC3Pns) |
|
||||
| <a href="https://cardsmobile.ru/" class="favicon">CardsMobile</a> | Finance | Analytics | — | — | [VC.ru](https://vc.ru/s/cardsmobile/143449-rukovoditel-gruppy-analiza-dannyh) |
|
||||
| <a href="https://carto.com/" class="favicon">CARTO</a> | Business Intelligence | Geo analytics | — | — | [Geospatial processing with ClickHouse](https://carto.com/blog/geospatial-processing-with-clickhouse/) |
|
||||
| <a href="http://public.web.cern.ch/public/" class="favicon">CERN</a> | Research | Experiment | — | — | [Press release, April 2012](https://www.yandex.com/company/press_center/press_releases/2012/2012-04-10/) |
|
||||
| <a href="https://www.checklyhq.com/" class="favicon">Checkly</a> | Software Development | Analytics | — | — | [Tweet, October 2021](https://twitter.com/tim_nolet/status/1445810665743081474?s=20) |
|
||||
| <a href="https://chelpipegroup.com/" class="favicon">ChelPipe Group</a> | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) |
|
||||
| <a href="http://cisco.com/" class="favicon">Cisco</a> | Networking | Traffic analysis | — | — | [Lightning talk, October 2019](https://youtu.be/-hI1vDR2oPY?t=5057) |
|
||||
| <a href="https://www.citadelsecurities.com/" class="favicon">Citadel Securities</a> | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
|
||||
| <a href="https://city-mobil.ru" class="favicon">Citymobil</a> | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
|
||||
@ -48,6 +52,7 @@ toc_title: Adopters
|
||||
| <a href="https://db.com" class="favicon">Deutsche Bank</a> | Finance | BI Analytics | — | — | [Slides in English, October 2019](https://bigdatadays.ru/wp-content/uploads/2019/10/D2-H3-3_Yakunin-Goihburg.pdf) |
|
||||
| <a href="https://deeplay.io/eng/" class="favicon">Deeplay</a> | Gaming Analytics | — | — | — | [Job advertisement, 2020](https://career.habr.com/vacancies/1000062568) |
|
||||
| <a href="https://www.diva-e.com" class="favicon">Diva-e</a> | Digital consulting | Main Product | — | — | [Slides in English, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup29/ClickHouse-MeetUp-Unusual-Applications-sd-2019-09-17.pdf) |
|
||||
| <a href="https://ecommpay.com/" class="favicon">Ecommpay</a> | Payment Processing | Logs | — | — | [Video, Nov 2019](https://www.youtube.com/watch?v=d3GdZTOWGLk) |
|
||||
| <a href="https://www.ecwid.com/" class="favicon">Ecwid</a> | E-commerce SaaS | Metrics, Logging | — | — | [Slides in Russian, April 2019](https://nastachku.ru/var/files/1/presentation/backend/2_Backend_6.pdf) |
|
||||
| <a href="https://www.ebay.com/" class="favicon">eBay</a> | E-commerce | Logs, Metrics and Events | — | — | [Official website, Sep 2020](https://tech.ebayinc.com/engineering/ou-online-analytical-processing/) |
|
||||
| <a href="https://www.exness.com/" class="favicon">Exness</a> | Trading | Metrics, Logging | — | — | [Talk in Russian, May 2019](https://youtu.be/_rpU-TvSfZ8?t=3215) |
|
||||
@ -57,9 +62,11 @@ toc_title: Adopters
|
||||
| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | 14 bn records/day as of Jan 2021 | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
|
||||
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
|
||||
| <a href="https://www.genotek.ru/" class="favicon">Genotek</a> | Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) |
|
||||
| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
|
||||
| <a href="https://glaber.io/" class="favicon">Glaber</a> | Monitoring | Main product | — | — | [Website](https://glaber.io/) |
|
||||
| <a href="https://graphcdn.io/" class="favicon">GraphCDN</a> | CDN | Traffic Analytics | — | — | [Blog Post in English, August 2021](https://altinity.com/blog/delivering-insight-on-graphql-apis-with-clickhouse-at-graphcdn/) |
|
||||
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
|
||||
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
|
||||
| <a href="https://www.the-ica.com/" class="favicon">ICA</a> | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) |
|
||||
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.com/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |
|
||||
| <a href="https://infobaleen.com" class="favicon">Infobaleen</a> | AI markting tool | Analytics | — | — | [Official site](https://infobaleen.com) |
|
||||
@ -71,9 +78,11 @@ toc_title: Adopters
|
||||
| <a href="https://ippon.tech" class="favicon">Ippon Technologies</a> | Technology Consulting | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=205) |
|
||||
| <a href="https://www.ivi.ru/" class="favicon">Ivi</a> | Online Cinema | Analytics, Monitoring | — | — | [Article in Russian, Jan 2018](https://habr.com/en/company/ivi/blog/347408/) |
|
||||
| <a href="https://jinshuju.net" class="favicon">Jinshuju 金数据</a> | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
|
||||
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
|
||||
| <a href="https://www.kodiakdata.com/" class="favicon">Kodiak Data</a> | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
|
||||
| <a href="https://kontur.ru" class="favicon">Kontur</a> | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
|
||||
| <a href="https://www.kuaishou.com/" class="favicon">Kuaishou</a> | Video | — | — | — | [ClickHouse Meetup, October 2018](https://clickhouse.com/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/) |
|
||||
| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
|
||||
| <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
|
||||
| <a href="https://lifestreet.com/" class="favicon">LifeStreet</a> | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
|
||||
| <a href="https://mcs.mail.ru/" class="favicon">Mail.ru Cloud Solutions</a> | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
|
||||
@ -88,7 +97,10 @@ toc_title: Adopters
|
||||
| <a href="https://www.netskope.com/" class="favicon">Netskope</a> | Network Security | — | — | — | [Job advertisement, March 2021](https://www.mendeley.com/careers/job/senior-software-developer-backend-developer-1346348) |
|
||||
| <a href="https://niclabs.cl/" class="favicon">NIC Labs</a> | Network Monitoring | RaTA-DNS | — | — | [Blog post, March 2021](https://niclabs.cl/ratadns/2021/03/Clickhouse) |
|
||||
| <a href="https://getnoc.com/" class="favicon">NOC Project</a> | Network Monitoring | Analytics | Main Product | — | [Official Website](https://getnoc.com/features/big-data/) |
|
||||
| <a href="https://www.noction.com" class="favicon">Noction</a> | Network Technology | Main Product | — | — | [Official Website](https://www.noction.com/news/irp-3-11-remote-triggered-blackholing-capability)
|
||||
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
|
||||
| <a href="https://ok.ru" class="favicon">Ok.ru</a> | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, Oct 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) |
|
||||
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |
|
||||
| <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
|
||||
| <a href="https://corp.ozon.com/" class="favicon">OZON</a> | E-commerce | — | — | — | [Official website](https://job.ozon.ru/vacancy/razrabotchik-clickhouse-ekspluatatsiya-40991870/) |
|
||||
| <a href="https://panelbear.com/" class="favicon">Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
|
||||
@ -103,6 +115,7 @@ toc_title: Adopters
|
||||
| <a href="https://qrator.net" class="favicon">Qrator</a> | DDoS protection | Main product | — | — | [Blog Post, March 2019](https://blog.qrator.net/en/clickhouse-ddos-mitigation_37/) |
|
||||
| <a href="https://www.rbinternational.com/" class="favicon">Raiffeisenbank</a> | Banking | Analytics | — | — | [Lecture in Russian, December 2020](https://cs.hse.ru/announcements/421965599.html) |
|
||||
| <a href="https://rambler.ru" class="favicon">Rambler</a> | Internet services | Analytics | — | — | [Talk in Russian, April 2018](https://medium.com/@ramblertop/разработка-api-clickhouse-для-рамблер-топ-100-f4c7e56f3141) |
|
||||
| <a href="https://replicahq.com" class="favicon">Replica</a> | Urban Planning | Analytics | — | — | [Job advertisement](https://boards.greenhouse.io/replica/jobs/5547732002?gh_jid=5547732002) |
|
||||
| <a href="https://retell.cc/" class="favicon">Retell</a> | Speech synthesis | Analytics | — | — | [Blog Article, August 2020](https://vc.ru/services/153732-kak-sozdat-audiostati-na-vashem-sayte-i-zachem-eto-nuzhno) |
|
||||
| <a href="https://www.rollbar.com" class="favicon">Rollbar</a> | Software Development | Main Product | — | — | [Official Website](https://www.rollbar.com) |
|
||||
| <a href="https://rspamd.com/" class="favicon">Rspamd</a> | Antispam | Analytics | — | — | [Official Website](https://rspamd.com/doc/modules/clickhouse.html) |
|
||||
@ -116,6 +129,7 @@ toc_title: Adopters
|
||||
| <a href="https://sentry.io/" class="favicon">Sentry</a> | Software Development | Main product | — | — | [Blog Post in English, May 2019](https://blog.sentry.io/2019/05/16/introducing-snuba-sentrys-new-search-infrastructure) |
|
||||
| <a href="https://seo.do/" class="favicon">seo.do</a> | Analytics | Main product | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/CH%20Presentation-%20Metehan%20Çetinkaya.pdf) |
|
||||
| <a href="http://www.sgk.gov.tr/wps/portal/sgk/tr" class="favicon">SGK</a> | Government Social Security | Analytics | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup35/ClickHouse%20Meetup-Ramazan%20POLAT.pdf) |
|
||||
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
|
||||
| <a href="http://english.sina.com/index.html" class="favicon">Sina</a> | News | — | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/6.%20ClickHouse最佳实践%20高鹏_新浪.pdf) |
|
||||
| <a href="https://www.sipfront.com/" class="favicon">Sipfront</a> | Software Development | Analytics | — | — | [Tweet, October 2021](https://twitter.com/andreasgranig/status/1446404332337913895?s=20) |
|
||||
| <a href="https://smi2.ru/" class="favicon">SMI2</a> | News | Analytics | — | — | [Blog Post in Russian, November 2017](https://habr.com/ru/company/smi2/blog/314558/) |
|
||||
@ -129,12 +143,14 @@ toc_title: Adopters
|
||||
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Big Data | Data processing | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/5.%20ClickHouse大数据集群应用_李俊飞腾讯网媒事业部.pdf) |
|
||||
| <a href="https://www.tencent.com" class="favicon">Tencent</a> | Messaging | Logging | — | — | [Talk in Chinese, November 2019](https://youtu.be/T-iVQRuw-QY?t=5050) |
|
||||
| <a href="https://www.tencentmusic.com/" class="favicon">Tencent Music Entertainment (TME)</a> | BigData | Data processing | — | — | [Blog in Chinese, June 2020](https://cloud.tencent.com/developer/article/1637840) |
|
||||
| <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
|
||||
| <a href="https://timeflow.systems" class="favicon">Timeflow</a> | Software | Analytics | — | — | [Blog](https://timeflow.systems/why-we-moved-from-druid-to-clickhouse/ ) |
|
||||
| <a href="https://www.tinybird.co/" class="favicon">Tinybird</a> | Real-time Data Products | Data processing | — | — | [Official website](https://www.tinybird.co/) |
|
||||
| <a href="https://trafficstars.com/" class="favicon">Traffic Stars</a> | AD network | — | — | — | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
|
||||
| <a href="https://trafficstars.com/" class="favicon">Traffic Stars</a> | AD network | — | 300 servers in Europe/US | 1.8 PiB, 700 000 insert rps (as of 2021) | [Slides in Russian, May 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup15/lightning/ninja.pdf) |
|
||||
| <a href="https://www.uber.com" class="favicon">Uber</a> | Taxi | Logging | — | — | [Slides, February 2020](https://presentations.clickhouse.com/meetup40/uber.pdf) |
|
||||
| <a href="https://hello.utmstat.com/" class="favicon">UTMSTAT</a> | Analytics | Main product | — | — | [Blog post, June 2020](https://vc.ru/tribuna/133956-striming-dannyh-iz-servisa-skvoznoy-analitiki-v-clickhouse) |
|
||||
| <a href="https://vk.com" class="favicon">VKontakte</a> | Social Network | Statistics, Logging | — | — | [Slides in Russian, August 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup17/3_vk.pdf) |
|
||||
| <a href="https://www.vmware.com/" class="favicon">VMWare</a> | Cloud | VeloCloud, SDN | — | — | [Product documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.3/com.vmware.vcom.metrics.doc/GUID-A9AD72E1-C948-4CA2-971B-919385AB3CA8.html) |
|
||||
| <a href="https://www.vmware.com/" class="favicon">VMware</a> | Cloud | VeloCloud, SDN | — | — | [Product documentation](https://docs.vmware.com/en/vRealize-Operations-Manager/8.3/com.vmware.vcom.metrics.doc/GUID-A9AD72E1-C948-4CA2-971B-919385AB3CA8.html) |
|
||||
| <a href="https://www.walmartlabs.com/" class="favicon">Walmart Labs</a> | Internet, Retail | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=144) |
|
||||
| <a href="https://wargaming.com/en/" class="favicon">Wargaming</a> | Games | | — | — | [Interview](https://habr.com/en/post/496954/) |
|
||||
| <a href="https://www.wildberries.ru/" class="favicon">Wildberries</a> | E-commerce | | — | — | [Official website](https://it.wildberries.ru/) |
|
||||
@ -148,24 +164,13 @@ toc_title: Adopters
|
||||
| <a href="https://market.yandex.ru/" class="favicon">Yandex Market</a> | e-Commerce | Metrics, Logging | — | — | [Talk in Russian, January 2019](https://youtu.be/_l1qP0DyBcA?t=478) |
|
||||
| <a href="https://metrica.yandex.com" class="favicon">Yandex Metrica</a> | Web analytics | Main product | 630 servers in one cluster, 360 servers in another cluster, 1862 servers in one department | 133 PiB / 8.31 PiB / 120 trillion records | [Slides, February 2020](https://presentations.clickhouse.com/meetup40/introduction/#13) |
|
||||
| <a href="https://www.yotascale.com/" class="favicon">Yotascale</a> | Cloud | Data pipeline | — | 2 bn records/day | [LinkedIn (Accomplishments)](https://www.linkedin.com/in/adilsaleem/) |
|
||||
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
|
||||
| <a href="https://htc-cs.ru/" class="favicon">ЦВТ</a> | Software Development | Metrics, Logging | — | — | [Blog Post, March 2019, in Russian](https://vc.ru/dev/62715-kak-my-stroili-monitoring-na-prometheus-clickhouse-i-elk) |
|
||||
| <a href="https://mkb.ru/" class="favicon">МКБ</a> | Bank | Web-system monitoring | — | — | [Slides in Russian, September 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup28/mkb.pdf) |
|
||||
| <a href="https://cft.ru/" class="favicon">ЦФТ</a> | Banking, Financial products, Payments | — | — | — | [Meetup in Russian, April 2020](https://team.cft.ru/events/162) |
|
||||
| <a href="https://promo.croc.ru/digitalworker" class="favicon">Цифровой Рабочий</a> | Industrial IoT, Analytics | — | — | — | [Blog post in Russian, March 2021](https://habr.com/en/company/croc/blog/548018/) |
|
||||
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
|
||||
| <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
|
||||
| <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
|
||||
| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
|
||||
| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
|
||||
| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
|
||||
| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
|
||||
| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |
|
||||
| <a href="https://signoz.io/" class="favicon">SigNoz</a> | Observability Platform | Main Product | — | — | [Source code](https://github.com/SigNoz/signoz) |
|
||||
| <a href="https://chelpipegroup.com/" class="favicon">ChelPipe Group</a> | Analytics | — | — | — | [Blog post, June 2021](https://vc.ru/trade/253172-tyazhelomu-proizvodstvu-user-friendly-sayt-internet-magazin-trub-dlya-chtpz) |
|
||||
| <a href="https://zagravagames.com/en/" class="favicon">Zagrava Trading</a> | — | — | — | — | [Job offer, May 2021](https://twitter.com/datastackjobs/status/1394707267082063874) |
|
||||
| <a href="https://beeline.ru/" class="favicon">Beeline</a> | Telecom | Data Platform | — | — | [Blog post, July 2021](https://habr.com/en/company/beeline/blog/567508/) |
|
||||
| <a href="https://ecommpay.com/" class="favicon">Ecommpay</a> | Payment Processing | Logs | — | — | [Video, Nov 2019](https://www.youtube.com/watch?v=d3GdZTOWGLk) |
|
||||
| <a href="https://omnicomm.ru/" class="favicon">Omnicomm</a> | Transportation Monitoring | — | — | — | [Facebook post, Oct 2021](https://www.facebook.com/OmnicommTeam/posts/2824479777774500) |
|
||||
| <a href="https://ok.ru" class="favicon">Ok.ru</a> | Social Network | — | 72 servers | 810 TB compressed, 50bn rows/day, 1.5 TB/day | [SmartData conference, Oct 2021](https://assets.ctfassets.net/oxjq45e8ilak/4JPHkbJenLgZhBGGyyonFP/57472ec6987003ec4078d0941740703b/____________________ClickHouse_______________________.pdf) |
|
||||
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
|
||||
| <a href="https://www.deepl.com/" class="favicon">Deepl</a> | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) |
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->
|
||||
|
@ -7,7 +7,7 @@ toc_title: Configuration Files
|
||||
|
||||
ClickHouse supports multi-file configuration management. The main server configuration file is `/etc/clickhouse-server/config.xml` or `/etc/clickhouse-server/config.yaml`. Other files must be in the `/etc/clickhouse-server/config.d` directory. Note that any configuration file can be written either in XML or YAML, but mixing formats in one file is not supported. For example, you can have main configs as `config.xml` and `users.xml` and write additional files in `config.d` and `users.d` directories in `.yaml`.
|
||||
|
||||
All XML files should have the same root element, usually `<yandex>`. As for YAML, `yandex:` should not be present, the parser will insert it automatically.
|
||||
All XML files should have the same root element, usually `<clickhouse>`. As for YAML, `clickhouse:` should not be present, the parser will insert it automatically.
|
||||
|
||||
## Override {#override}
|
||||
|
||||
@ -21,13 +21,13 @@ Some settings specified in the main configuration file can be overridden in othe
|
||||
You can also declare attributes as coming from environment variables by using `from_env="VARIABLE_NAME"`:
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<macros>
|
||||
<replica from_env="REPLICA" />
|
||||
<layer from_env="LAYER" />
|
||||
<shard from_env="SHARD" />
|
||||
</macros>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## Substitution {#substitution}
|
||||
@ -39,7 +39,7 @@ If you want to replace an entire element with a substitution use `include` as el
|
||||
XML substitution example:
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- Appends XML subtree found at `/profiles-in-zookeeper` ZK path to `<profiles>` element. -->
|
||||
<profiles from_zk="/profiles-in-zookeeper" />
|
||||
|
||||
@ -48,7 +48,7 @@ XML substitution example:
|
||||
<include from_zk="/users-in-zookeeper" />
|
||||
<include from_zk="/other-users-in-zookeeper" />
|
||||
</users>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.
|
||||
@ -72,7 +72,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
|
||||
```
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<users>
|
||||
<alice>
|
||||
<profile>analytics</profile>
|
||||
@ -83,7 +83,7 @@ $ cat /etc/clickhouse-server/users.d/alice.xml
|
||||
<quota>analytics</quota>
|
||||
</alice>
|
||||
</users>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## YAML examples {#example}
|
||||
|
@ -23,32 +23,32 @@ To enable Kerberos, one should include `kerberos` section in `config.xml`. This
|
||||
Example (goes into `config.xml`):
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<kerberos />
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
With principal specification:
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<kerberos>
|
||||
<principal>HTTP/clickhouse.example.com@EXAMPLE.COM</principal>
|
||||
</kerberos>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
With filtering by realm:
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<kerberos>
|
||||
<realm>EXAMPLE.COM</realm>
|
||||
</kerberos>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
!!! warning "Note"
|
||||
@ -80,7 +80,7 @@ Parameters:
|
||||
Example (goes into `users.xml`):
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<users>
|
||||
<!-- ... -->
|
||||
@ -91,7 +91,7 @@ Example (goes into `users.xml`):
|
||||
</kerberos>
|
||||
</my_user>
|
||||
</users>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
!!! warning "Warning"
|
||||
|
@ -14,7 +14,7 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`.
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<ldap_servers>
|
||||
<!-- Typical LDAP server. -->
|
||||
@ -45,7 +45,7 @@ To define LDAP server you must add `ldap_servers` section to the `config.xml`.
|
||||
<enable_tls>no</enable_tls>
|
||||
</my_ad_server>
|
||||
</ldap_servers>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Note that you can define multiple LDAP servers inside the `ldap_servers` section using distinct names.
|
||||
@ -90,7 +90,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<users>
|
||||
<!-- ... -->
|
||||
@ -101,7 +101,7 @@ At each login attempt, ClickHouse tries to "bind" to the specified DN defined by
|
||||
</ldap>
|
||||
</my_user>
|
||||
</users>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Note that the user `my_user` refers to `my_ldap_server`. This LDAP server must be configured in the main `config.xml` file as described previously.
|
||||
@ -125,7 +125,7 @@ At each login attempt, ClickHouse tries to find the user definition locally and
|
||||
Goes into `config.xml`.
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- ... -->
|
||||
<user_directories>
|
||||
<!-- Typical LDAP server. -->
|
||||
@ -156,7 +156,7 @@ Goes into `config.xml`.
|
||||
</role_mapping>
|
||||
</ldap>
|
||||
</user_directories>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Note that `my_ldap_server` referred to in the `ldap` section inside the `user_directories` section must be a previously defined LDAP server that is configured in the `config.xml` (see [LDAP Server Definition](#ldap-server-definition)).
|
||||
|
@ -23,7 +23,7 @@ chmod a+x ./hardware.sh
|
||||
./hardware.sh
|
||||
```
|
||||
|
||||
3. Copy the output and send it to clickhouse-feedback@yandex-team.com
|
||||
3. Copy the output and send it to feedback@clickhouse.com
|
||||
|
||||
All the results are published here: https://clickhouse.com/benchmark/hardware/
|
||||
|
||||
|
@ -69,6 +69,8 @@ If no conditions met for a data part, ClickHouse uses the `lz4` compression.
|
||||
</compression>
|
||||
```
|
||||
|
||||
<!--
|
||||
|
||||
## encryption {#server-settings-encryption}
|
||||
|
||||
Configures a command to obtain a key to be used by [encryption codecs](../../sql-reference/statements/create/table.md#create-query-encryption-codecs). Key (or keys) should be written in environment variables or set in the configuration file.
|
||||
@ -131,7 +133,7 @@ Also, users can add nonce that must be 12 bytes long (by default encryption and
|
||||
```xml
|
||||
<encryption_codecs>
|
||||
<aes_128_gcm_siv>
|
||||
<nonce>0123456789101</nonce>
|
||||
<nonce>012345678910</nonce>
|
||||
</aes_128_gcm_siv>
|
||||
</encryption_codecs>
|
||||
```
|
||||
@ -148,6 +150,8 @@ Or it can be set in hex:
|
||||
|
||||
Everything mentioned above can be applied for `aes_256_gcm_siv` (but the key must be 32 bytes long).
|
||||
|
||||
-->
|
||||
|
||||
## custom_settings_prefixes {#custom_settings_prefixes}
|
||||
|
||||
List of prefixes for [custom settings](../../operations/settings/index.md#custom_settings). The prefixes must be separated with commas.
|
||||
@ -482,16 +486,12 @@ Backlog (queue size of pending connections) of the listen socket.
|
||||
Default value: `4096` (as in linux [5.4+](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=19f92a030ca6d772ab44b22ee6a01378a8cb32d4)).
|
||||
|
||||
Usually this value does not need to be changed, since:
|
||||
- default value is large enough,
|
||||
- and for accepting client's connections server has separate thread.
|
||||
- the default value is large enough,
|
||||
- and the server has a separate thread for accepting client connections.
|
||||
|
||||
So even if you have `TcpExtListenOverflows` (from `nstat`) non zero and this
|
||||
counter grows for ClickHouse server it does not mean that this value need to be
|
||||
increased, since:
|
||||
- usually if 4096 is not enough it shows some internal ClickHouse scaling
|
||||
issue, so it is better to report an issue.
|
||||
- and it does not mean that the server can handle more connections later (and
|
||||
even if it can, clients can already goes away / disconnect).
|
||||
So even if `TcpExtListenOverflows` (from `nstat`) is non-zero and this counter grows for the ClickHouse server, it does not mean that this value needs to be increased, since:
|
||||
- usually if 4096 is not enough it shows some internal ClickHouse scaling issue, so it is better to report an issue.
|
||||
- and it does not mean that the server can handle more connections later (and even if it could, by that moment clients may be gone or disconnected).
|
||||
|
||||
Examples:
|
||||
|
||||
@ -643,7 +643,7 @@ On hosts with low RAM and swap, you possibly need setting `max_server_memory_usa
|
||||
|
||||
## max_concurrent_queries {#max-concurrent-queries}
|
||||
|
||||
The maximum number of simultaneously processed queries related to MergeTree table. Queries may be limited by other settings: [max_concurrent_queries_for_all_users](#max-concurrent-queries-for-all-users), [min_marks_to_honor_max_concurrent_queries](#min-marks-to-honor-max-concurrent-queries).
|
||||
The maximum number of simultaneously processed queries related to MergeTree tables. Queries may be limited by other settings: [max_concurrent_queries_for_user](#max-concurrent-queries-for-user), [max_concurrent_queries_for_all_users](#max-concurrent-queries-for-all-users), [min_marks_to_honor_max_concurrent_queries](#min-marks-to-honor-max-concurrent-queries).
|
||||
|
||||
!!! info "Note"
|
||||
These settings can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
@ -659,6 +659,21 @@ Possible values:
|
||||
<max_concurrent_queries>100</max_concurrent_queries>
|
||||
```
|
||||
|
||||
## max_concurrent_queries_for_user {#max-concurrent-queries-for-user}
|
||||
|
||||
The maximum number of simultaneously processed queries related to MergeTree tables, per user.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Disabled.
|
||||
|
||||
**Example**
|
||||
|
||||
``` xml
|
||||
<max_concurrent_queries_for_user>5</max_concurrent_queries_for_user>
|
||||
```
|
||||
|
||||
## max_concurrent_queries_for_all_users {#max-concurrent-queries-for-all-users}
|
||||
|
||||
An exception is thrown if the value of this setting is less than or equal to the current number of simultaneously processed queries.
|
||||
@ -771,14 +786,14 @@ It is enabled by default. If it`s not, you can do this manually.
|
||||
To manually turn on metrics history collection [`system.metric_log`](../../operations/system-tables/metric_log.md), create `/etc/clickhouse-server/config.d/metric_log.xml` with the following content:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<metric_log>
|
||||
<database>system</database>
|
||||
<table>metric_log</table>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
|
||||
</metric_log>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
**Disabling**
|
||||
@ -786,9 +801,9 @@ To manually turn on metrics history collection [`system.metric_log`](../../opera
|
||||
To disable `metric_log` setting, you should create the following file `/etc/clickhouse-server/config.d/disable_metric_log.xml` with the following content:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<metric_log remove="1" />
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## replicated_merge_tree {#server_configuration_parameters-replicated_merge_tree}
|
||||
@ -1024,7 +1039,7 @@ Parameters:
|
||||
|
||||
**Example**
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<text_log>
|
||||
<level>notice</level>
|
||||
<database>system</database>
|
||||
@ -1033,7 +1048,7 @@ Parameters:
|
||||
<!-- <partition_by>event_date</partition_by> -->
|
||||
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + INTERVAL 30 day</engine>
|
||||
</text_log>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
|
||||
@ -1275,6 +1290,7 @@ This section contains the following parameters:
|
||||
|
||||
- [Replication](../../engines/table-engines/mergetree-family/replication.md)
|
||||
- [ZooKeeper Programmer’s Guide](http://zookeeper.apache.org/doc/current/zookeeperProgrammers.html)
|
||||
- [Optional secured communication between ClickHouse and Zookeeper](../ssl-zookeeper.md#secured-communication-with-zookeeper)
|
||||
|
||||
## use_minimalistic_part_header_in_zookeeper {#server-settings-use_minimalistic_part_header_in_zookeeper}
|
||||
|
||||
|
@ -399,7 +399,7 @@ Default value: 1.
|
||||
|
||||
## input_format_defaults_for_omitted_fields {#session_settings-input_format_defaults_for_omitted_fields}
|
||||
|
||||
When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv) and [TabSeparated](../../interfaces/formats.md#tabseparated) formats.
|
||||
When performing `INSERT` queries, replace omitted input column values with default values of the respective columns. This option only applies to [JSONEachRow](../../interfaces/formats.md#jsoneachrow), [CSV](../../interfaces/formats.md#csv), [TabSeparated](../../interfaces/formats.md#tabseparated) formats and formats with `WithNames`/`WithNamesAndTypes` suffixes.
|
||||
|
||||
!!! note "Note"
|
||||
When this option is enabled, extended table metadata are sent from server to client. It consumes additional computing resources on the server and can reduce performance.
|
||||
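
A minimal sketch, assuming a hypothetical table in the `test` database whose column `b` has a default expression depending on `a`:

``` sql
CREATE TABLE test.omitted_fields (a Int32, b Int32 DEFAULT a * 2) ENGINE = Memory;

SET input_format_defaults_for_omitted_fields = 1;

-- `b` is omitted in the input, so it is calculated from its DEFAULT expression.
INSERT INTO test.omitted_fields FORMAT JSONEachRow {"a": 5}

SELECT * FROM test.omitted_fields;   -- returns: a = 5, b = 10
```
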
@ -417,6 +417,12 @@ When enabled, replace empty input fields in TSV with default values. For complex
|
||||
|
||||
Disabled by default.
|
||||
|
||||
## input_format_csv_empty_as_default {#settings-input-format-csv-empty-as-default}
|
||||
|
||||
When enabled, replace empty input fields in CSV with default values. For complex default expressions `input_format_defaults_for_omitted_fields` must be enabled too.
|
||||
|
||||
Enabled by default.
|
||||
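
A minimal sketch with a hypothetical table; the empty unquoted field in the CSV input is replaced with the column default:

``` sql
CREATE TABLE test.csv_defaults (id UInt32, status String DEFAULT 'unknown') ENGINE = Memory;

SET input_format_csv_empty_as_default = 1;

-- The second field is empty, so it becomes 'unknown' instead of an empty string.
INSERT INTO test.csv_defaults FORMAT CSV
1,

SELECT * FROM test.csv_defaults;   -- returns: 1, 'unknown'
```
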
|
||||
## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}
|
||||
|
||||
Enables or disables parsing enum values as enum ids for TSV input format.
|
||||
@ -540,8 +546,40 @@ To improve insert performance, we recommend disabling this check if you are sure
|
||||
|
||||
Supported formats:
|
||||
|
||||
- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
|
||||
- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
|
||||
- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
|
||||
- [CSVWithNamesAndTypes](../../interfaces/formats.md#csvwithnamesandtypes)
|
||||
- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
|
||||
- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md#tabseparatedwithnamesandtypes)
|
||||
- [JSONCompactEachRowWithNames](../../interfaces/formats.md#jsoncompacteachrowwithnames)
|
||||
- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompacteachrowwithnamesandtypes)
|
||||
- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md#jsoncompactstringseachrowwithnames)
|
||||
- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompactstringseachrowwithnamesandtypes)
|
||||
- [RowBinaryWithNames](../../interfaces/formats.md#rowbinarywithnames-rowbinarywithnames)
|
||||
- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes)
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Disabled.
|
||||
- 1 — Enabled.
|
||||
|
||||
Default value: 1.
|
||||
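
A minimal sketch with a hypothetical table; the input header lists the columns in a different order and contains an extra column that is not present in the table:

``` sql
CREATE TABLE test.names_header (id UInt32, msg String) ENGINE = Memory;

SET input_format_with_names_use_header = 1;
SET input_format_skip_unknown_fields = 1;   -- skip the unknown `extra` column

INSERT INTO test.names_header FORMAT CSVWithNames
"msg","id","extra"
"hello",1,"ignored"

SELECT * FROM test.names_header;   -- returns: 1, 'hello'
```
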
|
||||
## input_format_with_types_use_header {#settings-input-format-with-types-use-header}
|
||||
|
||||
Controls whether the format parser should check if the data types from the input data match the data types of the corresponding columns in the target table.
|
||||
|
||||
Supported formats:
|
||||
|
||||
- [CSVWithNames](../../interfaces/formats.md#csvwithnames)
|
||||
- [CSVWithNamesAndTypes](../../interfaces/formats.md#csvwithnamesandtypes)
|
||||
- [TabSeparatedWithNames](../../interfaces/formats.md#tabseparatedwithnames)
|
||||
- [TabSeparatedWithNamesAndTypes](../../interfaces/formats.md#tabseparatedwithnamesandtypes)
|
||||
- [JSONCompactEachRowWithNames](../../interfaces/formats.md#jsoncompacteachrowwithnames)
|
||||
- [JSONCompactEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompacteachrowwithnamesandtypes)
|
||||
- [JSONCompactStringsEachRowWithNames](../../interfaces/formats.md#jsoncompactstringseachrowwithnames)
|
||||
- [JSONCompactStringsEachRowWithNamesAndTypes](../../interfaces/formats.md#jsoncompactstringseachrowwithnamesandtypes)
|
||||
- [RowBinaryWithNames](../../interfaces/formats.md#rowbinarywithnames-rowbinarywithnames)
|
||||
- [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes-rowbinarywithnamesandtypes)
|
||||
|
||||
Possible values:
|
||||
|
||||
@ -1397,6 +1435,32 @@ Minimum count of executing same expression before it is get compiled.
|
||||
|
||||
Default value: `3`.
|
||||
|
||||
## compile_aggregate_expressions {#compile_aggregate_expressions}
|
||||
|
||||
Enables or disables JIT-compilation of aggregate functions to native code. Enabling this setting can improve the performance.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Aggregation is done without JIT compilation.
|
||||
- 1 — Aggregation is done using JIT compilation.
|
||||
|
||||
Default value: `1`.
|
||||
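
A minimal sketch showing how the setting can be enabled for a single query (the query itself is arbitrary):

``` sql
-- min_count_to_compile_aggregate_expression = 0 makes the aggregate functions
-- JIT-compile immediately instead of after several identical occurrences.
SELECT sum(number), avg(number)
FROM numbers(10000000)
SETTINGS compile_aggregate_expressions = 1,
         min_count_to_compile_aggregate_expression = 0
```
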
|
||||
**See Also**
|
||||
|
||||
- [min_count_to_compile_aggregate_expression](#min_count_to_compile_aggregate_expression)
|
||||
|
||||
## min_count_to_compile_aggregate_expression {#min_count_to_compile_aggregate_expression}
|
||||
|
||||
The minimum number of identical aggregate expressions to start JIT-compilation. Works only if the [compile_aggregate_expressions](#compile_aggregate_expressions) setting is enabled.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Identical aggregate expressions are always JIT-compiled.
|
||||
|
||||
Default value: `3`.
|
||||
|
||||
## output_format_json_quote_64bit_integers {#session_settings-output_format_json_quote_64bit_integers}
|
||||
|
||||
Controls quoting of 64-bit or bigger [integers](../../sql-reference/data-types/int-uint.md) (like `UInt64` or `Int128`) when they are output in a [JSON](../../interfaces/formats.md#json) format.
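For example, a short sketch of the effect of the setting (expected output shown in comments):

``` sql
SET output_format_json_quote_64bit_integers = 1;
SELECT toUInt64(42) AS x FORMAT JSONEachRow;   -- {"x":"42"}

SET output_format_json_quote_64bit_integers = 0;
SELECT toUInt64(42) AS x FORMAT JSONEachRow;   -- {"x":42}
```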
|
||||
@ -1747,9 +1811,11 @@ Do not merge aggregation states from different servers for distributed query pro
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Disabled (final query processing is done on the initiator node).
|
||||
- 1 - Do not merge aggregation states from different servers for distributed query processing (query completelly processed on the shard, initiator only proxy the data), can be used in case it is for certain that there are different keys on different shards.
|
||||
- 2 - Same as `1` but applies `ORDER BY` and `LIMIT` (it is not possible when the query processed completelly on the remote node, like for `distributed_group_by_no_merge=1`) on the initiator (can be used for queries with `ORDER BY` and/or `LIMIT`).
|
||||
- `0` — Disabled (final query processing is done on the initiator node).
|
||||
- `1` — Do not merge aggregation states from different servers for distributed query processing (the query is completely processed on the shard, the initiator only proxies the data); can be used when it is certain that there are different keys on different shards.
|
||||
- `2` — Same as `1`, but applies `ORDER BY` and `LIMIT` on the initiator (this is not possible when the query is processed completely on the remote node, as for `distributed_group_by_no_merge=1`); can be used for queries with `ORDER BY` and/or `LIMIT`.
|
||||
|
||||
Default value: `0`
|
||||
|
||||
**Example**
|
||||
|
||||
@ -1780,19 +1846,33 @@ FORMAT PrettyCompactMonoBlock
|
||||
└───────┘
|
||||
```
|
||||
|
||||
Default value: 0
|
||||
## distributed_push_down_limit {#distributed-push-down-limit}
|
||||
|
||||
## distributed_push_down_limit {#distributed-push-down-limit}
|
||||
Enables or disables applying [LIMIT](#limit) on each shard separately.
|
||||
|
||||
`LIMIT` will be applied on each shard separately. Usually you do not need to use it, since this is done automatically whenever possible, i.e. for a simple `SELECT ... FROM ... LIMIT ...` query.
|
||||
This allows you to avoid:
|
||||
- Sending extra rows over network;
|
||||
- Processing rows behind the limit on the initiator.
|
||||
|
||||
Starting from version 21.9 you cannot get inaccurate results anymore, since `distributed_push_down_limit` changes query execution only if at least one of the following conditions is met:
|
||||
- [distributed_group_by_no_merge](#distributed-group-by-no-merge) > 0.
|
||||
- Query **does not have** `GROUP BY`/`DISTINCT`/`LIMIT BY`, but it has `ORDER BY`/`LIMIT`.
|
||||
- Query **has** `GROUP BY`/`DISTINCT`/`LIMIT BY` with `ORDER BY`/`LIMIT` and:
|
||||
- [optimize_skip_unused_shards](#optimize-skip-unused-shards) is enabled.
|
||||
- [optimize_distributed_group_by_sharding_key](#optimize-distributed-group-by-sharding-key) is enabled.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 - Disabled
|
||||
- 1 - Enabled
|
||||
- 0 — Disabled.
|
||||
- 1 — Enabled.
|
||||
|
||||
!!! note "Note"
|
||||
With this setting, the result of the query may be inaccurate.
|
||||
Default value: `1`.
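A hedged sketch of a query that benefits from the push-down (the `Distributed` table `dist_hits` and its columns are hypothetical):

``` sql
-- with distributed_push_down_limit = 1 each shard applies the LIMIT locally,
-- so far fewer rows are sent to the initiator
SELECT event_time, user_id
FROM dist_hits
ORDER BY event_time DESC
LIMIT 10
SETTINGS distributed_push_down_limit = 1;
```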
|
||||
|
||||
See also:
|
||||
|
||||
- [distributed_group_by_no_merge](#distributed-group-by-no-merge)
|
||||
- [optimize_skip_unused_shards](#optimize-skip-unused-shards)
|
||||
- [optimize_distributed_group_by_sharding_key](#optimize-distributed-group-by-sharding-key)
|
||||
|
||||
## optimize_skip_unused_shards_limit {#optimize-skip-unused-shards-limit}
|
||||
|
||||
@ -1900,6 +1980,7 @@ Default value: 0
|
||||
See also:
|
||||
|
||||
- [distributed_group_by_no_merge](#distributed-group-by-no-merge)
|
||||
- [distributed_push_down_limit](#distributed-push-down-limit)
|
||||
- [optimize_skip_unused_shards](#optimize-skip-unused-shards)
|
||||
|
||||
!!! note "Note"
|
||||
@ -3810,3 +3891,101 @@ Default value: `0`.
|
||||
**See Also**
|
||||
|
||||
- [optimize_move_to_prewhere](#optimize_move_to_prewhere) setting
|
||||
|
||||
## describe_include_subcolumns {#describe_include_subcolumns}
|
||||
|
||||
Enables describing subcolumns for a [DESCRIBE](../../sql-reference/statements/describe-table.md) query. For example, members of a [Tuple](../../sql-reference/data-types/tuple.md) or subcolumns of a [Map](../../sql-reference/data-types/map.md#map-subcolumns), [Nullable](../../sql-reference/data-types/nullable.md#finding-null) or an [Array](../../sql-reference/data-types/array.md#array-size) data type.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Subcolumns are not included in `DESCRIBE` queries.
|
||||
- 1 — Subcolumns are included in `DESCRIBE` queries.
|
||||
|
||||
Default value: `0`.
|
||||
|
||||
**Example**
|
||||
|
||||
See an example for the [DESCRIBE](../../sql-reference/statements/describe-table.md) statement.
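A minimal self-contained sketch (the table name and columns are hypothetical):

``` sql
CREATE TABLE tup_example (id UInt64, point Tuple(x UInt32, y UInt32)) ENGINE = Memory;

SET describe_include_subcolumns = 1;
DESCRIBE TABLE tup_example;   -- also lists the subcolumns point.x and point.y
```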
|
||||
|
||||
## async_insert {#async-insert}
|
||||
|
||||
Enables or disables asynchronous inserts. This makes sense only for insertion over the HTTP protocol. Note that deduplication does not work for such inserts.
|
||||
|
||||
If enabled, the data is combined into batches before the insertion into tables, so it is possible to do small and frequent insertions into ClickHouse (up to 15000 queries per second) without buffer tables.
|
||||
|
||||
The data is inserted either after the [async_insert_max_data_size](#async-insert-max-data-size) is exceeded or after [async_insert_busy_timeout_ms](#async-insert-busy-timeout-ms) milliseconds since the first `INSERT` query. If the [async_insert_stale_timeout_ms](#async-insert-stale-timeout-ms) is set to a non-zero value, the data is inserted after `async_insert_stale_timeout_ms` milliseconds since the last query.
|
||||
|
||||
If [wait_for_async_insert](#wait-for-async-insert) is enabled, every client will wait for the data to be processed and flushed to the table. Otherwise, the query would be processed almost instantly, even if the data is not inserted.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Insertions are made synchronously, one after another.
|
||||
- 1 — Multiple asynchronous insertions enabled.
|
||||
|
||||
Default value: `0`.
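A hedged sketch of an asynchronous insert over the HTTP interface (the host, port, and table `t` are assumptions; settings are passed as URL parameters of the request):

``` bash
curl "http://localhost:8123/?async_insert=1&wait_for_async_insert=1&query=INSERT%20INTO%20t%20FORMAT%20JSONEachRow" \
    --data-binary '{"id": 1, "s": "hello"}'
```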
|
||||
|
||||
## async_insert_threads {#async-insert-threads}
|
||||
|
||||
The maximum number of threads for background data parsing and insertion.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Asynchronous insertions are disabled.
|
||||
|
||||
Default value: `16`.
|
||||
|
||||
## wait_for_async_insert {#wait-for-async-insert}
|
||||
|
||||
Enables or disables waiting for processing of asynchronous insertion. If enabled, server will return `OK` only after the data is inserted. Otherwise, it will return `OK` even if the data wasn't inserted.
|
||||
|
||||
Possible values:
|
||||
|
||||
- 0 — Server returns `OK` even if the data is not yet inserted.
|
||||
- 1 — Server returns `OK` only after the data is inserted.
|
||||
|
||||
Default value: `1`.
|
||||
|
||||
## wait_for_async_insert_timeout {#wait-for-async-insert-timeout}
|
||||
|
||||
The timeout in seconds for waiting for processing of asynchronous insertion.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Disabled.
|
||||
|
||||
Default value: [lock_acquire_timeout](#lock_acquire_timeout).
|
||||
|
||||
## async_insert_max_data_size {#async-insert-max-data-size}
|
||||
|
||||
The maximum size of the unparsed data in bytes collected per query before being inserted.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Asynchronous insertions are disabled.
|
||||
|
||||
Default value: `1000000`.
|
||||
|
||||
## async_insert_busy_timeout_ms {#async-insert-busy-timeout-ms}
|
||||
|
||||
The maximum timeout in milliseconds since the first `INSERT` query before inserting collected data.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Timeout disabled.
|
||||
|
||||
Default value: `200`.
|
||||
|
||||
## async_insert_stale_timeout_ms {#async-insert-stale-timeout-ms}
|
||||
|
||||
The maximum timeout in milliseconds since the last `INSERT` query before dumping collected data. If enabled, the setting prolongs the [async_insert_busy_timeout_ms](#async-insert-busy-timeout-ms) with every `INSERT` query as long as [async_insert_max_data_size](#async-insert-max-data-size) is not exceeded.
|
||||
|
||||
Possible values:
|
||||
|
||||
- Positive integer.
|
||||
- 0 — Timeout disabled.
|
||||
|
||||
Default value: `0`.
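A hedged sketch of how these settings are typically combined in one session (the values are placeholders):

``` sql
SET async_insert = 1;
SET async_insert_max_data_size = 1000000;   -- flush once ~1 MB of unparsed data is collected
SET async_insert_busy_timeout_ms = 500;     -- ...or 500 ms after the first INSERT in the batch
SET async_insert_stale_timeout_ms = 100;    -- ...extending the window while INSERTs keep arriving
```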
|
||||
|
74
docs/en/operations/ssl-zookeeper.md
Normal file
74
docs/en/operations/ssl-zookeeper.md
Normal file
@ -0,0 +1,74 @@
|
||||
---
|
||||
toc_priority: 45
|
||||
toc_title: Secured communication with Zookeeper
|
||||
---
|
||||
|
||||
# Optional secured communication between ClickHouse and Zookeeper {#secured-communication-with-zookeeper}
|
||||
|
||||
You should specify `ssl.keyStore.location`, `ssl.keyStore.password` and `ssl.trustStore.location`, `ssl.trustStore.password` on the ZooKeeper side for communication with the ClickHouse client over SSL. These options are available starting from ZooKeeper version 3.5.2.
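For reference, a hedged sketch of the corresponding ZooKeeper-side configuration (the property names come from ZooKeeper's SSL support; paths and passwords are placeholders, and the exact placement may depend on the ZooKeeper version):

```text
secureClientPort=2281
serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
ssl.keyStore.location=/path/to/zookeeper.jks
ssl.keyStore.password=changeit
ssl.trustStore.location=/path/to/truststore.jks
ssl.trustStore.password=changeit
```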
|
||||
|
||||
You can add `zookeeper.crt` to trusted certificates.
|
||||
|
||||
``` bash
|
||||
sudo cp zookeeper.crt /usr/local/share/ca-certificates/zookeeper.crt
|
||||
sudo update-ca-certificates
|
||||
```
|
||||
|
||||
The client section in `config.xml` will look like this:
|
||||
|
||||
``` xml
|
||||
<client>
|
||||
<certificateFile>/etc/clickhouse-server/client.crt</certificateFile>
|
||||
<privateKeyFile>/etc/clickhouse-server/client.key</privateKeyFile>
|
||||
<loadDefaultCAFile>true</loadDefaultCAFile>
|
||||
<cacheSessions>true</cacheSessions>
|
||||
<disableProtocols>sslv2,sslv3</disableProtocols>
|
||||
<preferServerCiphers>true</preferServerCiphers>
|
||||
<invalidCertificateHandler>
|
||||
<name>RejectCertificateHandler</name>
|
||||
</invalidCertificateHandler>
|
||||
</client>
|
||||
```
|
||||
|
||||
Add ZooKeeper to the ClickHouse config with some cluster and macros:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<zookeeper>
|
||||
<node>
|
||||
<host>localhost</host>
|
||||
<port>2281</port>
|
||||
<secure>1</secure>
|
||||
</node>
|
||||
</zookeeper>
|
||||
</yandex>
|
||||
```
|
||||
|
||||
Start `clickhouse-server`. In the logs you should see:
|
||||
|
||||
```text
|
||||
<Trace> ZooKeeper: initialized, hosts: secure://localhost:2281
|
||||
```
|
||||
|
||||
The `secure://` prefix indicates that the connection is secured by SSL.
|
||||
|
||||
To ensure that traffic is encrypted, run `tcpdump` on the secured port:
|
||||
|
||||
```bash
|
||||
tcpdump -i any dst port 2281 -nnXS
|
||||
```
|
||||
|
||||
And query in `clickhouse-client`:
|
||||
|
||||
```sql
|
||||
SELECT * FROM system.zookeeper WHERE path = '/';
|
||||
```
|
||||
|
||||
On an unencrypted connection you will see something like this in the `tcpdump` output:
|
||||
|
||||
```text
|
||||
..../zookeeper/q
|
||||
uota.
|
||||
```
|
||||
|
||||
On an encrypted connection you should not see this.
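If needed, the certificate presented on the secure port can also be inspected directly (a hedged suggestion):

```bash
openssl s_client -connect localhost:2281 -showcerts </dev/null
```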
|
@ -22,7 +22,7 @@ ClickHouse supports zero-copy replication for `S3` and `HDFS` disks, which means
|
||||
Configuration markup:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<hdfs>
|
||||
@ -44,7 +44,7 @@ Configuration markup:
|
||||
<merge_tree>
|
||||
<min_bytes_for_wide_part>0</min_bytes_for_wide_part>
|
||||
</merge_tree>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Required parameters:
|
||||
@ -96,7 +96,7 @@ Optional parameters:
|
||||
Example of disk configuration:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<disk_s3>
|
||||
@ -113,7 +113,7 @@ Example of disk configuration:
|
||||
</disk_s3_encrypted>
|
||||
</disks>
|
||||
</storage_configuration>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## Storing Data on Web Server {#storing-data-on-webserver}
|
||||
@ -127,7 +127,7 @@ Web server storage is supported only for the [MergeTree](../engines/table-engine
|
||||
A ready-to-use test case. You need to add this configuration to the config:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<storage_configuration>
|
||||
<disks>
|
||||
<web>
|
||||
@ -145,7 +145,7 @@ A ready test case. You need to add this configuration to config:
|
||||
</web>
|
||||
</policies>
|
||||
</storage_configuration>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
And then execute this query:
|
||||
|
@ -24,6 +24,11 @@ Columns:
|
||||
- `is_in_primary_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the column is in the primary key expression.
|
||||
- `is_in_sampling_key` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the column is in the sampling key expression.
|
||||
- `compression_codec` ([String](../../sql-reference/data-types/string.md)) — Compression codec name.
|
||||
- `character_octet_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum length in bytes for binary data, character data, or text data and images. In ClickHouse it makes sense only for the `FixedString` data type. Otherwise, the `NULL` value is returned.
|
||||
- `numeric_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Accuracy of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it is bitness for integer types and decimal precision for `Decimal` types. Otherwise, the `NULL` value is returned.
|
||||
- `numeric_precision_radix` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The base of the number system in which the precision of approximate numeric data, exact numeric data, integer data or monetary data is expressed. In ClickHouse it is 2 for integer types and 10 for `Decimal` types. Otherwise, the `NULL` value is returned.
|
||||
- `numeric_scale` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The scale of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it makes sense only for `Decimal` types. Otherwise, the `NULL` value is returned.
|
||||
- `datetime_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Decimal precision of `DateTime64` data type. For other data types, the `NULL` value is returned.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -34,10 +39,11 @@ SELECT * FROM system.columns LIMIT 2 FORMAT Vertical;
|
||||
```text
|
||||
Row 1:
|
||||
──────
|
||||
database: system
|
||||
table: aggregate_function_combinators
|
||||
name: name
|
||||
database: INFORMATION_SCHEMA
|
||||
table: COLUMNS
|
||||
name: table_catalog
|
||||
type: String
|
||||
position: 1
|
||||
default_kind:
|
||||
default_expression:
|
||||
data_compressed_bytes: 0
|
||||
@ -49,13 +55,19 @@ is_in_sorting_key: 0
|
||||
is_in_primary_key: 0
|
||||
is_in_sampling_key: 0
|
||||
compression_codec:
|
||||
character_octet_length: ᴺᵁᴸᴸ
|
||||
numeric_precision: ᴺᵁᴸᴸ
|
||||
numeric_precision_radix: ᴺᵁᴸᴸ
|
||||
numeric_scale: ᴺᵁᴸᴸ
|
||||
datetime_precision: ᴺᵁᴸᴸ
|
||||
|
||||
Row 2:
|
||||
──────
|
||||
database: system
|
||||
table: aggregate_function_combinators
|
||||
name: is_internal
|
||||
type: UInt8
|
||||
database: INFORMATION_SCHEMA
|
||||
table: COLUMNS
|
||||
name: table_schema
|
||||
type: String
|
||||
position: 2
|
||||
default_kind:
|
||||
default_expression:
|
||||
data_compressed_bytes: 0
|
||||
@ -67,6 +79,11 @@ is_in_sorting_key: 0
|
||||
is_in_primary_key: 0
|
||||
is_in_sampling_key: 0
|
||||
compression_codec:
|
||||
character_octet_length: ᴺᵁᴸᴸ
|
||||
numeric_precision: ᴺᵁᴸᴸ
|
||||
numeric_precision_radix: ᴺᵁᴸᴸ
|
||||
numeric_scale: ᴺᵁᴸᴸ
|
||||
datetime_precision: ᴺᵁᴸᴸ
|
||||
```
|
||||
|
||||
The `system.columns` table contains the following columns (the column type is shown in brackets):
|
||||
|
@ -9,6 +9,7 @@ Columns:
|
||||
- `data_path` ([String](../../sql-reference/data-types/string.md)) — Data path.
|
||||
- `metadata_path` ([String](../../sql-reference/data-types/enum.md)) — Metadata path.
|
||||
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Database UUID.
|
||||
- `comment` ([String](../../sql-reference/data-types/string.md)) — Database comment.
|
||||
|
||||
The `name` column from this system table is used for implementing the `SHOW DATABASES` query.
|
||||
|
||||
@ -17,22 +18,20 @@ The `name` column from this system table is used for implementing the `SHOW DATA
|
||||
Create a database.
|
||||
|
||||
``` sql
|
||||
CREATE DATABASE test
|
||||
CREATE DATABASE test;
|
||||
```
|
||||
|
||||
Check all of the databases available to the user.
|
||||
|
||||
``` sql
|
||||
SELECT * FROM system.databases
|
||||
SELECT * FROM system.databases;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name───────────────────────────┬─engine─┬─data_path──────────────────┬─metadata_path───────────────────────────────────────────────────────┬─────────────────────────────────uuid─┐
|
||||
│ _temporary_and_external_tables │ Memory │ /var/lib/clickhouse/ │ │ 00000000-0000-0000-0000-000000000000 │
|
||||
│ default │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/d31/d317b4bd-3595-4386-81ee-c2334694128a/ │ d317b4bd-3595-4386-81ee-c2334694128a │
|
||||
│ test │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/39b/39bf0cc5-4c06-4717-87fe-c75ff3bd8ebb/ │ 39bf0cc5-4c06-4717-87fe-c75ff3bd8ebb │
|
||||
│ system │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/1d1/1d1c869d-e465-4b1b-a51f-be033436ebf9/ │ 1d1c869d-e465-4b1b-a51f-be033436ebf9 │
|
||||
└────────────────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┘
|
||||
┌─name───────────────┬─engine─┬─data_path──────────────────┬─metadata_path───────────────────────────────────────────────────────┬─uuid─────────────────────────────────┬─comment─┐
|
||||
│ INFORMATION_SCHEMA │ Memory │ /var/lib/clickhouse/ │ │ 00000000-0000-0000-0000-000000000000 │ │
|
||||
│ default │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/d31/d317b4bd-3595-4386-81ee-c2334694128a/ │ 24363899-31d7-42a0-a436-389931d752a0 │ │
|
||||
│ information_schema │ Memory │ /var/lib/clickhouse/ │ │ 00000000-0000-0000-0000-000000000000 │ │
|
||||
│ system │ Atomic │ /var/lib/clickhouse/store/ │ /var/lib/clickhouse/store/1d1/1d1c869d-e465-4b1b-a51f-be033436ebf9/ │ 03e9f3d1-cc88-4a49-83e9-f3d1cc881a49 │ │
|
||||
└────────────────────┴────────┴────────────────────────────┴─────────────────────────────────────────────────────────────────────┴──────────────────────────────────────┴─────────┘
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/operations/system-tables/databases) <!--hide-->
|
||||
|
@ -34,7 +34,7 @@ System log tables can be customized by creating a config file with the same name
|
||||
An example:
|
||||
|
||||
```xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<query_log>
|
||||
<database>system</database>
|
||||
<table>query_log</table>
|
||||
@ -45,7 +45,7 @@ An example:
|
||||
-->
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</query_log>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
By default, table growth is unlimited. To control the size of a table, you can use [TTL](../../sql-reference/statements/alter/ttl.md#manipulations-with-table-ttl) settings for removing outdated log records. You can also use the partitioning feature of `MergeTree`-engine tables.
|
||||
|
210
docs/en/operations/system-tables/information_schema.md
Normal file
210
docs/en/operations/system-tables/information_schema.md
Normal file
@ -0,0 +1,210 @@
|
||||
# INFORMATION_SCHEMA {#information-schema}
|
||||
|
||||
`INFORMATION_SCHEMA` (`information_schema`) is a system database that contains views. Using these views, you can get information about the metadata of database objects. These views read data from the columns of the [system.columns](../../operations/system-tables/columns.md), [system.databases](../../operations/system-tables/databases.md) and [system.tables](../../operations/system-tables/tables.md) system tables.
|
||||
|
||||
The structure and composition of system tables may change in different versions of the product, but the support of the `information_schema` makes it possible to change the structure of system tables without changing the method of access to metadata. Metadata requests do not depend on the DBMS used.
|
||||
|
||||
``` sql
|
||||
SHOW TABLES FROM INFORMATION_SCHEMA;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─name─────┐
|
||||
│ COLUMNS │
|
||||
│ SCHEMATA │
|
||||
│ TABLES │
|
||||
│ VIEWS │
|
||||
└──────────┘
|
||||
```
|
||||
|
||||
`INFORMATION_SCHEMA` contains the following views:
|
||||
|
||||
- [COLUMNS](#columns)
|
||||
- [SCHEMATA](#schemata)
|
||||
- [TABLES](#tables)
|
||||
- [VIEWS](#views)
|
||||
|
||||
## COLUMNS {#columns}
|
||||
|
||||
Contains columns read from the [system.columns](../../operations/system-tables/columns.md) system table and columns that are not supported in ClickHouse or do not make sense (always `NULL`), but are required by the standard.
|
||||
|
||||
Columns:
|
||||
|
||||
- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
|
||||
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
|
||||
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
- `column_name` ([String](../../sql-reference/data-types/string.md)) — Column name.
|
||||
- `ordinal_position` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Ordinal position of a column in a table starting with 1.
|
||||
- `column_default` ([String](../../sql-reference/data-types/string.md)) — Expression for the default value, or an empty string if it is not defined.
|
||||
- `is_nullable` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the column type is `Nullable`.
|
||||
- `data_type` ([String](../../sql-reference/data-types/string.md)) — Column type.
|
||||
- `character_maximum_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum length in bytes for binary data, character data, or text data and images. In ClickHouse it makes sense only for the `FixedString` data type. Otherwise, the `NULL` value is returned.
|
||||
- `character_octet_length` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Maximum length in bytes for binary data, character data, or text data and images. In ClickHouse it makes sense only for the `FixedString` data type. Otherwise, the `NULL` value is returned.
|
||||
- `numeric_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Accuracy of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it is bitness for integer types and decimal precision for `Decimal` types. Otherwise, the `NULL` value is returned.
|
||||
- `numeric_precision_radix` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The base of the number system in which the precision of approximate numeric data, exact numeric data, integer data or monetary data is expressed. In ClickHouse it is 2 for integer types and 10 for `Decimal` types. Otherwise, the `NULL` value is returned.
|
||||
- `numeric_scale` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — The scale of approximate numeric data, exact numeric data, integer data, or monetary data. In ClickHouse it makes sense only for `Decimal` types. Otherwise, the `NULL` value is returned.
|
||||
- `datetime_precision` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — Decimal precision of `DateTime64` data type. For other data types, the `NULL` value is returned.
|
||||
- `character_set_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `character_set_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `character_set_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `collation_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `collation_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `collation_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `domain_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `domain_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `domain_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE (table_schema=currentDatabase() OR table_schema='') AND table_name NOT LIKE '%inner%' LIMIT 1 FORMAT Vertical;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
table_catalog: default
|
||||
table_schema: default
|
||||
table_name: describe_example
|
||||
column_name: id
|
||||
ordinal_position: 1
|
||||
column_default:
|
||||
is_nullable: 0
|
||||
data_type: UInt64
|
||||
character_maximum_length: ᴺᵁᴸᴸ
|
||||
character_octet_length: ᴺᵁᴸᴸ
|
||||
numeric_precision: 64
|
||||
numeric_precision_radix: 2
|
||||
numeric_scale: 0
|
||||
datetime_precision: ᴺᵁᴸᴸ
|
||||
character_set_catalog: ᴺᵁᴸᴸ
|
||||
character_set_schema: ᴺᵁᴸᴸ
|
||||
character_set_name: ᴺᵁᴸᴸ
|
||||
collation_catalog: ᴺᵁᴸᴸ
|
||||
collation_schema: ᴺᵁᴸᴸ
|
||||
collation_name: ᴺᵁᴸᴸ
|
||||
domain_catalog: ᴺᵁᴸᴸ
|
||||
domain_schema: ᴺᵁᴸᴸ
|
||||
domain_name: ᴺᵁᴸᴸ
|
||||
```
|
||||
|
||||
## SCHEMATA {#schemata}
|
||||
|
||||
Contains columns read from the [system.databases](../../operations/system-tables/databases.md) system table and columns that are not supported in ClickHouse or do not make sense (always `NULL`), but are required by the standard.
|
||||
|
||||
Columns:
|
||||
|
||||
- `catalog_name` ([String](../../sql-reference/data-types/string.md)) — The name of the database.
|
||||
- `schema_name` ([String](../../sql-reference/data-types/string.md)) — The name of the database.
|
||||
- `schema_owner` ([String](../../sql-reference/data-types/string.md)) — Schema owner name, always `'default'`.
|
||||
- `default_character_set_catalog` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `default_character_set_schema` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `default_character_set_name` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
- `sql_path` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — `NULL`, not supported.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM information_schema.schemata WHERE schema_name ILIKE 'information_schema' LIMIT 1 FORMAT Vertical;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
catalog_name: INFORMATION_SCHEMA
|
||||
schema_name: INFORMATION_SCHEMA
|
||||
schema_owner: default
|
||||
default_character_set_catalog: ᴺᵁᴸᴸ
|
||||
default_character_set_schema: ᴺᵁᴸᴸ
|
||||
default_character_set_name: ᴺᵁᴸᴸ
|
||||
sql_path: ᴺᵁᴸᴸ
|
||||
```
|
||||
|
||||
## TABLES {#tables}
|
||||
|
||||
Contains columns read from the [system.tables](../../operations/system-tables/tables.md) system table.
|
||||
|
||||
Columns:
|
||||
|
||||
- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
|
||||
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
|
||||
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
- `table_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Table type. Possible values:
|
||||
- `BASE TABLE`
|
||||
- `VIEW`
|
||||
- `FOREIGN TABLE`
|
||||
- `LOCAL TEMPORARY`
|
||||
- `SYSTEM VIEW`
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (table_schema = currentDatabase() OR table_schema = '') AND table_name NOT LIKE '%inner%' LIMIT 1 FORMAT Vertical;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
table_catalog: default
|
||||
table_schema: default
|
||||
table_name: describe_example
|
||||
table_type: BASE TABLE
|
||||
```
|
||||
|
||||
## VIEWS {#views}
|
||||
|
||||
Contains columns read from the [system.tables](../../operations/system-tables/tables.md) system table, when the table engine [View](../../engines/table-engines/special/view.md) is used.
|
||||
|
||||
Columns:
|
||||
|
||||
- `table_catalog` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
|
||||
- `table_schema` ([String](../../sql-reference/data-types/string.md)) — The name of the database in which the table is located.
|
||||
- `table_name` ([String](../../sql-reference/data-types/string.md)) — Table name.
|
||||
- `view_definition` ([String](../../sql-reference/data-types/string.md)) — `SELECT` query for view.
|
||||
- `check_option` ([String](../../sql-reference/data-types/string.md)) — `NONE`, no checking.
|
||||
- `is_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the view is not updated.
|
||||
- `is_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — Shows whether the created view is [materialized](../../sql-reference/statements/create/view/#materialized). Possible values:
|
||||
- `NO` — The created view is not materialized.
|
||||
- `YES` — The created view is materialized.
|
||||
- `is_trigger_updatable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the trigger is not updated.
|
||||
- `is_trigger_deletable` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, the trigger is not deleted.
|
||||
- `is_trigger_insertable_into` ([Enum8](../../sql-reference/data-types/enum.md)) — `NO`, no data is inserted into the trigger.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
CREATE VIEW v (n Nullable(Int32), f Float64) AS SELECT n, f FROM t;
|
||||
CREATE MATERIALIZED VIEW mv ENGINE = Null AS SELECT * FROM system.one;
|
||||
SELECT * FROM information_schema.views WHERE table_schema = currentDatabase() LIMIT 1 FORMAT Vertical;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
Row 1:
|
||||
──────
|
||||
table_catalog: default
|
||||
table_schema: default
|
||||
table_name: mv
|
||||
view_definition: SELECT * FROM system.one
|
||||
check_option: NONE
|
||||
is_updatable: NO
|
||||
is_insertable_into: YES
|
||||
is_trigger_updatable: NO
|
||||
is_trigger_deletable: NO
|
||||
is_trigger_insertable_into: NO
|
||||
```
|
@ -8,43 +8,48 @@ Example:
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM system.replicas
|
||||
WHERE table = 'visits'
|
||||
WHERE table = 'test_table'
|
||||
FORMAT Vertical
|
||||
```
|
||||
|
||||
``` text
|
||||
Query id: dc6dcbcb-dc28-4df9-ae27-4354f5b3b13e
|
||||
|
||||
Row 1:
|
||||
──────
|
||||
database: merge
|
||||
table: visits
|
||||
engine: ReplicatedCollapsingMergeTree
|
||||
is_leader: 1
|
||||
can_become_leader: 1
|
||||
is_readonly: 0
|
||||
is_session_expired: 0
|
||||
future_parts: 1
|
||||
parts_to_check: 0
|
||||
zookeeper_path: /clickhouse/tables/01-06/visits
|
||||
replica_name: example01-06-1.yandex.ru
|
||||
replica_path: /clickhouse/tables/01-06/visits/replicas/example01-06-1.yandex.ru
|
||||
columns_version: 9
|
||||
queue_size: 1
|
||||
inserts_in_queue: 0
|
||||
merges_in_queue: 1
|
||||
part_mutations_in_queue: 0
|
||||
queue_oldest_time: 2020-02-20 08:34:30
|
||||
inserts_oldest_time: 1970-01-01 00:00:00
|
||||
merges_oldest_time: 2020-02-20 08:34:30
|
||||
part_mutations_oldest_time: 1970-01-01 00:00:00
|
||||
oldest_part_to_get:
|
||||
oldest_part_to_merge_to: 20200220_20284_20840_7
|
||||
───────
|
||||
database: db
|
||||
table: test_table
|
||||
engine: ReplicatedMergeTree
|
||||
is_leader: 1
|
||||
can_become_leader: 1
|
||||
is_readonly: 0
|
||||
is_session_expired: 0
|
||||
future_parts: 0
|
||||
parts_to_check: 0
|
||||
zookeeper_path: /test/test_table
|
||||
replica_name: r1
|
||||
replica_path: /test/test_table/replicas/r1
|
||||
columns_version: -1
|
||||
queue_size: 27
|
||||
inserts_in_queue: 27
|
||||
merges_in_queue: 0
|
||||
part_mutations_in_queue: 0
|
||||
queue_oldest_time: 2021-10-12 14:48:48
|
||||
inserts_oldest_time: 2021-10-12 14:48:48
|
||||
merges_oldest_time: 1970-01-01 03:00:00
|
||||
part_mutations_oldest_time: 1970-01-01 03:00:00
|
||||
oldest_part_to_get: 1_17_17_0
|
||||
oldest_part_to_merge_to:
|
||||
oldest_part_to_mutate_to:
|
||||
log_max_index: 596273
|
||||
log_pointer: 596274
|
||||
last_queue_update: 2020-02-20 08:34:32
|
||||
absolute_delay: 0
|
||||
total_replicas: 2
|
||||
active_replicas: 2
|
||||
log_max_index: 206
|
||||
log_pointer: 207
|
||||
last_queue_update: 2021-10-12 14:50:08
|
||||
absolute_delay: 99
|
||||
total_replicas: 5
|
||||
active_replicas: 5
|
||||
last_queue_update_exception:
|
||||
zookeeper_exception:
|
||||
replica_is_active: {'r1':1,'r2':1}
|
||||
```
|
||||
|
||||
Columns:
|
||||
@ -82,6 +87,8 @@ The next 4 columns have a non-zero value only where there is an active session w
|
||||
- `absolute_delay` (`UInt64`) - How big lag in seconds the current replica has.
|
||||
- `total_replicas` (`UInt8`) - The total number of known replicas of this table.
|
||||
- `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ZooKeeper (i.e., the number of functioning replicas).
|
||||
- `last_queue_update_exception` (`String`) - When the queue contains broken entries. Especially important when ClickHouse breaks backward compatibility between versions and log entries written by newer versions aren't parseable by old versions.
|
||||
- `zookeeper_exception` (`String`) - The last exception message, got if the error happened when fetching the info from ZooKeeper.
|
||||
- `replica_is_active` ([Map(String, UInt8)](../../sql-reference/data-types/map.md)) — Map between replica name and is replica active.
|
||||
|
||||
If you request all the columns, the table may work a bit slowly, since several reads from ZooKeeper are made for each row.
|
||||
|
@ -30,6 +30,8 @@ Columns:
|
||||
|
||||
- `engine_full` ([String](../../sql-reference/data-types/string.md)) - Parameters of the table engine.
|
||||
|
||||
- `as_select` ([String](../../sql-reference/data-types/string.md)) - `SELECT` query for view.
|
||||
|
||||
- `partition_key` ([String](../../sql-reference/data-types/string.md)) - The partition key expression specified in the table.
|
||||
|
||||
- `sorting_key` ([String](../../sql-reference/data-types/string.md)) - The sorting key expression specified in the table.
|
||||
@ -56,6 +58,7 @@ Columns:
|
||||
|
||||
- `comment` ([String](../../sql-reference/data-types/string.md)) - The comment for the table.
|
||||
|
||||
- `has_own_data` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag that indicates whether the table itself stores some data on disk or only accesses some other source.
|
||||
|
||||
The `system.tables` table is used in `SHOW TABLES` query implementation.
|
||||
|
||||
@ -80,6 +83,7 @@ dependencies_database: []
|
||||
dependencies_table: []
|
||||
create_table_query: CREATE TABLE base.t1 (`n` UInt64) ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 8192
|
||||
engine_full: MergeTree ORDER BY n SETTINGS index_granularity = 8192
|
||||
as_select: SELECT database AS table_catalog
|
||||
partition_key:
|
||||
sorting_key: n
|
||||
primary_key: n
|
||||
@ -90,6 +94,7 @@ total_bytes: 99
|
||||
lifetime_rows: ᴺᵁᴸᴸ
|
||||
lifetime_bytes: ᴺᵁᴸᴸ
|
||||
comment:
|
||||
has_own_data: 0
|
||||
|
||||
Row 2:
|
||||
──────
|
||||
@ -105,6 +110,7 @@ dependencies_database: []
|
||||
dependencies_table: []
|
||||
create_table_query: CREATE TABLE default.`53r93yleapyears` (`id` Int8, `febdays` Int8) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192
|
||||
engine_full: MergeTree ORDER BY id SETTINGS index_granularity = 8192
|
||||
as_select: SELECT name AS catalog_name
|
||||
partition_key:
|
||||
sorting_key: id
|
||||
primary_key: id
|
||||
@ -115,6 +121,5 @@ total_bytes: 155
|
||||
lifetime_rows: ᴺᵁᴸᴸ
|
||||
lifetime_bytes: ᴺᵁᴸᴸ
|
||||
comment:
|
||||
has_own_data: 0
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/operations/system-tables/tables) <!--hide-->
|
||||
|
@ -70,7 +70,7 @@ For HDD, enable the write cache.
|
||||
|
||||
## File System {#file-system}
|
||||
|
||||
Ext4 is the most reliable option. Set the mount options `noatime, nobarrier`.
|
||||
Ext4 is the most reliable option. Set the mount options `noatime`.
|
||||
XFS is also suitable, but it hasn’t been as thoroughly tested with ClickHouse.
|
||||
Most other file systems should also work fine. File systems with delayed allocation work better.
|
||||
|
||||
|
@ -47,7 +47,7 @@ Parameters:
|
||||
## Format of Zookeeper.xml {#format-of-zookeeper-xml}
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<logger>
|
||||
<level>trace</level>
|
||||
<size>100M</size>
|
||||
@ -60,13 +60,13 @@ Parameters:
|
||||
<port>2181</port>
|
||||
</node>
|
||||
</zookeeper>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
## Configuration of Copying Tasks {#configuration-of-copying-tasks}
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<!-- Configuration of clusters as in an ordinary server config -->
|
||||
<remote_servers>
|
||||
<source_cluster>
|
||||
@ -179,7 +179,7 @@ Parameters:
|
||||
</table_visits>
|
||||
...
|
||||
</tables>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
`clickhouse-copier` tracks the changes in `/task/path/description` and applies them on the fly. For instance, if you change the value of `max_workers`, the number of processes running tasks will also change.
|
||||
|
@ -0,0 +1,43 @@
|
||||
---
|
||||
toc_priority: 302
|
||||
---
|
||||
|
||||
# entropy {#entropy}
|
||||
|
||||
Calculates [Shannon entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)) of a column of values.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
entropy(val)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `val` — Column of values of any type.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Shannon entropy.
|
||||
|
||||
Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE entropy (`vals` UInt32,`strings` String) ENGINE = Memory;
|
||||
|
||||
INSERT INTO entropy VALUES (1, 'A'), (1, 'A'), (1,'A'), (1,'A'), (2,'B'), (2,'B'), (2,'C'), (2,'D');
|
||||
|
||||
SELECT entropy(vals), entropy(strings) FROM entropy;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─entropy(vals)─┬─entropy(strings)─┐
|
||||
│ 1 │ 1.75 │
|
||||
└───────────────┴──────────────────┘
|
||||
```
|
@ -26,7 +26,7 @@ You can view the list of external dictionaries and their statuses in the `system
|
||||
The configuration looks like this:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
...
|
||||
<layout>
|
||||
@ -36,7 +36,7 @@ The configuration looks like this:
|
||||
</layout>
|
||||
...
|
||||
</dictionary>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Corresponding [DDL-query](../../../sql-reference/statements/create/dictionary.md):
|
||||
@ -53,15 +53,17 @@ LAYOUT(LAYOUT_TYPE(param value)) -- layout settings
|
||||
- [flat](#flat)
|
||||
- [hashed](#dicts-external_dicts_dict_layout-hashed)
|
||||
- [sparse_hashed](#dicts-external_dicts_dict_layout-sparse_hashed)
|
||||
- [cache](#cache)
|
||||
- [ssd_cache](#ssd-cache)
|
||||
- [direct](#direct)
|
||||
- [range_hashed](#range-hashed)
|
||||
- [complex_key_hashed](#complex-key-hashed)
|
||||
- [complex_key_sparse_hashed](#complex-key-sparse-hashed)
|
||||
- [hashed_array](#dicts-external_dicts_dict_layout-hashed-array)
|
||||
- [complex_key_hashed_array](#complex-key-hashed-array)
|
||||
- [range_hashed](#range-hashed)
|
||||
- [complex_key_range_hashed](#complex-key-range-hashed)
|
||||
- [cache](#cache)
|
||||
- [complex_key_cache](#complex-key-cache)
|
||||
- [ssd_cache](#ssd-cache)
|
||||
- [ssd_complex_key_cache](#complex-key-ssd-cache)
|
||||
- [complex_key_ssd_cache](#complex-key-ssd-cache)
|
||||
- [direct](#direct)
|
||||
- [complex_key_direct](#complex-key-direct)
|
||||
- [ip_trie](#ip-trie)
|
||||
|
||||
@ -151,13 +153,15 @@ Configuration example:
|
||||
</layout>
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
``` sql
|
||||
LAYOUT(COMPLEX_KEY_HASHED())
|
||||
```
|
||||
|
||||
### complex_key_sparse_hashed {#complex-key-sparse-hashed}
|
||||
|
||||
This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `sparse_hashed`.
|
||||
This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to [sparse_hashed](#dicts-external_dicts_dict_layout-sparse_hashed).
|
||||
|
||||
Configuration example:
|
||||
|
||||
@ -167,13 +171,15 @@ Configuration example:
|
||||
</layout>
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
``` sql
|
||||
LAYOUT(COMPLEX_KEY_SPARSE_HASHED())
|
||||
```
|
||||
|
||||
### hashed_array {#dicts-external_dicts_dict_layout-hashed-array}
|
||||
|
||||
The dictionary is completely stored in memory. Each attribute is stored in array. Key attribute is stored in the form of hashed table where value is index in attributes array. The dictionary can contain any number of elements with any identifiers In practice, the number of keys can reach tens of millions of items.
|
||||
The dictionary is completely stored in memory. Each attribute is stored in an array. The key attribute is stored in the form of a hashed table where value is an index in the attributes array. The dictionary can contain any number of elements with any identifiers. In practice, the number of keys can reach tens of millions of items.
|
||||
|
||||
All types of sources are supported. When updating, data (from a file or from a table) is read in its entirety.
|
||||
|
||||
@ -194,7 +200,7 @@ LAYOUT(HASHED_ARRAY())
|
||||
|
||||
### complex_key_hashed_array {#complex-key-hashed-array}
|
||||
|
||||
This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to `hashed_array`.
|
||||
This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). Similar to [hashed_array](#dicts-external_dicts_dict_layout-hashed-array).
|
||||
|
||||
Configuration example:
|
||||
|
||||
@ -204,11 +210,12 @@ Configuration example:
|
||||
</layout>
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
``` sql
|
||||
LAYOUT(COMPLEX_KEY_HASHED_ARRAY())
|
||||
```
|
||||
|
||||
|
||||
### range_hashed {#range-hashed}
|
||||
|
||||
The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values.
|
||||
@ -282,7 +289,7 @@ Details of the algorithm:
|
||||
Configuration example:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
...
|
||||
|
||||
@ -310,7 +317,7 @@ Configuration example:
|
||||
</structure>
|
||||
|
||||
</dictionary>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
or
|
||||
@ -356,8 +363,9 @@ When searching for a dictionary, the cache is searched first. For each block of
|
||||
|
||||
If keys are not found in the dictionary, then a cache update task is created and added to the update queue. Update queue properties can be controlled with the settings `max_update_queue_size`, `update_queue_push_timeout_milliseconds`, `query_wait_timeout_milliseconds`, `max_threads_for_updates`.
|
||||
|
||||
For cache dictionaries, the expiration [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used and key becomes expired, and it is re-requested the next time it needs to be used this behaviour can be configured with setting `allow_read_expired_keys`.
|
||||
This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the `system.dictionaries` table.
|
||||
For cache dictionaries, the expiration [lifetime](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-lifetime.md) of data in the cache can be set. If more time than `lifetime` has passed since loading the data in a cell, the cell’s value is not used and key becomes expired. The key is re-requested the next time it needs to be used. This behaviour can be configured with setting `allow_read_expired_keys`.
|
||||
|
||||
This is the least effective of all the ways to store dictionaries. The speed of the cache depends strongly on correct settings and the usage scenario. A cache type dictionary performs well only when the hit rates are high enough (recommended 99% and higher). You can view the average hit rate in the [system.dictionaries](../../../operations/system-tables/dictionaries.md) table.
|
||||
|
||||
If the setting `allow_read_expired_keys` is set to 1 (the default is 0), the dictionary can support asynchronous updates. If a client requests keys and all of them are in the cache, but some of them are expired, then the dictionary returns the expired keys to the client and requests them asynchronously from the source.
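A hedged configuration sketch of a cache layout that uses these update-queue settings (the values are placeholders):

``` xml
<layout>
    <cache>
        <size_in_cells>1000000</size_in_cells>
        <allow_read_expired_keys>1</allow_read_expired_keys>
        <max_update_queue_size>100000</max_update_queue_size>
        <update_queue_push_timeout_milliseconds>10</update_queue_push_timeout_milliseconds>
        <max_threads_for_updates>4</max_threads_for_updates>
    </cache>
</layout>
```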
|
||||
|
||||
@ -545,4 +553,3 @@ dictGetString('prefix', 'asn', tuple(IPv6StringToNum('2001:db8::1')))
|
||||
Other types are not supported yet. The function returns the attribute for the prefix that corresponds to this IP address. If there are overlapping prefixes, the most specific one is returned.
|
||||
|
||||
Data must completely fit into RAM.
|
||||
|
||||
|
@ -10,7 +10,7 @@ An external dictionary can be connected from many different sources.
|
||||
If the dictionary is configured using an xml file, the configuration looks like this:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
...
|
||||
<source>
|
||||
@ -21,7 +21,7 @@ If dictionary is configured using xml-file, the configuration looks like this:
|
||||
...
|
||||
</dictionary>
|
||||
...
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
In case of a [DDL-query](../../../sql-reference/statements/create/dictionary.md), the equivalent configuration will look like this:
|
||||
@ -311,7 +311,7 @@ Configuring `/etc/odbc.ini` (or `~/.odbc.ini` if you signed in under a user that
|
||||
The dictionary configuration in ClickHouse:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
<name>table_name</name>
|
||||
<source>
|
||||
@ -340,7 +340,7 @@ The dictionary configuration in ClickHouse:
|
||||
</attribute>
|
||||
</structure>
|
||||
</dictionary>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
or
|
||||
@ -416,7 +416,7 @@ Remarks:
|
||||
Configuring the dictionary in ClickHouse:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
<name>test</name>
|
||||
<source>
|
||||
@ -446,7 +446,7 @@ Configuring the dictionary in ClickHouse:
|
||||
</attribute>
|
||||
</structure>
|
||||
</dictionary>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
or
|
||||
|
@ -26,7 +26,7 @@ The [dictionaries](../../../operations/system-tables/dictionaries.md#system_tabl
|
||||
The dictionary configuration file has the following format:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<comment>An optional element with any content. Ignored by the ClickHouse server.</comment>
|
||||
|
||||
<!--Optional element. File name with substitutions-->
|
||||
@ -38,7 +38,7 @@ The dictionary configuration file has the following format:
|
||||
<!-- There can be any number of <dictionary> sections in the configuration file. -->
|
||||
</dictionary>
|
||||
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
You can [configure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict.md) any number of dictionaries in the same file.
|
||||
|
@ -82,3 +82,76 @@ An exception is thrown when dividing by zero or when dividing a minimal negative
|
||||
Returns the least common multiple of the numbers.
|
||||
An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
|
||||
|
||||
## max2 {#max2}
|
||||
|
||||
Compares two values and returns the maximum. The returned value is converted to [Float64](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
max2(value1, value2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `value1` — First value. [Int/UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
- `value2` — Second value. [Int/UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The maximum of two values.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT max2(-1, 2);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─max2(-1, 2)─┐
|
||||
│ 2 │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
## min2 {#min2}
|
||||
|
||||
Compares two values and returns the minimum. The returned value is converted to [Float64](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
min2(value1, value2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `value1` — First value. [Int/UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
- `value2` — Second value. [Int/UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The minimum of two values.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT min2(-1, 2);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─min2(-1, 2)─┐
|
||||
│ -1 │
|
||||
└─────────────┘
|
||||
```
|
||||
|
@ -53,7 +53,7 @@ The first column is `id`, the second column is `c1`.
|
||||
Configure the external dictionary:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
<name>ext-dict-test</name>
|
||||
<source>
|
||||
@ -77,7 +77,7 @@ Configure the external dictionary:
|
||||
</structure>
|
||||
<lifetime>0</lifetime>
|
||||
</dictionary>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Perform the query:
|
||||
@ -113,7 +113,7 @@ The first column is `id`, the second is `c1`, the third is `c2`.
|
||||
Configure the external dictionary:
|
||||
|
||||
``` xml
|
||||
<yandex>
|
||||
<clickhouse>
|
||||
<dictionary>
|
||||
<name>ext-dict-mult</name>
|
||||
<source>
|
||||
@ -142,7 +142,7 @@ Configure the external dictionary:
|
||||
</structure>
|
||||
<lifetime>0</lifetime>
|
||||
</dictionary>
|
||||
</yandex>
|
||||
</clickhouse>
|
||||
```
|
||||
|
||||
Perform the query:
|
||||
|
@ -2,13 +2,13 @@
|
||||
toc_title: S2 Geometry
|
||||
---
|
||||
|
||||
# Functions for Working with S2 Index {#s2Index}
|
||||
# Functions for Working with S2 Index {#s2index}
|
||||
|
||||
[S2](https://s2geometry.io/) is a geographical indexing system where all geographical data is represented on a three-dimensional sphere (similar to a globe).
|
||||
|
||||
In the S2 library points are represented as unit length vectors called S2 point indices (points on the surface of a three dimensional unit sphere) as opposed to traditional (latitude, longitude) pairs.
|
||||
In the S2 library points are represented as the S2 Index - a specific number which encodes internally a point on the surface of a unit sphere, unlike traditional (latitude, longitude) pairs. To get the S2 point index for a given point specified in the format (latitude, longitude) use the [geoToS2](#geotos2) function. Also, you can use the [s2ToGeo](#s2togeo) function for getting geographical coordinates corresponding to the specified S2 point index.
|
||||
|
||||
## geoToS2 {#geoToS2}
|
||||
## geoToS2 {#geotos2}
|
||||
|
||||
Returns [S2](#s2index) point index corresponding to the provided coordinates `(longitude, latitude)`.
|
||||
|
||||
@ -34,7 +34,7 @@ Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT geoToS2(37.79506683, 55.71290588) as s2Index;
|
||||
SELECT geoToS2(37.79506683, 55.71290588) AS s2Index;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -45,7 +45,7 @@ Result:
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
## s2ToGeo {#s2ToGeo}
|
||||
## s2ToGeo {#s2togeo}
|
||||
|
||||
Returns geo coordinates `(longitude, latitude)` corresponding to the provided [S2](#s2index) point index.
|
||||
|
||||
@ -57,20 +57,20 @@ s2ToGeo(s2index)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `s2Index` — S2 Index. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2index` — S2 Index. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- A tuple consisting of two values: `tuple(lon,lat)`.
|
||||
|
||||
Type: `lon` - [Float64](../../../sql-reference/data-types/float.md). `lat` — [Float64](../../../sql-reference/data-types/float.md).
|
||||
Type: `lon` — [Float64](../../../sql-reference/data-types/float.md). `lat` — [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT s2ToGeo(4704772434919038107) as s2Coodrinates;
|
||||
SELECT s2ToGeo(4704772434919038107) AS s2Coodrinates;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -81,9 +81,9 @@ Result:
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## s2GetNeighbors {#s2GetNeighbors}
|
||||
## s2GetNeighbors {#s2getneighbors}
|
||||
|
||||
Returns S2 neighbor indices corresponding to the provided [S2](#s2index)). Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors.
|
||||
Returns S2 neighbor indexes corresponding to the provided [S2](#s2index). Each cell in the S2 system is a quadrilateral bounded by four geodesics. So, each cell has 4 neighbors.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -97,16 +97,16 @@ s2GetNeighbors(s2index)
|
||||
|
||||
**Returned values**
|
||||
|
||||
- An array consisting of the 4 neighbor indices: `array[s2index1, s2index3, s2index2, s2index4]`.
|
||||
- An array consisting of 4 neighbor indexes: `array[s2index1, s2index3, s2index2, s2index4]`.
|
||||
|
||||
Type: Each S2 index is [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select s2GetNeighbors(5074766849661468672) AS s2Neighbors;
|
||||
SELECT s2GetNeighbors(5074766849661468672) AS s2Neighbors;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -117,9 +117,9 @@ Result:
|
||||
└───────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## s2CellsIntersect {#s2CellsIntersect}
|
||||
## s2CellsIntersect {#s2cellsintersect}
|
||||
|
||||
Determines if the two provided [S2](#s2index)) cell indices intersect or not.
|
||||
Determines if the two provided [S2](#s2index) cells intersect or not.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -133,8 +133,8 @@ s2CellsIntersect(s2index1, s2index2)
|
||||
|
||||
**Returned values**
|
||||
|
||||
- 1 — If the S2 cell indices intersect.
|
||||
- 0 — If the S2 cell indices don't intersect.
|
||||
- 1 — If the cells intersect.
|
||||
- 0 — If the cells don't intersect.
|
||||
|
||||
Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
@ -143,7 +143,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select s2CellsIntersect(9926595209846587392, 9926594385212866560) as intersect;
|
||||
SELECT s2CellsIntersect(9926595209846587392, 9926594385212866560) AS intersect;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -154,11 +154,9 @@ Result:
|
||||
└───────────┘
|
||||
```
|
||||
|
||||
## s2CapContains {#s2CapContains}
|
||||
## s2CapContains {#s2capcontains}
|
||||
|
||||
A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.
|
||||
|
||||
Determines if a cap contains a s2 point index.
|
||||
Determines if a cap contains a S2 point. A cap represents a part of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -168,9 +166,9 @@ s2CapContains(center, degrees, point)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `center` - S2 point index corresponding to the cap. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `degrees` - Radius of the cap in degrees. [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `point` - S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `center` — S2 point index corresponding to the cap. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `degrees` — Radius of the cap in degrees. [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `point` — S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
@ -184,7 +182,7 @@ Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select s2CapContains(1157339245694594829, 1.0, 1157347770437378819) as capContains;
|
||||
SELECT s2CapContains(1157339245694594829, 1.0, 1157347770437378819) AS capContains;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -195,11 +193,9 @@ Result:
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
## s2CapUnion {#s2CapUnion}
|
||||
## s2CapUnion {#s2capunion}
|
||||
|
||||
A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.
|
||||
|
||||
Determines the smallest cap that contains the given two input caps.
|
||||
Determines the smallest cap that contains the given two input caps. A cap represents a portion of the sphere that has been cut off by a plane. It is defined by a point on a sphere and a radius in degrees.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -209,13 +205,13 @@ s2CapUnion(center1, radius1, center2, radius2)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `center1`, `center2` - S2 point indices corresponding to the two input caps. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `radius1`, `radius2` - Radii of the two input caps in degrees. [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `center1`, `center2` — S2 point indexes corresponding to the two input caps. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `radius1`, `radius2` — Radii of the two input caps in degrees. [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `center` - S2 point index corresponding the center of the smallest cap containing the two input caps. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `radius` - Radius of the smallest cap containing the two input caps. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `center` — S2 point index corresponding to the center of the smallest cap containing the two input caps. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `radius` — Radius of the smallest cap containing the two input caps. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
@ -233,11 +229,9 @@ Result:
|
||||
└────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## s2RectAdd{#s2RectAdd}
|
||||
## s2RectAdd {#s2rectadd}
|
||||
|
||||
In the S2 system, a rectangle is represented by a type of S2Region called a S2LatLngRect that represents a rectangle in latitude-longitude space.
|
||||
|
||||
Increases the size of the bounding rectangle to include the given S2 point index.
|
||||
Increases the size of the bounding rectangle to include the given S2 point. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -247,21 +241,21 @@ s2RectAdd(s2pointLow, s2pointHigh, s2Point)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `s2PointLow` - Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointHigh` - High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Point` - Target S2 point index that the bound rectangle should be grown to include. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointLow` — Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointHigh` — High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Point` — Target S2 point index that the bound rectangle should be grown to include. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `s2PointLow` - Low S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointHigh` - Hight S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/float.md).
|
||||
- `s2PointLow` — Low S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointHigh` — High S2 cell id corresponding to the grown rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) as rectAdd;
|
||||
SELECT s2RectAdd(5178914411069187297, 5177056748191934217, 5179056748191934217) AS rectAdd;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -272,11 +266,9 @@ Result:
|
||||
└───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## s2RectContains{#s2RectContains}
|
||||
## s2RectContains {#s2rectcontains}
|
||||
|
||||
In the S2 system, a rectangle is represented by a type of S2Region called a S2LatLngRect that represents a rectangle in latitude-longitude space.
|
||||
|
||||
Determines if a given rectangle contains a S2 point index.
|
||||
Determines if a given rectangle contains a S2 point. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -286,9 +278,9 @@ s2RectContains(s2PointLow, s2PointHi, s2Point)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `s2PointLow` - Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointHigh` - High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Point` - Target S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointLow` — Low S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2PointHigh` — High S2 point index corresponding to the rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Point` — Target S2 point index. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
@ -300,7 +292,7 @@ s2RectContains(s2PointLow, s2PointHi, s2Point)
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187297) AS rectContains
|
||||
SELECT s2RectContains(5179062030687166815, 5177056748191934217, 5177914411069187297) AS rectContains;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -311,11 +303,9 @@ Result:
|
||||
└──────────────┘
|
||||
```
|
||||
|
||||
## s2RectUinion{#s2RectUnion}
|
||||
## s2RectUnion {#s2rectunion}
|
||||
|
||||
In the S2 system, a rectangle is represented by a type of S2Region called a S2LatLngRect that represents a rectangle in latitude-longitude space.
|
||||
|
||||
Returns the smallest rectangle containing the union of this rectangle and the given rectangle.
|
||||
Returns the smallest rectangle containing the union of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -325,20 +315,20 @@ s2RectUnion(s2Rect1PointLow, s2Rect1PointHi, s2Rect2PointLow, s2Rect2PointHi)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `s2Rect1PointLow`, `s2Rect1PointHi` - Low and High S2 point indices corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Rect2PointLow`, `s2Rect2PointHi` - Low and High S2 point indices corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Rect1PointLow`, `s2Rect1PointHi` — Low and High S2 point indexes corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Rect2PointLow`, `s2Rect2PointHi` — Low and High S2 point indexes corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `s2UnionRect2PointLow` - Low S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2UnionRect2PointHi` - High S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2UnionRect2PointLow` — Low S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2UnionRect2PointHi` — High S2 cell id corresponding to the union rectangle. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectUnion
|
||||
SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectUnion;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -349,9 +339,9 @@ Result:
|
||||
└───────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## s2RectIntersection{#s2RectIntersection}
|
||||
## s2RectIntersection {#s2rectintersection}
|
||||
|
||||
Returns the smallest Rectangle containing the intersection of this rectangle and the given rectangle.
|
||||
Returns the smallest rectangle containing the intersection of this rectangle and the given rectangle. In the S2 system, a rectangle is represented by a type of S2Region called a `S2LatLngRect` that represents a rectangle in latitude-longitude space.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -361,20 +351,20 @@ s2RectIntersection(s2Rect1PointLow, s2Rect1PointHi, s2Rect2PointLow, s2Rect2Poin
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `s2Rect1PointLow`, `s2Rect1PointHi` - Low and High S2 point indices corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Rect2PointLow`, `s2Rect2PointHi` - Low and High S2 point indices corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Rect1PointLow`, `s2Rect1PointHi` — Low and High S2 point indexes corresponding to the first rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2Rect2PointLow`, `s2Rect2PointHi` — Low and High S2 point indexes corresponding to the second rectangle. [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- `s2UnionRect2PointLow` - Low S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2UnionRect2PointHi` - Hi S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2UnionRect2PointLow` — Low S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
- `s2UnionRect2PointHi` — High S2 cell id corresponding to the rectangle containing the intersection of the given rectangles. Type: [UInt64](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT s2RectIntersection(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectIntersection
|
||||
SELECT s2RectIntersection(5178914411069187297, 5177056748191934217, 5179062030687166815, 5177056748191934217) AS rectIntersection;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
@ -306,6 +306,77 @@ Result:
|
||||
└───────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## JSON_EXISTS(json, path) {#json-exists}
|
||||
|
||||
If the value exists in the JSON document, `1` will be returned.
|
||||
|
||||
If the value does not exist, `0` will be returned.
|
||||
|
||||
Examples:
|
||||
|
||||
``` sql
|
||||
SELECT JSON_EXISTS('{"hello":1}', '$.hello');
|
||||
SELECT JSON_EXISTS('{"hello":{"world":1}}', '$.hello.world');
|
||||
SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[*]');
|
||||
SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[0]');
|
||||
```
|
||||
|
||||
!!! note "Note"
|
||||
before version 21.11 the order of arguments was wrong, i.e. JSON_EXISTS(path, json)
|
||||
|
||||
## JSON_QUERY(json, path) {#json-query}
|
||||
|
||||
Parses a JSON and extracts a value as a JSON array or JSON object.
|
||||
|
||||
If the value does not exist, an empty string will be returned.
|
||||
|
||||
Example:
|
||||
|
||||
``` sql
|
||||
SELECT JSON_QUERY('{"hello":"world"}', '$.hello');
|
||||
SELECT JSON_QUERY('{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}', '$.array[*][0 to 2, 4]');
|
||||
SELECT JSON_QUERY('{"hello":2}', '$.hello');
|
||||
SELECT toTypeName(JSON_QUERY('{"hello":2}', '$.hello'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
["world"]
|
||||
[0, 1, 4, 0, -1, -4]
|
||||
[2]
|
||||
String
|
||||
```
|
||||
!!! note "Note"
|
||||
before version 21.11 the order of arguments was wrong, i.e. JSON_QUERY(path, json)
|
||||
|
||||
## JSON_VALUE(json, path) {#json-value}
|
||||
|
||||
Parses a JSON and extracts a value as a JSON scalar.
|
||||
|
||||
If the value does not exist, an empty string will be returned.
|
||||
|
||||
Example:
|
||||
|
||||
``` sql
|
||||
SELECT JSON_VALUE('{"hello":"world"}', '$.hello');
|
||||
SELECT JSON_VALUE('{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}', '$.array[*][0 to 2, 4]');
|
||||
SELECT JSON_VALUE('{"hello":2}', '$.hello');
|
||||
SELECT toTypeName(JSON_VALUE('{"hello":2}', '$.hello'));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
"world"
|
||||
0
|
||||
2
|
||||
String
|
||||
```
|
||||
|
||||
!!! note "Note"
|
||||
before version 21.11 the order of arguments was wrong, i.e. JSON_VALUE(path, json)
|
||||
|
||||
## toJSONString {#tojsonstring}
|
||||
|
||||
Serializes a value to its JSON representation. Various data types and nested structures are supported.
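A quick sketch of the behavior (the values are arbitrary and shown only for illustration):

``` sql
-- Serialize a Map and an Array to their JSON representations.
SELECT toJSONString(map('a', 1, 'b', 2)) AS json_map, toJSONString([1, 2, 3]) AS json_array;
```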
|
||||
|
@ -16,81 +16,3 @@ The [stochasticLinearRegression](../../sql-reference/aggregate-functions/referen
|
||||
## stochasticLogisticRegression {#stochastic-logistic-regression}
|
||||
|
||||
The [stochasticLogisticRegression](../../sql-reference/aggregate-functions/reference/stochasticlogisticregression.md#agg_functions-stochasticlogisticregression) aggregate function implements the stochastic gradient descent method for the binary classification problem. Uses `evalMLMethod` to predict on new data.
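A minimal train-and-predict sketch; the tables `train_data` and `test_data` and the columns `target`, `param1`, `param2` are hypothetical, and the learning parameters are only illustrative:

``` sql
-- Train: aggregate the training rows into a stored model state (hypothetical schema).
CREATE TABLE your_model ENGINE = Memory AS
SELECT stochasticLogisticRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2) AS state
FROM train_data;

-- Predict: apply the stored state to new data with evalMLMethod.
WITH (SELECT state FROM your_model) AS model
SELECT evalMLMethod(model, param1, param2) AS prediction
FROM test_data;
```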
|
||||
|
||||
## bayesAB {#bayesab}
|
||||
|
||||
Compares test groups (variants) and calculates for each group the probability to be the best one. The first group is used as a control group.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
bayesAB(distribution_name, higher_is_better, variant_names, x, y)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `distribution_name` — Name of the probability distribution. [String](../../sql-reference/data-types/string.md). Possible values:
|
||||
|
||||
- `beta` for [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution)
|
||||
- `gamma` for [Gamma distribution](https://en.wikipedia.org/wiki/Gamma_distribution)
|
||||
|
||||
- `higher_is_better` — Boolean flag. [Boolean](../../sql-reference/data-types/boolean.md). Possible values:
|
||||
|
||||
- `0` — lower values are considered to be better than higher
|
||||
- `1` — higher values are considered to be better than lower
|
||||
|
||||
- `variant_names` — Variant names. [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).
|
||||
|
||||
- `x` — Numbers of tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)).
|
||||
|
||||
- `y` — Numbers of successful tests for the corresponding variants. [Array](../../sql-reference/data-types/array.md)([Float64](../../sql-reference/data-types/float.md)).
|
||||
|
||||
!!! note "Note"
|
||||
All three arrays must have the same size. All `x` and `y` values must be non-negative constant numbers. `y` cannot be larger than `x`.
|
||||
|
||||
**Returned values**
|
||||
|
||||
For each variant the function calculates:
|
||||
- `beats_control` — long-term probability to out-perform the first (control) variant
|
||||
- `to_be_best` — long-term probability to out-perform all other variants
|
||||
|
||||
Type: JSON.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT bayesAB('beta', 1, ['Control', 'A', 'B'], [3000., 3000., 3000.], [100., 90., 110.]) FORMAT PrettySpace;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
{
|
||||
"data":[
|
||||
{
|
||||
"variant_name":"Control",
|
||||
"x":3000,
|
||||
"y":100,
|
||||
"beats_control":0,
|
||||
"to_be_best":0.22619
|
||||
},
|
||||
{
|
||||
"variant_name":"A",
|
||||
"x":3000,
|
||||
"y":90,
|
||||
"beats_control":0.23469,
|
||||
"to_be_best":0.04671
|
||||
},
|
||||
{
|
||||
"variant_name":"B",
|
||||
"x":3000,
|
||||
"y":110,
|
||||
"beats_control":0.7580899999999999,
|
||||
"to_be_best":0.7271
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
|
@ -2427,3 +2427,39 @@ Type: [UInt32](../../sql-reference/data-types/int-uint.md).
|
||||
**See Also**
|
||||
|
||||
- The [shardNum()](#shard-num) function example also contains a `shardCount()` function call.
|
||||
|
||||
## getOSKernelVersion {#getoskernelversion}
|
||||
|
||||
Returns a string with the current OS kernel version.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
getOSKernelVersion()
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- None.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The current OS kernel version.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT getOSKernelVersion();
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─getOSKernelVersion()────┐
|
||||
│ Linux 4.15.0-55-generic │
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
@ -213,7 +213,7 @@ SELECT splitByNonAlpha(' 1! a, b. ');
|
||||
|
||||
## arrayStringConcat(arr\[, separator\]) {#arraystringconcatarr-separator}
|
||||
|
||||
Concatenates the strings listed in the array with the separator.’separator’ is an optional parameter: a constant string, set to an empty string by default.
|
||||
Concatenates string representations of values listed in the array with the separator. `separator` is an optional parameter: a constant string, set to an empty string by default.
|
||||
Returns the string.
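A small sketch of both forms, with and without an explicit separator:

``` sql
-- Without a separator the elements are glued together; with one they are joined by it.
SELECT arrayStringConcat(['Click', 'House']) AS glued, arrayStringConcat(['a', 'b', 'c'], '-') AS joined;
```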
|
||||
|
||||
## alphaTokens(s) {#alphatokenss}
|
||||
@ -270,3 +270,70 @@ Result:
|
||||
│ [['abc','123'],['8','"hkl"']] │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## ngrams {#ngrams}
|
||||
|
||||
Splits the UTF-8 string into n-grams of `ngramsize` symbols.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
ngrams(string, ngramsize)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `string` — String. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
|
||||
- `ngramsize` — The size of an n-gram. [UInt](../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- Array with n-grams.
|
||||
|
||||
Type: [Array](../../sql-reference/data-types/array.md)([FixedString](../../sql-reference/data-types/fixedstring.md)).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT ngrams('ClickHouse', 3);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─ngrams('ClickHouse', 3)───────────────────────────┐
|
||||
│ ['Cli','lic','ick','ckH','kHo','Hou','ous','use'] │
|
||||
└───────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## tokens {#tokens}
|
||||
|
||||
Splits a string into tokens using non-alphanumeric ASCII characters as separators.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `input_string` — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The resulting array of tokens from input string.
|
||||
|
||||
Type: [Array](../data-types/array.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT tokens('test1,;\\ test2,;\\ test3,;\\ test4') AS tokens;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─tokens────────────────────────────┐
|
||||
│ ['test1','test2','test3','test4'] │
|
||||
└───────────────────────────────────┘
|
||||
```
|
@ -313,32 +313,6 @@ SELECT toValidUTF8('\x61\xF0\x80\x80\x80b');
|
||||
└───────────────────────┘
|
||||
```
|
||||
|
||||
## tokens {#tokens}
|
||||
|
||||
Split string into tokens using non-alpha numeric ASCII characters as separators.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `input_string` — Any set of bytes represented as the [String](../../sql-reference/data-types/string.md) data type object.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- The resulting array of tokens from input string.
|
||||
|
||||
Type: [Array](../data-types/array.md).
|
||||
|
||||
**Example**
|
||||
|
||||
``` sql
|
||||
SELECT tokens('test1,;\\ test2,;\\ test3,;\\ test4') AS tokens;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─tokens────────────────────────────┐
|
||||
│ ['test1','test2','test3','test4'] │
|
||||
└───────────────────────────────────┘
|
||||
```
|
||||
|
||||
## repeat {#repeat}
|
||||
|
||||
Repeats a string as many times as specified and concatenates the replicated values as a single string.
|
||||
@ -810,6 +784,150 @@ Result:
|
||||
└─────┘
|
||||
```
|
||||
|
||||
## normalizeUTF8NFC {#normalizeutf8nfc}
|
||||
|
||||
Converts a string to [NFC normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
normalizeUTF8NFC(words)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `words` — Input string that contains UTF-8 encoded text. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- String transformed to NFC normalization form.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT length('â'), normalizeUTF8NFC('â') AS nfc, length(nfc) AS nfc_len;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─length('â')─┬─nfc─┬─nfc_len─┐
|
||||
│ 2 │ â │ 2 │
|
||||
└─────────────┴─────┴─────────┘
|
||||
```
|
||||
|
||||
## normalizeUTF8NFD {#normalizeutf8nfd}
|
||||
|
||||
Converts a string to [NFD normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
normalizeUTF8NFD(words)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `words` — Input string that contains UTF-8 encoded text. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- String transformed to NFD normalization form.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT length('â'), normalizeUTF8NFD('â') AS nfd, length(nfd) AS nfd_len;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─length('â')─┬─nfd─┬─nfd_len─┐
|
||||
│ 2 │ â │ 3 │
|
||||
└─────────────┴─────┴─────────┘
|
||||
```
|
||||
|
||||
## normalizeUTF8NFKC {#normalizeutf8nfkc}
|
||||
|
||||
Converts a string to [NFKC normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
normalizeUTF8NFKC(words)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `words` — Input string that contains UTF-8 encoded text. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- String transformed to NFKC normalization form.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT length('â'), normalizeUTF8NFKC('â') AS nfkc, length(nfkc) AS nfkc_len;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─length('â')─┬─nfkc─┬─nfkc_len─┐
|
||||
│ 2 │ â │ 2 │
|
||||
└─────────────┴──────┴──────────┘
|
||||
```
|
||||
|
||||
## normalizeUTF8NFKD {#normalizeutf8nfkd}
|
||||
|
||||
Converts a string to [NFKD normalized form](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms), assuming the string contains a set of bytes that make up a UTF-8 encoded text.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
normalizeUTF8NFKD(words)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `words` — Input string that contains UTF-8 encoded text. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- String transformed to NFKD normalization form.
|
||||
|
||||
Type: [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT length('â'), normalizeUTF8NFKD('â') AS nfkd, length(nfkd) AS nfkd_len;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─length('â')─┬─nfkd─┬─nfkd_len─┐
|
||||
│ 2 │ â │ 3 │
|
||||
└─────────────┴──────┴──────────┘
|
||||
```
|
||||
|
||||
## encodeXMLComponent {#encode-xml-component}
|
||||
|
||||
Escapes characters to place string into XML text node or attribute.
|
||||
|
@ -165,3 +165,878 @@ Result:
|
||||
│ 2 │
|
||||
└─────────────────┘
|
||||
```
|
||||
|
||||
## tupleToNameValuePairs {#tupletonamevaluepairs}
|
||||
|
||||
Turns a named tuple into an array of (name, value) pairs. For a `Tuple(a T, b T, ..., c T)` returns `Array(Tuple(String, T), ...)`
|
||||
in which the `Strings` represent the named fields of the tuple and `T` are the values associated with those names. All values in the tuple should be of the same type.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
tupleToNameValuePairs(tuple)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — Named tuple. [Tuple](../../sql-reference/data-types/tuple.md) with any types of values.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- An array with (name, value) pairs.
|
||||
|
||||
Type: [Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md), ...)).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE tupletest (`col` Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;
|
||||
|
||||
INSERT INTO tupletest VALUES (tuple( 100, 2502)), (tuple(1,100));
|
||||
|
||||
SELECT tupleToNameValuePairs(col) FROM tupletest;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─tupleToNameValuePairs(col)────────────┐
|
||||
│ [('user_ID',100),('session_ID',2502)] │
|
||||
│ [('user_ID',1),('session_ID',100)] │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
It is possible to transform columns to rows using this function:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE tupletest (`col` Tuple(CPU Float64, Memory Float64, Disk Float64)) ENGINE = Memory;
|
||||
|
||||
INSERT INTO tupletest VALUES(tuple(3.3, 5.5, 6.6));
|
||||
|
||||
SELECT arrayJoin(tupleToNameValuePairs(col)) FROM tupletest;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─arrayJoin(tupleToNameValuePairs(col))─┐
|
||||
│ ('CPU',3.3) │
|
||||
│ ('Memory',5.5) │
|
||||
│ ('Disk',6.6) │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
If you pass a simple tuple to the function, ClickHouse uses the indexes of the values as their names:
|
||||
|
||||
``` sql
|
||||
SELECT tupleToNameValuePairs(tuple(3, 2, 1));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─tupleToNameValuePairs(tuple(3, 2, 1))─┐
|
||||
│ [('1',3),('2',2),('3',1)] │
|
||||
└───────────────────────────────────────┘
```
|
||||
|
||||
## tuplePlus {#tupleplus}
|
||||
|
||||
Calculates the sum of corresponding values of two tuples of the same size.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tuplePlus(tuple1, tuple2)
|
||||
```
|
||||
|
||||
Alias: `vectorSum`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with the sum.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tuplePlus((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tuplePlus((1, 2), (2, 3))─┐
|
||||
│ (3,5) │
|
||||
└───────────────────────────┘
|
||||
```
|
||||
|
||||
## tupleMinus {#tupleminus}
|
||||
|
||||
Calculates the subtraction of corresponding values of two tuples of the same size.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tupleMinus(tuple1, tuple2)
|
||||
```
|
||||
|
||||
Alias: `vectorDifference`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with the result of subtraction.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tupleMinus((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tupleMinus((1, 2), (2, 3))─┐
|
||||
│ (-1,-1) │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
## tupleMultiply {#tuplemultiply}
|
||||
|
||||
Calculates the multiplication of corresponding values of two tuples of the same size.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tupleMultiply(tuple1, tuple2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with the multiplication.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tupleMultiply((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tupleMultiply((1, 2), (2, 3))─┐
|
||||
│ (2,6) │
|
||||
└───────────────────────────────┘
|
||||
```
|
||||
|
||||
## tupleDivide {#tupledivide}
|
||||
|
||||
Calculates the division of corresponding values of two tuples of the same size. Note that division by zero will return `inf`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tupleDivide(tuple1, tuple2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with the result of division.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tupleDivide((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tupleDivide((1, 2), (2, 3))─┐
|
||||
│ (0.5,0.6666666666666666) │
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
## tupleNegate {#tuplenegate}
|
||||
|
||||
Calculates the negation of the tuple values.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tupleNegate(tuple)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with the result of negation.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tupleNegate((1, 2));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tupleNegate((1, 2))─┐
|
||||
│ (-1,-2) │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
## tupleMultiplyByNumber {#tuplemultiplybynumber}
|
||||
|
||||
Returns a tuple with all values multiplied by a number.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tupleMultiplyByNumber(tuple, number)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `number` — Multiplier. [Int/UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with multiplied values.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tupleMultiplyByNumber((1, 2), -2.1);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tupleMultiplyByNumber((1, 2), -2.1)─┐
|
||||
│ (-2.1,-4.2) │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## tupleDivideByNumber {#tupledividebynumber}
|
||||
|
||||
Returns a tuple with all values divided by a number. Note that division by zero will return `inf`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
tupleDivideByNumber(tuple, number)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `number` — Divider. [Int/UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Tuple with divided values.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT tupleDivideByNumber((1, 2), 0.5);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─tupleDivideByNumber((1, 2), 0.5)─┐
|
||||
│ (2,4) │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
## dotProduct {#dotproduct}
|
||||
|
||||
Calculates the scalar product of two tuples of the same size.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
dotProduct(tuple1, tuple2)
|
||||
```
|
||||
|
||||
Alias: `scalarProduct`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Scalar product.
|
||||
|
||||
Type: [Int/UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT dotProduct((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─dotProduct((1, 2), (2, 3))─┐
|
||||
│ 8 │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
## L1Norm {#l1norm}
|
||||
|
||||
Calculates the sum of absolute values of a tuple.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
L1Norm(tuple)
|
||||
```
|
||||
|
||||
Alias: `normL1`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- L1-norm or [taxicab geometry](https://en.wikipedia.org/wiki/Taxicab_geometry) distance.
|
||||
|
||||
Type: [UInt](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT L1Norm((1, 2));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─L1Norm((1, 2))─┐
|
||||
│ 3 │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## L2Norm {#l2norm}
|
||||
|
||||
Calculates the square root of the sum of the squares of the tuple values.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
L2Norm(tuple)
|
||||
```
|
||||
|
||||
Alias: `normL2`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- L2-norm or [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance).
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT L2Norm((1, 2));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌───L2Norm((1, 2))─┐
|
||||
│ 2.23606797749979 │
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
## LinfNorm {#linfnorm}
|
||||
|
||||
Calculates the maximum of absolute values of a tuple.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
LinfNorm(tuple)
|
||||
```
|
||||
|
||||
Alias: `normLinf`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Linf-norm or the maximum absolute value.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT LinfNorm((1, -2));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─LinfNorm((1, -2))─┐
|
||||
│ 2 │
|
||||
└───────────────────┘
|
||||
```
|
||||
|
||||
## LpNorm {#lpnorm}
|
||||
|
||||
Calculates the `p`-th root of the sum of the absolute values of the tuple elements, each raised to the power of `p`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
LpNorm(tuple, p)
|
||||
```
|
||||
|
||||
Alias: `normLp`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `p` — The power. Possible values: real number in `[1; inf)`. [UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- [Lp-norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT LpNorm((1, -2), 2);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─LpNorm((1, -2), 2)─┐
|
||||
│ 2.23606797749979 │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## L1Distance {#l1distance}
|
||||
|
||||
Calculates the distance between two points (the values of the tuples are the coordinates) in `L1` space (1-norm ([taxicab geometry](https://en.wikipedia.org/wiki/Taxicab_geometry) distance)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
L1Distance(tuple1, tuple2)
|
||||
```
|
||||
|
||||
Alias: `distanceL1`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- 1-norm distance.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT L1Distance((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─L1Distance((1, 2), (2, 3))─┐
|
||||
│ 2 │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
## L2Distance {#l2distance}
|
||||
|
||||
Calculates the distance between two points (the values of the tuples are the coordinates) in Euclidean space ([Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
L2Distance(tuple1, tuple2)
|
||||
```
|
||||
|
||||
Alias: `distanceL2`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- 2-norm distance.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT L2Distance((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─L2Distance((1, 2), (2, 3))─┐
|
||||
│ 1.4142135623730951 │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
## LinfDistance {#linfdistance}
|
||||
|
||||
Calculates the distance between two points (the values of the tuples are the coordinates) in `L_{inf}` space ([maximum norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#Maximum_norm_(special_case_of:_infinity_norm,_uniform_norm,_or_supremum_norm))).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
LinfDistance(tuple1, tuple2)
|
||||
```
|
||||
|
||||
Alias: `distanceLinf`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Infinity-norm distance.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT LinfDistance((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─LinfDistance((1, 2), (2, 3))─┐
|
||||
│ 1 │
|
||||
└──────────────────────────────┘
|
||||
```
|
||||
|
||||
## LpDistance {#lpdistance}
|
||||
|
||||
Calculates the distance between two points (the values of the tuples are the coordinates) in `Lp` space ([p-norm distance](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
LpDistance(tuple1, tuple2, p)
|
||||
```
|
||||
|
||||
Alias: `distanceLp`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `p` — The power. Possible values: real number from `[1; inf)`. [UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- p-norm distance.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT LpDistance((1, 2), (2, 3), 3);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─LpDistance((1, 2), (2, 3), 3)─┐
|
||||
│ 1.2599210498948732 │
|
||||
└───────────────────────────────┘
|
||||
```
|
||||
|
||||
## L1Normalize {#l1normalize}
|
||||
|
||||
Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `L1` space ([taxicab geometry](https://en.wikipedia.org/wiki/Taxicab_geometry)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
L1Normalize(tuple)
|
||||
```
|
||||
|
||||
Alias: `normalizeL1`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Unit vector.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md) of [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT L1Normalize((1, 2));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─L1Normalize((1, 2))─────────────────────┐
|
||||
│ (0.3333333333333333,0.6666666666666666) │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## L2Normalize {#l2normalize}
|
||||
|
||||
Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in Euclidean space (using [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
L2Normalize(tuple)
|
||||
```
|
||||
|
||||
Alias: `normalizeL2`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Unit vector.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md) of [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT L2Normalize((3, 4));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─L2Normalize((3, 4))─┐
|
||||
│ (0.6,0.8) │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
## LinfNormalize {#linfnormalize}
|
||||
|
||||
Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `L_{inf}` space (using [maximum norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#Maximum_norm_(special_case_of:_infinity_norm,_uniform_norm,_or_supremum_norm))).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
LinfNormalize(tuple)
|
||||
```
|
||||
|
||||
Alias: `normalizeLinf`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Unit vector.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md) of [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT LinfNormalize((3, 4));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─LinfNormalize((3, 4))─┐
|
||||
│ (0.75,1) │
|
||||
└───────────────────────┘
|
||||
```
|
||||
|
||||
## LpNormalize {#lpnormalize}
|
||||
|
||||
Calculates the unit vector of a given vector (the values of the tuple are the coordinates) in `Lp` space (using [p-norm](https://en.wikipedia.org/wiki/Norm_(mathematics)#p-norm)).
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
LpNormalize(tuple, p)
|
||||
```
|
||||
|
||||
Alias: `normalizeLp`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple` — [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `p` — The power. Possible values: real number in `[1; inf)`. [UInt](../../sql-reference/data-types/int-uint.md) or [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Unit vector.
|
||||
|
||||
Type: [Tuple](../../sql-reference/data-types/tuple.md) of [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT LpNormalize((3, 4), 5);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─LpNormalize((3, 4), 5)──────────────────┐
|
||||
│ (0.7187302630182624,0.9583070173576831) │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## cosineDistance {#cosinedistance}
|
||||
|
||||
Calculates the cosine distance between two vectors (the values of the tuples are the coordinates). The smaller the returned value is, the more similar the vectors are.
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
cosineDistance(tuple1, tuple2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Cosine of the angle between two vectors subtracted from one.
|
||||
|
||||
Type: [Float](../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
SELECT cosineDistance((1, 2), (2, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─cosineDistance((1, 2), (2, 3))─┐
|
||||
│ 0.007722123286332261 │
|
||||
└────────────────────────────────┘
|
||||
```
|
||||
|
@ -22,7 +22,7 @@ map(key1, value1[, key2, value2, ...])
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Data structure as `key:value` pairs.
|
||||
- Data structure as `key:value` pairs.
|
||||
|
||||
Type: [Map(key, value)](../../sql-reference/data-types/map.md).
|
||||
|
||||
@ -165,9 +165,6 @@ Result:
|
||||
## mapPopulateSeries {#function-mappopulateseries}
|
||||
|
||||
Fills missing keys in the maps (key and value array pair), where keys are integers. Also, it supports specifying the max key, which is used to extend the keys array.
|
||||
Arguments are [maps](../../sql-reference/data-types/map.md) or two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the first array represent keys, and the second array contains values for the each key.
|
||||
|
||||
For array arguments the number of elements in `keys` and `values` must be the same for each row.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -178,12 +175,17 @@ mapPopulateSeries(map[, max])
|
||||
|
||||
Generates a map (a tuple with two arrays or a value of `Map` type, depending on the arguments), where keys are a series of numbers, from the minimum to the maximum key (or the `max` argument if it is specified) taken from the map with a step size of one, and the corresponding values. If the value is not specified for a key, the default value is used in the resulting map. For repeated keys, only the first value (in order of appearance) gets associated with the key.
|
||||
|
||||
For array arguments the number of elements in `keys` and `values` must be the same for each row.
|
||||
|
||||
**Arguments**
|
||||
|
||||
Arguments are [maps](../../sql-reference/data-types/map.md) or two [arrays](../../sql-reference/data-types/array.md#data-type-array), where the first array represents keys, and the second array contains values for each key.
|
||||
|
||||
Mapped arrays:
|
||||
|
||||
- `keys` — Array of keys. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#uint-ranges)).
|
||||
- `values` — Array of values. [Array](../../sql-reference/data-types/array.md#data-type-array)([Int](../../sql-reference/data-types/int-uint.md#uint-ranges)).
|
||||
- `max` — Maximum key value. Optional. [Int8, Int16, Int32, Int64, Int128, Int256](../../sql-reference/data-types/int-uint.md#int-ranges).
|
||||
|
||||
or
|
||||
|
||||
@ -191,14 +193,14 @@ or
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Depending on the arguments returns a [map](../../sql-reference/data-types/map.md) or a [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array): keys in sorted order, and values the corresponding keys.
|
||||
- Depending on the arguments, returns a [map](../../sql-reference/data-types/map.md) or a [tuple](../../sql-reference/data-types/tuple.md#tuplet1-t2) of two [arrays](../../sql-reference/data-types/array.md#data-type-array): keys in sorted order, and the values corresponding to those keys.
|
||||
|
||||
**Example**
|
||||
|
||||
Query with mapped arrays:
|
||||
|
||||
```sql
|
||||
select mapPopulateSeries([1,2,4], [11,22,44], 5) as res, toTypeName(res) as type;
|
||||
SELECT mapPopulateSeries([1,2,4], [11,22,44], 5) AS res, toTypeName(res) AS type;
|
||||
```
|
||||
|
||||
Result:
|
||||
@ -352,4 +354,81 @@ Result:
|
||||
└──────────────────┘
|
||||
```
|
||||
|
||||
## mapContainsKeyLike {#mapContainsKeyLike}
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
mapContainsKeyLike(map, pattern)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `map` — Map. [Map](../../sql-reference/data-types/map.md).
|
||||
- `pattern` — String pattern to match.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- `1` if `map` contains a key matching the specified pattern, `0` if not.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE test (a Map(String,String)) ENGINE = Memory;
|
||||
|
||||
INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});
|
||||
|
||||
SELECT mapContainsKeyLike(a, 'a%') FROM test;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─mapContainsKeyLike(a, 'a%')─┐
|
||||
│ 1 │
|
||||
│ 0 │
|
||||
└─────────────────────────────┘
|
||||
```
|
||||
|
||||
## mapExtractKeyLike {#mapExtractKeyLike}
|
||||
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
mapExtractKeyLike(map, pattern)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `map` — Map. [Map](../../sql-reference/data-types/map.md).
|
||||
- `pattern` — String pattern to match.
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A map containing the elements whose keys match the specified pattern. If no elements match the pattern, an empty map is returned.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
```sql
|
||||
CREATE TABLE test (a Map(String,String)) ENGINE = Memory;
|
||||
|
||||
INSERT INTO test VALUES ({'abc':'abc','def':'def'}), ({'hij':'hij','klm':'klm'});
|
||||
|
||||
SELECT mapExtractKeyLike(a, 'a%') FROM test;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
┌─mapExtractKeyLike(a, 'a%')─┐
|
||||
│ {'abc':'abc'} │
|
||||
│ {} │
|
||||
└────────────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/sql-reference/functions/tuple-map-functions/) <!--hide-->
|
||||
|
@ -17,20 +17,30 @@ ClickHouse transforms operators to their corresponding functions at the query pa
|
||||
|
||||
`-a` – The `negate (a)` function.
|
||||
|
||||
For tuple negation: [tupleNegate](../../sql-reference/functions/tuple-functions.md#tuplenegate).
|
||||
|
||||
## Multiplication and Division Operators {#multiplication-and-division-operators}
|
||||
|
||||
`a * b` – The `multiply (a, b)` function.
|
||||
|
||||
For multiplying a tuple by a number: [tupleMultiplyByNumber](../../sql-reference/functions/tuple-functions.md#tuplemultiplybynumber), for the scalar product: [dotProduct](../../sql-reference/functions/tuple-functions.md#dotproduct).
|
||||
|
||||
`a / b` – The `divide(a, b)` function.
|
||||
|
||||
For dividing a tuple by a number: [tupleDivideByNumber](../../sql-reference/functions/tuple-functions.md#tupledividebynumber).
|
||||
|
||||
`a % b` – The `modulo(a, b)` function.
|
||||
|
||||
## Addition and Subtraction Operators {#addition-and-subtraction-operators}
|
||||
|
||||
`a + b` – The `plus(a, b)` function.
|
||||
|
||||
For tuple addition: [tuplePlus](../../sql-reference/functions/tuple-functions.md#tupleplus).
|
||||
|
||||
`a - b` – The `minus(a, b)` function.
|
||||
|
||||
For tuple subtraction: [tupleMinus](../../sql-reference/functions/tuple-functions.md#tupleminus).
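A short sketch of these operators applied to tuples, assuming the operator-to-function mapping described above:

``` sql
-- Maps to tuplePlus, tupleMinus, tupleMultiplyByNumber, tupleDivideByNumber and tupleNegate.
SELECT (1, 2) + (3, 4) AS plus, (1, 2) - (3, 4) AS minus, (1, 2) * 3 AS scaled, (1, 2) / 2 AS divided, -(1, 2) AS negated;
```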
|
||||
|
||||
## Comparison Operators {#comparison-operators}
|
||||
|
||||
`a = b` – The `equals(a, b)` function.
|
||||
@ -71,6 +81,53 @@ ClickHouse transforms operators to their corresponding functions at the query pa
|
||||
|
||||
`a GLOBAL NOT IN ...` – The `globalNotIn(a, b)` function.
|
||||
|
||||
`a = ANY (subquery)` – The `in(a, subquery)` function.
|
||||
|
||||
`a != ANY (subquery)` – The same as `a NOT IN (SELECT singleValueOrNull(*) FROM subquery)`.
|
||||
|
||||
`a = ALL (subquery)` – The same as `a IN (SELECT singleValueOrNull(*) FROM subquery)`.
|
||||
|
||||
`a != ALL (subquery)` – The `notIn(a, subquery)` function.
|
||||
|
||||
|
||||
**Examples**
|
||||
|
||||
Query with ALL:
|
||||
|
||||
``` sql
|
||||
SELECT number AS a FROM numbers(10) WHERE a > ALL (SELECT number FROM numbers(3, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─a─┐
|
||||
│ 6 │
|
||||
│ 7 │
|
||||
│ 8 │
|
||||
│ 9 │
|
||||
└───┘
|
||||
```
|
||||
|
||||
Query with ANY:
|
||||
|
||||
``` sql
|
||||
SELECT number AS a FROM numbers(10) WHERE a > ANY (SELECT number FROM numbers(3, 3));
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─a─┐
|
||||
│ 4 │
|
||||
│ 5 │
|
||||
│ 6 │
|
||||
│ 7 │
|
||||
│ 8 │
|
||||
│ 9 │
|
||||
└───┘
|
||||
```
|
||||
|
||||
## Operators for Working with Dates and Times {#operators-datetime}
|
||||
|
||||
### EXTRACT {#operator-extract}
|
||||
|
@ -7,7 +7,7 @@ toc_title: PROJECTION
|
||||
|
||||
The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
|
||||
|
||||
- `ALTER TABLE [db].name ADD PROJECTION name AS SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY]` - Adds projection description to tables metadata.
|
||||
- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
|
||||
|
||||
- `ALTER TABLE [db].name DROP PROJECTION name` - Removes projection description from tables metadata and deletes projection files from disk.
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More