Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Merge branch 'master' into grace_hash_join

commit 6d4b6c452a

10  .clang-tidy
@@ -1,6 +1,14 @@
# To run clang-tidy from CMake, build ClickHouse with -DENABLE_CLANG_TIDY=1. To show all warnings, it is
# recommended to pass "-k0" to Ninja.

# Enable all checks + disable selected checks. Feel free to remove disabled checks from below list if
# a) the new check is not controversial (this includes many checks in readability-* and google-*) or
# b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).

# TODO Let clang-tidy check headers in further directories
# --> HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$'
HeaderFilterRegex: '^.*/(base)/.*(h|hpp)$'

Checks: '*,
-abseil-*,

@@ -54,8 +62,6 @@ Checks: '*,
-cppcoreguidelines-slicing,
-cppcoreguidelines-special-member-functions,

-concurrency-mt-unsafe,

-darwin-*,

-fuchsia-*,
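The new comment block above documents how these checks are meant to be invoked. As a rough, hedged sketch (the build directory name and any cmake options beyond ENABLE_CLANG_TIDY are assumptions, not part of this diff), a local clang-tidy build could look like:

```bash
# Configure a dedicated build with clang-tidy enabled, as the .clang-tidy comment describes.
mkdir -p build-tidy && cd build-tidy
cmake .. -G Ninja -DENABLE_CLANG_TIDY=1
# "-k0" keeps Ninja going after the first failure so that all warnings are reported.
ninja -k0
```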
2  .github/ISSUE_TEMPLATE/85_bug-report.md (vendored)

@@ -1,6 +1,6 @@
---
name: Bug report
about: Wrong behaviour (visible to users) in official ClickHouse release.
about: Wrong behavior (visible to users) in the official ClickHouse release.
title: ''
labels: 'potential bug'
assignees: ''
45  .github/workflows/backport_branches.yml (vendored)

@@ -143,6 +143,8 @@ jobs:
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
@@ -188,6 +190,8 @@ jobs:
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0 # For a proper version and performance artifacts
- name: Build
run: |
git -C "$GITHUB_WORKSPACE" submodule sync --recursive
@@ -346,6 +350,36 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
DockerServerImages:
needs:
- BuilderDebRelease
- BuilderDebAarch64
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
with:
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
- name: Check docker clickhouse/clickhouse-server building
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 docker_server.py --release-type head --no-push
python3 docker_server.py --release-type head --no-push --no-ubuntu \
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
- name: Cleanup
if: always()
run: |
# shellcheck disable=SC2046
docker kill $(docker ps -q) ||:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
BuilderReport:
@@ -360,7 +394,7 @@ jobs:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=ClickHouse build check (actions)
CHECK_NAME=ClickHouse build check
REPORTS_PATH=${{runner.temp}}/reports_dir
TEMP_PATH=${{runner.temp}}/report_check
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
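Throughout these workflow diffs, the check renames (dropping the ", actions" suffix and switching to sanitizer short names such as asan, tsan, msan, ubsan) are applied inside the same heredoc-append step shown above. As a minimal, hedged sketch of what that pattern does outside of CI (the variable values are illustrative, and a temp file stands in for the Actions-provided GITHUB_ENV):

```bash
# Stand-in for the file that GitHub Actions exposes as $GITHUB_ENV.
GITHUB_ENV=$(mktemp)

# Append KEY=VALUE lines; in a real workflow each line becomes an
# environment variable for all subsequent steps of the job.
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=ClickHouse build check
TEMP_PATH=/tmp/report_check
EOF

cat "$GITHUB_ENV"
```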
@@ -403,7 +437,7 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (address, actions)
CHECK_NAME=Stateless tests (asan)
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
KILL_TIMEOUT=10800
EOF
@@ -443,7 +477,7 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateful_debug
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateful tests (debug, actions)
CHECK_NAME=Stateful tests (debug)
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
KILL_TIMEOUT=3600
EOF
@@ -487,7 +521,7 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stress_thread
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stress test (thread, actions)
CHECK_NAME=Stress test (tsan)
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
EOF
- name: Download json reports
@@ -526,7 +560,7 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/integration_tests_release
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Integration tests (release, actions)
CHECK_NAME=Integration tests (release)
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
EOF
- name: Download json reports
@@ -556,6 +590,7 @@ jobs:
FinishCheck:
needs:
- DockerHubPush
- DockerServerImages
- BuilderReport
- FunctionalStatelessTestAsan
- FunctionalStatefulTestDebug

@@ -8,7 +8,7 @@ concurrency:
group: cherry-pick
on: # yamllint disable-line rule:truthy
schedule:
- cron: '0 */3 * * *'
- cron: '0 * * * *'
workflow_dispatch:

jobs:
3  .github/workflows/docs_check.yml (vendored)

@@ -102,6 +102,9 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{ runner.temp }}/style_check
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
EOF
- name: Download changed images
# even if artifact does not exist, e.g. on `do not test` label or failed Docker job
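The added lines above use the multi-line $GITHUB_ENV syntax: everything between the `NAME<<DELIMITER` line and the closing delimiter becomes the value of NAME. A small, hedged sketch of the same mechanism (a temp file stands in for GITHUB_ENV and the key material is a placeholder):

```bash
# Demonstrates the NAME<<DELIMITER form for multi-line values in $GITHUB_ENV.
GITHUB_ENV=$(mktemp)   # stand-in for the Actions-provided file
cat >> "$GITHUB_ENV" << 'EOF'
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
(placeholder: secret key material would go here)
RCSK
EOF
cat "$GITHUB_ENV"
```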
1  .github/workflows/docs_release.yml (vendored)

@@ -13,7 +13,6 @@ concurrency:
- master
paths:
- '.github/**'
- 'benchmark/**'
- 'docker/docs/release/**'
- 'docs/**'
- 'utils/list-versions/version_date.tsv'
254  .github/workflows/master.yml (vendored)
@ -108,7 +108,7 @@ jobs:
|
||||
- name: Style Check
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 style_check.py
|
||||
python3 style_check.py --no-push
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
@ -151,8 +151,8 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
SplitBuildSmokeTest:
|
||||
needs: [BuilderDebSplitted]
|
||||
SharedBuildSmokeTest:
|
||||
needs: [BuilderDebShared]
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -171,7 +171,7 @@ jobs:
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Split build check
|
||||
- name: Shared build check
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
@ -598,7 +598,7 @@ jobs:
|
||||
##########################################################################################
|
||||
##################################### SPECIAL BUILDS #####################################
|
||||
##########################################################################################
|
||||
BuilderDebSplitted:
|
||||
BuilderDebShared:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
@ -609,7 +609,7 @@ jobs:
|
||||
IMAGES_PATH=${{runner.temp}}/images_path
|
||||
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
|
||||
CACHES_PATH=${{runner.temp}}/../ccaches
|
||||
BUILD_NAME=binary_splitted
|
||||
BUILD_NAME=binary_shared
|
||||
EOF
|
||||
- name: Download changed images
|
||||
uses: actions/download-artifact@v2
|
||||
@ -643,7 +643,7 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
BuilderBinTidy:
|
||||
BuilderBinClangTidy:
|
||||
needs: [DockerHubPush]
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
@ -971,7 +971,7 @@ jobs:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
CHECK_NAME=ClickHouse build check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
@ -1011,8 +1011,8 @@ jobs:
|
||||
- BuilderBinFreeBSD
|
||||
# - BuilderBinGCC
|
||||
- BuilderBinPPC64
|
||||
- BuilderBinTidy
|
||||
- BuilderDebSplitted
|
||||
- BuilderBinClangTidy
|
||||
- BuilderDebShared
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Set envs
|
||||
@ -1020,7 +1020,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=ClickHouse special build check (actions)
|
||||
CHECK_NAME=ClickHouse special build check
|
||||
NEEDS_DATA_PATH=${{runner.temp}}/needs.json
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -1061,7 +1061,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (release, actions)
|
||||
CHECK_NAME=Stateless tests (release)
|
||||
REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -1098,7 +1098,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_release_database_ordinary
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (release, DatabaseOrdinary, actions)
|
||||
CHECK_NAME=Stateless tests (release, DatabaseOrdinary)
|
||||
REPO_COPY=${{runner.temp}}/stateless_release_database_ordinary/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -1126,6 +1126,84 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
FunctionalStatelessTestReleaseDatabaseReplicated0:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_database_replicated
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (release, DatabaseReplicated)
|
||||
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=2
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Functional test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
# shellcheck disable=SC2046
|
||||
docker kill $(docker ps -q) ||:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
FunctionalStatelessTestReleaseDatabaseReplicated1:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_database_replicated
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (release, DatabaseReplicated)
|
||||
REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=2
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Functional test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
# shellcheck disable=SC2046
|
||||
docker kill $(docker ps -q) ||:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
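The two DatabaseReplicated jobs above differ only in RUN_BY_HASH_NUM (0 and 1) with RUN_BY_HASH_TOTAL=2, i.e. the same check name is sharded across parallel runners. A rough, hedged sketch of the idea (the real selection logic lives in the Python CI scripts, not in this shell form; tests.txt is a placeholder input list):

```bash
# Keep only the tests whose name hashes into this runner's bucket.
RUN_BY_HASH_NUM=${RUN_BY_HASH_NUM:-0}
RUN_BY_HASH_TOTAL=${RUN_BY_HASH_TOTAL:-2}
while read -r test_name; do
    bucket=$(( $(printf '%s' "$test_name" | cksum | cut -d' ' -f1) % RUN_BY_HASH_TOTAL ))
    if [ "$bucket" -eq "$RUN_BY_HASH_NUM" ]; then
        echo "$test_name"   # this shard runs the test
    fi
done < tests.txt
```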
FunctionalStatelessTestReleaseS3:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
@ -1135,7 +1213,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_s3_storage
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (release, s3 storage, actions)
|
||||
CHECK_NAME=Stateless tests (release, s3 storage)
|
||||
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -1172,7 +1250,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (aarch64, actions)
|
||||
CHECK_NAME=Stateless tests (aarch64)
|
||||
REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -1209,7 +1287,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (address, actions)
|
||||
CHECK_NAME=Stateless tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -1248,7 +1326,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (address, actions)
|
||||
CHECK_NAME=Stateless tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -1287,7 +1365,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (thread, actions)
|
||||
CHECK_NAME=Stateless tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -1326,7 +1404,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (thread, actions)
|
||||
CHECK_NAME=Stateless tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -1365,7 +1443,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (thread, actions)
|
||||
CHECK_NAME=Stateless tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=2
|
||||
@ -1404,7 +1482,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_ubsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (ubsan, actions)
|
||||
CHECK_NAME=Stateless tests (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -1441,7 +1519,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (memory, actions)
|
||||
CHECK_NAME=Stateless tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -1480,7 +1558,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (memory, actions)
|
||||
CHECK_NAME=Stateless tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -1519,7 +1597,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (memory, actions)
|
||||
CHECK_NAME=Stateless tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=2
|
||||
@ -1558,7 +1636,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (debug, actions)
|
||||
CHECK_NAME=Stateless tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -1597,7 +1675,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (debug, actions)
|
||||
CHECK_NAME=Stateless tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -1636,7 +1714,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (debug, actions)
|
||||
CHECK_NAME=Stateless tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=2
|
||||
@ -1678,7 +1756,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (release, actions)
|
||||
CHECK_NAME=Stateful tests (release)
|
||||
REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1706,43 +1784,6 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
FunctionalStatefulTestReleaseDatabaseOrdinary:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
steps:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_release_database_ordinary
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (release, DatabaseOrdinary, actions)
|
||||
REPO_COPY=${{runner.temp}}/stateful_release_database_ordinary/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{ env.REPORTS_PATH }}
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Functional test
|
||||
run: |
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
mkdir -p "$TEMP_PATH"
|
||||
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
|
||||
cd "$REPO_COPY/tests/ci"
|
||||
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
# shellcheck disable=SC2046
|
||||
docker kill $(docker ps -q) ||:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
FunctionalStatefulTestAarch64:
|
||||
needs: [BuilderDebAarch64]
|
||||
runs-on: [self-hosted, func-tester-aarch64]
|
||||
@ -1752,7 +1793,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (aarch64, actions)
|
||||
CHECK_NAME=Stateful tests (aarch64)
|
||||
REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1789,7 +1830,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (address, actions)
|
||||
CHECK_NAME=Stateful tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1826,7 +1867,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (thread, actions)
|
||||
CHECK_NAME=Stateful tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1863,7 +1904,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_msan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (memory, actions)
|
||||
CHECK_NAME=Stateful tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1900,7 +1941,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_ubsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (ubsan, actions)
|
||||
CHECK_NAME=Stateful tests (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1937,7 +1978,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (debug, actions)
|
||||
CHECK_NAME=Stateful tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1977,7 +2018,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_thread
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (address, actions)
|
||||
CHECK_NAME=Stress test (asan)
|
||||
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2017,7 +2058,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_thread
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (thread, actions)
|
||||
CHECK_NAME=Stress test (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2053,7 +2094,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (memory, actions)
|
||||
CHECK_NAME=Stress test (msan)
|
||||
REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2089,7 +2130,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_undefined
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (undefined, actions)
|
||||
CHECK_NAME=Stress test (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2125,7 +2166,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (debug, actions)
|
||||
CHECK_NAME=Stress test (debug)
|
||||
REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2164,7 +2205,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (asan, actions)
|
||||
CHECK_NAME=Integration tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=3
|
||||
@ -2202,7 +2243,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (asan, actions)
|
||||
CHECK_NAME=Integration tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=3
|
||||
@ -2240,7 +2281,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (asan, actions)
|
||||
CHECK_NAME=Integration tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
RUN_BY_HASH_NUM=2
|
||||
RUN_BY_HASH_TOTAL=3
|
||||
@ -2278,7 +2319,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -2316,7 +2357,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -2354,7 +2395,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=2
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -2392,7 +2433,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=3
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -2430,7 +2471,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (release, actions)
|
||||
CHECK_NAME=Integration tests (release)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=2
|
||||
@ -2468,7 +2509,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (release, actions)
|
||||
CHECK_NAME=Integration tests (release)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=2
|
||||
@ -2509,7 +2550,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=AST fuzzer (ASan, actions)
|
||||
CHECK_NAME=AST fuzzer (asan)
|
||||
REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2545,7 +2586,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=AST fuzzer (TSan, actions)
|
||||
CHECK_NAME=AST fuzzer (tsan)
|
||||
REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2581,7 +2622,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=AST fuzzer (UBSan, actions)
|
||||
CHECK_NAME=AST fuzzer (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2617,7 +2658,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=AST fuzzer (MSan, actions)
|
||||
CHECK_NAME=AST fuzzer (msan)
|
||||
REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2653,7 +2694,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/ast_fuzzer_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=AST fuzzer (debug, actions)
|
||||
CHECK_NAME=AST fuzzer (debug)
|
||||
REPO_COPY=${{runner.temp}}/ast_fuzzer_debug/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2692,7 +2733,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (asan, actions)
|
||||
CHECK_NAME=Unit tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2728,7 +2769,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (release-clang, actions)
|
||||
CHECK_NAME=Unit tests (release-clang)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2764,7 +2805,7 @@ jobs:
|
||||
# cat >> "$GITHUB_ENV" << 'EOF'
|
||||
# TEMP_PATH=${{runner.temp}}/unit_tests_asan
|
||||
# REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
# CHECK_NAME=Unit tests (release-gcc, actions)
|
||||
# CHECK_NAME=Unit tests (release-gcc)
|
||||
# REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse
|
||||
# EOF
|
||||
# - name: Download json reports
|
||||
@ -2800,7 +2841,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (tsan, actions)
|
||||
CHECK_NAME=Unit tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_tsan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2836,7 +2877,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_msan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (msan, actions)
|
||||
CHECK_NAME=Unit tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_msan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2872,7 +2913,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/unit_tests_ubsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Unit tests (ubsan, actions)
|
||||
CHECK_NAME=Unit tests (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/unit_tests_ubsan/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -2902,7 +2943,7 @@ jobs:
|
||||
#############################################################################################
|
||||
#################################### PERFORMANCE TESTS ######################################
|
||||
#############################################################################################
|
||||
PerformanceComparison0:
|
||||
PerformanceComparisonX86-0:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
@ -2940,7 +2981,7 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison1:
|
||||
PerformanceComparisonX86-1:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
@ -2978,7 +3019,7 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison2:
|
||||
PerformanceComparisonX86-2:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
@ -3016,7 +3057,7 @@ jobs:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
PerformanceComparison3:
|
||||
PerformanceComparisonX86-3:
|
||||
needs: [BuilderDebRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
@ -3063,6 +3104,8 @@ jobs:
|
||||
- FunctionalStatelessTestDebug2
|
||||
- FunctionalStatelessTestRelease
|
||||
- FunctionalStatelessTestReleaseDatabaseOrdinary
|
||||
- FunctionalStatelessTestReleaseDatabaseReplicated0
|
||||
- FunctionalStatelessTestReleaseDatabaseReplicated1
|
||||
- FunctionalStatelessTestAarch64
|
||||
- FunctionalStatelessTestAsan0
|
||||
- FunctionalStatelessTestAsan1
|
||||
@ -3075,7 +3118,6 @@ jobs:
|
||||
- FunctionalStatelessTestUBsan
|
||||
- FunctionalStatefulTestDebug
|
||||
- FunctionalStatefulTestRelease
|
||||
- FunctionalStatefulTestReleaseDatabaseOrdinary
|
||||
- FunctionalStatelessTestReleaseS3
|
||||
- FunctionalStatefulTestAarch64
|
||||
- FunctionalStatefulTestAsan
|
||||
@ -3096,10 +3138,10 @@ jobs:
|
||||
- IntegrationTestsTsan1
|
||||
- IntegrationTestsTsan2
|
||||
- IntegrationTestsTsan3
|
||||
- PerformanceComparison0
|
||||
- PerformanceComparison1
|
||||
- PerformanceComparison2
|
||||
- PerformanceComparison3
|
||||
- PerformanceComparisonX86-0
|
||||
- PerformanceComparisonX86-1
|
||||
- PerformanceComparisonX86-2
|
||||
- PerformanceComparisonX86-3
|
||||
- CompatibilityCheck
|
||||
- ASTFuzzerTestDebug
|
||||
- ASTFuzzerTestAsan
|
||||
@ -3111,7 +3153,7 @@ jobs:
|
||||
- UnitTestsMsan
|
||||
- UnitTestsUBsan
|
||||
- UnitTestsReleaseClang
|
||||
- SplitBuildSmokeTest
|
||||
- SharedBuildSmokeTest
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
|
509  .github/workflows/pull_request.yml (vendored)

File diff suppressed because it is too large.
9  .github/workflows/release.yml (vendored)

@@ -1,4 +1,4 @@
name: ReleaseWorkflow
name: PublishedReleaseCI
# - Gets artifacts from S3
# - Sends it to JFROG Artifactory
# - Adds them to the release assets
@@ -15,19 +15,22 @@ jobs:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
JFROG_API_KEY=${{ secrets.JFROG_KEY_API_PACKAGES }}
JFROG_API_KEY=${{ secrets.JFROG_ARTIFACTORY_API_KEY }}
TEMP_PATH=${{runner.temp}}/release_packages
REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
EOF
- name: Check out repository code
uses: actions/checkout@v2
with:
# Always use the most recent script version
ref: master
- name: Download packages and push to Artifactory
run: |
rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY"
python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
--commit '${{ github.sha }}' --all
--commit '${{ github.sha }}' --artifactory-url "${{ secrets.JFROG_ARTIFACTORY_URL }}" --all
- name: Upload packages to release assets
uses: svenstaro/upload-release-action@v2
with:
105  .github/workflows/release_branches.yml (vendored)

@@ -1,4 +1,4 @@
name: ReleaseCI
name: ReleaseBranchCI

env:
# Force the stdout and stderr streams to be unbuffered
@ -427,6 +427,36 @@ jobs:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
|
||||
############################################################################################
|
||||
##################################### Docker images #######################################
|
||||
############################################################################################
|
||||
DockerServerImages:
|
||||
needs:
|
||||
- BuilderDebRelease
|
||||
- BuilderDebAarch64
|
||||
runs-on: [self-hosted, style-checker]
|
||||
steps:
|
||||
- name: Clear repository
|
||||
run: |
|
||||
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
|
||||
- name: Check docker clickhouse/clickhouse-server building
|
||||
run: |
|
||||
cd "$GITHUB_WORKSPACE/tests/ci"
|
||||
python3 docker_server.py --release-type head --no-push
|
||||
python3 docker_server.py --release-type head --no-push --no-ubuntu \
|
||||
--image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
# shellcheck disable=SC2046
|
||||
docker kill $(docker ps -q) ||:
|
||||
# shellcheck disable=SC2046
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr "$TEMP_PATH"
|
||||
############################################################################################
|
||||
##################################### BUILD REPORTER #######################################
|
||||
############################################################################################
|
||||
BuilderReport:
|
||||
@ -443,7 +473,7 @@ jobs:
|
||||
- name: Set envs
|
||||
run: |
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
CHECK_NAME=ClickHouse build check (actions)
|
||||
CHECK_NAME=ClickHouse build check
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
TEMP_PATH=${{runner.temp}}/report_check
|
||||
@ -487,7 +517,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (release, actions)
|
||||
CHECK_NAME=Stateless tests (release)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -524,7 +554,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (aarch64, actions)
|
||||
CHECK_NAME=Stateless tests (aarch64)
|
||||
REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -561,7 +591,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (address, actions)
|
||||
CHECK_NAME=Stateless tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -600,7 +630,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (address, actions)
|
||||
CHECK_NAME=Stateless tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -639,7 +669,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (thread, actions)
|
||||
CHECK_NAME=Stateless tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -678,7 +708,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (thread, actions)
|
||||
CHECK_NAME=Stateless tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -717,7 +747,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (thread, actions)
|
||||
CHECK_NAME=Stateless tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=2
|
||||
@ -756,7 +786,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_ubsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (ubsan, actions)
|
||||
CHECK_NAME=Stateless tests (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
EOF
|
||||
@ -793,7 +823,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (memory, actions)
|
||||
CHECK_NAME=Stateless tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -832,7 +862,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (memory, actions)
|
||||
CHECK_NAME=Stateless tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -871,7 +901,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (memory, actions)
|
||||
CHECK_NAME=Stateless tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=2
|
||||
@ -910,7 +940,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (debug, actions)
|
||||
CHECK_NAME=Stateless tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=0
|
||||
@ -949,7 +979,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (debug, actions)
|
||||
CHECK_NAME=Stateless tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=1
|
||||
@ -988,7 +1018,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateless_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateless tests (debug, actions)
|
||||
CHECK_NAME=Stateless tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
|
||||
KILL_TIMEOUT=10800
|
||||
RUN_BY_HASH_NUM=2
|
||||
@ -1030,7 +1060,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (release, actions)
|
||||
CHECK_NAME=Stateful tests (release)
|
||||
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1067,7 +1097,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (aarch64, actions)
|
||||
CHECK_NAME=Stateful tests (aarch64)
|
||||
REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1104,7 +1134,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (address, actions)
|
||||
CHECK_NAME=Stateful tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1141,7 +1171,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (thread, actions)
|
||||
CHECK_NAME=Stateful tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1178,7 +1208,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_msan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (memory, actions)
|
||||
CHECK_NAME=Stateful tests (msan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1215,7 +1245,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_ubsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (ubsan, actions)
|
||||
CHECK_NAME=Stateful tests (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1252,7 +1282,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stateful_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stateful tests (debug, actions)
|
||||
CHECK_NAME=Stateful tests (debug)
|
||||
REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
|
||||
KILL_TIMEOUT=3600
|
||||
EOF
|
||||
@ -1292,7 +1322,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_thread
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (address, actions)
|
||||
CHECK_NAME=Stress test (asan)
|
||||
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -1332,7 +1362,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_thread
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (thread, actions)
|
||||
CHECK_NAME=Stress test (tsan)
|
||||
REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -1368,7 +1398,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_memory
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (memory, actions)
|
||||
CHECK_NAME=Stress test (msan)
|
||||
REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -1404,7 +1434,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_undefined
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (undefined, actions)
|
||||
CHECK_NAME=Stress test (ubsan)
|
||||
REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -1440,7 +1470,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/stress_debug
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Stress test (debug, actions)
|
||||
CHECK_NAME=Stress test (debug)
|
||||
REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse
|
||||
EOF
|
||||
- name: Download json reports
|
||||
@ -1479,7 +1509,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (asan, actions)
|
||||
CHECK_NAME=Integration tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=3
|
||||
@ -1517,7 +1547,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (asan, actions)
|
||||
CHECK_NAME=Integration tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=3
|
||||
@ -1555,7 +1585,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (asan, actions)
|
||||
CHECK_NAME=Integration tests (asan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
RUN_BY_HASH_NUM=2
|
||||
RUN_BY_HASH_TOTAL=3
|
||||
@ -1593,7 +1623,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -1631,7 +1661,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -1669,7 +1699,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=2
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -1707,7 +1737,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (thread, actions)
|
||||
CHECK_NAME=Integration tests (tsan)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
RUN_BY_HASH_NUM=3
|
||||
RUN_BY_HASH_TOTAL=4
|
||||
@ -1745,7 +1775,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (release, actions)
|
||||
CHECK_NAME=Integration tests (release)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
RUN_BY_HASH_NUM=0
|
||||
RUN_BY_HASH_TOTAL=2
|
||||
@ -1783,7 +1813,7 @@ jobs:
|
||||
cat >> "$GITHUB_ENV" << 'EOF'
|
||||
TEMP_PATH=${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH=${{runner.temp}}/reports_dir
|
||||
CHECK_NAME=Integration tests (release, actions)
|
||||
CHECK_NAME=Integration tests (release)
|
||||
REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
RUN_BY_HASH_NUM=1
|
||||
RUN_BY_HASH_TOTAL=2
|
||||
@ -1815,6 +1845,7 @@ jobs:
|
||||
FinishCheck:
|
||||
needs:
|
||||
- DockerHubPush
|
||||
- DockerServerImages
|
||||
- BuilderReport
|
||||
- FunctionalStatelessTestDebug0
|
||||
- FunctionalStatelessTestDebug1
|
||||
|
20  .github/workflows/tags_stable.yml (vendored)

@@ -13,13 +13,24 @@ on: # yamllint disable-line rule:truthy
- 'v*-prestable'
- 'v*-stable'
- 'v*-lts'
workflow_dispatch:
inputs:
tag:
description: 'Test tag'
required: true
type: string

jobs:
UpdateVersions:
runs-on: [self-hosted, style-checker]
steps:
- name: Set test tag
if: github.event_name == 'workflow_dispatch'
run: |
echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV"
- name: Get tag name
if: github.event_name != 'workflow_dispatch'
run: |
echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV"
- name: Check out repository code
@@ -29,25 +40,28 @@ jobs:
fetch-depth: 0
- name: Generate versions
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
run: |
./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
GID=$(id -g "${UID}")
docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 \
--volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
/ClickHouse/utils/changelog/changelog.py -vv --gh-user-or-token="$GITHUB_TOKEN" \
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" --jobs=5 "${GITHUB_TAG}"
/ClickHouse/utils/changelog/changelog.py -v --debug-helpers \
--gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
git add "./docs/changelogs/${GITHUB_TAG}.md"
git diff HEAD
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
with:
author: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }}
committer: "robot-clickhouse <robot-clickhouse@users.noreply.github.com>"
commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
branch: auto/${{ env.GITHUB_TAG }}
delete-branch: true
title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
labels: do not test
body: |
Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }}
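The new workflow_dispatch trigger above lets the stable-tag workflow be exercised manually with a test tag. A hedged example of dispatching it with the GitHub CLI (the tag value is purely illustrative):

```bash
# Manually run the tags_stable workflow, supplying the new "tag" input.
gh workflow run tags_stable.yml -f tag=v22.8.1.1-lts
```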
20  .gitmodules (vendored)

@@ -86,9 +86,6 @@
[submodule "contrib/h3"]
path = contrib/h3
url = https://github.com/ClickHouse/h3
[submodule "contrib/hyperscan"]
path = contrib/hyperscan
url = https://github.com/ClickHouse/hyperscan.git
[submodule "contrib/libunwind"]
path = contrib/libunwind
url = https://github.com/ClickHouse/libunwind.git
@@ -204,7 +201,7 @@
[submodule "contrib/boringssl"]
path = contrib/boringssl
url = https://github.com/ClickHouse/boringssl.git
branch = MergeWithUpstream
branch = unknown_branch_from_artur
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse/NuRaft.git
@@ -262,9 +259,24 @@
[submodule "contrib/minizip-ng"]
path = contrib/minizip-ng
url = https://github.com/zlib-ng/minizip-ng
[submodule "contrib/qpl"]
path = contrib/qpl
url = https://github.com/intel/qpl.git
[submodule "contrib/wyhash"]
path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash.git
[submodule "contrib/hashidsxx"]
path = contrib/hashidsxx
url = https://github.com/schoentoon/hashidsxx.git
[submodule "contrib/nats-io"]
path = contrib/nats-io
url = https://github.com/ClickHouse/nats.c.git
[submodule "contrib/vectorscan"]
path = contrib/vectorscan
url = https://github.com/VectorCamp/vectorscan.git
[submodule "contrib/liburing"]
path = contrib/liburing
url = https://github.com/axboe/liburing.git
[submodule "contrib/c-ares"]
path = contrib/c-ares
url = https://github.com/ClickHouse/c-ares
328
CHANGELOG.md
@ -1,11 +1,322 @@
### Table of Contents
**[ClickHouse release v22.6, 2022-06-16](#226)**<br>
**[ClickHouse release v22.5, 2022-05-19](#225)**<br>
**[ClickHouse release v22.4, 2022-04-20](#224)**<br>
**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br>
**[ClickHouse release v22.2, 2022-02-17](#222)**<br>
**[ClickHouse release v22.1, 2022-01-18](#221)**<br>
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br>
**[ClickHouse release v22.8, 2022-08-18](#228)**<br/>
**[ClickHouse release v22.7, 2022-07-21](#227)**<br/>
**[ClickHouse release v22.6, 2022-06-16](#226)**<br/>
**[ClickHouse release v22.5, 2022-05-19](#225)**<br/>
**[ClickHouse release v22.4, 2022-04-20](#224)**<br/>
**[ClickHouse release v22.3-lts, 2022-03-17](#223)**<br/>
**[ClickHouse release v22.2, 2022-02-17](#222)**<br/>
**[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>

### <a id="228"></a> ClickHouse release 22.8, 2022-08-18

#### Backward Incompatible Change
* Extended range of `Date32` and `DateTime64` to support dates from the year 1900 to 2299. In previous versions, the supported interval was only from the year 1925 to 2283. The implementation is using the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. It changes the behavior of rounding with `toStartOfInterval` if you pass `INTERVAL 3 QUARTER` up to one quarter because the intervals are counted from an implementation-specific point of time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)).
* Now, all relevant dictionary sources respect `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, Redis. Added ClickHouse, MongoDB, MySQL, PostgreSQL. Host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Make the remote filesystem cache composable, allow not to evict certain files (regarding idx, mrk, ...), delete the old cache version. Now it is possible to configure the cache over an Azure blob storage disk, over a Local disk, over a StaticWeb disk, etc. This PR is marked backward incompatible because the cache configuration changes and the config file needs to be updated for the cache to work. The old cache will still be used with the new configuration, and the server will start up fine with the old cache configuration. Closes https://github.com/ClickHouse/ClickHouse/issues/36140. Closes https://github.com/ClickHouse/ClickHouse/issues/37889. [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171) ([Kseniia Sumarokova](https://github.com/kssenii)).
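To make the `Date32`/`DateTime64` range change in the first entry above concrete, here is a minimal, hedged illustration; the clamping bounds are those stated in the entry, and the exact results depend on the server version:

```sql
-- Out-of-range values are clamped to the bounds of the extended range.
-- On 22.8 the lower bound is 1900-01-01 (previously 1925-01-01).
SELECT toDate32('1899-01-01') AS clamped_to_lower_bound,
       toDate32('2299-12-31') AS new_upper_bound;
```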
#### New Feature
* Query parameters can be set in interactive mode as `SET param_abc = 'def'` and transferred via the native protocol as settings. [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)).
* Quota key can be set in the native protocol ([Yakov Olkhovsky](https://github.com/ClickHouse/ClickHouse/pull/39874)).
* Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide exact value for `rows_before_limit_at_least` statistic, but with the cost that the data before limit will have to be read completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)).
* Added support for parallel distributed insert select with `s3Cluster` table function into tables with `Distributed` and `Replicated` engine [#34670](https://github.com/ClickHouse/ClickHouse/issues/34670). [#39107](https://github.com/ClickHouse/ClickHouse/pull/39107) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add new settings to control schema inference from text formats: - `input_format_try_infer_dates` - try infer dates from strings. - `input_format_try_infer_datetimes` - try infer datetimes from strings. - `input_format_try_infer_integers` - try infer `Int64` instead of `Float64`. - `input_format_json_try_infer_numbers_from_strings` - try infer numbers from json strings in JSON formats. [#39186](https://github.com/ClickHouse/ClickHouse/pull/39186) ([Kruglov Pavel](https://github.com/Avogar)).
* An option to provide JSON formatted log output. The purpose is to allow easier ingestion and query in log analysis tools. [#39277](https://github.com/ClickHouse/ClickHouse/pull/39277) ([Mallik Hassan](https://github.com/SadiHassan)).
* Add function `nowInBlock` which allows getting the current time during long-running and continuous queries. Closes [#39522](https://github.com/ClickHouse/ClickHouse/issues/39522). Note: there is no `now64InBlock` nor `todayInBlock`. A short sketch of the behavior is given after this list. [#39533](https://github.com/ClickHouse/ClickHouse/pull/39533) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add ability to specify settings for an `executable()` table function. [#39681](https://github.com/ClickHouse/ClickHouse/pull/39681) ([Constantine Peresypkin](https://github.com/pkit)).
* Implemented automatic conversion of database engine from `Ordinary` to `Atomic`. Create empty `convert_ordinary_to_atomic` file in `flags` directory and all `Ordinary` databases will be converted automatically on next server start. Resolves [#39546](https://github.com/ClickHouse/ClickHouse/issues/39546). [#39933](https://github.com/ClickHouse/ClickHouse/pull/39933) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Support `SELECT ... INTO OUTFILE '...' AND STDOUT`. [#37490](https://github.com/ClickHouse/ClickHouse/issues/37490). [#39054](https://github.com/ClickHouse/ClickHouse/pull/39054) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add formats `PrettyMonoBlock`, `PrettyNoEscapesMonoBlock`, `PrettyCompactNoEscapes`, `PrettyCompactNoEscapesMonoBlock`, `PrettySpaceNoEscapes`, `PrettySpaceMonoBlock`, `PrettySpaceNoEscapesMonoBlock`. [#39646](https://github.com/ClickHouse/ClickHouse/pull/39646) ([Kruglov Pavel](https://github.com/Avogar)).
* Add new setting `schema_inference_hints` that allows specifying structure hints in schema inference for specific columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)).
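As a small, hedged sketch of the query-parameters entry at the top of this list (the table name `events` is hypothetical):

```sql
-- Set a query parameter in interactive mode, then reference it with {name:Type}.
SET param_target_id = 42;
SELECT count() FROM events WHERE id = {target_id:UInt64};
```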
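And a quick sketch of the `nowInBlock` entry above: `now()` is evaluated once per query, while `nowInBlock()` is evaluated per block of data, which is what makes it useful for long-running queries.

```sql
-- With max_block_size = 1 and a sleep per row, nowInBlock() advances while now() stays constant.
SELECT now(), nowInBlock(), sleep(1)
FROM system.numbers
LIMIT 3
SETTINGS max_block_size = 1;
```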
#### Experimental Feature
* Support SQL standard DELETE FROM syntax on merge tree tables and lightweight delete implementation for merge tree families. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)) ([Alexander Gololobov](https://github.com/davenger)). Note: this new feature does not make ClickHouse an HTAP DBMS.
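A minimal sketch of the lightweight delete syntax mentioned above (the table `hits` is hypothetical, and as an experimental feature it may need to be enabled via the corresponding experimental setting on 22.8):

```sql
-- Standard SQL DELETE on a MergeTree table; rows are masked and cleaned up by later merges.
DELETE FROM hits WHERE CounterID = 42;
```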
#### Performance Improvement
* Improved memory usage during memory efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)).
* Added concurrency control logic to limit the total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). Add the `concurrent_threads_soft_limit` parameter to increase performance in case of high QPS by limiting the total number of threads for all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)).
* Add `SLRU` cache policy for uncompressed cache and marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)).
* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)).
* `DISTINCT` in order with `ORDER BY`: Deduce way to sort based on input stream sort description. Skip sorting if input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). Improve memory usage (significantly) and query execution time + use `DistinctSortedChunkTransform` for final distinct when `DISTINCT` columns match `ORDER BY` columns, but rename to `DistinctSortedStreamTransform` in `EXPLAIN PIPELINE` → this improves memory usage significantly + remove unnecessary allocations in hot loop in `DistinctSortedChunkTransform`. [#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). Use `DistinctSortedTransform` only when sort description is applicable to DISTINCT columns, otherwise fall back to ordinary DISTINCT implementation + it allows making less checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). Fix: `DistinctSortedTransform` didn't take advantage of sorting. It never cleared HashSet since clearing_columns were detected incorrectly (always empty). So, it basically worked as ordinary `DISTINCT` (`DistinctTransform`). The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)).
* Use local node as first priority to get structure of remote table when executing `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)).
* Optimize filtering by numeric columns with AVX512VBMI2 compress store. [#39633](https://github.com/ClickHouse/ClickHouse/pull/39633) ([Guo Wangyang](https://github.com/guowangy)). For systems with AVX512 VBMI2, this PR improves performance by ca. 6% for SSB benchmark queries 3.1, 3.2 and 3.3 (SF=100). Tested on Intel Icelake Xeon 8380 * 2 socket. [#40033](https://github.com/ClickHouse/ClickHouse/pull/40033) ([Robert Schulze](https://github.com/rschu1ze)).
* Optimize index analysis with functional expressions in multi-thread scenario. [#39812](https://github.com/ClickHouse/ClickHouse/pull/39812) ([Guo Wangyang](https://github.com/guowangy)).
* Optimizations for complex queries: Don't visit the AST for UDFs if none are registered. [#40069](https://github.com/ClickHouse/ClickHouse/pull/40069) ([Raúl Marín](https://github.com/Algunenano)). Optimize CurrentMemoryTracker alloc and free. [#40078](https://github.com/ClickHouse/ClickHouse/pull/40078) ([Raúl Marín](https://github.com/Algunenano)).
* Improved Base58 encoding/decoding. [#39292](https://github.com/ClickHouse/ClickHouse/pull/39292) ([Andrey Zvonov](https://github.com/zvonand)).
* Improve bytes to bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)).
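For the DISTINCT-in-order optimization described earlier in this list, the effect can be inspected in the query pipeline; a hedged sketch, assuming a hypothetical table `events` whose sorting key starts with `user_id`:

```sql
-- When the DISTINCT columns match the sorting key, a sorted-distinct transform
-- should appear in the pipeline instead of the ordinary DISTINCT transform.
EXPLAIN PIPELINE
SELECT DISTINCT user_id
FROM events
ORDER BY user_id;
```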
#### Improvement
* Normalize `AggregateFunction` types and state representations because optimizations like [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) will treat `count(not null columns)` as `count()`, which might confuse distributed interpreters with the following error: `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). The functions with identical states can be used in materialized views interchangeably.
* Rework and simplify the `system.backups` table, remove the `internal` column, allow user to set the ID of operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)).
* Improved structure of DDL query result table for `Replicated` database (separate columns with shard and replica name, more clear status) - `CREATE TABLE ... ON CLUSTER` queries can be normalized on initiator first if `distributed_ddl_entry_format_version` is set to 3 (default value). It means that `ON CLUSTER` queries may not work if initiator does not belong to the cluster that specified in query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500) - Ignore `ON CLUSTER` clause if database is `Replicated` and cluster name equals to database name. Related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570) - Miscellaneous minor fixes for `Replicated` database engine - Check metadata consistency when starting up `Replicated` database, start replica recovery in case of mismatch of local metadata and metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add result_rows and result_bytes to progress reports (`X-ClickHouse-Summary`). [#39567](https://github.com/ClickHouse/ClickHouse/pull/39567) ([Raúl Marín](https://github.com/Algunenano)).
* Improve primary key analysis for MergeTree. [#25563](https://github.com/ClickHouse/ClickHouse/pull/25563) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* `timeSlots` now works with DateTime64; subsecond duration and slot size available when working with DateTime64. [#37951](https://github.com/ClickHouse/ClickHouse/pull/37951) ([Andrey Zvonov](https://github.com/zvonand)).
* Added support of `LEFT SEMI` and `LEFT ANTI` direct join with `EmbeddedRocksDB` tables. [#38956](https://github.com/ClickHouse/ClickHouse/pull/38956) ([Vladimir C](https://github.com/vdimir)).
* Add profile events for fsync operations. [#39179](https://github.com/ClickHouse/ClickHouse/pull/39179) ([Azat Khuzhin](https://github.com/azat)).
* Add a second argument to the ordinary function `file(path[, default])`, which the function returns when the file does not exist. [#39218](https://github.com/ClickHouse/ClickHouse/pull/39218) ([Nikolay Degterinsky](https://github.com/evillique)).
* Some small fixes for reading via HTTP: allow retrying partial content in case of a 200 OK response. [#39244](https://github.com/ClickHouse/ClickHouse/pull/39244) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support queries `CREATE TEMPORARY TABLE ... (<list of columns>) AS ...`. [#39462](https://github.com/ClickHouse/ClickHouse/pull/39462) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support of `!`/`*` (exclamation/asterisk) in custom TLDs (`cutToFirstSignificantSubdomainCustom()`/`cutToFirstSignificantSubdomainCustomWithWWW()`/`firstSignificantSubdomainCustom()`). [#39496](https://github.com/ClickHouse/ClickHouse/pull/39496) ([Azat Khuzhin](https://github.com/azat)).
* Add support for TLS connections to NATS. Implements [#39525](https://github.com/ClickHouse/ClickHouse/issues/39525). [#39527](https://github.com/ClickHouse/ClickHouse/pull/39527) ([Constantine Peresypkin](https://github.com/pkit)).
* `clickhouse-obfuscator` (a tool for database obfuscation for testing and load generation) now has the new `--save` and `--load` parameters to work with pre-trained models. This closes [#39534](https://github.com/ClickHouse/ClickHouse/issues/39534). [#39541](https://github.com/ClickHouse/ClickHouse/pull/39541) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect behavior of log rotation during restart. [#39558](https://github.com/ClickHouse/ClickHouse/pull/39558) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix building aggregate projections when external aggregation is on. Marked as an improvement because the case is rare and there is an easy workaround via changing settings. This fixes [#39667](https://github.com/ClickHouse/ClickHouse/issues/39667). [#39671](https://github.com/ClickHouse/ClickHouse/pull/39671) ([Amos Bird](https://github.com/amosbird)).
* Allow to execute hash functions with arguments of type `Map`. [#39685](https://github.com/ClickHouse/ClickHouse/pull/39685) ([Anton Popov](https://github.com/CurtizJ)).
* Add a configuration parameter to hide addresses in stack traces. It may improve security a little but generally, it is harmful and should not be used. [#39690](https://github.com/ClickHouse/ClickHouse/pull/39690) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Change the prefix size of AggregateFunctionDistinct to make sure nested function data memory segment is aligned. [#39696](https://github.com/ClickHouse/ClickHouse/pull/39696) ([Pxl](https://github.com/BiteTheDDDDt)).
* Properly escape credentials passed to the `clickhouse-diagnostic` tool. [#39707](https://github.com/ClickHouse/ClickHouse/pull/39707) ([Dale McDiarmid](https://github.com/gingerwizard)).
* ClickHouse Keeper improvement: create a snapshot on exit. It can be controlled with the config `keeper_server.create_snapshot_on_exit`, `true` by default. [#39755](https://github.com/ClickHouse/ClickHouse/pull/39755) ([Antonio Andelic](https://github.com/antonio2368)).
* Support primary key analysis for `row_policy_filter` and `additional_filter`. It also helps fix issues like [#37454](https://github.com/ClickHouse/ClickHouse/issues/37454) . [#39826](https://github.com/ClickHouse/ClickHouse/pull/39826) ([Amos Bird](https://github.com/amosbird)).
* Fix two usability issues in Play UI: - it was non-pixel-perfect on iPad due to parasitic border radius and margins; - the progress indication did not display after the first query. This closes [#39957](https://github.com/ClickHouse/ClickHouse/issues/39957). This closes [#39960](https://github.com/ClickHouse/ClickHouse/issues/39960). [#39961](https://github.com/ClickHouse/ClickHouse/pull/39961) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Play UI: add row numbers; add cell selection on click; add hysteresis for table cells. [#39962](https://github.com/ClickHouse/ClickHouse/pull/39962) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Play UI: recognize tab key in textarea, but at the same time don't mess up with tab navigation. [#40053](https://github.com/ClickHouse/ClickHouse/pull/40053) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The client will show server-side elapsed time. This is important for the performance comparison of ClickHouse services in remote datacenters. This closes [#38070](https://github.com/ClickHouse/ClickHouse/issues/38070). See also [this](https://github.com/ClickHouse/ClickBench/blob/main/hardware/benchmark-cloud.sh#L37) for motivation. [#39968](https://github.com/ClickHouse/ClickHouse/pull/39968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Adds `parseDateTime64BestEffortUS`, `parseDateTime64BestEffortUSOrNull`, `parseDateTime64BestEffortUSOrZero` functions, closing [#37492](https://github.com/ClickHouse/ClickHouse/issues/37492). [#40015](https://github.com/ClickHouse/ClickHouse/pull/40015) ([Tanya Bragin](https://github.com/tbragin)).
* Extend the `system.processors_profile_log` with more information such as input rows. [#40121](https://github.com/ClickHouse/ClickHouse/pull/40121) ([Amos Bird](https://github.com/amosbird)).
* Display server-side time in `clickhouse-benchmark` by default if it is available (since ClickHouse version 22.8). This is needed to correctly compare the performance of clouds. This behavior can be changed with the new `--client-side-time` command line option. Change the `--randomize` command line option from `--randomize 1` to the form without argument. [#40193](https://github.com/ClickHouse/ClickHouse/pull/40193) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add counters (ProfileEvents) for cases when a query complexity limitation has been set and reached (a separate counter for `overflow_mode` = `break` and `throw`). For example, if you have set up `max_rows_to_read` with `read_overflow_mode = 'break'`, looking at the value of the `OverflowBreak` counter will allow distinguishing incomplete results. [#40205](https://github.com/ClickHouse/ClickHouse/pull/40205) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix memory accounting in case of "Memory limit exceeded" errors (previously [peak] memory usage took failed allocations into account). [#40249](https://github.com/ClickHouse/ClickHouse/pull/40249) ([Azat Khuzhin](https://github.com/azat)).
* Add metrics for filesystem cache: `FilesystemCacheSize` and `FilesystemCacheElements`. [#40260](https://github.com/ClickHouse/ClickHouse/pull/40260) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support hadoop secure RPC transfer (hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#39411](https://github.com/ClickHouse/ClickHouse/pull/39411) ([michael1589](https://github.com/michael1589)).
* Avoid continuously growing memory consumption of pattern cache when using functions multi(Fuzzy)Match(Any|AllIndices|AnyIndex)(). [#40264](https://github.com/ClickHouse/ClickHouse/pull/40264) ([Robert Schulze](https://github.com/rschu1ze)).
* Add cache for schema inference for file/s3/hdfs/url table functions. Now, schema inference will be performed only on the first query to the file, all subsequent queries to the same file will use the schema from cache if data wasn't changed. Add system table system.schema_inference_cache with all current schemas in cache and system queries SYSTEM DROP SCHEMA CACHE [FOR FILE/S3/HDFS/URL] to drop schemas from cache. [#38286](https://github.com/ClickHouse/ClickHouse/pull/38286) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)).
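The schema-inference-cache entry earlier in this list names the system table and the drop statement explicitly; a short usage sketch:

```sql
-- Inspect cached schemas, then drop the cache for the S3 source only.
SELECT * FROM system.schema_inference_cache LIMIT 5;
SYSTEM DROP SCHEMA CACHE FOR S3;
```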
#### Build/Testing/Packaging Improvement
* [ClickFiddle](https://fiddle.clickhouse.com/): A new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**).
* ClickHouse binary is made self-extracting [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)).
* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Ensure LSan is effective. [#39430](https://github.com/ClickHouse/ClickHouse/pull/39430) ([Azat Khuzhin](https://github.com/azat)).
* TSAN has issues with clang-14 (https://github.com/google/sanitizers/issues/1552, https://github.com/google/sanitizers/issues/1540), so here we build the TSAN binaries with clang-15. [#39450](https://github.com/ClickHouse/ClickHouse/pull/39450) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove the option to build ClickHouse tools as separate executable programs. This fixes [#37847](https://github.com/ClickHouse/ClickHouse/issues/37847). [#39520](https://github.com/ClickHouse/ClickHouse/pull/39520) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Small preparations for build on s390x (which is big-endian). [#39627](https://github.com/ClickHouse/ClickHouse/pull/39627) ([Harry Lee](https://github.com/HarryLeeIBM)). [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issue in BitHelpers for s390x. [#39656](https://github.com/ClickHouse/ClickHouse/pull/39656) ([Harry Lee](https://github.com/HarryLeeIBM)). Implement a piece of code related to SipHash for s390x architecture (which is not supported by ClickHouse). [#39732](https://github.com/ClickHouse/ClickHouse/pull/39732) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed an Endian issue in Coordination snapshot code for s390x architecture (which is not supported by ClickHouse). [#39931](https://github.com/ClickHouse/ClickHouse/pull/39931) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issues in Codec code for s390x architecture (which is not supported by ClickHouse). [#40008](https://github.com/ClickHouse/ClickHouse/pull/40008) ([Harry Lee](https://github.com/HarryLeeIBM)). Fixed Endian issues in reading/writing BigEndian binary data in ReadHelpers and WriteHelpers code for s390x architecture (which is not supported by ClickHouse). [#40179](https://github.com/ClickHouse/ClickHouse/pull/40179) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Support build with `clang-16` (trunk). This closes [#39949](https://github.com/ClickHouse/ClickHouse/issues/39949). [#40181](https://github.com/ClickHouse/ClickHouse/pull/40181) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Prepare RISC-V 64 build to run in CI. This is for [#40141](https://github.com/ClickHouse/ClickHouse/issues/40141). [#40197](https://github.com/ClickHouse/ClickHouse/pull/40197) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Simplified function registration macro interface (`FUNCTION_REGISTER*`) to eliminate the step to add and call an extern function in the registerFunctions.cpp, it also makes incremental builds of a new function faster. [#38615](https://github.com/ClickHouse/ClickHouse/pull/38615) ([Li Yin](https://github.com/liyinsg)).
* Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it found in config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### Bug Fix
* Fix possible segfault in `CapnProto` input format. This bug was found and sent through the ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix the case when the order of columns can be incorrect if the `IN` operator is used with a table with `ENGINE = Set` containing multiple columns. This fixes [#13014](https://github.com/ClickHouse/ClickHouse/issues/13014). [#40225](https://github.com/ClickHouse/ClickHouse/pull/40225) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix duplicate columns in join plan. Finally, solve [#26809](https://github.com/ClickHouse/ClickHouse/issues/26809). [#40009](https://github.com/ClickHouse/ClickHouse/pull/40009) ([Vladimir C](https://github.com/vdimir)).
* Fixed query hanging for SELECT with ORDER BY WITH FILL with different date/time types. [#37849](https://github.com/ClickHouse/ClickHouse/pull/37849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix ORDER BY that matches projections ORDER BY (before it simply returns unsorted result). [#38725](https://github.com/ClickHouse/ClickHouse/pull/38725) ([Azat Khuzhin](https://github.com/azat)).
* Do not optimise functions in GROUP BY statements if they shadow one of the table columns or expressions. Fixes [#37032](https://github.com/ClickHouse/ClickHouse/issues/37032). [#39103](https://github.com/ClickHouse/ClickHouse/pull/39103) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Fix wrong table name in logs after RENAME TABLE. This fixes [#38018](https://github.com/ClickHouse/ClickHouse/issues/38018). [#39227](https://github.com/ClickHouse/ClickHouse/pull/39227) ([Amos Bird](https://github.com/amosbird)).
* Fix positional arguments in case of columns pruning when optimising the query. Closes [#38433](https://github.com/ClickHouse/ClickHouse/issues/38433). [#39293](https://github.com/ClickHouse/ClickHouse/pull/39293) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix bug in schema inference in case of empty messages in Protobuf/CapnProto formats that allowed to create column with empty `Tuple` type. Closes [#39051](https://github.com/ClickHouse/ClickHouse/issues/39051) Add 2 new settings `input_format_{protobuf/capnproto}_skip_fields_with_unsupported_types_in_schema_inference` that allow to skip fields with unsupported types while schema inference for Protobuf and CapnProto formats. [#39357](https://github.com/ClickHouse/ClickHouse/pull/39357) ([Kruglov Pavel](https://github.com/Avogar)).
* (Window View is an experimental feature) Fix segmentation fault on `CREATE WINDOW VIEW .. ON CLUSTER ... INNER`. Closes [#39363](https://github.com/ClickHouse/ClickHouse/issues/39363). [#39384](https://github.com/ClickHouse/ClickHouse/pull/39384) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix WriteBuffer finalize when cancelling an insert into a function (in previous versions it could lead to `std::terminate`). [#39458](https://github.com/ClickHouse/ClickHouse/pull/39458) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix storing of columns of type `Object` in sparse serialization. [#39464](https://github.com/ClickHouse/ClickHouse/pull/39464) ([Anton Popov](https://github.com/CurtizJ)).
* Fix possible "Not found column in block" exception when using projections. This closes [#39469](https://github.com/ClickHouse/ClickHouse/issues/39469). [#39470](https://github.com/ClickHouse/ClickHouse/pull/39470) ([小路](https://github.com/nicelulu)).
* Fix exception on race between DROP and INSERT with materialized views. [#39477](https://github.com/ClickHouse/ClickHouse/pull/39477) ([Azat Khuzhin](https://github.com/azat)).
* A bug in Apache Avro library: fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix rare bug in asynchronous reading (with setting `local_filesystem_read_method='pread_threadpool'`) with enabled `O_DIRECT` (enabled by setting `min_bytes_to_use_direct_io`). [#39506](https://github.com/ClickHouse/ClickHouse/pull/39506) ([Anton Popov](https://github.com/CurtizJ)).
* (only on FreeBSD) Fixes "Code: 49. DB::Exception: FunctionFactory: the function name '' is not unique. (LOGICAL_ERROR)" observed on FreeBSD when starting clickhouse. [#39551](https://github.com/ClickHouse/ClickHouse/pull/39551) ([Alexander Gololobov](https://github.com/davenger)).
* Fix bug with the recently introduced "maxsplit" argument for `splitByChar`, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Fix bug in ASOF JOIN with `enable_optimize_predicate_expression`, close [#37813](https://github.com/ClickHouse/ClickHouse/issues/37813). [#39556](https://github.com/ClickHouse/ClickHouse/pull/39556) ([Vladimir C](https://github.com/vdimir)).
* Fixed `CREATE/DROP INDEX` query with `ON CLUSTER` or `Replicated` database and `ReplicatedMergeTree`. It used to be executed on all replicas (causing error or DDL queue stuck). Fixes [#39511](https://github.com/ClickHouse/ClickHouse/issues/39511). [#39565](https://github.com/ClickHouse/ClickHouse/pull/39565) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix "column not found" error for push down with join, close [#39505](https://github.com/ClickHouse/ClickHouse/issues/39505). [#39575](https://github.com/ClickHouse/ClickHouse/pull/39575) ([Vladimir C](https://github.com/vdimir)).
* Fix the wrong `REGEXP_REPLACE` alias. This fixes https://github.com/ClickHouse/ClickBench/issues/9. [#39592](https://github.com/ClickHouse/ClickHouse/pull/39592) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed point of origin for exponential decay window functions to the last value in window. Previously, decay was calculated by formula `exp((t - curr_row_t) / decay_length)`, which is incorrect when right boundary of window is not `CURRENT ROW`. It was changed to: `exp((t - last_row_t) / decay_length)`. There is no change in results for windows with `ROWS BETWEEN (smth) AND CURRENT ROW`. [#39593](https://github.com/ClickHouse/ClickHouse/pull/39593) ([Vladimir Chebotaryov](https://github.com/quickhouse)).
* Fix Decimal division overflow, which can be detected based on operands scale. [#39600](https://github.com/ClickHouse/ClickHouse/pull/39600) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix settings `output_format_arrow_string_as_string` and `output_format_arrow_low_cardinality_as_dictionary` work in combination. Closes [#39624](https://github.com/ClickHouse/ClickHouse/issues/39624). [#39647](https://github.com/ClickHouse/ClickHouse/pull/39647) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed a bug in default database resolution in distributed table reads. [#39674](https://github.com/ClickHouse/ClickHouse/pull/39674) ([Anton Kozlov](https://github.com/tonickkozlov)).
* (Only with the obsolete Ordinary databases) A SELECT might read data of a dropped table if the cache for mmap IO is used, the database engine is Ordinary, and a new table was created with the same name the dropped one had. It's fixed. [#39708](https://github.com/ClickHouse/ClickHouse/pull/39708) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix possible error `Invalid column type for ColumnUnique::insertRangeFrom. Expected String, got ColumnLowCardinality` Fixes [#38460](https://github.com/ClickHouse/ClickHouse/issues/38460). [#39716](https://github.com/ClickHouse/ClickHouse/pull/39716) ([Arthur Passos](https://github.com/arthurpassos)).
* Field names in the `meta` section of JSON format were erroneously double escaped. This closes [#39693](https://github.com/ClickHouse/ClickHouse/issues/39693). [#39747](https://github.com/ClickHouse/ClickHouse/pull/39747) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Fix `EmbeddedRocksDB` tables filtering by key using params. [#39757](https://github.com/ClickHouse/ClickHouse/pull/39757) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix error `Invalid number of columns in chunk pushed to OutputPort` which was caused by ARRAY JOIN optimization. Fixes [#39164](https://github.com/ClickHouse/ClickHouse/issues/39164). [#39799](https://github.com/ClickHouse/ClickHouse/pull/39799) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* A workaround for a bug in Linux kernel. Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
* (Only on NFS) Fix broken NFS mkdir for root-squashed volumes. [#39898](https://github.com/ClickHouse/ClickHouse/pull/39898) ([Constantine Peresypkin](https://github.com/pkit)).
* Remove dictionaries from prometheus metrics on DETACH/DROP. [#39926](https://github.com/ClickHouse/ClickHouse/pull/39926) ([Azat Khuzhin](https://github.com/azat)).
* Fix read of StorageFile with virtual columns. Closes [#39907](https://github.com/ClickHouse/ClickHouse/issues/39907). [#39943](https://github.com/ClickHouse/ClickHouse/pull/39943) ([flynn](https://github.com/ucasfl)).
* Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* (experimental feature) Fix `hashId` crash and salt parameter not being used. [#40002](https://github.com/ClickHouse/ClickHouse/pull/40002) ([Raúl Marín](https://github.com/Algunenano)).
* `EXCEPT` and `INTERSECT` operators may lead to crash if a specific combination of constant and non-constant columns were used. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)).
* Fixed "Part directory doesn't exist" and "`tmp_<part_name>` ... No such file or directory" errors during too slow INSERT or too long merge/mutation. Also fixed issue that may cause some replication queue entries to stuck without any errors or warnings in logs if previous attempt to fetch part failed, but `tmp-fetch_<part_name>` directory was not cleaned up. [#40031](https://github.com/ClickHouse/ClickHouse/pull/40031) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix rare cases of parsing of arrays of tuples in format `Values`. [#40034](https://github.com/ClickHouse/ClickHouse/pull/40034) ([Anton Popov](https://github.com/CurtizJ)).
* Fixes ArrowColumn format Dictionary(X) & Dictionary(Nullable(X)) conversion to ClickHouse LowCardinality(X) & LowCardinality(Nullable(X)) respectively. [#40037](https://github.com/ClickHouse/ClickHouse/pull/40037) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix potential deadlock in writing to S3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix bug in collectFilesToSkip() by adding correct file extension (.idx or idx2) for indexes to be recalculated, avoid wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* A fix for reverse DNS resolution. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix unexpected result of `arrayDifference` for `Array(UInt32)`. [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)).
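Relating to the `splitByChar` fix earlier in this list, a quick check of the maximum-split argument; the expected output is deliberately not stated here, since it depends on the fixed behavior:

```sql
-- The third argument limits the number of resulting substrings.
SELECT splitByChar(',', 'a,b,c,d', 2);
```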
### <a id="227"></a> ClickHouse release 22.7, 2022-07-21
#### Upgrade Notes
* Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1, 2 are references to the SELECT clause (see the sketch after this list). If you need to return the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable `format_csv_allow_single_quotes` by default. See [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). ([Kruglov Pavel](https://github.com/Avogar)).
* `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new databases with `Ordinary` engine. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have the old behavior, set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). If you face any incompatibilities, you can turn this setting back.
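A hedged sketch of positional arguments, now enabled by default per the first note in this list (the table `hits` and its columns are hypothetical):

```sql
-- 1 and 2 refer to the first and second expressions of the SELECT list.
SELECT EventDate, count()
FROM hits
GROUP BY 1
ORDER BY 2 DESC;
```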
#### New Feature
* Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). [#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)).
* Add new `direct` join algorithm for `EmbeddedRocksDB` tables, see [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). [#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)).
* Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)).
* Implement the NATS table engine, which allows publishing to and subscribing from NATS. Closes [#32388](https://github.com/ClickHouse/ClickHouse/issues/32388). [#37171](https://github.com/ClickHouse/ClickHouse/pull/37171) ([tchepavel](https://github.com/tchepavel)). ([Kseniia Sumarokova](https://github.com/kssenii))
* Implement table function `mongodb`. Allow writes into `MongoDB` storage / table function. [#37213](https://github.com/ClickHouse/ClickHouse/pull/37213) ([aaapetrenko](https://github.com/aaapetrenko)). ([Kseniia Sumarokova](https://github.com/kssenii))
* Add `SQLInsert` output format. Closes [#38441](https://github.com/ClickHouse/ClickHouse/issues/38441). [#38477](https://github.com/ClickHouse/ClickHouse/pull/38477) ([Kruglov Pavel](https://github.com/Avogar)).
* Introduced settings `additional_table_filters`. Using this setting, you can specify an additional filtering condition for a table which will be applied directly after reading. Example: `select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}`. Introduced setting `additional_result_filter` which specifies an additional filtering condition for the query result. Closes [#37918](https://github.com/ClickHouse/ClickHouse/issues/37918). [#38475](https://github.com/ClickHouse/ClickHouse/pull/38475) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add `compatibility` setting and `system.settings_changes` system table that contains information about changes in settings through ClickHouse versions. Closes [#35972](https://github.com/ClickHouse/ClickHouse/issues/35972). [#38957](https://github.com/ClickHouse/ClickHouse/pull/38957) ([Kruglov Pavel](https://github.com/Avogar)).
* Add functions `translate(string, from_string, to_string)` and `translateUTF8(string, from_string, to_string)`. They replace each character from `from_string` in the input with the character at the same position in `to_string`. [#38935](https://github.com/ClickHouse/ClickHouse/pull/38935) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support the `parseTimeDelta` function. The characters ` ;-+,:` can be used as separators, e.g. `1yr-2mo`, `2m:6s`: `SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds')`. [#39071](https://github.com/ClickHouse/ClickHouse/pull/39071) ([jiahui-97](https://github.com/jiahui-97)).
* Added `CREATE TABLE ... EMPTY AS SELECT` query. It automatically deduces table structure from the SELECT query, but does not fill the table after creation. Resolves [#38049](https://github.com/ClickHouse/ClickHouse/issues/38049). [#38272](https://github.com/ClickHouse/ClickHouse/pull/38272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Added options to limit IO operations with remote storage: `max_remote_read_network_bandwidth_for_server` and `max_remote_write_network_bandwidth_for_server`. [#39095](https://github.com/ClickHouse/ClickHouse/pull/39095) ([Sergei Trifonov](https://github.com/serxa)).
* Add `group_by_use_nulls` setting to make aggregation key columns nullable in the case of ROLLUP, CUBE and GROUPING SETS. Closes [#37359](https://github.com/ClickHouse/ClickHouse/issues/37359). [#38642](https://github.com/ClickHouse/ClickHouse/pull/38642) ([Dmitry Novik](https://github.com/novikd)).
* Add the ability to specify compression level during data export. [#38907](https://github.com/ClickHouse/ClickHouse/pull/38907) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add an option to require explicit grants to SELECT from the `system` database. Details: [#38970](https://github.com/ClickHouse/ClickHouse/pull/38970) ([Vitaly Baranov](https://github.com/vitlibar)).
* Functions `multiMatchAny`, `multiMatchAnyIndex`, `multiMatchAllIndices` and their fuzzy variants now accept non-const pattern array argument. [#38485](https://github.com/ClickHouse/ClickHouse/pull/38485) ([Robert Schulze](https://github.com/rschu1ze)). SQL function `multiSearchAllPositions` now accepts non-const needle arguments. [#39167](https://github.com/ClickHouse/ClickHouse/pull/39167) ([Robert Schulze](https://github.com/rschu1ze)).
* Add a setting `zstd_window_log_max` to configure max memory usage on zstd decoding when importing external files. Closes [#35693](https://github.com/ClickHouse/ClickHouse/issues/35693). [#37015](https://github.com/ClickHouse/ClickHouse/pull/37015) ([wuxiaobai24](https://github.com/wuxiaobai24)).
* Add `send_logs_source_regexp` setting. Send server text logs with specified regexp to match log source name. Empty means all sources. [#39161](https://github.com/ClickHouse/ClickHouse/pull/39161) ([Amos Bird](https://github.com/amosbird)).
* Support `ALTER` for `Hive` tables. [#38214](https://github.com/ClickHouse/ClickHouse/pull/38214) ([lgbo](https://github.com/lgbo-ustc)).
* Support the `isNullable` function. This function checks whether its argument is nullable and returns 1 or 0 (a quick check is shown after this list). Closes [#38611](https://github.com/ClickHouse/ClickHouse/issues/38611). [#38841](https://github.com/ClickHouse/ClickHouse/pull/38841) ([lokax](https://github.com/lokax)).
* Added functions for base58 encoding/decoding. [#38159](https://github.com/ClickHouse/ClickHouse/pull/38159) ([Andrey Zvonov](https://github.com/zvonand)).
* Add chart visualization to Play UI. [#38197](https://github.com/ClickHouse/ClickHouse/pull/38197) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added L2 Squared distance and norm functions for both arrays and tuples. [#38545](https://github.com/ClickHouse/ClickHouse/pull/38545) ([Julian Gilyadov](https://github.com/israelg99)).
* Add ability to pass HTTP headers to the `url` table function / storage via SQL. Closes [#37897](https://github.com/ClickHouse/ClickHouse/issues/37897). [#38176](https://github.com/ClickHouse/ClickHouse/pull/38176) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
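A quick check of the `isNullable` function described in this list, using only built-in functions:

```sql
-- Returns 0 for a non-nullable argument and 1 for a Nullable one.
SELECT isNullable(1), isNullable(toNullable(1));
```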
#### Experimental Feature
* Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)).
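A minimal sketch of the `implicit_transaction` setting described above; the table `t` is hypothetical, and transactions themselves are experimental:

```sql
-- Each standalone query is wrapped in an automatic transaction
-- (COMMIT on success, ROLLBACK on failure).
SET implicit_transaction = 1;
INSERT INTO t VALUES (1);
```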
#### Performance Improvement
* Distinct optimization for sorted columns. Use specialized distinct transformation in case input stream is sorted by column(s) in distinct. Optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)).
* Improve performance of `ORDER BY`, `MergeTree` merges, window functions using batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)).
* More parallel execution for queries with `FINAL` [#36396](https://github.com/ClickHouse/ClickHouse/pull/36396) ([Nikita Taranov](https://github.com/nickitat)).
* Fix a significant join performance regression which was introduced in [#35616](https://github.com/ClickHouse/ClickHouse/pull/35616). It's interesting that common join queries such as the SSB queries have been 10 times slower for almost 3 months while no one complained. [#38052](https://github.com/ClickHouse/ClickHouse/pull/38052) ([Amos Bird](https://github.com/amosbird)).
* Migrate from the Intel hyperscan library to vectorscan, this speeds up many string matching on non-x86 platforms. [#38171](https://github.com/ClickHouse/ClickHouse/pull/38171) ([Robert Schulze](https://github.com/rschu1ze)).
* Increased parallelism of query plan steps executed after aggregation. [#38295](https://github.com/ClickHouse/ClickHouse/pull/38295) ([Nikita Taranov](https://github.com/nickitat)).
* Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)).
* Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)).
* Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493). [#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)).
* Norm and Distance functions for arrays speed up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)).
* Add AVX-512 VBMI optimized `copyOverlap32Shuffle` for LZ4 decompression. In other words, LZ4 decompression performance is improved. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)).
* `ORDER BY (a, b)` will use all the same benefits as `ORDER BY a, b`. [#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)).
* Align branches within a 32B boundary to make benchmark more stable. [#38988](https://github.com/ClickHouse/ClickHouse/pull/38988) ([Guo Wangyang](https://github.com/guowangy)). It improves performance 1..2% on average for Intel.
* Executable UDF, executable dictionaries, and Executable tables will avoid wasting one second during wait for subprocess termination. [#38929](https://github.com/ClickHouse/ClickHouse/pull/38929) ([Constantine Peresypkin](https://github.com/pkit)).
* Optimize accesses to `system.stack_trace` table if not all columns are selected. [#39177](https://github.com/ClickHouse/ClickHouse/pull/39177) ([Azat Khuzhin](https://github.com/azat)).
* Improve isNullable/isConstant/isNull/isNotNull performance for LowCardinality argument. [#39192](https://github.com/ClickHouse/ClickHouse/pull/39192) ([Kruglov Pavel](https://github.com/Avogar)).
* Optimized processing of ORDER BY in window functions. [#34632](https://github.com/ClickHouse/ClickHouse/pull/34632) ([Vladimir Chebotarev](https://github.com/excitoon)).
* The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
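Regarding the `ORDER BY (a, b)` entry earlier in this list, a small illustration; the table `t` with columns `a` and `b` is hypothetical:

```sql
-- The tuple form now benefits from the same sorting optimizations as ORDER BY a, b.
SELECT a, b FROM t ORDER BY (a, b) LIMIT 10;
```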
#### Improvement
* Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)).
* Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)).
* Remove subprocess run for kerberos initialization. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)).
* Add setting `multiple_joins_try_to_keep_original_names` to not rewrite identifier name on multiple JOINs rewrite, close [#34697](https://github.com/ClickHouse/ClickHouse/issues/34697). [#38149](https://github.com/ClickHouse/ClickHouse/pull/38149) ([Vladimir C](https://github.com/vdimir)).
* Improved trace-visualizer UX. [#38169](https://github.com/ClickHouse/ClickHouse/pull/38169) ([Sergei Trifonov](https://github.com/serxa)).
* Enable stack trace collection and query profiler for AArch64. [#38181](https://github.com/ClickHouse/ClickHouse/pull/38181) ([Maksim Kita](https://github.com/kitaisreal)).
* Do not skip symlinks in `user_defined` directory during SQL user defined functions loading. Closes [#38042](https://github.com/ClickHouse/ClickHouse/issues/38042). [#38184](https://github.com/ClickHouse/ClickHouse/pull/38184) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Added background cleanup of subdirectories in `store/`. In some cases clickhouse-server might leave garbage subdirectories in `store/` (for example, on unsuccessful table creation), and those directories were never removed. Fixes [#33710](https://github.com/ClickHouse/ClickHouse/issues/33710). [#38265](https://github.com/ClickHouse/ClickHouse/pull/38265) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Add `DESCRIBE CACHE` query to show cache settings from config. Add `SHOW CACHES` query to show available filesystem caches list. [#38279](https://github.com/ClickHouse/ClickHouse/pull/38279) ([Kseniia Sumarokova](https://github.com/kssenii)).
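  A sketch of the new introspection queries; the cache name `s3_cache` is an assumption and should be replaced with a name from your own configuration:
  ```sql
  SHOW CACHES;                -- list configured filesystem caches
  DESCRIBE CACHE 's3_cache';  -- show the settings of one cache
  ```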
|
||||
* Add access check for `system drop filesystem cache`. Support ON CLUSTER. [#38319](https://github.com/ClickHouse/ClickHouse/pull/38319) ([Kseniia Sumarokova](https://github.com/kssenii)).
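  A sketch of the ON CLUSTER form mentioned above (the cluster name is hypothetical):
  ```sql
  SYSTEM DROP FILESYSTEM CACHE ON CLUSTER my_cluster;
  ```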
|
||||
* Fix PostgreSQL database engine incompatibility on upgrade from 21.3 to 22.3. Closes [#36659](https://github.com/ClickHouse/ClickHouse/issues/36659). [#38369](https://github.com/ClickHouse/ClickHouse/pull/38369) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* `filesystemAvailable` and similar functions now work in `clickhouse-local`. This closes [#38423](https://github.com/ClickHouse/ClickHouse/issues/38423). [#38424](https://github.com/ClickHouse/ClickHouse/pull/38424) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Add `revision` function. [#38555](https://github.com/ClickHouse/ClickHouse/pull/38555) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix GCS via proxy tunnel usage. [#38726](https://github.com/ClickHouse/ClickHouse/pull/38726) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Support `\i file` in clickhouse client / local (similar to psql \i). [#38813](https://github.com/ClickHouse/ClickHouse/pull/38813) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* New option `optimize = 1` in `EXPLAIN AST`. If enabled, it shows the AST after it is rewritten; otherwise, the AST of the original query. Disabled by default. [#38910](https://github.com/ClickHouse/ClickHouse/pull/38910) ([Igor Nikonov](https://github.com/devcrafter)).
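  A sketch of how the option is passed; the query itself is arbitrary:
  ```sql
  -- AST after rewriting:
  EXPLAIN AST optimize = 1 SELECT sum(number) FROM numbers(10) GROUP BY number % 2;
  -- Default: AST of the original query.
  EXPLAIN AST SELECT sum(number) FROM numbers(10) GROUP BY number % 2;
  ```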
|
||||
* Allow trailing comma in columns list. Closes [#38425](https://github.com/ClickHouse/ClickHouse/issues/38425). [#38440](https://github.com/ClickHouse/ClickHouse/pull/38440) ([chen](https://github.com/xiedeyantu)).
|
||||
* Bugfixes and performance improvements for `parallel_hash` JOIN method. [#37648](https://github.com/ClickHouse/ClickHouse/pull/37648) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Support hadoop secure RPC transfer (hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#37852](https://github.com/ClickHouse/ClickHouse/pull/37852) ([Peng Liu](https://github.com/michael1589)).
|
||||
* Add struct type support in `StorageHive`. [#38118](https://github.com/ClickHouse/ClickHouse/pull/38118) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* S3 single objects are now removed with `RemoveObjectRequest`. This implements compatibility with GCP, which did not allow using `removeFileIfExists`, effectively breaking approximately half of the `remove` functionality. The `DeleteObjects` S3 API, which is not supported by GCS, is now detected automatically, so GCS can be used without an explicit `support_batch_delete=0` in the configuration. [#37882](https://github.com/ClickHouse/ClickHouse/pull/37882) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||
* Expose basic ClickHouse Keeper related monitoring data (via ProfileEvents and CurrentMetrics). [#38072](https://github.com/ClickHouse/ClickHouse/pull/38072) ([lingpeng0314](https://github.com/lingpeng0314)).
|
||||
* Support `auto_close` option for PostgreSQL engine connection. Closes [#31486](https://github.com/ClickHouse/ClickHouse/issues/31486). [#38363](https://github.com/ClickHouse/ClickHouse/pull/38363) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Allow `NULL` modifier in columns declaration for table functions. [#38816](https://github.com/ClickHouse/ClickHouse/pull/38816) ([Kruglov Pavel](https://github.com/Avogar)).
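  A sketch using the `file` table function with an explicit structure; the path and column names are hypothetical:
  ```sql
  -- `String NULL` is equivalent to Nullable(String) in the structure string.
  SELECT *
  FROM file('data.csv', 'CSV', 'id UInt32, comment String NULL');
  ```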
|
||||
* Deactivate `mutations_finalizing_task` before shutdown to avoid benign `TABLE_IS_READ_ONLY` errors during shutdown. [#38851](https://github.com/ClickHouse/ClickHouse/pull/38851) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Eliminate unnecessary waiting of SELECT queries after ALTER queries in presence of INSERT queries if you use deprecated Ordinary databases. [#38864](https://github.com/ClickHouse/ClickHouse/pull/38864) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* New option `rewrite` in `EXPLAIN AST`. If enabled, it shows the AST after it is rewritten; otherwise, the AST of the original query. Disabled by default. [#38910](https://github.com/ClickHouse/ClickHouse/pull/38910) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Stop reporting Zookeeper "Node exists" exceptions in system.errors when they are expected. [#38961](https://github.com/ClickHouse/ClickHouse/pull/38961) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* `clickhouse-keeper`: add support for real-time digest calculation and verification. It is disabled by default. [#37555](https://github.com/ClickHouse/ClickHouse/pull/37555) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Allow specifying globs `*` or `{expr1, expr2, expr3}` inside a key for the `clickhouse-extract-from-config` tool. [#38966](https://github.com/ClickHouse/ClickHouse/pull/38966) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* clearOldLogs: Don't report KEEPER_EXCEPTION on concurrent deletes. [#39016](https://github.com/ClickHouse/ClickHouse/pull/39016) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* `clickhouse-keeper` improvement: persist meta-information about Keeper servers to disk. [#39069](https://github.com/ClickHouse/ClickHouse/pull/39069) ([Antonio Andelic](https://github.com/antonio2368)). This will make it easier to operate if you shut down or restart all Keeper nodes at the same time.
|
||||
* Continue without exception when running out of disk space when using filesystem cache. [#39106](https://github.com/ClickHouse/ClickHouse/pull/39106) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Handle SIGTERM signals from Kubernetes (k8s). [#39130](https://github.com/ClickHouse/ClickHouse/pull/39130) ([Timur Solodovnikov](https://github.com/tsolodov)).
|
||||
* Add `merge_algorithm` column (Undecided, Horizontal, Vertical) to system.part_log. [#39181](https://github.com/ClickHouse/ClickHouse/pull/39181) ([Azat Khuzhin](https://github.com/azat)).
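  A sketch of inspecting the new column (assuming `system.part_log` is enabled on the server):
  ```sql
  SELECT event_time, table, merge_algorithm
  FROM system.part_log
  WHERE event_type = 'MergeParts'
  ORDER BY event_time DESC
  LIMIT 10;
  ```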
|
||||
* Don't increment a counter in `system.errors` when the disk is not rotational. [#39216](https://github.com/ClickHouse/ClickHouse/pull/39216) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* The metric `result_bytes` for `INSERT` queries in `system.query_log` shows the number of bytes inserted. Previously, the value was incorrect and stored the same value as `result_rows`. [#39225](https://github.com/ClickHouse/ClickHouse/pull/39225) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* The CPU usage metric in clickhouse-client will be displayed in a better way. Fixes [#38756](https://github.com/ClickHouse/ClickHouse/issues/38756). [#39280](https://github.com/ClickHouse/ClickHouse/pull/39280) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Rethrow exception on filesystem cache initialization on server startup, better error message. [#39386](https://github.com/ClickHouse/ClickHouse/pull/39386) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* OpenTelemetry now collects traces without Processors spans by default (there are too many). To enable collection of Processors spans, use the `opentelemetry_trace_processors` setting. [#39170](https://github.com/ClickHouse/ClickHouse/pull/39170) ([Ilya Yatsishin](https://github.com/qoega)).
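  A sketch of turning the collection back on for a session (it can also be passed per query via `SETTINGS`):
  ```sql
  SET opentelemetry_trace_processors = 1;
  ```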
|
||||
* Functions `multiMatch[Fuzzy](AllIndices/Any/AnyIndex)` - don't throw a logical error if the needle argument is empty. [#39012](https://github.com/ClickHouse/ClickHouse/pull/39012) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Allow to declare `RabbitMQ` queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
#### Build/Testing/Packaging Improvement
|
||||
* Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Preparation for building on `s390x` platform. [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||
* Fix a bug in the `jemalloc` library. [#38757](https://github.com/ClickHouse/ClickHouse/pull/38757) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Hardware benchmark now has support for automatic results uploading. [#38427](https://github.com/ClickHouse/ClickHouse/pull/38427) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* System table "system.licenses" is now correctly populated on Mac (Darwin). [#38294](https://github.com/ClickHouse/ClickHouse/pull/38294) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Change `all|noarch` packages to architecture-dependent ones, fix some documentation for it, and push `aarch64|arm64` packages to Artifactory and release assets. Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||
* Fix rounding for `Decimal128/Decimal256` with more than 19-digits long scale. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Fixed crash caused by data race in storage `Hive` (integration table engine). [#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Correct glob expansion in case of `{0..10}` forms. Fixes [#38498](https://github.com/ClickHouse/ClickHouse/issues/38498). The current implementation is similar to what the shell does, as mentioned by @rschu1ze [here](https://github.com/ClickHouse/ClickHouse/pull/38502#issuecomment-1169057723). [#38502](https://github.com/ClickHouse/ClickHouse/pull/38502) ([Heena Bansal](https://github.com/HeenaBansal2009)).
|
||||
* Fix crash for `mapUpdate`, `mapFilter` functions when using with constant map argument. Closes [#38547](https://github.com/ClickHouse/ClickHouse/issues/38547). [#38553](https://github.com/ClickHouse/ClickHouse/pull/38553) ([hexiaoting](https://github.com/hexiaoting)).
|
||||
* Fix `toHour` monotonicity information for query optimization which can lead to incorrect query result (incorrect index analysis). This fixes [#38333](https://github.com/ClickHouse/ClickHouse/issues/38333). [#38675](https://github.com/ClickHouse/ClickHouse/pull/38675) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix the check of whether S3 storage supports parallel writes; it resulted in S3 parallel writes not working. [#38792](https://github.com/ClickHouse/ClickHouse/pull/38792) ([chen](https://github.com/xiedeyantu)).
|
||||
* Fix s3 seekable reads with parallel read buffer. (Affected memory usage during query). Closes [#38258](https://github.com/ClickHouse/ClickHouse/issues/38258). [#38802](https://github.com/ClickHouse/ClickHouse/pull/38802) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Update `simdjson`. This fixes [#38621](https://github.com/ClickHouse/ClickHouse/issues/38621) - a buffer overflow on machines with the latest Intel CPUs with AVX-512 VBMI. [#38838](https://github.com/ClickHouse/ClickHouse/pull/38838) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix possible logical error for Vertical merges. [#38859](https://github.com/ClickHouse/ClickHouse/pull/38859) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix settings profile with seconds unit. [#38896](https://github.com/ClickHouse/ClickHouse/pull/38896) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix incorrect partition pruning when there is a nullable partition key. Note: most likely you don't use nullable partition keys - this is an obscure feature you should not use. Nullable keys are nonsense and this feature is only needed for some crazy use cases. This fixes [#38941](https://github.com/ClickHouse/ClickHouse/issues/38941). [#38946](https://github.com/ClickHouse/ClickHouse/pull/38946) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Improve `fsync_part_directory` for fetches. [#38993](https://github.com/ClickHouse/ClickHouse/pull/38993) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix possible deadlock inside `OvercommitTracker`. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix some corner cases of interpretation of the arguments of window expressions. Fixes [#38538](https://github.com/ClickHouse/ClickHouse/issues/38538). Allow using higher-order functions in window expressions. [#39112](https://github.com/ClickHouse/ClickHouse/pull/39112) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Keep `LowCardinality` type in `tuple` function. Previously `LowCardinality` type was dropped and elements of created tuple had underlying type of `LowCardinality`. [#39113](https://github.com/ClickHouse/ClickHouse/pull/39113) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix unexpected query result when both `optimize_trivial_count_query` and `empty_result_for_aggregation_by_empty_set` are set to true. This fixes [#39140](https://github.com/ClickHouse/ClickHouse/issues/39140). [#39155](https://github.com/ClickHouse/ClickHouse/pull/39155) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix an extremely rare race condition during hardlink creation for remote filesystems. The only way to reproduce it is a concurrent run of backups. [#39190](https://github.com/ClickHouse/ClickHouse/pull/39190) ([alesapin](https://github.com/alesapin)).
|
||||
* (zero-copy replication is an experimental feature that should not be used in production) Fix fetch of in-memory part with `allow_remote_fs_zero_copy_replication`. [#39214](https://github.com/ClickHouse/ClickHouse/pull/39214) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* (MaterializedPostgreSQL - experimental feature). Fix segmentation fault in MaterializedPostgreSQL database engine, which could happen if some exception occurred at replication initialisation. Closes [#36939](https://github.com/ClickHouse/ClickHouse/issues/36939). [#39272](https://github.com/ClickHouse/ClickHouse/pull/39272) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix incorrect fetch of table metadata from PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix projection exception when aggregation keys are wrapped inside other functions. This fixes [#37151](https://github.com/ClickHouse/ClickHouse/issues/37151). [#37155](https://github.com/ClickHouse/ClickHouse/pull/37155) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix possible logical error `... with argument with type Nothing and default implementation for Nothing is expected to return result with type Nothing, got ...` in some functions. Closes: [#37610](https://github.com/ClickHouse/ClickHouse/issues/37610) Closes: [#37741](https://github.com/ClickHouse/ClickHouse/issues/37741). [#37759](https://github.com/ClickHouse/ClickHouse/pull/37759) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix incorrect column order in subqueries of UNION (duplicated columns in subselects could produce an incorrect result). [#37887](https://github.com/ClickHouse/ClickHouse/pull/37887) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix incorrect behavior of `ALTER TABLE ... MODIFY COLUMN` with column names that contain dots. Closes [#37907](https://github.com/ClickHouse/ClickHouse/issues/37907). [#37971](https://github.com/ClickHouse/ClickHouse/pull/37971) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix possible crash in `Distributed` async insert in case of removing a replica from config. [#38029](https://github.com/ClickHouse/ClickHouse/pull/38029) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix "Missing columns" for GLOBAL JOIN with CTE without alias. [#38056](https://github.com/ClickHouse/ClickHouse/pull/38056) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Rewrite tuple functions as literals in backwards-compatibility mode. [#38096](https://github.com/ClickHouse/ClickHouse/pull/38096) ([Anton Kozlov](https://github.com/tonickkozlov)).
|
||||
* Fix redundant memory reservation for output block during `ORDER BY`. [#38127](https://github.com/ClickHouse/ClickHouse/pull/38127) ([iyupeng](https://github.com/iyupeng)).
|
||||
* Fix possible logical error `Bad cast from type DB::IColumn* to DB::ColumnNullable*` in array mapped functions. Closes [#38006](https://github.com/ClickHouse/ClickHouse/issues/38006). [#38132](https://github.com/ClickHouse/ClickHouse/pull/38132) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix temporary name clash in partial merge join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#38135](https://github.com/ClickHouse/ClickHouse/pull/38135) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix a minor issue with queries like `CREATE TABLE nested_name_tuples (`a` Tuple(x String, y Tuple(i Int32, j String))) ENGINE = Memory;` [#38136](https://github.com/ClickHouse/ClickHouse/pull/38136) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* (Window View is an experimental feature) Fix LOGICAL_ERROR for WINDOW VIEW with incorrect structure. [#38205](https://github.com/ClickHouse/ClickHouse/pull/38205) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Update librdkafka submodule to fix crash when an OAUTHBEARER refresh callback is set. [#38225](https://github.com/ClickHouse/ClickHouse/pull/38225) ([Rafael Acevedo](https://github.com/racevedoo)).
|
||||
* Fix INSERT into `Distributed` tables hanging due to ProfileEvents. [#38307](https://github.com/ClickHouse/ClickHouse/pull/38307) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix retries in PostgreSQL engine. [#38310](https://github.com/ClickHouse/ClickHouse/pull/38310) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix optimization in PartialSortingTransform (SIGSEGV and possible incorrect result). [#38324](https://github.com/ClickHouse/ClickHouse/pull/38324) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix RabbitMQ with formats based on PeekableReadBuffer. Closes [#38061](https://github.com/ClickHouse/ClickHouse/issues/38061). [#38356](https://github.com/ClickHouse/ClickHouse/pull/38356) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* MaterializedPostgreSQL - experimental feature. Fix possible `Invalid number of rows in Chunk` in MaterializedPostgreSQL. Closes [#37323](https://github.com/ClickHouse/ClickHouse/issues/37323). [#38360](https://github.com/ClickHouse/ClickHouse/pull/38360) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix RabbitMQ configuration with connection string setting. Closes [#36531](https://github.com/ClickHouse/ClickHouse/issues/36531). [#38365](https://github.com/ClickHouse/ClickHouse/pull/38365) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix PostgreSQL engine not using PostgreSQL schema when retrieving array dimension size. Closes [#36755](https://github.com/ClickHouse/ClickHouse/issues/36755). Closes [#36772](https://github.com/ClickHouse/ClickHouse/issues/36772). [#38366](https://github.com/ClickHouse/ClickHouse/pull/38366) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possibly incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix wrong results of countSubstrings() & position() on patterns with 0-bytes. [#38589](https://github.com/ClickHouse/ClickHouse/pull/38589) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
|
||||
* `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix use-after-free for aggregate functions with `Map` combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
### <a id="226"></a> ClickHouse release 22.6, 2022-06-16
@ -34,7 +345,6 @@
|
||||
* Add two new settings `input_format_csv_skip_first_lines/input_format_tsv_skip_first_lines` to allow skipping specified number of lines in the beginning of the file in CSV/TSV formats. [#37537](https://github.com/ClickHouse/ClickHouse/pull/37537) ([Kruglov Pavel](https://github.com/Avogar)).
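  A sketch with the `file` table function; the path and structure are hypothetical:
  ```sql
  -- Skip two header/comment lines before parsing the CSV data.
  SELECT *
  FROM file('report.csv', 'CSV', 'id UInt32, name String')
  SETTINGS input_format_csv_skip_first_lines = 2;
  ```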
|
||||
* `showCertificate` function shows current server's SSL certificate. [#37540](https://github.com/ClickHouse/ClickHouse/pull/37540) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* HTTP source for Data Dictionaries in Named Collections is supported. [#37581](https://github.com/ClickHouse/ClickHouse/pull/37581) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Added a new window function `nonNegativeDerivative(metric_column, timestamp_column[, INTERVAL x SECOND])`. [#37628](https://github.com/ClickHouse/ClickHouse/pull/37628) ([Andrey Zvonov](https://github.com/zvonand)).
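  A sketch following the signature above; it is a window function, so an OVER clause is required (the table and column names are hypothetical):
  ```sql
  SELECT
      ts,
      nonNegativeDerivative(bytes_received, ts, INTERVAL 1 SECOND)
          OVER (ORDER BY ts ASC) AS bytes_per_second
  FROM metrics;
  ```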
|
||||
* Implemented changing the comment for `ReplicatedMergeTree` tables. [#37416](https://github.com/ClickHouse/ClickHouse/pull/37416) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||
* Added `SYSTEM UNFREEZE` query that deletes the whole backup regardless if the corresponding table is deleted or not. [#36424](https://github.com/ClickHouse/ClickHouse/pull/36424) ([Vadim Volodin](https://github.com/PolyProgrammist)).
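  A sketch of the new query; the backup name is hypothetical and must match the name used with `ALTER TABLE ... FREEZE WITH NAME`:
  ```sql
  SYSTEM UNFREEZE WITH NAME 'backup_2022_07_01';
  ```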
|
||||
|
||||
@ -91,7 +401,7 @@
|
||||
* Allows providing `NULL`/`NOT NULL` right after type in column declaration. [#37337](https://github.com/ClickHouse/ClickHouse/pull/37337) ([Igor Nikonov](https://github.com/devcrafter)).
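  A sketch of the relaxed declaration syntax; the table layout is illustrative:
  ```sql
  CREATE TABLE users
  (
      id UInt64 NOT NULL,
      nickname String NULL   -- same as Nullable(String)
  )
  ENGINE = MergeTree
  ORDER BY id;
  ```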
|
||||
* Optimize getting a read buffer for file segments in the `PARTIALLY_DOWNLOADED` state. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([xiedeyantu](https://github.com/xiedeyantu)).
|
||||
* Try to improve short circuit functions processing to fix problems with stress tests. [#37384](https://github.com/ClickHouse/ClickHouse/pull/37384) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Closes [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)).
|
||||
* Generate multiple columns with UUID (generateUUIDv4(1), generateUUIDv4(2)) [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)).
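  A sketch of the pattern described above; the arguments are dummy expressions whose only purpose is to defeat common-subexpression elimination, so each column gets an independent UUID:
  ```sql
  SELECT
      generateUUIDv4(1) AS uuid_a,
      generateUUIDv4(2) AS uuid_b;
  ```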
|
||||
* Fix extremely rare deadlock during part fetch in zero-copy replication. Fixes [#37423](https://github.com/ClickHouse/ClickHouse/issues/37423). [#37424](https://github.com/ClickHouse/ClickHouse/pull/37424) ([metahys](https://github.com/metahys)).
|
||||
* Don't allow to create storage with unknown data format. [#37450](https://github.com/ClickHouse/ClickHouse/pull/37450) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Set `global_memory_usage_overcommit_max_wait_microseconds` default value to 5 seconds. Add info about `OvercommitTracker` to OOM exception message. Add `MemoryOvercommitWaitTimeMicroseconds` profile event. [#37460](https://github.com/ClickHouse/ClickHouse/pull/37460) ([Dmitry Novik](https://github.com/novikd)).
|
||||
|
113
CMakeLists.txt
113
CMakeLists.txt
@ -1,4 +1,4 @@
|
||||
cmake_minimum_required(VERSION 3.14)
|
||||
cmake_minimum_required(VERSION 3.15)
|
||||
|
||||
project(ClickHouse LANGUAGES C CXX ASM)
|
||||
|
||||
@ -74,18 +74,12 @@ message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
|
||||
string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
|
||||
|
||||
option(USE_STATIC_LIBRARIES "Disable to use shared libraries" ON)
|
||||
|
||||
if (NOT USE_STATIC_LIBRARIES)
|
||||
# DEVELOPER ONLY.
|
||||
# Faster linking if turned on.
|
||||
option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files")
|
||||
|
||||
option(CLICKHOUSE_SPLIT_BINARY
|
||||
"Make several binaries (clickhouse-server, clickhouse-client etc.) instead of one bundled")
|
||||
endif ()
|
||||
# DEVELOPER ONLY.
|
||||
# Faster linking if turned on.
|
||||
option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files" OFF)
|
||||
|
||||
if (USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
|
||||
message(FATAL_ERROR "Defining SPLIT_SHARED_LIBRARIES=1 without USE_STATIC_LIBRARIES=0 has no effect.")
|
||||
message(FATAL_ERROR "SPLIT_SHARED_LIBRARIES=1 must not be used together with USE_STATIC_LIBRARIES=1")
|
||||
endif()
|
||||
|
||||
if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
|
||||
@ -159,6 +153,8 @@ if (COMPILER_CLANG)
|
||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
|
||||
endif ()
|
||||
|
||||
# If compiler has support for -Wreserved-identifier. It is difficult to detect by clang version,
|
||||
@ -168,7 +164,6 @@ if (HAS_RESERVED_IDENTIFIER)
|
||||
add_compile_definitions (HAS_RESERVED_IDENTIFIER)
|
||||
endif ()
|
||||
|
||||
# If turned `ON`, assumes the user has either the system GTest library or the bundled one.
|
||||
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
|
||||
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
|
||||
|
||||
@ -204,8 +199,8 @@ endif ()
|
||||
option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")
|
||||
|
||||
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
|
||||
# Can be lld or ld-lld.
|
||||
if (LINKER_NAME MATCHES "lld$")
|
||||
# Can be lld or ld-lld or lld-13 or /path/to/lld.
|
||||
if (LINKER_NAME MATCHES "lld")
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
|
||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index")
|
||||
message (STATUS "Adding .gdb-index via --gdb-index linker option.")
|
||||
@ -223,11 +218,25 @@ if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
|
||||
OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
|
||||
OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
|
||||
set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT ON)
|
||||
else()
|
||||
set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT OFF)
|
||||
endif()
|
||||
# Provides faster linking and lower binary size.
|
||||
# Tradeoff is the inability to debug some source files with e.g. gdb
|
||||
# (empty stack frames and no local variables)."
|
||||
option(OMIT_HEAVY_DEBUG_SYMBOLS
|
||||
"Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
|
||||
${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})
|
||||
|
||||
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
|
||||
set(USE_DEBUG_HELPERS ON)
|
||||
endif()
|
||||
|
||||
option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
|
||||
|
||||
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
|
||||
if (NOT BUILD_STANDALONE_KEEPER)
|
||||
option(CREATE_KEEPER_SYMLINK "Create symlink for clickhouse-keeper to main server binary" ON)
|
||||
@ -236,7 +245,8 @@ else ()
|
||||
endif ()
|
||||
|
||||
# Create BuildID when using lld. For other linkers it is created by default.
|
||||
if (LINKER_NAME MATCHES "lld$")
|
||||
# (NOTE: LINKER_NAME can be either path or name, and in different variants)
|
||||
if (LINKER_NAME MATCHES "lld")
|
||||
# SHA1 is not cryptographically secure but it is the best what lld is offering.
|
||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
|
||||
endif ()
|
||||
@ -252,10 +262,10 @@ else ()
|
||||
endif ()
|
||||
|
||||
# Optionally split binaries and debug symbols.
|
||||
option(INSTALL_STRIPPED_BINARIES "Split binaries and debug symbols" OFF)
|
||||
if (INSTALL_STRIPPED_BINARIES)
|
||||
option(SPLIT_DEBUG_SYMBOLS "Split binaries and debug symbols" OFF)
|
||||
if (SPLIT_DEBUG_SYMBOLS)
|
||||
message(STATUS "Will split binaries and debug symbols")
|
||||
set(STRIPPED_BINARIES_OUTPUT "stripped" CACHE STRING "A separate directory for stripped information")
|
||||
set(SPLITTED_DEBUG_SYMBOLS_DIR "stripped" CACHE STRING "A separate directory for stripped information")
|
||||
endif()
|
||||
|
||||
cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd
|
||||
@ -321,6 +331,22 @@ if (COMPILER_GCC OR COMPILER_CLANG)
|
||||
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
|
||||
endif ()
|
||||
|
||||
if (ARCH_AMD64)
|
||||
# align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
|
||||
# which makes benchmark results more stable.
|
||||
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
|
||||
if (COMPILER_GCC)
|
||||
# gcc is in assembler, need to add "-Wa," prefix
|
||||
set(BRANCHES_WITHIN_32B_BOUNDARIES "-Wa,${BRANCHES_WITHIN_32B_BOUNDARIES}")
|
||||
endif()
|
||||
|
||||
include(CheckCXXCompilerFlag)
|
||||
check_cxx_compiler_flag("${BRANCHES_WITHIN_32B_BOUNDARIES}" HAS_BRANCHES_WITHIN_32B_BOUNDARIES)
|
||||
if (HAS_BRANCHES_WITHIN_32B_BOUNDARIES)
|
||||
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if (COMPILER_GCC)
|
||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcoroutines")
|
||||
endif ()
|
||||
@ -477,7 +503,7 @@ endif ()
|
||||
message (STATUS
|
||||
"Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ;
|
||||
USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES}
|
||||
SPLIT_SHARED=${SPLIT_SHARED_LIBRARIES}")
|
||||
SPLIT_SHARED_LIBRARIES=${SPLIT_SHARED_LIBRARIES}")
|
||||
|
||||
include (GNUInstallDirs)
|
||||
|
||||
@ -493,14 +519,14 @@ if (NOT ENABLE_JEMALLOC)
|
||||
message (WARNING "Non default allocator is disabled. This is not recommended for production builds.")
|
||||
endif ()
|
||||
|
||||
macro (add_executable target)
|
||||
macro (clickhouse_add_executable target)
|
||||
# invoke built-in add_executable
|
||||
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
|
||||
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on than provide memcpy symbol explicitly to neutrialize thinlto's libcall generation.
|
||||
if (ARCH_AMD64 AND GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
|
||||
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
|
||||
add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
|
||||
else ()
|
||||
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
|
||||
add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
|
||||
endif ()
|
||||
|
||||
get_target_property (type ${target} TYPE)
|
||||
@ -529,6 +555,16 @@ macro (add_executable target)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
# With cross-compiling, all targets are built for the target platform which usually different from the host
|
||||
# platform. This is problematic if a build artifact X (e.g. a file or an executable) is generated by running
|
||||
# another executable Y previously produced in the build. This is solved by compiling and running Y for/on
|
||||
# the host platform. Add target to the list:
|
||||
# add_native_target(<target> ...)
|
||||
set_property (GLOBAL PROPERTY NATIVE_BUILD_TARGETS)
|
||||
function (add_native_target)
|
||||
set_property (GLOBAL APPEND PROPERTY NATIVE_BUILD_TARGETS ${ARGV})
|
||||
endfunction (add_native_target)
|
||||
|
||||
set(ConfigIncludePath ${CMAKE_CURRENT_BINARY_DIR}/includes/configs CACHE INTERNAL "Path to generated configuration files.")
|
||||
include_directories(${ConfigIncludePath})
|
||||
|
||||
@ -543,3 +579,34 @@ add_subdirectory (tests)
|
||||
add_subdirectory (utils)
|
||||
|
||||
include (cmake/sanitize_target_link_libraries.cmake)
|
||||
|
||||
# Build native targets if necessary
|
||||
get_property(NATIVE_BUILD_TARGETS GLOBAL PROPERTY NATIVE_BUILD_TARGETS)
|
||||
if (NATIVE_BUILD_TARGETS
|
||||
AND NOT(
|
||||
CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
|
||||
AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR
|
||||
)
|
||||
)
|
||||
message (STATUS "Building native targets...")
|
||||
|
||||
set (NATIVE_BUILD_DIR "${CMAKE_BINARY_DIR}/native")
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory "${NATIVE_BUILD_DIR}"
|
||||
COMMAND_ECHO STDOUT)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND}
|
||||
"-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
|
||||
"-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
|
||||
"-DENABLE_CCACHE=${ENABLE_CCACHE}"
|
||||
"-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
|
||||
${CMAKE_SOURCE_DIR}
|
||||
WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
|
||||
COMMAND_ECHO STDOUT)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} --build "${NATIVE_BUILD_DIR}" --target ${NATIVE_BUILD_TARGETS}
|
||||
COMMAND_ECHO STDOUT)
|
||||
endif ()
|
||||
|
@ -7,12 +7,8 @@
|
||||
# How to install Ninja on Ubuntu:
|
||||
# sudo apt-get install ninja-build
|
||||
|
||||
# CLion does not support Ninja
|
||||
# You can add your vote on CLion task tracker:
|
||||
# https://youtrack.jetbrains.com/issue/CPP-2659
|
||||
# https://youtrack.jetbrains.com/issue/CPP-870
|
||||
|
||||
if (NOT DEFINED ENV{CLION_IDE} AND NOT DEFINED ENV{XCODE_IDE})
|
||||
if (NOT DEFINED ENV{XCODE_IDE})
|
||||
find_program(NINJA_PATH ninja)
|
||||
if (NINJA_PATH)
|
||||
set(CMAKE_GENERATOR "Ninja" CACHE INTERNAL "")
|
||||
@ -62,9 +58,10 @@ execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCH)
|
||||
# By default, prefer clang on Linux
|
||||
# But note, that you still may change the compiler with -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER.
|
||||
if (OS MATCHES "Linux"
|
||||
# some build systems may use CC/CXX env variables
|
||||
AND "$ENV{CC}" STREQUAL ""
|
||||
AND "$ENV{CXX}" STREQUAL "")
|
||||
AND "$ENV{CXX}" STREQUAL ""
|
||||
AND NOT DEFINED CMAKE_C_COMPILER
|
||||
AND NOT DEFINED CMAKE_CXX_COMPILER)
|
||||
find_program(CLANG_PATH clang)
|
||||
if (CLANG_PATH)
|
||||
set(CMAKE_C_COMPILER "clang" CACHE INTERNAL "")
|
||||
@ -87,8 +84,7 @@ if (OS MATCHES "Linux"
|
||||
set (CMAKE_TOOLCHAIN_FILE "cmake/linux/toolchain-aarch64.cmake" CACHE INTERNAL "")
|
||||
elseif (ARCH MATCHES "^(ppc64le.*|PPC64LE.*)")
|
||||
set (CMAKE_TOOLCHAIN_FILE "cmake/linux/toolchain-ppc64le.cmake" CACHE INTERNAL "")
|
||||
else ()
|
||||
else ()
|
||||
message (FATAL_ERROR "Unsupported architecture: ${ARCH}")
|
||||
endif ()
|
||||
|
||||
endif()
|
||||
|
@ -1,4 +1,4 @@
|
||||
[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/ClickHouse/raw/master/website/images/logo-400x240.png)](https://clickhouse.com)
|
||||
[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/clickhouse-presentations/raw/master/images/logo-400x240.png)](https://clickhouse.com)
|
||||
|
||||
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
|
||||
|
||||
@ -12,4 +12,7 @@ ClickHouse® is an open-source column-oriented database management system that a
|
||||
* [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
|
||||
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
|
||||
* [Contacts](https://clickhouse.com/company/#contact) can help to get your questions answered if there are any.
|
||||
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
||||
|
||||
## Upcoming events
|
||||
* [**v22.8 Release Webinar**](https://clickhouse.com/company/events/v22-8-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
|
||||
|
12
SECURITY.md
12
SECURITY.md
@ -10,9 +10,11 @@ The following versions of ClickHouse server are currently being supported with s
|
||||
|
||||
| Version | Supported |
|
||||
|:-|:-|
|
||||
| 22.8 | ✔️ |
|
||||
| 22.7 | ✔️ |
|
||||
| 22.6 | ✔️ |
|
||||
| 22.5 | ✔️ |
|
||||
| 22.4 | ✔️ |
|
||||
| 22.5 | ❌ |
|
||||
| 22.4 | ❌ |
|
||||
| 22.3 | ✔️ |
|
||||
| 22.2 | ❌ |
|
||||
| 22.1 | ❌ |
|
||||
@ -20,7 +22,7 @@ The following versions of ClickHouse server are currently being supported with s
|
||||
| 21.11 | ❌ |
|
||||
| 21.10 | ❌ |
|
||||
| 21.9 | ❌ |
|
||||
| 21.8 | ✔️ |
|
||||
| 21.8 | ❌ |
|
||||
| 21.7 | ❌ |
|
||||
| 21.6 | ❌ |
|
||||
| 21.5 | ❌ |
|
||||
@ -37,7 +39,7 @@ The following versions of ClickHouse server are currently being supported with s
|
||||
|
||||
We're extremely grateful for security researchers and users that report vulnerabilities to the ClickHouse Open Source Community. All reports are thoroughly investigated by developers.
|
||||
|
||||
To report a potential vulnerability in ClickHouse please send the details about it to [security@clickhouse.com](mailto:security@clickhouse.com).
|
||||
To report a potential vulnerability in ClickHouse please send the details about it to [security@clickhouse.com](mailto:security@clickhouse.com). We do not offer any financial rewards for reporting issues to us using this method. Alternatively, you can also submit your findings through our public bug bounty program hosted by [Bugcrowd](https://bugcrowd.com/clickhouse) and be rewarded for it as per the program scope and rules of engagement.
|
||||
|
||||
### When Should I Report a Vulnerability?
|
||||
|
||||
@ -57,5 +59,5 @@ As the security issue moves from triage, to identified fix, to release planning
|
||||
|
||||
## Public Disclosure Timing
|
||||
|
||||
A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date to be on the order of 7 days.
|
||||
A public disclosure date is negotiated by the ClickHouse maintainers and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to 90 days. For a vulnerability with a straightforward mitigation, we expect the report date to disclosure date to be on the order of 7 days.
|
||||
|
||||
|
@ -89,7 +89,7 @@ public:
|
||||
inline void returnObject(T && object_to_return)
|
||||
{
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(objects_mutex);
|
||||
std::lock_guard lock(objects_mutex);
|
||||
|
||||
objects.emplace_back(std::move(object_to_return));
|
||||
--borrowed_objects_size;
|
||||
@ -107,14 +107,14 @@ public:
|
||||
/// Allocated objects size by the pool. If allocatedObjectsSize == maxSize then pool is full.
|
||||
inline size_t allocatedObjectsSize() const
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||
std::lock_guard lock(objects_mutex);
|
||||
return allocated_objects_size;
|
||||
}
|
||||
|
||||
/// Returns allocatedObjectsSize == maxSize
|
||||
inline bool isFull() const
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||
std::lock_guard lock(objects_mutex);
|
||||
return allocated_objects_size == max_size;
|
||||
}
|
||||
|
||||
@ -122,7 +122,7 @@ public:
|
||||
/// Then client will wait during borrowObject function call.
|
||||
inline size_t borrowedObjectsSize() const
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(objects_mutex);
|
||||
std::lock_guard lock(objects_mutex);
|
||||
return borrowed_objects_size;
|
||||
}
|
||||
|
||||
|
@ -52,15 +52,15 @@ struct Decimal
|
||||
constexpr Decimal(Decimal<T> &&) noexcept = default;
|
||||
constexpr Decimal(const Decimal<T> &) = default;
|
||||
|
||||
constexpr Decimal(const T & value_): value(value_) {}
|
||||
constexpr Decimal(const T & value_): value(value_) {} // NOLINT(google-explicit-constructor)
|
||||
|
||||
template <typename U>
|
||||
constexpr Decimal(const Decimal<U> & x): value(x.value) {}
|
||||
constexpr Decimal(const Decimal<U> & x): value(x.value) {} // NOLINT(google-explicit-constructor)
|
||||
|
||||
constexpr Decimal<T> & operator=(Decimal<T> &&) noexcept = default;
|
||||
constexpr Decimal<T> & operator = (const Decimal<T> &) = default;
|
||||
|
||||
constexpr operator T () const { return value; }
|
||||
constexpr operator T () const { return value; } // NOLINT(google-explicit-constructor)
|
||||
|
||||
template <typename U>
|
||||
constexpr U convertTo() const
|
||||
@ -111,7 +111,7 @@ public:
|
||||
using Base::Base;
|
||||
using NativeType = Base::NativeType;
|
||||
|
||||
constexpr DateTime64(const Base & v): Base(v) {}
|
||||
constexpr DateTime64(const Base & v): Base(v) {} // NOLINT(google-explicit-constructor)
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -36,14 +36,14 @@ struct DecomposedFloat
|
||||
{
|
||||
using Traits = FloatTraits<T>;
|
||||
|
||||
DecomposedFloat(T x)
|
||||
explicit DecomposedFloat(T x)
|
||||
{
|
||||
memcpy(&x_uint, &x, sizeof(x));
|
||||
}
|
||||
|
||||
typename Traits::UInt x_uint;
|
||||
|
||||
bool is_negative() const
|
||||
bool isNegative() const
|
||||
{
|
||||
return x_uint >> (Traits::bits - 1);
|
||||
}
|
||||
@ -53,7 +53,7 @@ struct DecomposedFloat
|
||||
{
|
||||
return (exponent() == 0 && mantissa() == 0)
|
||||
? 0
|
||||
: (is_negative()
|
||||
: (isNegative()
|
||||
? -1
|
||||
: 1);
|
||||
}
|
||||
@ -63,7 +63,7 @@ struct DecomposedFloat
|
||||
return (x_uint >> (Traits::mantissa_bits)) & (((1ull << (Traits::exponent_bits + 1)) - 1) >> 1);
|
||||
}
|
||||
|
||||
int16_t normalized_exponent() const
|
||||
int16_t normalizedExponent() const
|
||||
{
|
||||
return int16_t(exponent()) - ((1ull << (Traits::exponent_bits - 1)) - 1);
|
||||
}
|
||||
@ -73,20 +73,20 @@ struct DecomposedFloat
|
||||
return x_uint & ((1ull << Traits::mantissa_bits) - 1);
|
||||
}
|
||||
|
||||
int64_t mantissa_with_sign() const
|
||||
int64_t mantissaWithSign() const
|
||||
{
|
||||
return is_negative() ? -mantissa() : mantissa();
|
||||
return isNegative() ? -mantissa() : mantissa();
|
||||
}
|
||||
|
||||
/// NOTE Probably floating point instructions can be better.
|
||||
bool is_integer_in_representable_range() const
|
||||
bool isIntegerInRepresentableRange() const
|
||||
{
|
||||
return x_uint == 0
|
||||
|| (normalized_exponent() >= 0 /// The number is not less than one
|
||||
|| (normalizedExponent() >= 0 /// The number is not less than one
|
||||
/// The number is inside the range where every integer has exact representation in float
|
||||
&& normalized_exponent() <= static_cast<int16_t>(Traits::mantissa_bits)
|
||||
&& normalizedExponent() <= static_cast<int16_t>(Traits::mantissa_bits)
|
||||
/// After multiplying by 2^exp, the fractional part becomes zero, means the number is integer
|
||||
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0));
|
||||
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
|
||||
}
|
||||
|
||||
|
||||
@ -102,15 +102,15 @@ struct DecomposedFloat
|
||||
return sign();
|
||||
|
||||
/// Different signs
|
||||
if (is_negative() && rhs > 0)
|
||||
if (isNegative() && rhs > 0)
|
||||
return -1;
|
||||
if (!is_negative() && rhs < 0)
|
||||
if (!isNegative() && rhs < 0)
|
||||
return 1;
|
||||
|
||||
/// Fractional number with magnitude less than one
|
||||
if (normalized_exponent() < 0)
|
||||
if (normalizedExponent() < 0)
|
||||
{
|
||||
if (!is_negative())
|
||||
if (!isNegative())
|
||||
return rhs > 0 ? -1 : 1;
|
||||
else
|
||||
return rhs >= 0 ? -1 : 1;
|
||||
@ -121,11 +121,11 @@ struct DecomposedFloat
|
||||
{
|
||||
if (rhs == std::numeric_limits<Int>::lowest())
|
||||
{
|
||||
assert(is_negative());
|
||||
assert(isNegative());
|
||||
|
||||
if (normalized_exponent() < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
|
||||
if (normalizedExponent() < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
|
||||
return 1;
|
||||
if (normalized_exponent() > static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
|
||||
if (normalizedExponent() > static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
|
||||
return -1;
|
||||
|
||||
if (mantissa() == 0)
|
||||
@ -136,44 +136,44 @@ struct DecomposedFloat
|
||||
}
|
||||
|
||||
/// Too large number: abs(float) > abs(rhs). Also the case with infinities and NaN.
|
||||
if (normalized_exponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
|
||||
return is_negative() ? -1 : 1;
|
||||
if (normalizedExponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
|
||||
return isNegative() ? -1 : 1;
|
||||
|
||||
using UInt = std::conditional_t<(sizeof(Int) > sizeof(typename Traits::UInt)), make_unsigned_t<Int>, typename Traits::UInt>;
|
||||
UInt uint_rhs = rhs < 0 ? -rhs : rhs;
|
||||
|
||||
/// Smaller octave: abs(rhs) < abs(float)
|
||||
/// FYI, TIL: octave is also called "binade", https://en.wikipedia.org/wiki/Binade
|
||||
if (uint_rhs < (static_cast<UInt>(1) << normalized_exponent()))
|
||||
return is_negative() ? -1 : 1;
|
||||
if (uint_rhs < (static_cast<UInt>(1) << normalizedExponent()))
|
||||
return isNegative() ? -1 : 1;
|
||||
|
||||
/// Larger octave: abs(rhs) > abs(float)
|
||||
if (normalized_exponent() + 1 < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>)
|
||||
&& uint_rhs >= (static_cast<UInt>(1) << (normalized_exponent() + 1)))
|
||||
return is_negative() ? 1 : -1;
|
||||
if (normalizedExponent() + 1 < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>)
|
||||
&& uint_rhs >= (static_cast<UInt>(1) << (normalizedExponent() + 1)))
|
||||
return isNegative() ? 1 : -1;
|
||||
|
||||
/// The same octave
|
||||
/// uint_rhs == 2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits)
|
||||
/// uint_rhs == 2 ^ normalizedExponent + mantissa * 2 ^ (normalizedExponent - mantissa_bits)
|
||||
|
||||
bool large_and_always_integer = normalized_exponent() >= static_cast<int16_t>(Traits::mantissa_bits);
|
||||
bool large_and_always_integer = normalizedExponent() >= static_cast<int16_t>(Traits::mantissa_bits);
|
||||
|
||||
UInt a = large_and_always_integer
|
||||
? static_cast<UInt>(mantissa()) << (normalized_exponent() - Traits::mantissa_bits)
|
||||
: static_cast<UInt>(mantissa()) >> (Traits::mantissa_bits - normalized_exponent());
|
||||
? static_cast<UInt>(mantissa()) << (normalizedExponent() - Traits::mantissa_bits)
|
||||
: static_cast<UInt>(mantissa()) >> (Traits::mantissa_bits - normalizedExponent());
|
||||
|
||||
UInt b = uint_rhs - (static_cast<UInt>(1) << normalized_exponent());
|
||||
UInt b = uint_rhs - (static_cast<UInt>(1) << normalizedExponent());
|
||||
|
||||
if (a < b)
|
||||
return is_negative() ? 1 : -1;
|
||||
return isNegative() ? 1 : -1;
|
||||
if (a > b)
|
||||
return is_negative() ? -1 : 1;
|
||||
return isNegative() ? -1 : 1;
|
||||
|
||||
/// Float has no fractional part means that the numbers are equal.
|
||||
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0)
|
||||
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
|
||||
return 0;
|
||||
else
|
||||
/// Float has fractional part means its abs value is larger.
|
||||
return is_negative() ? -1 : 1;
|
||||
return isNegative() ? -1 : 1;
|
||||
}
|
||||
|
||||
|
||||
|
@ -12,7 +12,14 @@
|
||||
#define JSON_MAX_DEPTH 100
|
||||
|
||||
|
||||
#ifdef __clang__
|
||||
# pragma clang diagnostic push
|
||||
# pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
|
||||
#endif
|
||||
POCO_IMPLEMENT_EXCEPTION(JSONException, Poco::Exception, "JSONException") // NOLINT(cert-err60-cpp, modernize-use-noexcept, hicpp-use-noexcept)
|
||||
#ifdef __clang__
|
||||
# pragma clang diagnostic pop
|
||||
#endif
|
||||
|
||||
|
||||
/// Read an unsigned integer in a simple format from a non-0-terminated string.
|
||||
@ -662,18 +669,18 @@ std::string JSON::getName() const
|
||||
return getString();
|
||||
}
|
||||
|
||||
StringRef JSON::getRawString() const
|
||||
std::string_view JSON::getRawString() const
|
||||
{
|
||||
Pos s = ptr_begin;
|
||||
if (*s != '"')
|
||||
throw JSONException(std::string("JSON: expected \", got ") + *s);
|
||||
while (++s != ptr_end && *s != '"');
|
||||
if (s != ptr_end)
|
||||
return StringRef(ptr_begin + 1, s - ptr_begin - 1);
|
||||
return std::string_view(ptr_begin + 1, s - ptr_begin - 1);
|
||||
throw JSONException("JSON: incorrect syntax (expected end of string, found end of JSON).");
|
||||
}
|
||||
|
||||
StringRef JSON::getRawName() const
|
||||
std::string_view JSON::getRawName() const
|
||||
{
|
||||
return getRawString();
|
||||
}
|
||||
|
@ -38,8 +38,16 @@
|
||||
*/
|
||||
|
||||
|
||||
// NOLINTBEGIN(google-explicit-constructor)
|
||||
#ifdef __clang__
|
||||
# pragma clang diagnostic push
|
||||
# pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
|
||||
#endif
|
||||
POCO_DECLARE_EXCEPTION(Foundation_API, JSONException, Poco::Exception)
|
||||
|
||||
#ifdef __clang__
|
||||
# pragma clang diagnostic pop
|
||||
#endif
|
||||
// NOLINTEND(google-explicit-constructor)
|
||||
|
||||
class JSON
|
||||
{
|
||||
@ -55,7 +63,7 @@ public:
|
||||
checkInit();
|
||||
}
|
||||
|
||||
JSON(const std::string & s) : ptr_begin(s.data()), ptr_end(s.data() + s.size()), level(0)
|
||||
explicit JSON(std::string_view s) : ptr_begin(s.data()), ptr_end(s.data() + s.size()), level(0)
|
||||
{
|
||||
checkInit();
|
||||
}
|
||||
@ -65,13 +73,7 @@ public:
|
||||
*this = rhs;
|
||||
}
|
||||
|
||||
JSON & operator=(const JSON & rhs)
|
||||
{
|
||||
ptr_begin = rhs.ptr_begin;
|
||||
ptr_end = rhs.ptr_end;
|
||||
level = rhs.level;
|
||||
return *this;
|
||||
}
|
||||
JSON & operator=(const JSON & rhs) = default;
|
||||
|
||||
const char * data() const { return ptr_begin; }
|
||||
const char * dataEnd() const { return ptr_end; }
|
||||
@ -130,8 +132,8 @@ public:
|
||||
std::string getName() const; /// Get the name of a name-value pair.
|
||||
JSON getValue() const; /// Get the value of a name-value pair.
|
||||
|
||||
StringRef getRawString() const;
|
||||
StringRef getRawName() const;
|
||||
std::string_view getRawString() const;
|
||||
std::string_view getRawName() const;
|
||||
|
||||
/// Get the value of the element; if the element is a string, parse the value from the string; if it is neither a string nor a number, throw an exception.
|
||||
double toDouble() const;
|
||||
@ -163,7 +165,7 @@ public:
|
||||
|
||||
/// Move to the next element of the array or the next name-value pair of the object.
|
||||
iterator & operator++();
|
||||
iterator operator++(int);
|
||||
iterator operator++(int); // NOLINT(cert-dcl21-cpp)
|
||||
|
||||
/// Whether the string contains escape sequences
|
||||
bool hasEscapes() const;
|
||||
|
@ -7,6 +7,7 @@
|
||||
#include <replxx.hxx>
|
||||
|
||||
#include <base/types.h>
|
||||
#include <base/defines.h>
|
||||
|
||||
class LineReader
|
||||
{
|
||||
@ -20,8 +21,8 @@ public:
|
||||
void addWords(Words && new_words);
|
||||
|
||||
private:
|
||||
Words words;
|
||||
Words words_no_case;
|
||||
Words words TSA_GUARDED_BY(mutex);
|
||||
Words words_no_case TSA_GUARDED_BY(mutex);
|
||||
|
||||
std::mutex mutex;
|
||||
};
|
||||
@ -29,7 +30,7 @@ public:
|
||||
using Patterns = std::vector<const char *>;
|
||||
|
||||
LineReader(const String & history_file_path, bool multiline, Patterns extenders, Patterns delimiters);
|
||||
virtual ~LineReader() {}
|
||||
virtual ~LineReader() = default;
|
||||
|
||||
/// Reads the whole line until delimiter (in multiline mode) or until the last line without extender.
|
||||
/// If resulting line is empty, it means the user interrupted the input.
|
||||
|
@ -27,7 +27,7 @@ void trim(String & s)
|
||||
|
||||
std::string getEditor()
|
||||
{
|
||||
const char * editor = std::getenv("EDITOR");
|
||||
const char * editor = std::getenv("EDITOR"); // NOLINT(concurrency-mt-unsafe)
|
||||
|
||||
if (!editor || !*editor)
|
||||
editor = "vim";
|
||||
@ -76,7 +76,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
|
||||
if (!in)
|
||||
{
|
||||
rx.print("Cannot open %s reading (for conversion): %s\n",
|
||||
path.c_str(), errnoToString(errno).c_str());
|
||||
path.c_str(), errnoToString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
@ -84,7 +84,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
|
||||
if (getline(in, line).bad())
|
||||
{
|
||||
rx.print("Cannot read from %s (for conversion): %s\n",
|
||||
path.c_str(), errnoToString(errno).c_str());
|
||||
path.c_str(), errnoToString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
|
||||
if (!out)
|
||||
{
|
||||
rx.print("Cannot open %s for writing (for conversion): %s\n",
|
||||
path.c_str(), errnoToString(errno).c_str());
|
||||
path.c_str(), errnoToString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
@ -151,7 +151,7 @@ ReplxxLineReader::ReplxxLineReader(
|
||||
history_file_fd = open(history_file_path.c_str(), O_RDWR);
|
||||
if (history_file_fd < 0)
|
||||
{
|
||||
rx.print("Open of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Open of history file failed: %s\n", errnoToString().c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -159,18 +159,18 @@ ReplxxLineReader::ReplxxLineReader(
|
||||
|
||||
if (flock(history_file_fd, LOCK_SH))
|
||||
{
|
||||
rx.print("Shared lock of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Shared lock of history file failed: %s\n", errnoToString().c_str());
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!rx.history_load(history_file_path))
|
||||
{
|
||||
rx.print("Loading history failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Loading history failed: %s\n", errnoToString().c_str());
|
||||
}
|
||||
|
||||
if (flock(history_file_fd, LOCK_UN))
|
||||
{
|
||||
rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Unlock of history file failed: %s\n", errnoToString().c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -225,7 +225,7 @@ ReplxxLineReader::ReplxxLineReader(
|
||||
ReplxxLineReader::~ReplxxLineReader()
|
||||
{
|
||||
if (close(history_file_fd))
|
||||
rx.print("Close of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Close of history file failed: %s\n", errnoToString().c_str());
|
||||
}
|
||||
|
||||
LineReader::InputStatus ReplxxLineReader::readOneLine(const String & prompt)
|
||||
@ -250,7 +250,7 @@ void ReplxxLineReader::addToHistory(const String & line)
|
||||
// and that is why flock() is added here.
|
||||
bool locked = false;
|
||||
if (flock(history_file_fd, LOCK_EX))
|
||||
rx.print("Lock of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Lock of history file failed: %s\n", errnoToString().c_str());
|
||||
else
|
||||
locked = true;
|
||||
|
||||
@ -258,10 +258,10 @@ void ReplxxLineReader::addToHistory(const String & line)
|
||||
|
||||
// flush changes to the disk
|
||||
if (!rx.history_save(history_file_path))
|
||||
rx.print("Saving history failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Saving history failed: %s\n", errnoToString().c_str());
|
||||
|
||||
if (locked && 0 != flock(history_file_fd, LOCK_UN))
|
||||
rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Unlock of history file failed: %s\n", errnoToString().c_str());
|
||||
}
|
||||
|
||||
/// See comments in ShellCommand::executeImpl()
|
||||
@ -275,7 +275,7 @@ int ReplxxLineReader::executeEditor(const std::string & path)
|
||||
static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
|
||||
if (!real_vfork)
|
||||
{
|
||||
rx.print("Cannot find symbol vfork in myself: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Cannot find symbol vfork in myself: %s\n", errnoToString().c_str());
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -283,7 +283,7 @@ int ReplxxLineReader::executeEditor(const std::string & path)
|
||||
|
||||
if (-1 == pid)
|
||||
{
|
||||
rx.print("Cannot vfork: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Cannot vfork: %s\n", errnoToString().c_str());
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -292,11 +292,11 @@ int ReplxxLineReader::executeEditor(const std::string & path)
|
||||
{
|
||||
sigset_t mask;
|
||||
sigemptyset(&mask);
|
||||
sigprocmask(0, nullptr, &mask);
|
||||
sigprocmask(SIG_UNBLOCK, &mask, nullptr);
|
||||
sigprocmask(0, nullptr, &mask); // NOLINT(concurrency-mt-unsafe) // ok in newly created process
|
||||
sigprocmask(SIG_UNBLOCK, &mask, nullptr); // NOLINT(concurrency-mt-unsafe) // ok in newly created process
|
||||
|
||||
execvp(editor.c_str(), argv);
|
||||
rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str());
|
||||
rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString().c_str());
|
||||
_exit(-1);
|
||||
}
|
||||
|
||||
@ -309,7 +309,7 @@ int ReplxxLineReader::executeEditor(const std::string & path)
|
||||
if (errno == EINTR)
|
||||
continue;
|
||||
|
||||
rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Cannot waitpid: %s\n", errnoToString().c_str());
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
@ -324,7 +324,7 @@ void ReplxxLineReader::openEditor()
|
||||
int fd = ::mkstemps(filename, 4);
|
||||
if (-1 == fd)
|
||||
{
|
||||
rx.print("Cannot create temporary file to edit query: %s\n", errnoToString(errno).c_str());
|
||||
rx.print("Cannot create temporary file to edit query: %s\n", errnoToString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
@ -338,7 +338,7 @@ void ReplxxLineReader::openEditor()
|
||||
ssize_t res = ::write(fd, begin + bytes_written, offset - bytes_written);
|
||||
if ((-1 == res || 0 == res) && errno != EINTR)
|
||||
{
|
||||
rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
|
||||
rx.print("Cannot write to temporary query file %s: %s\n", filename, errnoToString().c_str());
|
||||
break;
|
||||
}
|
||||
bytes_written += res;
|
||||
@ -346,7 +346,7 @@ void ReplxxLineReader::openEditor()
|
||||
|
||||
if (0 != ::close(fd))
|
||||
{
|
||||
rx.print("Cannot close temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
|
||||
rx.print("Cannot close temporary query file %s: %s\n", filename, errnoToString().c_str());
|
||||
return;
|
||||
}
|
||||
|
||||
@ -364,7 +364,7 @@ void ReplxxLineReader::openEditor()
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
rx.print("Cannot read from temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
|
||||
rx.print("Cannot read from temporary query file %s: %s\n", filename, errnoToString().c_str());
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -373,7 +373,7 @@ void ReplxxLineReader::openEditor()
|
||||
enableBracketedPaste();
|
||||
|
||||
if (0 != ::unlink(filename))
|
||||
rx.print("Cannot remove temporary query file %s: %s\n", filename, errnoToString(errno).c_str());
|
||||
rx.print("Cannot remove temporary query file %s: %s\n", filename, errnoToString().c_str());
|
||||
}
|
||||
|
||||
void ReplxxLineReader::enableBracketedPaste()
|
||||
|
@ -55,10 +55,9 @@ struct StringRef
|
||||
bool empty() const { return size == 0; }
|
||||
|
||||
std::string toString() const { return std::string(data, size); }
|
||||
|
||||
explicit operator std::string() const { return toString(); }
|
||||
std::string_view toView() const { return std::string_view(data, size); }
|
||||
|
||||
std::string_view toView() const { return std::string_view(data, size); }
|
||||
constexpr explicit operator std::string_view() const { return std::string_view(data, size); }
|
||||
};
|
||||
|
||||
|
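A small sketch of how the new std::string_view conversions on StringRef can be used, assuming base/StringRef.h from the hunk above is included (the helper name is illustrative):

    #include <iostream>
    #include <string_view>

    // Hypothetical helper: prints a StringRef without copying its bytes.
    void printRef(const StringRef & ref)
    {
        std::string_view view = ref.toView();                 // same data pointer and size, no allocation
        auto view2 = static_cast<std::string_view>(ref);      // the explicit conversion operator added above
        std::cout << view << ' ' << (view == view2) << '\n';  // both views refer to the same characters
    }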
@ -4,7 +4,7 @@
|
||||
|
||||
namespace Poco::Util
|
||||
{
|
||||
class LayeredConfiguration;
|
||||
class LayeredConfiguration; // NOLINT(cppcoreguidelines-virtual-class-destructor)
|
||||
}
|
||||
|
||||
/// Import extra command line arguments to configuration. These are command line arguments after --.
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include <base/extended_types.h>
|
||||
#include <base/defines.h>
|
||||
|
||||
// NOLINTBEGIN(google-runtime-int)
|
||||
|
||||
namespace common
|
||||
{
|
||||
@ -206,3 +207,5 @@ namespace common
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// NOLINTEND(google-runtime-int)
|
||||
|
@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <string.h>
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
#include <type_traits>
|
||||
|
||||
|
@ -100,6 +100,13 @@
|
||||
# define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED ALWAYS_INLINE
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) && defined(__clang_major__) && __clang_major__ >= 14
|
||||
# define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation))
|
||||
#else
|
||||
# define DISABLE_SANITIZER_INSTRUMENTATION
|
||||
#endif
|
||||
|
||||
|
||||
#if !__has_include(<sanitizer/asan_interface.h>) || !defined(ADDRESS_SANITIZER)
|
||||
# define ASAN_UNPOISON_MEMORY_REGION(a, b)
|
||||
# define ASAN_POISON_MEMORY_REGION(a, b)
|
||||
@ -124,6 +131,39 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/// Macros for Clang Thread Safety Analysis (TSA). They can be safely ignored by other compilers.
|
||||
/// Feel free to extend, but please stay close to https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#mutexheader
|
||||
#if defined(__clang__)
|
||||
# define TSA_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__))) /// data is protected by given capability
|
||||
# define TSA_PT_GUARDED_BY(...) __attribute__((pt_guarded_by(__VA_ARGS__))) /// pointed-to data is protected by the given capability
|
||||
# define TSA_REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__))) /// thread needs exclusive possession of given capability
|
||||
# define TSA_REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__))) /// thread needs shared possession of given capability
|
||||
# define TSA_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) /// annotated lock must be locked after given lock
|
||||
# define TSA_NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) /// disable TSA for a function
|
||||
|
||||
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
|
||||
/// Consider adding a comment before using these macros.
|
||||
# define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
|
||||
# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())
|
||||
|
||||
/// This macro is useful when only one thread writes to a member
|
||||
/// and you want to read this member from the same thread without locking a mutex.
|
||||
/// It's safe (because no concurrent writes are possible), but TSA generates a warning.
|
||||
/// (Seems like there's no way to verify it, but it makes sense to distinguish it from TSA_SUPPRESS_WARNING_FOR_READ for readability)
|
||||
# define TSA_READ_ONE_THREAD(x) TSA_SUPPRESS_WARNING_FOR_READ(x)
|
||||
|
||||
#else
|
||||
# define TSA_GUARDED_BY(...)
|
||||
# define TSA_PT_GUARDED_BY(...)
|
||||
# define TSA_REQUIRES(...)
|
||||
# define TSA_REQUIRES_SHARED(...)
|
||||
# define TSA_NO_THREAD_SAFETY_ANALYSIS
|
||||
|
||||
# define TSA_SUPPRESS_WARNING_FOR_READ(x)
|
||||
# define TSA_SUPPRESS_WARNING_FOR_WRITE(x)
|
||||
# define TSA_READ_ONE_THREAD(x)
|
||||
#endif
|
||||
|
||||
/// A template function for suppressing warnings about unused variables or function results.
|
||||
template <typename... Args>
|
||||
constexpr void UNUSED(Args &&... args [[maybe_unused]])
|
||||
|
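A minimal sketch of how the TSA_* annotations above are meant to be used, assuming base/defines.h is included (the class and member names are illustrative):

    #include <mutex>
    #include <string>
    #include <vector>

    class WordList
    {
    public:
        void add(std::string word)
        {
            std::lock_guard lock(mutex);        // holding the mutex satisfies the thread safety analysis
            words.push_back(std::move(word));
        }

        size_t size() const
        {
            std::lock_guard lock(mutex);
            return words.size();
        }

    private:
        mutable std::mutex mutex;
        std::vector<std::string> words TSA_GUARDED_BY(mutex);  // clang warns on any unguarded access
    };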
@ -27,6 +27,6 @@ struct FreeingDeleter
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::unique_ptr<char, FreeingDeleter> DemangleResult;
|
||||
using DemangleResult = std::unique_ptr<char, FreeingDeleter>;
|
||||
|
||||
DemangleResult tryDemangle(const char * name);
|
||||
|
@ -3,10 +3,11 @@
|
||||
#include <fmt/format.h>
|
||||
|
||||
|
||||
std::string errnoToString(int code, int the_errno)
|
||||
std::string errnoToString(int the_errno)
|
||||
{
|
||||
const size_t buf_size = 128;
|
||||
char buf[buf_size];
|
||||
|
||||
#ifndef _GNU_SOURCE
|
||||
int rc = strerror_r(the_errno, buf, buf_size);
|
||||
#ifdef OS_DARWIN
|
||||
@ -15,7 +16,7 @@ std::string errnoToString(int code, int the_errno)
|
||||
if (rc != 0)
|
||||
#endif
|
||||
{
|
||||
std::string tmp = std::to_string(code);
|
||||
std::string tmp = std::to_string(the_errno);
|
||||
const char * code_str = tmp.c_str();
|
||||
const char * unknown_message = "Unknown error ";
|
||||
strcpy(buf, unknown_message);
|
||||
@ -23,7 +24,6 @@ std::string errnoToString(int code, int the_errno)
|
||||
}
|
||||
return fmt::format("errno: {}, strerror: {}", the_errno, buf);
|
||||
#else
|
||||
(void)code;
|
||||
return fmt::format("errno: {}, strerror: {}", the_errno, strerror_r(the_errno, buf, sizeof(buf)));
|
||||
#endif
|
||||
}
|
||||
|
@ -3,4 +3,4 @@
|
||||
#include <cerrno>
|
||||
#include <string>
|
||||
|
||||
std::string errnoToString(int code, int the_errno = errno);
|
||||
std::string errnoToString(int the_errno = errno);
|
||||
|
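A short sketch of the new single-argument errnoToString() in use, assuming base/errnoToString.h from the hunk above is included (tryOpen is an illustrative name):

    #include <fcntl.h>
    #include <iostream>

    void tryOpen(const char * path)
    {
        if (::open(path, O_RDONLY) < 0)
            std::cout << "open failed: " << errnoToString() << '\n';  // defaults to the current errno
    }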
@ -5,7 +5,6 @@
|
||||
#include <base/types.h>
|
||||
#include <base/wide_integer.h>
|
||||
|
||||
|
||||
using Int128 = wide::integer<128, signed>;
|
||||
using UInt128 = wide::integer<128, unsigned>;
|
||||
using Int256 = wide::integer<256, signed>;
|
||||
@ -18,7 +17,7 @@ static_assert(sizeof(UInt256) == 32);
|
||||
/// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior.
|
||||
/// So instead of using the std type_traits, we use our own version which allows extension.
|
||||
template <typename T>
|
||||
struct is_signed
|
||||
struct is_signed // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
static constexpr bool value = std::is_signed_v<T>;
|
||||
};
|
||||
@ -30,7 +29,7 @@ template <typename T>
|
||||
inline constexpr bool is_signed_v = is_signed<T>::value;
|
||||
|
||||
template <typename T>
|
||||
struct is_unsigned
|
||||
struct is_unsigned // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
static constexpr bool value = std::is_unsigned_v<T>;
|
||||
};
|
||||
@ -51,7 +50,7 @@ template <class T> concept is_integer =
|
||||
template <class T> concept is_floating_point = std::is_floating_point_v<T>;
|
||||
|
||||
template <typename T>
|
||||
struct is_arithmetic
|
||||
struct is_arithmetic // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
static constexpr bool value = std::is_arithmetic_v<T>;
|
||||
};
|
||||
@ -66,9 +65,9 @@ template <typename T>
|
||||
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
||||
|
||||
template <typename T>
|
||||
struct make_unsigned
|
||||
struct make_unsigned // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
typedef std::make_unsigned_t<T> type;
|
||||
using type = std::make_unsigned_t<T>;
|
||||
};
|
||||
|
||||
template <> struct make_unsigned<Int128> { using type = UInt128; };
|
||||
@ -79,9 +78,9 @@ template <> struct make_unsigned<UInt256> { using type = UInt256; };
|
||||
template <typename T> using make_unsigned_t = typename make_unsigned<T>::type;
|
||||
|
||||
template <typename T>
|
||||
struct make_signed
|
||||
struct make_signed // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
typedef std::make_signed_t<T> type;
|
||||
using type = std::make_signed_t<T>;
|
||||
};
|
||||
|
||||
template <> struct make_signed<Int128> { using type = Int128; };
|
||||
@ -92,7 +91,7 @@ template <> struct make_signed<UInt256> { using type = Int256; };
|
||||
template <typename T> using make_signed_t = typename make_signed<T>::type;
|
||||
|
||||
template <typename T>
|
||||
struct is_big_int
|
||||
struct is_big_int // NOLINT(readability-identifier-naming)
|
||||
{
|
||||
static constexpr bool value = false;
|
||||
};
|
||||
@ -104,4 +103,3 @@ template <> struct is_big_int<UInt256> { static constexpr bool value = true; };
|
||||
|
||||
template <typename T>
|
||||
inline constexpr bool is_big_int_v = is_big_int<T>::value;
|
||||
|
||||
|
@ -15,7 +15,7 @@
|
||||
*
|
||||
* Allow to search for next character from the set of 'symbols...' in a string.
|
||||
* It is similar to 'strpbrk', 'strcspn' (and 'strchr', 'memchr' in the case of one symbol and '\0'),
|
||||
* but with the following differencies:
|
||||
* but with the following differences:
|
||||
* - works with any memory ranges, including containing zero bytes;
|
||||
* - doesn't require terminating zero byte: end of memory range is passed explicitly;
|
||||
* - if not found, returns pointer to end instead of nullptr;
|
||||
|
@ -120,6 +120,7 @@ Out & dumpDispatchPriorities(Out & out, T && x, std::decay_t<decltype(dumpImpl<p
|
||||
return dumpImpl<priority>(out, x);
|
||||
}
|
||||
|
||||
// NOLINTNEXTLINE(google-explicit-constructor)
|
||||
struct LowPriority { LowPriority(void *) {} };
|
||||
|
||||
template <int priority, typename Out, typename T>
|
||||
|
@ -91,10 +91,10 @@ template <size_t N>
|
||||
using DivisionBy10PowN = typename SelectType
|
||||
<
|
||||
N,
|
||||
Division<uint8_t, 0, 205U, 11>, /// divide by 10
|
||||
Division<uint16_t, 1, 41943U, 22>, /// divide by 100
|
||||
Division<uint32_t, 0, 3518437209U, 45>, /// divide by 10000
|
||||
Division<uint64_t, 0, 12379400392853802749ULL, 90> /// divide by 100000000
|
||||
Division<uint8_t, false, 205U, 11>, /// divide by 10
|
||||
Division<uint16_t, true, 41943U, 22>, /// divide by 100
|
||||
Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
|
||||
Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
|
||||
>::Result;
|
||||
|
||||
template <size_t N>
|
||||
@ -352,7 +352,7 @@ static inline char * writeUIntText(T x, char * p)
|
||||
static_assert(is_unsigned_v<T>);
|
||||
|
||||
int len = digits10(x);
|
||||
auto pp = p + len;
|
||||
auto * pp = p + len;
|
||||
while (x >= 100)
|
||||
{
|
||||
const auto i = x % 100;
|
||||
|
@ -5,13 +5,13 @@
|
||||
#include <utility>
|
||||
|
||||
template <class F>
|
||||
class [[nodiscard]] basic_scope_guard
|
||||
class [[nodiscard]] BasicScopeGuard
|
||||
{
|
||||
public:
|
||||
constexpr basic_scope_guard() = default;
|
||||
constexpr basic_scope_guard(basic_scope_guard && src) : function{src.release()} {}
|
||||
constexpr BasicScopeGuard() = default;
|
||||
constexpr BasicScopeGuard(BasicScopeGuard && src) : function{src.release()} {} // NOLINT(hicpp-noexcept-move, performance-noexcept-move-constructor)
|
||||
|
||||
constexpr basic_scope_guard & operator=(basic_scope_guard && src)
|
||||
constexpr BasicScopeGuard & operator=(BasicScopeGuard && src) // NOLINT(hicpp-noexcept-move, performance-noexcept-move-constructor)
|
||||
{
|
||||
if (this != &src)
|
||||
{
|
||||
@ -23,11 +23,11 @@ public:
|
||||
|
||||
template <typename G>
|
||||
requires std::is_convertible_v<G, F>
|
||||
constexpr basic_scope_guard(basic_scope_guard<G> && src) : function{src.release()} {}
|
||||
constexpr BasicScopeGuard(BasicScopeGuard<G> && src) : function{src.release()} {} // NOLINT(google-explicit-constructor)
|
||||
|
||||
template <typename G>
|
||||
requires std::is_convertible_v<G, F>
|
||||
constexpr basic_scope_guard & operator=(basic_scope_guard<G> && src)
|
||||
constexpr BasicScopeGuard & operator=(BasicScopeGuard<G> && src)
|
||||
{
|
||||
if (this != &src)
|
||||
{
|
||||
@ -39,13 +39,13 @@ public:
|
||||
|
||||
template <typename G>
|
||||
requires std::is_convertible_v<G, F>
|
||||
constexpr basic_scope_guard(const G & function_) : function{function_} {}
|
||||
constexpr BasicScopeGuard(const G & function_) : function{function_} {} // NOLINT(google-explicit-constructor)
|
||||
|
||||
template <typename G>
|
||||
requires std::is_convertible_v<G, F>
|
||||
constexpr basic_scope_guard(G && function_) : function{std::move(function_)} {}
|
||||
constexpr BasicScopeGuard(G && function_) : function{std::move(function_)} {} // NOLINT(google-explicit-constructor, bugprone-forwarding-reference-overload, bugprone-move-forwarding-reference)
|
||||
|
||||
~basic_scope_guard() { invoke(); }
|
||||
~BasicScopeGuard() { invoke(); }
|
||||
|
||||
static constexpr bool is_nullable = std::is_constructible_v<bool, F>;
|
||||
|
||||
@ -70,7 +70,7 @@ public:
|
||||
|
||||
template <typename G>
|
||||
requires std::is_convertible_v<G, F>
|
||||
basic_scope_guard<F> & join(basic_scope_guard<G> && other)
|
||||
BasicScopeGuard<F> & join(BasicScopeGuard<G> && other)
|
||||
{
|
||||
if (other.function)
|
||||
{
|
||||
@ -102,14 +102,13 @@ private:
|
||||
F function = F{};
|
||||
};
|
||||
|
||||
using scope_guard = basic_scope_guard<std::function<void(void)>>;
|
||||
using scope_guard = BasicScopeGuard<std::function<void(void)>>;
|
||||
|
||||
|
||||
template <class F>
|
||||
inline basic_scope_guard<F> make_scope_guard(F && function_) { return std::forward<F>(function_); }
|
||||
inline BasicScopeGuard<F> make_scope_guard(F && function_) { return std::forward<F>(function_); }
|
||||
|
||||
#define SCOPE_EXIT_CONCAT(n, ...) \
|
||||
const auto scope_exit##n = make_scope_guard([&] { __VA_ARGS__; })
|
||||
#define SCOPE_EXIT_FWD(n, ...) SCOPE_EXIT_CONCAT(n, __VA_ARGS__)
|
||||
#define SCOPE_EXIT(...) SCOPE_EXIT_FWD(__LINE__, __VA_ARGS__)
|
||||
|
||||
|
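A usage sketch for the SCOPE_EXIT macro defined above, assuming base/scope_guard.h is included (readFirstByte is an illustrative name):

    #include <fcntl.h>
    #include <unistd.h>

    int readFirstByte(const char * path)
    {
        int fd = ::open(path, O_RDONLY);
        if (fd < 0)
            return -1;

        SCOPE_EXIT({ ::close(fd); });  // the guard closes the descriptor on every return path

        char c = 0;
        if (::read(fd, &c, 1) != 1)
            return -1;                 // the descriptor is still closed here
        return c;
    }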
@ -16,7 +16,7 @@ void setTerminalEcho(bool enable)
|
||||
struct termios tty{};
|
||||
|
||||
if (0 != tcgetattr(STDIN_FILENO, &tty))
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString());
|
||||
|
||||
if (enable)
|
||||
tty.c_lflag |= ECHO;
|
||||
@ -24,5 +24,5 @@ void setTerminalEcho(bool enable)
|
||||
tty.c_lflag &= ~ECHO;
|
||||
|
||||
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
|
||||
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString());
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ template <typename Comparator>
|
||||
class DebugLessComparator
|
||||
{
|
||||
public:
|
||||
constexpr DebugLessComparator(Comparator & cmp_)
|
||||
constexpr DebugLessComparator(Comparator & cmp_) // NOLINT(google-explicit-constructor)
|
||||
: cmp(cmp_)
|
||||
{}
|
||||
|
||||
|
@ -23,10 +23,10 @@ public:
|
||||
constexpr StrongTypedef(): t() {}
|
||||
|
||||
constexpr StrongTypedef(const Self &) = default;
|
||||
constexpr StrongTypedef(Self &&) = default;
|
||||
constexpr StrongTypedef(Self &&) noexcept(std::is_nothrow_move_constructible_v<T>) = default;
|
||||
|
||||
Self & operator=(const Self &) = default;
|
||||
Self & operator=(Self &&) = default;
|
||||
Self & operator=(Self &&) noexcept(std::is_nothrow_move_assignable_v<T>) = default;
|
||||
|
||||
template <class Enable = typename std::is_copy_assignable<T>::type>
|
||||
Self & operator=(const T & rhs) { t = rhs; return *this;}
|
||||
@ -34,8 +34,10 @@ public:
|
||||
template <class Enable = typename std::is_move_assignable<T>::type>
|
||||
Self & operator=(T && rhs) { t = std::move(rhs); return *this;}
|
||||
|
||||
// NOLINTBEGIN(google-explicit-constructor)
|
||||
operator const T & () const { return t; }
|
||||
operator T & () { return t; }
|
||||
// NOLINTEND(google-explicit-constructor)
|
||||
|
||||
bool operator==(const Self & rhs) const { return t == rhs.t; }
|
||||
bool operator<(const Self & rhs) const { return t < rhs.t; }
|
||||
@ -58,7 +60,10 @@ namespace std
|
||||
};
|
||||
}
|
||||
|
||||
// NOLINTBEGIN(bugprone-macro-parentheses)
|
||||
|
||||
#define STRONG_TYPEDEF(T, D) \
|
||||
struct D ## Tag {}; \
|
||||
using D = StrongTypedef<T, D ## Tag>; \
|
||||
|
||||
// NOLINTEND(bugprone-macro-parentheses)
|
||||
|
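A small sketch of the STRONG_TYPEDEF macro in use, assuming base/strong_typedef.h from the hunk above is included (UserID is an illustrative name):

    #include <cstdint>

    STRONG_TYPEDEF(uint64_t, UserID)  // expands to: struct UserIDTag {}; using UserID = StrongTypedef<uint64_t, UserIDTag>;

    uint64_t nextId(UserID id)
    {
        return uint64_t(id) + 1;      // the implicit conversion operator yields the underlying value
    }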
@ -1,2 +1,2 @@
|
||||
add_executable (dump_variable dump_variable.cpp)
|
||||
clickhouse_add_executable (dump_variable dump_variable.cpp)
|
||||
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)
|
||||
|
@ -1,6 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <time.h>
|
||||
#include <ctime>
|
||||
|
||||
#if defined (OS_DARWIN) || defined (OS_SUNOS)
|
||||
# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
|
||||
|
@ -1,9 +1,48 @@
|
||||
#pragma once
|
||||
|
||||
#include <string.h>
|
||||
#include <cstring>
|
||||
#include <type_traits>
|
||||
#include <bit>
|
||||
|
||||
|
||||
inline void reverseMemcpy(void * dst, const void * src, size_t size)
|
||||
{
|
||||
uint8_t * uint_dst = reinterpret_cast<uint8_t *>(dst);
|
||||
const uint8_t * uint_src = reinterpret_cast<const uint8_t *>(src);
|
||||
|
||||
uint_dst += size;
|
||||
while (size)
|
||||
{
|
||||
--uint_dst;
|
||||
*uint_dst = *uint_src;
|
||||
++uint_src;
|
||||
--size;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline T unalignedLoadLE(const void * address)
|
||||
{
|
||||
T res {};
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
memcpy(&res, address, sizeof(res));
|
||||
else
|
||||
reverseMemcpy(&res, address, sizeof(res));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
inline void unalignedStoreLE(void * address,
|
||||
const typename std::enable_if<true, T>::type & src)
|
||||
{
|
||||
static_assert(std::is_trivially_copyable_v<T>);
|
||||
if constexpr (std::endian::native == std::endian::little)
|
||||
memcpy(address, &src, sizeof(src));
|
||||
else
|
||||
reverseMemcpy(address, &src, sizeof(src));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline T unalignedLoad(const void * address)
|
||||
{
|
||||
|
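A minimal sketch of the little-endian helpers added above, assuming base/unaligned.h is included:

    #include <cstdint>
    #include <cstring>

    int main()
    {
        unsigned char buf[4] = {0x78, 0x56, 0x34, 0x12};   // bytes of 0x12345678 in little-endian order

        uint32_t value = unalignedLoadLE<uint32_t>(buf);   // 0x12345678 on any host byte order

        unsigned char out[4];
        unalignedStoreLE<uint32_t>(out, value);            // writes back 78 56 34 12

        return (value == 0x12345678 && std::memcmp(buf, out, 4) == 0) ? 0 : 1;
    }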
@ -10,9 +10,11 @@ constexpr size_t GiB = 1024 * MiB;
|
||||
# pragma clang diagnostic ignored "-Wreserved-identifier"
|
||||
#endif
|
||||
|
||||
// NOLINTBEGIN(google-runtime-int)
|
||||
constexpr size_t operator"" _KiB(unsigned long long val) { return val * KiB; }
|
||||
constexpr size_t operator"" _MiB(unsigned long long val) { return val * MiB; }
|
||||
constexpr size_t operator"" _GiB(unsigned long long val) { return val * GiB; }
|
||||
// NOLINTEND(google-runtime-int)
|
||||
|
||||
#ifdef HAS_RESERVED_IDENTIFIER
|
||||
# pragma clang diagnostic pop
|
||||
|
@ -27,6 +27,8 @@
|
||||
#include <type_traits>
|
||||
#include <initializer_list>
|
||||
|
||||
// NOLINTBEGIN(*)
|
||||
|
||||
namespace wide
|
||||
{
|
||||
template <size_t Bits, typename Signed>
|
||||
@ -257,4 +259,7 @@ struct hash<wide::integer<Bits, Signed>>;
|
||||
|
||||
}
|
||||
|
||||
// NOLINTEND(*)
|
||||
|
||||
#include "wide_integer_impl.h"
|
||||
|
||||
|
@ -15,6 +15,8 @@
|
||||
#include <boost/multiprecision/cpp_bin_float.hpp>
|
||||
#include <boost/math/special_functions/fpclassify.hpp>
|
||||
|
||||
// NOLINTBEGIN(*)
|
||||
|
||||
/// Use same extended double for all platforms
|
||||
#if (LDBL_MANT_DIG == 64)
|
||||
#define CONSTEXPR_FROM_DOUBLE constexpr
|
||||
@ -1478,3 +1480,5 @@ struct hash<wide::integer<Bits, Signed>>
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
// NOLINTEND(*)
|
||||
|
@ -51,8 +51,8 @@ struct fmt::formatter<wide::integer<Bits, Signed>>
|
||||
{
|
||||
constexpr auto parse(format_parse_context & ctx)
|
||||
{
|
||||
auto it = ctx.begin();
|
||||
auto end = ctx.end();
|
||||
const auto * it = ctx.begin();
|
||||
const auto * end = ctx.end();
|
||||
|
||||
/// Only support {}.
|
||||
if (it != end && *it != '}')
|
||||
|
@ -63,7 +63,7 @@
|
||||
* Very large size of memcpy typically indicates suboptimal (not cache friendly) algorithms in code or unrealistic scenarios,
|
||||
* so we don't pay attention to using non-temporary stores.
|
||||
*
|
||||
* On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most benefitial,
|
||||
* On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
|
||||
* even comparing to non-temporary aligned unrolled stores even with the most wide registers.
|
||||
*
|
||||
* memcpy can be written in asm, C or C++. The latter can also use inline asm.
|
||||
@ -214,4 +214,3 @@ tail:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
22
base/glibc-compatibility/musl/dup3.c
Normal file
@ -0,0 +1,22 @@
|
||||
#define _GNU_SOURCE
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include "syscall.h"
|
||||
|
||||
int dup3(int old, int new, int flags)
|
||||
{
|
||||
int r;
|
||||
#ifdef SYS_dup2
|
||||
if (old==new) return __syscall_ret(-EINVAL);
|
||||
if (flags & O_CLOEXEC) {
|
||||
while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
|
||||
if (r!=-ENOSYS) return __syscall_ret(r);
|
||||
}
|
||||
while ((r=__syscall(SYS_dup2, old, new))==-EBUSY);
|
||||
if (flags & O_CLOEXEC) __syscall(SYS_fcntl, new, F_SETFD, FD_CLOEXEC);
|
||||
#else
|
||||
while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
|
||||
#endif
|
||||
return __syscall_ret(r);
|
||||
}
|
@ -1,68 +1,192 @@
|
||||
#include <sys/auxv.h>
|
||||
#include "atomic.h"
|
||||
#include <unistd.h> // __environ
|
||||
#include <sys/auxv.h>
|
||||
#include <fcntl.h> // open
|
||||
#include <sys/stat.h> // O_RDONLY
|
||||
#include <unistd.h> // read, close
|
||||
#include <stdlib.h> // ssize_t
|
||||
#include <stdio.h> // perror, fprintf
|
||||
#include <link.h> // ElfW
|
||||
#include <errno.h>
|
||||
|
||||
// We don't have libc struct available here. Compute aux vector manually.
|
||||
static unsigned long * __auxv = NULL;
|
||||
static unsigned long __auxv_secure = 0;
|
||||
#define ARRAY_SIZE(a) sizeof((a))/sizeof((a[0]))
|
||||
|
||||
static size_t __find_auxv(unsigned long type)
|
||||
/// Suppress TSan since it is possible for this code to be called from multiple threads,
|
||||
/// and initialization is safe to be done multiple times from multiple threads.
|
||||
#if defined(__clang__)
|
||||
# define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread")))
|
||||
#else
|
||||
# define NO_SANITIZE_THREAD
|
||||
#endif
|
||||
|
||||
// We don't have libc struct available here.
|
||||
// Compute aux vector manually (from /proc/self/auxv).
|
||||
//
|
||||
// Right now there are only 51 AT_* constants,
|
||||
// so 64 should be enough until this implementation is replaced with musl.
|
||||
static unsigned long __auxv_procfs[64];
|
||||
static unsigned long __auxv_secure = 0;
|
||||
// Common
|
||||
static unsigned long * __auxv_environ = NULL;
|
||||
|
||||
static void * volatile getauxval_func;
|
||||
|
||||
static unsigned long __auxv_init_environ(unsigned long type);
|
||||
|
||||
//
|
||||
// auxv from procfs interface
|
||||
//
|
||||
ssize_t __retry_read(int fd, void * buf, size_t count)
|
||||
{
|
||||
for (;;)
|
||||
{
|
||||
ssize_t ret = read(fd, buf, count);
|
||||
if (ret == -1)
|
||||
{
|
||||
if (errno == EINTR)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
perror("Cannot read /proc/self/auxv");
|
||||
abort();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
unsigned long NO_SANITIZE_THREAD __getauxval_procfs(unsigned long type)
|
||||
{
|
||||
if (type == AT_SECURE)
|
||||
{
|
||||
return __auxv_secure;
|
||||
}
|
||||
|
||||
if (type >= ARRAY_SIZE(__auxv_procfs))
|
||||
{
|
||||
errno = ENOENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return __auxv_procfs[type];
|
||||
}
|
||||
static unsigned long NO_SANITIZE_THREAD __auxv_init_procfs(unsigned long type)
|
||||
{
|
||||
// For debugging:
|
||||
// - od -t dL /proc/self/auxv
|
||||
// - LD_SHOW_AUX= ls
|
||||
int fd = open("/proc/self/auxv", O_RDONLY);
|
||||
// It is possible in case of:
|
||||
// - no procfs mounted
|
||||
// - on android you are not able to read it unless running from shell or debugging
|
||||
// - some other issues
|
||||
if (fd == -1)
|
||||
{
|
||||
// Fallback to environ.
|
||||
a_cas_p(&getauxval_func, (void *)__auxv_init_procfs, (void *)__auxv_init_environ);
|
||||
return __auxv_init_environ(type);
|
||||
}
|
||||
|
||||
ElfW(auxv_t) aux;
|
||||
|
||||
/// NOTE: sizeof(aux) is very small (less than PAGE_SIZE), so a partial read should not be possible.
|
||||
_Static_assert(sizeof(aux) < 4096, "Unexpected sizeof(aux)");
|
||||
while (__retry_read(fd, &aux, sizeof(aux)) == sizeof(aux))
|
||||
{
|
||||
if (aux.a_type == AT_NULL)
|
||||
{
|
||||
break;
|
||||
}
|
||||
if (aux.a_type == AT_IGNORE || aux.a_type == AT_IGNOREPPC)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (aux.a_type >= ARRAY_SIZE(__auxv_procfs))
|
||||
{
|
||||
fprintf(stderr, "AT_* is out of range: %li (maximum allowed is %zu)\n", aux.a_type, ARRAY_SIZE(__auxv_procfs));
|
||||
abort();
|
||||
}
|
||||
if (__auxv_procfs[aux.a_type])
|
||||
{
|
||||
/// It is possible due to race on initialization.
|
||||
}
|
||||
__auxv_procfs[aux.a_type] = aux.a_un.a_val;
|
||||
}
|
||||
close(fd);
|
||||
|
||||
__auxv_secure = __getauxval_procfs(AT_SECURE);
|
||||
|
||||
// Now we've initialized __auxv_procfs, next time getauxval() will only call __getauxval_procfs().
|
||||
a_cas_p(&getauxval_func, (void *)__auxv_init_procfs, (void *)__getauxval_procfs);
|
||||
|
||||
return __getauxval_procfs(type);
|
||||
}
|
||||
|
||||
//
|
||||
// auxv from environ interface
|
||||
//
|
||||
// NOTE: environ available only after static initializers,
|
||||
// so you cannot rely on this if you need getauxval() before.
|
||||
//
|
||||
// Good example of such user is sanitizers, for example
|
||||
// LSan will not work with __auxv_init_environ(),
|
||||
// since it needs getauxval() before.
|
||||
//
|
||||
static size_t NO_SANITIZE_THREAD __find_auxv(unsigned long type)
|
||||
{
|
||||
size_t i;
|
||||
for (i = 0; __auxv[i]; i += 2)
|
||||
for (i = 0; __auxv_environ[i]; i += 2)
|
||||
{
|
||||
if (__auxv[i] == type)
|
||||
if (__auxv_environ[i] == type)
|
||||
{
|
||||
return i + 1;
|
||||
}
|
||||
}
|
||||
return (size_t) -1;
|
||||
}
|
||||
|
||||
unsigned long __getauxval(unsigned long type)
|
||||
unsigned long NO_SANITIZE_THREAD __getauxval_environ(unsigned long type)
|
||||
{
|
||||
if (type == AT_SECURE)
|
||||
return __auxv_secure;
|
||||
|
||||
if (__auxv)
|
||||
if (__auxv_environ)
|
||||
{
|
||||
size_t index = __find_auxv(type);
|
||||
if (index != ((size_t) -1))
|
||||
return __auxv[index];
|
||||
return __auxv_environ[index];
|
||||
}
|
||||
|
||||
errno = ENOENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void * volatile getauxval_func;
|
||||
|
||||
static unsigned long __auxv_init(unsigned long type)
|
||||
static unsigned long NO_SANITIZE_THREAD __auxv_init_environ(unsigned long type)
|
||||
{
|
||||
if (!__environ)
|
||||
{
|
||||
// __environ is not initialized yet so we can't initialize __auxv right now.
|
||||
// __environ is not initialized yet so we can't initialize __auxv_environ right now.
|
||||
// That's normally occurred only when getauxval() is called from some sanitizer's internal code.
|
||||
errno = ENOENT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Initialize __auxv and __auxv_secure.
|
||||
// Initialize __auxv_environ and __auxv_secure.
|
||||
size_t i;
|
||||
for (i = 0; __environ[i]; i++);
|
||||
__auxv = (unsigned long *) (__environ + i + 1);
|
||||
__auxv_environ = (unsigned long *) (__environ + i + 1);
|
||||
|
||||
size_t secure_idx = __find_auxv(AT_SECURE);
|
||||
if (secure_idx != ((size_t) -1))
|
||||
__auxv_secure = __auxv[secure_idx];
|
||||
__auxv_secure = __auxv_environ[secure_idx];
|
||||
|
||||
// Now we've initialized __auxv, next time getauxval() will only call __get_auxval().
|
||||
a_cas_p(&getauxval_func, (void *)__auxv_init, (void *)__getauxval);
|
||||
// Now we need to switch to __getauxval_environ for all later calls, since
|
||||
// everything is initialized.
|
||||
a_cas_p(&getauxval_func, (void *)__auxv_init_environ, (void *)__getauxval_environ);
|
||||
|
||||
return __getauxval(type);
|
||||
return __getauxval_environ(type);
|
||||
}
|
||||
|
||||
// First time getauxval() will call __auxv_init().
|
||||
static void * volatile getauxval_func = (void *)__auxv_init;
|
||||
// Callchain:
|
||||
// - __auxv_init_procfs -> __getauxval_environ
|
||||
// - __auxv_init_procfs -> __auxv_init_environ -> __getauxval_environ
|
||||
static void * volatile getauxval_func = (void *)__auxv_init_procfs;
|
||||
|
||||
unsigned long getauxval(unsigned long type)
|
||||
{
|
||||
|
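A small sketch of calling the compatibility getauxval() defined above; the first call walks /proc/self/auxv (or falls back to environ) and later calls hit the cached table:

    #include <sys/auxv.h>
    #include <cstdio>

    int main()
    {
        unsigned long page_size = getauxval(AT_PAGESZ);  // resolved through the procfs/environ fallback chain
        unsigned long secure = getauxval(AT_SECURE);     // AT_SECURE is cached separately
        std::printf("page size: %lu, secure: %lu\n", page_size, secure);
        return page_size == 0;                           // 0 means the aux vector could not be read
    }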
26
base/glibc-compatibility/musl/inotify.c
Normal file
@ -0,0 +1,26 @@
|
||||
#include <sys/inotify.h>
|
||||
#include <errno.h>
|
||||
#include "syscall.h"
|
||||
|
||||
int inotify_init()
|
||||
{
|
||||
return inotify_init1(0);
|
||||
}
|
||||
int inotify_init1(int flags)
|
||||
{
|
||||
int r = __syscall(SYS_inotify_init1, flags);
|
||||
#ifdef SYS_inotify_init
|
||||
if (r==-ENOSYS && !flags) r = __syscall(SYS_inotify_init);
|
||||
#endif
|
||||
return __syscall_ret(r);
|
||||
}
|
||||
|
||||
int inotify_add_watch(int fd, const char *pathname, uint32_t mask)
|
||||
{
|
||||
return syscall(SYS_inotify_add_watch, fd, pathname, mask);
|
||||
}
|
||||
|
||||
int inotify_rm_watch(int fd, int wd)
|
||||
{
|
||||
return syscall(SYS_inotify_rm_watch, fd, wd);
|
||||
}
|
@ -49,6 +49,8 @@
|
||||
#include <cxxabi.h>
|
||||
#endif
|
||||
|
||||
// NOLINTBEGIN(readability-identifier-naming, modernize-use-using, bugprone-macro-parentheses, google-explicit-constructor)
|
||||
|
||||
/*
|
||||
* Abstractions for compiler-specific directives
|
||||
*/
|
||||
@ -90,7 +92,6 @@
|
||||
#define PCG_EMULATED_128BIT_MATH 1
|
||||
#endif
|
||||
|
||||
|
||||
namespace pcg_extras {
|
||||
|
||||
/*
|
||||
@ -552,4 +553,6 @@ std::ostream& operator<<(std::ostream& out, printable_typename<T>) {
|
||||
|
||||
} // namespace pcg_extras
|
||||
|
||||
// NOLINTEND(readability-identifier-naming, modernize-use-using, bugprone-macro-parentheses, google-explicit-constructor)
|
||||
|
||||
#endif // PCG_EXTRAS_HPP_INCLUDED
|
||||
|
@ -101,7 +101,7 @@
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The pcg_extras namespace contains some support code that is likley to
|
||||
* The pcg_extras namespace contains some support code that is likely to
|
||||
* be useful for a variety of RNGs, including:
|
||||
* - 128-bit int support for platforms where it isn't available natively
|
||||
* - bit twiddling operations
|
||||
@ -113,6 +113,8 @@
|
||||
|
||||
#include "pcg_extras.hpp"
|
||||
|
||||
// NOLINTBEGIN(*)
|
||||
|
||||
namespace DB
|
||||
{
|
||||
struct PcgSerializer;
|
||||
@ -1777,4 +1779,6 @@ typedef pcg_engines::ext_oneseq_xsh_rs_64_32<14,32,true> pcg32_k16384_fast;
|
||||
#pragma warning(default:4146)
|
||||
#endif
|
||||
|
||||
// NOLINTEND(*)
|
||||
|
||||
#endif // PCG_RAND_HPP_INCLUDED
|
||||
|
@ -22,7 +22,7 @@
|
||||
/*
|
||||
* This code provides a a C++ class that can provide 128-bit (or higher)
|
||||
* integers. To produce 2K-bit integers, it uses two K-bit integers,
|
||||
* placed in a union that allowes the code to also see them as four K/2 bit
|
||||
* placed in a union that allows the code to also see them as four K/2 bit
|
||||
* integers (and access them either directly name, or by index).
|
||||
*
|
||||
* It may seem like we're reinventing the wheel here, because several
|
||||
|
@ -16,6 +16,8 @@
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
|
||||
// NOLINTBEGIN(*)
|
||||
|
||||
/* Special width values */
|
||||
enum {
|
||||
widechar_nonprint = -1, // The character is not printable.
|
||||
@ -518,4 +520,6 @@ inline int widechar_wcwidth(wchar_t c) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
// NOLINTEND(*)
|
||||
|
||||
#endif // WIDECHAR_WIDTH_H
|
||||
|
1
benchmark/README.md
Normal file
@ -0,0 +1 @@
|
||||
Benchmark is located in a separate repository: https://github.com/ClickHouse/ClickBench
|
@ -1,180 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# script to run query to databases
|
||||
|
||||
function usage()
|
||||
{
|
||||
cat <<EOF
|
||||
usage: $0 options
|
||||
|
||||
This script run benhmark for database
|
||||
|
||||
OPTIONS:
|
||||
-c config file where some script variables are defined
|
||||
-n table name
|
||||
|
||||
-h Show this message
|
||||
-t how many times execute each query. default is '3'
|
||||
-q query file
|
||||
-e expect file
|
||||
-s /etc/init.d/service
|
||||
-p table name pattern to be replaced to name. default is 'hits_10m'
|
||||
EOF
|
||||
}
|
||||
|
||||
TIMES=3
|
||||
table_name_pattern=hits_10m
|
||||
|
||||
while getopts “c:ht:n:q:e:s:r” OPTION
|
||||
do
|
||||
case $OPTION in
|
||||
c)
|
||||
source $OPTARG
|
||||
;;
|
||||
?)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
OPTIND=1
|
||||
|
||||
while getopts “c:ht:n:q:e:s:r” OPTION
|
||||
do
|
||||
case $OPTION in
|
||||
h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
t)
|
||||
TIMES=$OPTARG
|
||||
;;
|
||||
n)
|
||||
table_name=$OPTARG
|
||||
;;
|
||||
q)
|
||||
test_file=$OPTARG
|
||||
;;
|
||||
e)
|
||||
expect_file=$OPTARG
|
||||
;;
|
||||
s)
|
||||
etc_init_d_service=$OPTARG
|
||||
;;
|
||||
p)
|
||||
table_name_pattern=$OPTARG
|
||||
;;
|
||||
c)
|
||||
;;
|
||||
r)
|
||||
restart_server_each_query=1
|
||||
;;
|
||||
?)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ! -f $expect_file ]]; then
|
||||
echo "Not found: expect file"
|
||||
exit 1
|
||||
fi
|
||||
if [[ ! -f $test_file ]]; then
|
||||
echo "Not found: test file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -f $etc_init_d_service ]]; then
|
||||
echo "Not found: /etc/init.d/service with path=$etc_init_d_service"
|
||||
use_service=0
|
||||
else
|
||||
use_service=1
|
||||
fi
|
||||
|
||||
if [[ "$table_name_pattern" == "" ]]; then
|
||||
echo "Empty table_name_pattern"
|
||||
exit 1
|
||||
fi
|
||||
if [[ "$table_name" == "" ]]; then
|
||||
echo "Empty table_name"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function execute()
|
||||
{
|
||||
queries=("${@}")
|
||||
queries_count=${#queries[@]}
|
||||
|
||||
if [ -z $TIMES ]; then
|
||||
TIMES=1
|
||||
fi
|
||||
|
||||
index=0
|
||||
while [ "$index" -lt "$queries_count" ]; do
|
||||
query=${queries[$index]}
|
||||
|
||||
if [[ $query == "" ]]; then
|
||||
let "index = $index + 1"
|
||||
continue
|
||||
fi
|
||||
|
||||
comment_re='--.*'
|
||||
if [[ $query =~ $comment_re ]]; then
|
||||
echo "$query"
|
||||
echo
|
||||
else
|
||||
sync
|
||||
sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"
|
||||
|
||||
if [[ "$restart_server_each_query" == "1" && "$use_service" == "1" ]]; then
|
||||
echo "restart server: $etc_init_d_service restart"
|
||||
sudo $etc_init_d_service restart
|
||||
fi
|
||||
|
||||
for i in $(seq $TIMES)
|
||||
do
|
||||
if [[ -f $etc_init_d_service && "$use_service" == "1" ]]; then
|
||||
sudo $etc_init_d_service status
|
||||
server_status=$?
|
||||
expect -f $expect_file ""
|
||||
|
||||
if [[ "$?" != "0" || $server_status != "0" ]]; then
|
||||
echo "restart server: $etc_init_d_service restart"
|
||||
sudo $etc_init_d_service restart
|
||||
fi
|
||||
|
||||
#wait until can connect to server
|
||||
restart_timer=0
|
||||
restart_limit=60
|
||||
expect -f $expect_file "" &> /dev/null
|
||||
while [ "$?" != "0" ]; do
|
||||
echo "waiting"
|
||||
sleep 1
|
||||
let "restart_timer = $restart_timer + 1"
|
||||
if (( $restart_limit < $restart_timer )); then
|
||||
sudo $etc_init_d_service restart
|
||||
restart_timer=0
|
||||
fi
|
||||
expect -f $expect_file "" &> /dev/null
|
||||
done
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "times: $i"
|
||||
|
||||
echo "query:" "$query"
|
||||
expect -f $expect_file "$query"
|
||||
|
||||
done
|
||||
fi
|
||||
|
||||
let "index = $index + 1"
|
||||
done
|
||||
}
|
||||
|
||||
temp_test_file=temp_queries_$table_name
|
||||
cat $test_file | sed s/$table_name_pattern/$table_name/g > $temp_test_file
|
||||
mapfile -t test_queries < $temp_test_file
|
||||
|
||||
echo "start time: $(date)"
|
||||
time execute "${test_queries[@]}"
|
||||
echo "stop time: $(date)"
|
@ -1,22 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
QUERIES_FILE="queries.sql"
|
||||
TABLE=$1
|
||||
TRIES=3
|
||||
|
||||
cat "$QUERIES_FILE" | sed "s|{table}|\"${TABLE}\"|g" | while read query; do
|
||||
|
||||
echo -n "["
|
||||
for i in $(seq 1 $TRIES); do
|
||||
while true; do
|
||||
RES=$(command time -f %e -o /dev/stdout curl -sS -G --data-urlencode "query=$query" --data "default_format=Null&max_memory_usage=100000000000&max_memory_usage_for_all_queries=100000000000&max_concurrent_queries_for_user=100&database=*$YT_CLIQUE_ID" --location-trusted -H "Authorization: OAuth $YT_TOKEN" "$YT_PROXY.yt.yandex.net/query" 2>/dev/null);
|
||||
if [[ $? == 0 ]]; then
|
||||
[[ $RES =~ 'fail|Exception' ]] || break;
|
||||
fi
|
||||
done
|
||||
|
||||
[[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
|
||||
[[ "$i" != $TRIES ]] && echo -n ", "
|
||||
done
|
||||
echo "],"
|
||||
done
|
@ -1,29 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
QUERIES_FILE="queries.sql"
|
||||
TABLE=$1
|
||||
TRIES=3
|
||||
|
||||
if [ -x ./clickhouse ]
|
||||
then
|
||||
CLICKHOUSE_CLIENT="./clickhouse client"
|
||||
elif command -v clickhouse-client >/dev/null 2>&1
|
||||
then
|
||||
CLICKHOUSE_CLIENT="clickhouse-client"
|
||||
else
|
||||
echo "clickhouse-client is not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
|
||||
sync
|
||||
echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
|
||||
|
||||
echo -n "["
|
||||
for i in $(seq 1 $TRIES); do
|
||||
RES=$(${CLICKHOUSE_CLIENT} --time --format=Null --max_memory_usage=100G --query="$query" 2>&1)
|
||||
[[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
|
||||
[[ "$i" != $TRIES ]] && echo -n ", "
|
||||
done
|
||||
echo "],"
|
||||
done
|
@ -1,19 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
QUERIES_FILE="queries.sql"
|
||||
TABLE=$1
|
||||
TRIES=3
|
||||
|
||||
cat "$QUERIES_FILE" | sed "s|{table}|\"${TABLE}\"|g" | while read query; do
|
||||
|
||||
echo -n "["
|
||||
for i in $(seq 1 $TRIES); do
|
||||
while true; do
|
||||
RES=$(command time -f %e -o time ./yql --clickhouse --syntax-version 1 -f empty <<< "USE chyt.hume; PRAGMA max_memory_usage = 100000000000; PRAGMA max_memory_usage_for_all_queries = 100000000000; $query" >/dev/null 2>&1 && cat time) && break;
|
||||
done
|
||||
|
||||
[[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
|
||||
[[ "$i" != $TRIES ]] && echo -n ", "
|
||||
done
|
||||
echo "],"
|
||||
done
|
@ -1,43 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
QUERIES_FILE="queries.sql"
|
||||
TABLE=$1
|
||||
TRIES=3
|
||||
|
||||
PARAMS="--host ... --secure --password ..."
|
||||
|
||||
if [ -x ./clickhouse ]
|
||||
then
|
||||
CLICKHOUSE_CLIENT="./clickhouse client"
|
||||
elif command -v clickhouse-client >/dev/null 2>&1
|
||||
then
|
||||
CLICKHOUSE_CLIENT="clickhouse-client"
|
||||
else
|
||||
echo "clickhouse-client is not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
QUERY_ID_PREFIX="benchmark_$RANDOM"
|
||||
QUERY_NUM=1
|
||||
|
||||
cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query
|
||||
do
|
||||
for i in $(seq 1 $TRIES)
|
||||
do
|
||||
QUERY_ID="${QUERY_ID_PREFIX}_${QUERY_NUM}_${i}"
|
||||
${CLICKHOUSE_CLIENT} ${PARAMS} --query_id "${QUERY_ID}" --format=Null --max_memory_usage=100G --query="$query"
|
||||
echo -n '.'
|
||||
done
|
||||
QUERY_NUM=$((QUERY_NUM + 1))
|
||||
echo
|
||||
done
|
||||
|
||||
sleep 10
|
||||
|
||||
${CLICKHOUSE_CLIENT} ${PARAMS} --query "
|
||||
WITH extractGroups(query_id, '(\d+)_(\d+)\$') AS num_run, num_run[1]::UInt8 AS num, num_run[2]::UInt8 AS run
|
||||
SELECT groupArrayInsertAt(query_duration_ms / 1000, (run - 1)::UInt8)::String || ','
|
||||
FROM clusterAllReplicas(default, system.query_log)
|
||||
WHERE event_date >= yesterday() AND type = 2 AND query_id LIKE '${QUERY_ID_PREFIX}%'
|
||||
GROUP BY num ORDER BY num FORMAT TSV
|
||||
"
|
@ -1,43 +0,0 @@
|
||||
SELECT count() FROM {table};
|
||||
SELECT count() FROM {table} WHERE AdvEngineID != 0;
|
||||
SELECT sum(AdvEngineID), count(), avg(ResolutionWidth) FROM {table} ;
|
||||
SELECT sum(UserID) FROM {table} ;
|
||||
SELECT uniq(UserID) FROM {table} ;
|
||||
SELECT uniq(SearchPhrase) FROM {table} ;
|
||||
SELECT min(EventDate), max(EventDate) FROM {table} ;
|
||||
SELECT AdvEngineID, count() FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count() DESC;
|
||||
SELECT RegionID, uniq(UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
SELECT RegionID, sum(AdvEngineID), count() AS c, avg(ResolutionWidth), uniq(UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
SELECT MobilePhoneModel, uniq(UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT MobilePhone, MobilePhoneModel, uniq(UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchPhrase, count() AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, uniq(UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchEngineID, SearchPhrase, count() AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT UserID, count() FROM {table} GROUP BY UserID ORDER BY count() DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count() FROM {table} GROUP BY UserID, SearchPhrase ORDER BY count() DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count() FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10;
|
||||
SELECT UserID, toMinute(EventTime) AS m, SearchPhrase, count() FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count() DESC LIMIT 10;
|
||||
SELECT UserID FROM {table} WHERE UserID = 12345678901234567890;
|
||||
SELECT count() FROM {table} WHERE URL LIKE '%metrika%';
|
||||
SELECT SearchPhrase, any(URL), count() AS c FROM {table} WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, any(URL), any(Title), count() AS c, uniq(UserID) FROM {table} WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT * FROM {table} WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
|
||||
SELECT CounterID, avg(length(URL)) AS l, count() AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count() AS c, any(Referer) FROM {table} WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table};
|
||||
SELECT SearchEngineID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count() AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT 1, URL, count() AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT ClientIP AS x, x - 1, x - 2, x - 3, count() AS c FROM {table} GROUP BY x, x - 1, x - 2, x - 3 ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND notEmpty(URL) GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT Title, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND notEmpty(Title) GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT URL, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, ((SearchEngineID = 0 AND AdvEngineID = 0) ? Referer : '') AS Src, URL AS Dst, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = halfMD5('http://example.ru/') GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
|
||||
SELECT WindowClientWidth, WindowClientHeight, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND NOT DontCountHits AND URLHash = halfMD5('http://example.ru/') GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT toStartOfMinute(EventTime) AS Minute, count() AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
|
@ -1,3 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
table=hits_10m; time clickhouse-client --max_bytes_before_external_sort=30000000000 --query="SELECT toInt64(WatchID), JavaEnable, Title, GoodEvent, (EventTime < toDateTime('1971-01-01 00:00:00') ? toDateTime('1971-01-01 00:00:01') : EventTime), (EventDate < toDate('1971-01-01') ? toDate('1971-01-01') : EventDate), CounterID, ClientIP, RegionID, toInt64(UserID), CounterClass, OS, UserAgent, URL, Referer, Refresh, RefererCategoryID, RefererRegionID, URLCategoryID, URLRegionID, ResolutionWidth, ResolutionHeight, ResolutionDepth, FlashMajor, FlashMinor, FlashMinor2, NetMajor, NetMinor, UserAgentMajor, UserAgentMinor, CookieEnable, JavascriptEnable, IsMobile, MobilePhone, MobilePhoneModel, Params, IPNetworkID, TraficSourceID, SearchEngineID, SearchPhrase, AdvEngineID, IsArtifical, WindowClientWidth, WindowClientHeight, ClientTimeZone, (ClientEventTime < toDateTime('1971-01-01 00:00:01') ? toDateTime('1971-01-01 00:00:01') : ClientEventTime), SilverlightVersion1, SilverlightVersion2, SilverlightVersion3, SilverlightVersion4, PageCharset, CodeVersion, IsLink, IsDownload, IsNotBounce, toInt64(FUniqID), OriginalURL, HID, IsOldCounter, IsEvent, IsParameter, DontCountHits, WithHash, HitColor, (LocalEventTime < toDateTime('1971-01-01 00:00:01') ? toDateTime('1971-01-01 00:00:01') : LocalEventTime), Age, Sex, Income, Interests, Robotness, RemoteIP, WindowName, OpenerName, HistoryLength, BrowserLanguage, BrowserCountry, SocialNetwork, SocialAction, HTTPError, SendTiming, DNSTiming, ConnectTiming, ResponseStartTiming, ResponseEndTiming, FetchTiming, SocialSourceNetworkID, SocialSourcePage, ParamPrice, ParamOrderID, ParamCurrency, ParamCurrencyID, OpenstatServiceName, OpenstatCampaignID, OpenstatAdID, OpenstatSourceID, UTMSource, UTMMedium, UTMCampaign, UTMContent, UTMTerm, FromTag, HasGCLID, toInt64(RefererHash), toInt64(URLHash), CLID FROM $table ORDER BY rand()" | corrector_utf8 > /opt/dumps/${table}_corrected.tsv
|
15906
benchmark/duckdb/log
File diff suppressed because one or more lines are too long
@ -1,43 +0,0 @@
|
||||
SELECT count(*) FROM hits;
|
||||
SELECT count(*) FROM hits WHERE AdvEngineID != 0;
|
||||
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits;
|
||||
SELECT sum(UserID) FROM hits;
|
||||
SELECT COUNT(DISTINCT UserID) FROM hits;
|
||||
SELECT COUNT(DISTINCT SearchPhrase) FROM hits;
|
||||
SELECT min(EventDate), max(EventDate) FROM hits;
|
||||
SELECT AdvEngineID, count(*) FROM hits WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
|
||||
SELECT RegionID, COUNT(DISTINCT UserID) AS u FROM hits GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), COUNT(DISTINCT UserID) FROM hits GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
SELECT MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT MobilePhone, MobilePhoneModel, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(MobilePhoneModel) > 0 GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, COUNT(DISTINCT UserID) AS u FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
|
||||
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT UserID, count(*) FROM hits GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID, SearchPhrase, count(*) FROM hits GROUP BY UserID, SearchPhrase LIMIT 10;
|
||||
SELECT UserID, extract(minute FROM (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS m, SearchPhrase, count(*) FROM hits GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
|
||||
SELECT UserID FROM hits WHERE UserID = 12345678901234567890;
|
||||
SELECT count(*) FROM hits WHERE URL::TEXT LIKE '%metrika%';
|
||||
SELECT SearchPhrase, min(URL), count(*) AS c FROM hits WHERE URL::TEXT LIKE '%metrika%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT SearchPhrase, min(URL), min(Title), count(*) AS c, COUNT(DISTINCT UserID) FROM hits WHERE Title::TEXT LIKE '%Яндекс%' AND URL::TEXT NOT LIKE '%.yandex.%' AND octet_length(SearchPhrase) > 0 GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT * FROM hits WHERE URL::TEXT LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime LIMIT 10;
|
||||
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY SearchPhrase LIMIT 10;
|
||||
SELECT SearchPhrase FROM hits WHERE octet_length(SearchPhrase) > 0 ORDER BY EventTime, SearchPhrase LIMIT 10;
|
||||
SELECT CounterID, avg(octet_length(URL)) AS l, count(*) AS c FROM hits WHERE octet_length(URL) > 0 GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT regexp_replace(Referer::TEXT, '^https?://(?:www\.)?([^/]+)/.*$', '\1') AS key, avg(octet_length(Referer)) AS l, count(*) AS c, min(Referer) FROM hits WHERE octet_length(Referer) > 0 GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
|
||||
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits;
|
||||
SELECT SearchEngineID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits WHERE octet_length(SearchPhrase) > 0 GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT WatchID, ClientIP, count(*) AS c, sum("refresh"), avg(ResolutionWidth) FROM hits GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS c FROM hits GROUP BY URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT 1, URL, count(*) AS c FROM hits GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
|
||||
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(URL) > 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT Title, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND DontCountHits = 0 AND "refresh" = 0 AND octet_length(Title) > 0 GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
|
||||
SELECT URL, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND IsLink != 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 686716256552154761 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
|
||||
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-31' AND "refresh" = 0 AND DontCountHits = 0 AND URLHash = 686716256552154761 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) AS "Minute", count(*) AS PageViews FROM hits WHERE CounterID = 62 AND (DATE '1970-01-01' + EventDate) >= '2013-07-01' AND (DATE '1970-01-01' + EventDate) <= '2013-07-02' AND "refresh" = 0 AND DontCountHits = 0 GROUP BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime))) ORDER BY DATE_TRUNC('minute', (TIMESTAMP '1970-01-01 00:00:00' + to_seconds(EventTime)));
|
File diff suppressed because one or more lines are too long
@ -1,43 +0,0 @@
|
||||
Folder structure
|
||||
______________
|
||||
dump_dataset_from_ch.sh - bash script that dumps a dataset from ClickHouse
|
||||
schema.sql - schema for a Greenplum cluster to load the dumped dataset into
|
||||
load_data_set.sql - the script that loads the dumped dataset
|
||||
queries.sql - SQL statements used in the benchmark
|
||||
benchmark.sh - the bash script that conducts the benchmark
|
||||
result_parser.py - script that parses benchmark.sh's output and produces Python code to build a graph comparing up to 4 benchmark results (see the usage example after this list).
|
||||
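For reference, a typical invocation of result_parser.py (file names are illustrative) compares two benchmark runs and emits matplotlib plotting code, which can then be executed:
./result_parser.py --show-first-timings gp_run1.log gp_run2.log > plot.py
python3 plot.py
With a single result file it prints a JSON-like summary of the timings instead of plotting code.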
Requirements
|
||||
____________
|
||||
|
||||
Greenplum uses a separate server as a point of entry, so you need at least 2 servers to run a cluster: a master host and segment hosts. 2 segment hosts with 56 segments (28 per host) were used while conducting the test.
|
||||
You have to put the segment hostnames into benchmark.sh, for example as shown below.
|
||||
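Concretely, this means editing the host variables near the top of benchmark.sh (the hostnames below are illustrative):
host1=gp-segment-1
host2=gp-segment-2
These hosts are used to drop the OS page cache over ssh before each query.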
Greenplum quick installation instructions
|
||||
_________________________________________
|
||||
|
||||
Obtain a stable Greenplum version here (4.3.9.1 was used while conducting the benchmark):
|
||||
https://network.pivotal.io/products/pivotal-gpdb
|
||||
|
||||
and install it using this detailed guide:
|
||||
http://gpdb.docs.pivotal.io/4340/install_guide/install_guide.html
|
||||
|
||||
You should change gp_interconnect_type to 'tcp' if the cluster members are connected via a 1 Gbit link or slower.
|
||||
Some variables have to be changed prior to the first benchmark run: gp_vmem_protect_limit and max_statement_mem, to allow each segment to use more virtual memory. Here are the commands to change these GUCs; they have to be executed as gpadmin on the master host:
|
||||
|
||||
gpconfig -c gp_interconnect_type -v tcp
|
||||
gpconfig -c gp_vmem_protect_limit -v 3000
|
||||
gpconfig -c max_statement_mem -v '4000MB'
|
||||
|
||||
How to prepare data
|
||||
-------------------
|
||||
|
||||
One can prepare the datasets to run the benchmark on using the dump_dataset_from_ch.sh script from this repo. The script has to be run on a ClickHouse host. It takes a long time to produce the dumps.
|
||||
|
||||
Upload the datasets to the Greenplum master. Then run schema.sql to prepare the schema and load_data_set.sql to load the data, for example as sketched below. This operation also takes a long time.
|
||||
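A minimal sketch of the loading step, assuming the dumps have been copied to the paths referenced by load_data_set.sql (/data/hits_*_single.dump) and that the target database is named benchmark (the database name is an assumption):
psql -U gpadmin -d benchmark -f schema.sql
psql -U gpadmin -d benchmark -f load_data_set.sql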
|
||||
How to conduct the benchmark
|
||||
__________________________
|
||||
There is a benchmark.sh script that takes several arguments. Here is the syntax:
|
||||
|
||||
./benchmark.sh sql_statements_file tablename dbname orca_switch
|
||||
|
||||
If you don't know what the last argument means, just use the default value, as in the example below.
|
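A minimal example invocation, assuming the 10m dataset loaded by schema.sql/load_data_set.sql and a database named benchmark (the database name is an assumption):
./benchmark.sh queries.sql hits_all_10m benchmark on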
@ -1,30 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
filename=${1-queries.sql}
|
||||
table=$2
|
||||
dbname=$3
|
||||
orca=${4-on}
|
||||
host1=somehost
|
||||
host2=somehost
|
||||
mem='15GB'
|
||||
cat $filename | sed "s/{table}/$table/g" | while read query ;
|
||||
do
|
||||
ssh -n $host1 'echo 3 | tee /proc/sys/vm/drop_caches; sync' > /dev/null
|
||||
ssh -n $host2 'echo 3 | tee /proc/sys/vm/drop_caches; sync' > /dev/null
|
||||
sleep 5
|
||||
echo $query | egrep "SELECT UserID, date_trunc\('minute', EventTime\) AS m|SELECT Referer AS key, avg\(length\(Referer\)\) AS l|SELECT URL, count(1) AS c FROM.*GROUP BY URL|SELECT 1, URL, count\(1\) AS c FROM.*GROUP BY 1" && mem='10GB'
|
||||
echo $query | egrep 'SELECT DISTINCT|GROUP BY UserID, SearchPhrase LIMIT 10|count\(DISTINCT UserID\) AS u' && mem='5GB'
|
||||
echo "####################"
|
||||
echo "$query"
|
||||
echo "Timestamp_begin:$(date)"
|
||||
echo "\\timing off \\\\set optimizer=$orca; set effective_cache_size='256MB'; set statement_mem='$mem';\\timing on \\\\ $query;" | psql -p 5432 -h 'localhost' -o /dev/null -U gpadmin ${dbname}
|
||||
echo "Timestamp_end:$(date)"
|
||||
echo "Timestamp_begin:$(date)"
|
||||
echo "\\timing off \\\\set optimizer=$orca; set effective_cache_size='50GB'; set statement_mem='$mem';\\timing on \\\\ $query;" | psql -p 5432 -h 'localhost' -o /dev/null -U gpadmin ${dbname}
|
||||
echo "Timestamp_end:$(date)"
|
||||
echo "Timestamp_begin:$(date)"
|
||||
echo "\\timing off \\\\set optimizer=$orca; set effective_cache_size='50GB'; set statement_mem='$mem';\\timing on \\\\ $query;" | psql -p 5432 -h 'localhost' -o /dev/null -U gpadmin ${dbname}
|
||||
echo "Timestamp_end:$(date)"
|
||||
echo "$query"
|
||||
echo '####################'
|
||||
done
|
@ -1,5 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
for table in hits_10m_single hits_100m_single hits_1000m_single; do
|
||||
clickhouse-client -q "SELECT (round(WatchID/2), JavaEnable, Title, GoodEvent, EventTime, EventDate, CounterID, ClientIP, RegionID,round(UserID/2), CounterClass, OS, UserAgent, URL, Referer, Refresh, RefererCategoryID, RefererRegionID, URLCategoryID, URLRegionID, ResolutionWidth, ResolutionHeight, ResolutionDepth, FlashMajor, FlashMinor, FlashMinor2, NetMajor, NetMinor, UserAgentMajor, CookieEnable, JavascriptEnable, IsMobile, MobilePhone, MobilePhoneModel, Params, IPNetworkID, TraficSourceID, SearchEngineID, SearchPhrase, AdvEngineID, IsArtifical, WindowClientWidth, WindowClientHeight, ClientTimeZone, ClientEventTime, SilverlightVersion1, SilverlightVersion2, SilverlightVersion3, SilverlightVersion4, PageCharset, CodeVersion, IsLink, IsDownload, IsNotBounce,round(FUniqID/2), OriginalURL, HID, IsOldCounter, IsEvent, IsParameter, DontCountHits, WithHash, HitColor, LocalEventTime, Age, Sex, Income, Interests, Robotness, RemoteIP, WindowName, OpenerName, HistoryLength, SocialNetwork, SocialAction, HTTPError, SendTiming, DNSTiming, ConnectTiming, ResponseStartTiming, ResponseEndTiming, FetchTiming, SocialSourceNetworkID, SocialSourcePage, ParamPrice, ParamOrderID, OpenstatServiceName, OpenstatCampaignID, OpenstatAdID, OpenstatSourceID, UTMSource, UTMMedium, UTMCampaign, UTMContent, UTMTerm, FromTag, HasGCLID,round(RefererHash/2),round(URLHash/2), CLID) FROM $table FORMAT CSV" > $table
|
||||
done
|
@ -1,12 +0,0 @@
|
||||
COPY hits_all_10m FROM '/data/hits_10m_single.dump' CSV SEGMENT REJECT LIMIT 30 PERCENT;
|
||||
CREATE INDEX pk_counterid_eventdate_userid_10m ON hits_all_10m USING btree (counterid, eventdate, userid);
|
||||
CREATE INDEX idx_10m_counterid on hits_all_10m using btree (counterid); CREATE INDEX idx_10m_userid on hits_all_10m using btree (userid);
|
||||
ANALYZE hits_all_10m;
|
||||
COPY hits_all_100m from '/data/hits_100m_single.dump' CSV SEGMENT REJECT LIMIT 30 PERCENT;
|
||||
CREATE INDEX pk_counterid_eventdate_userid_100m ON hits_all_100m USING btree (counterid, eventdate, userid);
|
||||
CREATE INDEX idx_100m_counterid on hits_all_100m using btree (counterid); CREATE INDEX idx_100m_userid on hits_all_100m using btree (userid);
|
||||
ANALYZE hits_all_100m;
|
||||
COPY hits_all_1000m from '/data/hits_1000m_single.dump' CSV SEGMENT REJECT LIMIT 30 PERCENT;
|
||||
CREATE INDEX pk_counterid_eventdate_userid_1000m ON hits_all_1000m USING btree (counterid, eventdate, userid);
|
||||
CREATE INDEX idx_1000m_counterid on hits_all_1000m using btree (counterid); CREATE INDEX idx_1000m_userid on hits_all_1000m using btree (userid);
|
||||
ANALYZE hits_all_1000m;
|
@ -1,43 +0,0 @@
|
||||
SELECT count(1) FROM {table}
|
||||
SELECT count(1) FROM {table} WHERE AdvEngineID != 0
|
||||
SELECT sum(AdvEngineID), count(1), avg(ResolutionWidth) FROM {table}
|
||||
SELECT sum(UserID) FROM {table}
|
||||
SELECT count(UserID) FROM ( SELECT DISTINCT UserID FROM {table} ) AS d
|
||||
SELECT count(SearchPhrase) FROM ( SELECT DISTINCT SearchPhrase FROM {table} ) AS d
|
||||
SELECT min(EventDate), max(EventDate) FROM {table}
|
||||
SELECT AdvEngineID, count(1) FROM {table} WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY 2 DESC
|
||||
SELECT RegionID, count(DISTINCT UserID) AS u FROM {table} GROUP BY RegionID ORDER BY u DESC LIMIT 10
|
||||
SELECT RegionID, sum(AdvEngineID), count(1) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM {table} GROUP BY RegionID ORDER BY c DESC LIMIT 10
|
||||
SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10
|
||||
SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM {table} WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10
|
||||
SELECT SearchPhrase, count(1) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10
|
||||
SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM {table} WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10
|
||||
SELECT SearchEngineID, SearchPhrase, count(1) AS c FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10
|
||||
SELECT UserID, count(1) FROM {table} GROUP BY UserID ORDER BY 2 DESC LIMIT 10
|
||||
SELECT UserID, SearchPhrase, count(1) FROM {table} GROUP BY UserID, SearchPhrase ORDER BY 3 DESC LIMIT 10
|
||||
SELECT UserID, SearchPhrase, count(1) FROM {table} GROUP BY UserID, SearchPhrase LIMIT 10
|
||||
SELECT UserID, date_trunc('minute', EventTime) AS m, SearchPhrase, count(1) FROM {table} GROUP BY UserID, m, SearchPhrase ORDER BY count(1) DESC LIMIT 10
|
||||
SELECT UserID FROM {table} WHERE UserID = 12345678901234567890
|
||||
SELECT count(1) FROM {table} WHERE URL LIKE '%metrika%'
|
||||
SELECT SearchPhrase, max(URL) as URL, count(1) AS c FROM {table} h WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10
|
||||
SELECT SearchPhrase, max(URL) as URL, min(Title) as Title, count(1) AS c, count(DISTINCT UserID) FROM {table} WHERE Title LIKE '%\xd0\xaf\xd0\xbd\xd0\xb4\xd0\xb5\xd0\xba\xd1\x81%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
|
||||
SELECT count(1) FROM {table}
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10
|
||||
SELECT SearchPhrase FROM {table} WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10
|
||||
SELECT CounterID, avg(length(URL)) AS l, count(1) AS c FROM {table} WHERE URL != '' GROUP BY CounterID HAVING count(1) > 100000 ORDER BY l DESC LIMIT 25
|
||||
SELECT Referer AS key, avg(length(Referer)) AS l, count(1) AS c, Referer FROM {table} WHERE Referer != '' GROUP BY key HAVING count(1) > 100000 ORDER BY l DESC LIMIT 25
|
||||
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM {table}
|
||||
SELECT SearchEngineID, ClientIP, count(1) AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10
|
||||
SELECT WatchID, ClientIP, count(1) AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10
|
||||
SELECT WatchID, ClientIP, count(1) AS c, sum(Refresh), avg(ResolutionWidth) FROM {table} GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10
|
||||
SELECT URL, count(1) AS c FROM {table} GROUP BY URL ORDER BY c DESC LIMIT 10
|
||||
SELECT 1, URL, count(1) AS c FROM {table} GROUP BY 1, URL ORDER BY c DESC LIMIT 10
|
||||
SELECT ClientIP AS x, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(1) AS c FROM {table} GROUP BY x, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10
|
||||
SELECT URL, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND DontCountHits =0 AND Refresh = 0 AND URL <>'' GROUP BY URL ORDER BY PageViews DESC LIMIT 10
|
||||
SELECT Title, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate BETWEEN '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND DontCountHits=0 AND Refresh=0 AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10
|
||||
SELECT URL, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND EventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh = 0 AND IsLink <> 0 AND IsDownload = 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT TraficSourceID, SearchEngineID, AdvEngineID, case when (SearchEngineID = 0 AND AdvEngineID = 0) THEN Referer ELSE '' END Src, URL AS Dst, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh = 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
|
||||
SELECT URLHash, EventDate, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND TraficSourceID IN (-1, 6) AND RefererHash = 7135345792483900000 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100
|
||||
SELECT WindowClientWidth, WindowClientHeight, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND DontCountHits =0 AND URLHash = 7135345792483900000 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
|
||||
SELECT date_trunc('minute', EventTime) AS Minute, count(1) AS PageViews FROM {table} WHERE CounterID = 62 AND eventDate between '2013-07-01'::timestamp AND '2013-07-31'::timestamp AND Refresh =0 AND DontCountHits =0 GROUP BY Minute ORDER BY Minute;
|
@ -1,150 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
|
||||
import sys
|
||||
import json
|
||||
|
||||
|
||||
def parse_block(block, options):
|
||||
|
||||
# print('block is here', block)
|
||||
# show_query = False
|
||||
# show_query = options.show_query
|
||||
result = []
|
||||
query = block[0].strip()
|
||||
if len(block) > 4:
|
||||
timing1 = block[1].strip().split()[1]
|
||||
timing2 = block[3].strip().split()[1]
|
||||
timing3 = block[5].strip().split()[1]
|
||||
else:
|
||||
timing1 = block[1].strip().split()[1]
|
||||
timing2 = block[2].strip().split()[1]
|
||||
timing3 = block[3].strip().split()[1]
|
||||
if options.show_queries:
|
||||
result.append(query)
|
||||
if not options.show_first_timings:
|
||||
result += [timing1, timing2, timing3]
|
||||
else:
|
||||
result.append(timing1)
|
||||
return result
|
||||
|
||||
|
||||
def read_stats_file(options, fname):
|
||||
result = []
|
||||
int_result = []
|
||||
block = []
|
||||
time_count = 1
|
||||
with open(fname) as f:
|
||||
|
||||
for line in f.readlines():
|
||||
|
||||
if "SELECT" in line:
|
||||
if len(block) > 1:
|
||||
result.append(parse_block(block, options))
|
||||
block = [line]
|
||||
elif "Time:" in line:
|
||||
block.append(line)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def compare_stats_files(options, arguments):
|
||||
result = []
|
||||
file_output = []
|
||||
pyplot_colors = ["y", "b", "g", "r"]
|
||||
for fname in arguments[1:]:
|
||||
file_output.append((read_stats_file(options, fname)))
|
||||
if len(file_output[0]) > 0:
|
||||
timings_count = len(file_output[0])
|
||||
for idx, data_set in enumerate(file_output):
|
||||
int_result = []
|
||||
for timing in data_set:
|
||||
int_result.append(float(timing[0])) # y values
|
||||
result.append(
|
||||
[
|
||||
[x for x in range(0, len(int_result))],
|
||||
int_result,
|
||||
pyplot_colors[idx] + "^",
|
||||
]
|
||||
)
|
||||
# result.append([x for x in range(1, len(int_result)) ]) #x values
|
||||
# result.append( pyplot_colors[idx] + '^' )
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def parse_args():
|
||||
from optparse import OptionParser
|
||||
|
||||
parser = OptionParser(usage="usage: %prog [options] [result_file_path]..")
|
||||
parser.add_option(
|
||||
"-q",
|
||||
"--show-queries",
|
||||
help="Show statements along with timings",
|
||||
action="store_true",
|
||||
dest="show_queries",
|
||||
)
|
||||
parser.add_option(
|
||||
"-f",
|
||||
"--show-first-timings",
|
||||
help="Show only first tries timings",
|
||||
action="store_true",
|
||||
dest="show_first_timings",
|
||||
)
|
||||
parser.add_option(
|
||||
"-c",
|
||||
"--compare-mode",
|
||||
help="Prepare output for pyplot comparing result files.",
|
||||
action="store",
|
||||
dest="compare_mode",
|
||||
)
|
||||
(options, arguments) = parser.parse_args(sys.argv)
|
||||
if len(arguments) < 2:
|
||||
parser.print_usage()
|
||||
sys.exit(1)
|
||||
return (options, arguments)
|
||||
|
||||
|
||||
def gen_pyplot_code(options, arguments):
|
||||
result = ""
|
||||
data_sets = compare_stats_files(options, arguments)
|
||||
for idx, data_set in enumerate(data_sets, start=0):
|
||||
x_values, y_values, line_style = data_set
|
||||
result += "\nplt.plot("
|
||||
result += "%s, %s, '%s'" % (x_values, y_values, line_style)
|
||||
result += ", label='%s try')" % idx
|
||||
print("import matplotlib.pyplot as plt")
|
||||
print(result)
|
||||
print("plt.xlabel('Try number')")
|
||||
print("plt.ylabel('Timing')")
|
||||
print("plt.title('Benchmark query timings')")
|
||||
print("plt.legend()")
|
||||
print("plt.show()")
|
||||
|
||||
|
||||
def gen_html_json(options, arguments):
|
||||
tuples = read_stats_file(options, arguments[1])
|
||||
print("{")
|
||||
print('"system: GreenPlum(x2),')
|
||||
print(('"version": "%s",' % "4.3.9.1"))
|
||||
print('"data_size": 10000000,')
|
||||
print('"time": "",')
|
||||
print('"comments": "",')
|
||||
print('"result":')
|
||||
print("[")
|
||||
for s in tuples:
|
||||
print(s)
|
||||
print("]")
|
||||
print("}")
|
||||
|
||||
|
||||
def main():
|
||||
(options, arguments) = parse_args()
|
||||
if len(arguments) > 2:
|
||||
gen_pyplot_code(options, arguments)
|
||||
else:
|
||||
gen_html_json(options, arguments)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -1,3 +0,0 @@
|
||||
CREATE TABLE hits_all_10m ( WatchID bigint, JavaEnable int, Title text, GoodEvent int, EventTime timestamp, EventDate timestamp, CounterID bigint, ClientIP bigint, RegionID bigint, UserID bigint, CounterClass int, OS int, UserAgent int, URL text, Referer text, Refresh int, RefererCategoryID int, RefererRegionID bigint, URLCategoryID int, URLRegionID bigint, ResolutionWidth int, ResolutionHeight int, ResolutionDepth int, FlashMajor int, FlashMinor int, FlashMinor2 text, NetMajor int, NetMinor int, UserAgentMajor int, CookieEnable int, JavascriptEnable int, IsMobile int, MobilePhone int, MobilePhoneModel text, Params text, IPNetworkID bigint, TraficSourceID int, SearchEngineID int, SearchPhrase text, AdvEngineID int, IsArtifical int, WindowClientWidth int, WindowClientHeight int, ClientTimeZone int, ClientEventTime timestamp, SilverlightVersion1 int, SilverlightVersion2 int, SilverlightVersion3 bigint, SilverlightVersion4 int, PageCharset text, CodeVersion bigint, IsLink int, IsDownload int, IsNotBounce int, FUniqID bigint, OriginalURL text, HID bigint, IsOldCounter int, IsEvent int, IsParameter int, DontCountHits int, WithHash int, HitColor varchar(3), LocalEventTime timestamp, Age int, Sex int, Income int, Interests int, Robotness int, RemoteIP bigint, WindowName int, OpenerName int, HistoryLength int, SocialNetwork text, SocialAction text, HTTPError int, SendTiming bigint, DNSTiming bigint, ConnectTiming bigint, ResponseStartTiming bigint, ResponseEndTiming bigint, FetchTiming bigint, SocialSourceNetworkID int, SocialSourcePage text, ParamPrice int, ParamOrderID text, OpenstatServiceName text, OpenstatCampaignID text, OpenstatAdID text, OpenstatSourceID text, UTMSource text, UTMMedium text, UTMCampaign text, UTMContent text, UTMTerm text, FromTag text, HasGCLID int, RefererHash bigint, URLHash bigint, CLID bigint) WITH (appendonly=true, orientation=column, compresstype=quicklz) DISTRIBUTED BY (userid) ;
|
||||
CREATE TABLE hits_all_100m ( WatchID bigint, JavaEnable int, Title text, GoodEvent int, EventTime timestamp, EventDate timestamp, CounterID bigint, ClientIP bigint, RegionID bigint, UserID bigint, CounterClass int, OS int, UserAgent int, URL text, Referer text, Refresh int, RefererCategoryID int, RefererRegionID bigint, URLCategoryID int, URLRegionID bigint, ResolutionWidth int, ResolutionHeight int, ResolutionDepth int, FlashMajor int, FlashMinor int, FlashMinor2 text, NetMajor int, NetMinor int, UserAgentMajor int, CookieEnable int, JavascriptEnable int, IsMobile int, MobilePhone int, MobilePhoneModel text, Params text, IPNetworkID bigint, TraficSourceID int, SearchEngineID int, SearchPhrase text, AdvEngineID int, IsArtifical int, WindowClientWidth int, WindowClientHeight int, ClientTimeZone int, ClientEventTime timestamp, SilverlightVersion1 int, SilverlightVersion2 int, SilverlightVersion3 bigint, SilverlightVersion4 int, PageCharset text, CodeVersion bigint, IsLink int, IsDownload int, IsNotBounce int, FUniqID bigint, OriginalURL text, HID bigint, IsOldCounter int, IsEvent int, IsParameter int, DontCountHits int, WithHash int, HitColor varchar(3), LocalEventTime timestamp, Age int, Sex int, Income int, Interests int, Robotness int, RemoteIP bigint, WindowName int, OpenerName int, HistoryLength int, SocialNetwork text, SocialAction text, HTTPError int, SendTiming bigint, DNSTiming bigint, ConnectTiming bigint, ResponseStartTiming bigint, ResponseEndTiming bigint, FetchTiming bigint, SocialSourceNetworkID int, SocialSourcePage text, ParamPrice int, ParamOrderID text, OpenstatServiceName text, OpenstatCampaignID text, OpenstatAdID text, OpenstatSourceID text, UTMSource text, UTMMedium text, UTMCampaign text, UTMContent text, UTMTerm text, FromTag text, HasGCLID int, RefererHash bigint, URLHash bigint, CLID bigint) WITH (appendonly=true, orientation=column, compresstype=quicklz) DISTRIBUTED BY (userid) ;
|
||||
CREATE TABLE hits_all_1000m ( WatchID bigint, JavaEnable int, Title text, GoodEvent int, EventTime timestamp, EventDate timestamp, CounterID bigint, ClientIP bigint, RegionID bigint, UserID bigint, CounterClass int, OS int, UserAgent int, URL text, Referer text, Refresh int, RefererCategoryID int, RefererRegionID bigint, URLCategoryID int, URLRegionID bigint, ResolutionWidth int, ResolutionHeight int, ResolutionDepth int, FlashMajor int, FlashMinor int, FlashMinor2 text, NetMajor int, NetMinor int, UserAgentMajor int, CookieEnable int, JavascriptEnable int, IsMobile int, MobilePhone int, MobilePhoneModel text, Params text, IPNetworkID bigint, TraficSourceID int, SearchEngineID int, SearchPhrase text, AdvEngineID int, IsArtifical int, WindowClientWidth int, WindowClientHeight int, ClientTimeZone int, ClientEventTime timestamp, SilverlightVersion1 int, SilverlightVersion2 int, SilverlightVersion3 bigint, SilverlightVersion4 int, PageCharset text, CodeVersion bigint, IsLink int, IsDownload int, IsNotBounce int, FUniqID bigint, OriginalURL text, HID bigint, IsOldCounter int, IsEvent int, IsParameter int, DontCountHits int, WithHash int, HitColor varchar(3), LocalEventTime timestamp, Age int, Sex int, Income int, Interests int, Robotness int, RemoteIP bigint, WindowName int, OpenerName int, HistoryLength int, SocialNetwork text, SocialAction text, HTTPError int, SendTiming bigint, DNSTiming bigint, ConnectTiming bigint, ResponseStartTiming bigint, ResponseEndTiming bigint, FetchTiming bigint, SocialSourceNetworkID int, SocialSourcePage text, ParamPrice int, ParamOrderID text, OpenstatServiceName text, OpenstatCampaignID text, OpenstatAdID text, OpenstatSourceID text, UTMSource text, UTMMedium text, UTMCampaign text, UTMContent text, UTMTerm text, FromTag text, HasGCLID int, RefererHash bigint, URLHash bigint, CLID bigint) WITH (appendonly=true, orientation=column,compresstype=quicklz) DISTRIBUTED BY (userid) ;
|
@ -1,189 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
if [[ -n $1 ]]; then
|
||||
SCALE=$1
|
||||
else
|
||||
SCALE=100
|
||||
fi
|
||||
|
||||
TABLE="hits_${SCALE}m_obfuscated"
|
||||
DATASET="${TABLE}_v1.tar.xz"
|
||||
QUERIES_FILE="queries.sql"
|
||||
TRIES=3
|
||||
|
||||
# Note: on older Ubuntu versions, 'axel' does not support IPv6. If you are using IPv6-only servers on very old Ubuntu, just don't install 'axel'.
|
||||
|
||||
FASTER_DOWNLOAD=wget
|
||||
if command -v axel >/dev/null; then
|
||||
FASTER_DOWNLOAD=axel
|
||||
else
|
||||
echo "It's recommended to install 'axel' for faster downloads."
|
||||
fi
|
||||
|
||||
if command -v pixz >/dev/null; then
|
||||
TAR_PARAMS='-Ipixz'
|
||||
else
|
||||
echo "It's recommended to install 'pixz' for faster decompression of the dataset."
|
||||
fi
|
||||
|
||||
mkdir -p clickhouse-benchmark-$SCALE
|
||||
pushd clickhouse-benchmark-$SCALE
|
||||
|
||||
OS=$(uname -s)
|
||||
ARCH=$(uname -m)
|
||||
|
||||
DIR=
|
||||
|
||||
if [ "${OS}" = "Linux" ]
|
||||
then
|
||||
if [ "${ARCH}" = "x86_64" ]
|
||||
then
|
||||
DIR="amd64"
|
||||
elif [ "${ARCH}" = "aarch64" ]
|
||||
then
|
||||
DIR="aarch64"
|
||||
elif [ "${ARCH}" = "powerpc64le" ]
|
||||
then
|
||||
DIR="powerpc64le"
|
||||
fi
|
||||
elif [ "${OS}" = "FreeBSD" ]
|
||||
then
|
||||
if [ "${ARCH}" = "x86_64" ]
|
||||
then
|
||||
DIR="freebsd"
|
||||
elif [ "${ARCH}" = "aarch64" ]
|
||||
then
|
||||
DIR="freebsd-aarch64"
|
||||
elif [ "${ARCH}" = "powerpc64le" ]
|
||||
then
|
||||
DIR="freebsd-powerpc64le"
|
||||
fi
|
||||
elif [ "${OS}" = "Darwin" ]
|
||||
then
|
||||
if [ "${ARCH}" = "x86_64" ]
|
||||
then
|
||||
DIR="macos"
|
||||
elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
|
||||
then
|
||||
DIR="macos-aarch64"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "${DIR}" ]
|
||||
then
|
||||
echo "The '${OS}' operating system with the '${ARCH}' architecture is not supported."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
URL="https://builds.clickhouse.com/master/${DIR}/clickhouse"
|
||||
echo
|
||||
echo "Will download ${URL}"
|
||||
echo
|
||||
curl -O "${URL}" && chmod a+x clickhouse || exit 1
|
||||
echo
|
||||
echo "Successfully downloaded the ClickHouse binary"
|
||||
|
||||
chmod a+x clickhouse
|
||||
|
||||
if [[ ! -f $QUERIES_FILE ]]; then
|
||||
wget "https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/benchmark/clickhouse/$QUERIES_FILE"
|
||||
fi
|
||||
|
||||
if [[ ! -d data ]]; then
|
||||
if [[ ! -f $DATASET ]]; then
|
||||
$FASTER_DOWNLOAD "https://datasets.clickhouse.com/hits/partitions/$DATASET"
|
||||
fi
|
||||
|
||||
tar $TAR_PARAMS --strip-components=1 --directory=. -x -v -f $DATASET
|
||||
fi
|
||||
|
||||
uptime
|
||||
|
||||
echo "Starting clickhouse-server"
|
||||
|
||||
./clickhouse server > server.log 2>&1 &
|
||||
PID=$!
|
||||
|
||||
function finish {
|
||||
kill $PID
|
||||
wait
|
||||
}
|
||||
trap finish EXIT
|
||||
|
||||
echo "Waiting for clickhouse-server to start"
|
||||
|
||||
for i in {1..30}; do
|
||||
sleep 1
|
||||
./clickhouse client --query "SELECT 'The dataset size is: ', count() FROM $TABLE" 2>/dev/null && break || echo '.'
|
||||
if [[ $i == 30 ]]; then exit 1; fi
|
||||
done
|
||||
|
||||
echo
|
||||
echo "Will perform benchmark. Results:"
|
||||
echo
|
||||
|
||||
cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read query; do
|
||||
sync
|
||||
if [ "${OS}" = "Darwin" ]
|
||||
then
|
||||
sudo purge > /dev/null
|
||||
else
|
||||
echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null
|
||||
fi
|
||||
|
||||
echo -n "["
|
||||
for i in $(seq 1 $TRIES); do
|
||||
RES=$(./clickhouse client --max_memory_usage 100G --time --format=Null --query="$query" 2>&1 ||:)
|
||||
[[ "$?" == "0" ]] && echo -n "${RES}" || echo -n "null"
|
||||
[[ "$i" != $TRIES ]] && echo -n ", "
|
||||
done
|
||||
echo "],"
|
||||
done
|
||||
|
||||
|
||||
echo
|
||||
echo "Benchmark complete. System info:"
|
||||
echo
|
||||
|
||||
if [ "${OS}" = "Darwin" ]
|
||||
then
|
||||
echo '----Version, build id-----------'
|
||||
./clickhouse local --query "SELECT format('Version: {}', version())"
|
||||
sw_vers | grep BuildVersion
|
||||
./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
|
||||
./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
|
||||
echo '----CPU-------------------------'
|
||||
sysctl hw.model
|
||||
sysctl -a | grep -E 'hw.activecpu|hw.memsize|hw.byteorder|cachesize'
|
||||
echo '----Disk Free and Total--------'
|
||||
df -h .
|
||||
echo '----Memory Free and Total-------'
|
||||
vm_stat
|
||||
echo '----Physical Memory Amount------'
|
||||
ls -l /var/vm
|
||||
echo '--------------------------------'
|
||||
else
|
||||
echo '----Version, build id-----------'
|
||||
./clickhouse local --query "SELECT format('Version: {}, build id: {}', version(), buildId())"
|
||||
./clickhouse local --query "SELECT format('The number of threads is: {}', value) FROM system.settings WHERE name = 'max_threads'" --output-format TSVRaw
|
||||
./clickhouse local --query "SELECT format('Current time: {}', toString(now(), 'UTC'))"
|
||||
echo '----CPU-------------------------'
|
||||
cat /proc/cpuinfo | grep -i -F 'model name' | uniq
|
||||
lscpu
|
||||
echo '----Block Devices---------------'
|
||||
lsblk
|
||||
echo '----Disk Free and Total--------'
|
||||
df -h .
|
||||
echo '----Memory Free and Total-------'
|
||||
free -h
|
||||
echo '----Physical Memory Amount------'
|
||||
cat /proc/meminfo | grep MemTotal
|
||||
echo '----RAID Info-------------------'
|
||||
cat /proc/mdstat
|
||||
#echo '----PCI-------------------------'
|
||||
#lspci
|
||||
#echo '----All Hardware Info-----------'
|
||||
#lshw
|
||||
echo '--------------------------------'
|
||||
fi
|
||||
echo
|
@ -1,4 +0,0 @@
|
||||
CONF_DIR=/home/kartavyy/benchmark/hive
|
||||
expect_file=$CONF_DIR/expect.tcl
|
||||
test_file=$CONF_DIR/queries.sql
|
||||
etc_init_d_service=
|
@ -1,9 +0,0 @@
|
||||
create table hits_10m_raw ( WatchID BIGINT, JavaEnable SMALLINT, Title STRING, GoodEvent SMALLINT, EventTime TIMESTAMP, EventDate TIMESTAMP, CounterID BIGINT, ClientIP BIGINT, RegionID BIGINT, UserID BIGINT, CounterClass TINYINT, OS SMALLINT, UserAgent SMALLINT, URL STRING, Referer STRING, Refresh TINYINT, RefererCategoryID INT, RefererRegionID BIGINT, URLCategoryID INT, URLRegionID BIGINT, ResolutionWidth INT, ResolutionHeight INT, ResolutionDepth SMALLINT, FlashMajor SMALLINT, FlashMinor SMALLINT, FlashMinor2 STRING, NetMajor SMALLINT, NetMinor SMALLINT, UserAgentMajor INT, UserAgentMinor STRING, CookieEnable SMALLINT, JavascriptEnable SMALLINT, IsMobile SMALLINT, MobilePhone SMALLINT, MobilePhoneModel STRING, Params STRING, IPNetworkID BIGINT, TraficSourceID SMALLINT, SearchEngineID INT, SearchPhrase STRING, AdvEngineID SMALLINT, IsArtifical SMALLINT, WindowClientWidth INT, WindowClientHeight INT, ClientTimeZone INT, ClientEventTime TIMESTAMP, SilverlightVersion1 SMALLINT, SilverlightVersion2 SMALLINT, SilverlightVersion3 BIGINT, SilverlightVersion4 INT, PageCharset STRING, CodeVersion BIGINT, IsLink SMALLINT, IsDownload SMALLINT, IsNotBounce SMALLINT, FUniqID BIGINT, OriginalURL STRING, HID BIGINT, IsOldCounter SMALLINT, IsEvent SMALLINT, IsParameter SMALLINT, DontCountHits SMALLINT, WithHash SMALLINT, HitColor STRING, LocalEventTime TIMESTAMP, Age SMALLINT, Sex SMALLINT, Income SMALLINT, Interests INT, Robotness SMALLINT, RemoteIP BIGINT, WindowName INT, OpenerName INT, HistoryLength SMALLINT, BrowserLanguage STRING, BrowserCountry STRING, SocialNetwork STRING, SocialAction STRING, HTTPError INT, SendTiming BIGINT, DNSTiming BIGINT, ConnectTiming BIGINT, ResponseStartTiming BIGINT, ResponseEndTiming BIGINT, FetchTiming BIGINT, SocialSourceNetworkID SMALLINT, SocialSourcePage STRING, ParamPrice BIGINT, ParamOrderID STRING, ParamCurrency STRING, ParamCurrencyID INT, OpenstatServiceName STRING, OpenstatCampaignID STRING, OpenstatAdID STRING, OpenstatSourceID STRING, UTMSource STRING, UTMMedium STRING, UTMCampaign STRING, UTMContent STRING, UTMTerm STRING, FromTag STRING, HasGCLID SMALLINT, RefererHash BIGINT, URLHash BIGINT, CLID BIGINT, UserIDHash BIGINT ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' STORED AS TEXTFILE;
|
||||
|
||||
load data local inpath '/opt/dump/dump_0.3/dump_hits_10m_meshed_utf8.tsv' overwrite into table hits_10m_raw;
|
||||
|
||||
create table hits_10m ( WatchID BIGINT, JavaEnable SMALLINT, Title STRING, GoodEvent SMALLINT, EventTime TIMESTAMP, EventDate TIMESTAMP, CounterID BIGINT, ClientIP BIGINT, RegionID BIGINT, UserID BIGINT, CounterClass TINYINT, OS SMALLINT, UserAgent SMALLINT, URL STRING, Referer STRING, Refresh TINYINT, RefererCategoryID INT, RefererRegionID BIGINT, URLCategoryID INT, URLRegionID BIGINT, ResolutionWidth INT, ResolutionHeight INT, ResolutionDepth SMALLINT, FlashMajor SMALLINT, FlashMinor SMALLINT, FlashMinor2 STRING, NetMajor SMALLINT, NetMinor SMALLINT, UserAgentMajor INT, UserAgentMinor STRING, CookieEnable SMALLINT, JavascriptEnable SMALLINT, IsMobile SMALLINT, MobilePhone SMALLINT, MobilePhoneModel STRING, Params STRING, IPNetworkID BIGINT, TraficSourceID SMALLINT, SearchEngineID INT, SearchPhrase STRING, AdvEngineID SMALLINT, IsArtifical SMALLINT, WindowClientWidth INT, WindowClientHeight INT, ClientTimeZone INT, ClientEventTime TIMESTAMP, SilverlightVersion1 SMALLINT, SilverlightVersion2 SMALLINT, SilverlightVersion3 BIGINT, SilverlightVersion4 INT, PageCharset STRING, CodeVersion BIGINT, IsLink SMALLINT, IsDownload SMALLINT, IsNotBounce SMALLINT, FUniqID BIGINT, OriginalURL STRING, HID BIGINT, IsOldCounter SMALLINT, IsEvent SMALLINT, IsParameter SMALLINT, DontCountHits SMALLINT, WithHash SMALLINT, HitColor STRING, LocalEventTime TIMESTAMP, Age SMALLINT, Sex SMALLINT, Income SMALLINT, Interests INT, Robotness SMALLINT, RemoteIP BIGINT, WindowName INT, OpenerName INT, HistoryLength SMALLINT, BrowserLanguage STRING, BrowserCountry STRING, SocialNetwork STRING, SocialAction STRING, HTTPError INT, SendTiming BIGINT, DNSTiming BIGINT, ConnectTiming BIGINT, ResponseStartTiming BIGINT, ResponseEndTiming BIGINT, FetchTiming BIGINT, SocialSourceNetworkID SMALLINT, SocialSourcePage STRING, ParamPrice BIGINT, ParamOrderID STRING, ParamCurrency STRING, ParamCurrencyID INT, OpenstatServiceName STRING, OpenstatCampaignID STRING, OpenstatAdID STRING, OpenstatSourceID STRING, UTMSource STRING, UTMMedium STRING, UTMCampaign STRING, UTMContent STRING, UTMTerm STRING, FromTag STRING, HasGCLID SMALLINT, RefererHash BIGINT, URLHash BIGINT, CLID BIGINT, UserIDHash BIGINT ) CLUSTERED BY (EventDate) SORTED BY(CounterID, EventDate, UserIDHash, EventTime) INTO 10 BUCKETS STORED AS ORC tblproperties("orc.compress"="ZLIB");
|
||||
|
||||
insert overwrite table hits_10m select * from hits_10m_raw;
|
||||
|
||||
--drop table hits_10m_raw;
|
@ -1,18 +0,0 @@
|
||||
#!/bin/expect
|
||||
# expect (Tcl) script that feeds a single query to the hive CLI
|
||||
|
||||
# Set timeout
|
||||
set timeout 600
|
||||
|
||||
# Get arguments
|
||||
set query [lindex $argv 0]
|
||||
|
||||
spawn hive
|
||||
|
||||
expect "hive>"
|
||||
send "$query;\r"
|
||||
|
||||
expect "hive>"
|
||||
send "quit;\r"
|
||||
|
||||
expect eof
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -1,624 +0,0 @@
|
||||
start time: Tue Sep 10 18:46:00 MSK 2013
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_15579@mturlrep13_201309101846_67163557.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT count(*) FROM hits_10m;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_16038@mturlrep13_201309101846_623079473.txt
|
||||
hive> SELECT count(*) FROM hits_10m;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0036
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:46:20,061 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:46:27,089 Stage-1 map = 7%, reduce = 0%
|
||||
2013-09-10 18:46:33,113 Stage-1 map = 14%, reduce = 0%
|
||||
2013-09-10 18:46:36,127 Stage-1 map = 22%, reduce = 0%
|
||||
2013-09-10 18:46:39,143 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:40,149 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:41,156 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:42,162 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:43,168 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:44,174 Stage-1 map = 29%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:45,179 Stage-1 map = 36%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:46,185 Stage-1 map = 36%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:47,191 Stage-1 map = 36%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:48,197 Stage-1 map = 43%, reduce = 0%, Cumulative CPU 46.41 sec
|
||||
2013-09-10 18:46:49,205 Stage-1 map = 47%, reduce = 0%, Cumulative CPU 62.51 sec
|
||||
2013-09-10 18:46:50,211 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:51,217 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:52,222 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:53,227 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:54,233 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:55,238 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:56,244 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:57,250 Stage-1 map = 54%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:58,255 Stage-1 map = 57%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:46:59,261 Stage-1 map = 57%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:00,266 Stage-1 map = 57%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:01,272 Stage-1 map = 61%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:02,277 Stage-1 map = 61%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:03,282 Stage-1 map = 65%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:04,287 Stage-1 map = 65%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:05,305 Stage-1 map = 65%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:06,310 Stage-1 map = 69%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:07,316 Stage-1 map = 73%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:08,321 Stage-1 map = 73%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:09,326 Stage-1 map = 76%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:10,331 Stage-1 map = 80%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:11,336 Stage-1 map = 80%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:12,341 Stage-1 map = 84%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:13,346 Stage-1 map = 88%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:14,351 Stage-1 map = 88%, reduce = 17%, Cumulative CPU 83.95 sec
|
||||
2013-09-10 18:47:15,356 Stage-1 map = 93%, reduce = 17%, Cumulative CPU 118.21 sec
|
||||
2013-09-10 18:47:16,372 Stage-1 map = 93%, reduce = 17%, Cumulative CPU 118.21 sec
|
||||
2013-09-10 18:47:17,379 Stage-1 map = 93%, reduce = 17%, Cumulative CPU 118.21 sec
|
||||
2013-09-10 18:47:18,384 Stage-1 map = 97%, reduce = 17%, Cumulative CPU 118.21 sec
|
||||
2013-09-10 18:47:19,388 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 162.76 sec
|
||||
2013-09-10 18:47:20,393 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 162.76 sec
|
||||
2013-09-10 18:47:21,397 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 162.76 sec
|
||||
2013-09-10 18:47:22,404 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 165.27 sec
|
||||
2013-09-10 18:47:23,410 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 165.27 sec
|
||||
2013-09-10 18:47:24,415 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 165.27 sec
|
||||
MapReduce Total cumulative CPU time: 2 minutes 45 seconds 270 msec
|
||||
Ended Job = job_201309101627_0036
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 165.27 sec HDFS Read: 1082943442 HDFS Write: 9 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 2 minutes 45 seconds 270 msec
|
||||
OK
|
||||
10000000
|
||||
Time taken: 74.228 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_17475@mturlrep13_201309101847_1783698271.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_17882@mturlrep13_201309101847_1295809350.txt
|
||||
hive> SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0037
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:47:44,058 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:47:49,086 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.21 sec
|
||||
2013-09-10 18:47:50,093 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.21 sec
|
||||
2013-09-10 18:47:51,101 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.21 sec
|
||||
2013-09-10 18:47:52,107 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.21 sec
|
||||
2013-09-10 18:47:53,113 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.21 sec
|
||||
2013-09-10 18:47:54,119 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 18.18 sec
|
||||
2013-09-10 18:47:55,125 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.81 sec
|
||||
2013-09-10 18:47:56,130 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.81 sec
|
||||
2013-09-10 18:47:57,138 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 25.64 sec
|
||||
2013-09-10 18:47:58,144 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 25.64 sec
|
||||
2013-09-10 18:47:59,150 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 25.64 sec
|
||||
MapReduce Total cumulative CPU time: 25 seconds 640 msec
|
||||
Ended Job = job_201309101627_0037
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 25.64 sec HDFS Read: 907716 HDFS Write: 7 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 25 seconds 640 msec
|
||||
OK
|
||||
171127
|
||||
Time taken: 25.153 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_19147@mturlrep13_201309101848_1891179156.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_19567@mturlrep13_201309101848_690102300.txt
|
||||
hive> SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0038
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:48:18,837 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:48:25,865 Stage-1 map = 39%, reduce = 0%
|
||||
2013-09-10 18:48:26,875 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:27,882 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:28,889 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:29,895 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:30,901 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:31,907 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:32,914 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 18.45 sec
|
||||
2013-09-10 18:48:33,920 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 34.59 sec
|
||||
2013-09-10 18:48:34,925 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 34.59 sec
|
||||
2013-09-10 18:48:35,930 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 34.59 sec
|
||||
2013-09-10 18:48:36,935 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 34.59 sec
|
||||
2013-09-10 18:48:37,940 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 34.59 sec
|
||||
2013-09-10 18:48:38,945 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 35.24 sec
|
||||
2013-09-10 18:48:39,952 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 36.63 sec
|
||||
2013-09-10 18:48:40,958 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 36.63 sec
|
||||
MapReduce Total cumulative CPU time: 36 seconds 630 msec
|
||||
Ended Job = job_201309101627_0038
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 36.63 sec HDFS Read: 8109219 HDFS Write: 30 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 36 seconds 630 msec
|
||||
OK
|
||||
Time taken: 31.961 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_20898@mturlrep13_201309101848_327652001.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT sum(UserID) FROM hits_10m;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_21336@mturlrep13_201309101848_1975614127.txt
|
||||
hive> SELECT sum(UserID) FROM hits_10m;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0039
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:49:00,561 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:49:07,617 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 15.12 sec
|
||||
2013-09-10 18:49:08,626 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 15.12 sec
|
||||
2013-09-10 18:49:09,634 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 15.12 sec
|
||||
2013-09-10 18:49:10,639 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 15.12 sec
|
||||
2013-09-10 18:49:11,646 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 15.12 sec
|
||||
2013-09-10 18:49:12,652 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 15.12 sec
|
||||
2013-09-10 18:49:13,658 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 21.86 sec
|
||||
2013-09-10 18:49:14,664 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 30.08 sec
|
||||
2013-09-10 18:49:15,670 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 30.08 sec
|
||||
2013-09-10 18:49:16,675 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 30.08 sec
|
||||
2013-09-10 18:49:17,680 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 30.08 sec
|
||||
2013-09-10 18:49:18,685 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 30.08 sec
|
||||
2013-09-10 18:49:19,690 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 30.08 sec
|
||||
2013-09-10 18:49:20,697 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 32.07 sec
|
||||
2013-09-10 18:49:21,703 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 32.07 sec
|
||||
MapReduce Total cumulative CPU time: 32 seconds 70 msec
|
||||
Ended Job = job_201309101627_0039
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 32.07 sec HDFS Read: 57312623 HDFS Write: 21 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 32 seconds 70 msec
|
||||
OK
|
||||
-4662894107982093709
|
||||
Time taken: 30.94 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_22560@mturlrep13_201309101849_2023198520.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT count(DISTINCT UserID) FROM hits_10m;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_22993@mturlrep13_201309101849_961728603.txt
|
||||
hive> SELECT count(DISTINCT UserID) FROM hits_10m;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0040
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:49:41,232 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:49:48,264 Stage-1 map = 43%, reduce = 0%
|
||||
2013-09-10 18:49:51,283 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:52,291 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:53,298 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:54,304 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:55,310 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:56,317 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:57,332 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:58,337 Stage-1 map = 96%, reduce = 17%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:49:59,342 Stage-1 map = 96%, reduce = 17%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:50:00,348 Stage-1 map = 96%, reduce = 17%, Cumulative CPU 27.01 sec
|
||||
2013-09-10 18:50:01,353 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:02,360 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:03,365 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:04,369 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:05,375 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:06,379 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:07,385 Stage-1 map = 100%, reduce = 88%, Cumulative CPU 55.01 sec
|
||||
2013-09-10 18:50:08,391 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.95 sec
|
||||
2013-09-10 18:50:09,397 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.95 sec
|
||||
2013-09-10 18:50:10,402 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 62.95 sec
|
||||
MapReduce Total cumulative CPU time: 1 minutes 2 seconds 950 msec
|
||||
Ended Job = job_201309101627_0040
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 62.95 sec HDFS Read: 57312623 HDFS Write: 8 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 1 minutes 2 seconds 950 msec
|
||||
OK
|
||||
2037258
|
||||
Time taken: 38.84 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_24634@mturlrep13_201309101850_840502487.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_25401@mturlrep13_201309101850_84750246.txt
|
||||
hive> SELECT count(DISTINCT SearchPhrase) FROM hits_10m;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0041
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:50:31,472 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:50:38,501 Stage-1 map = 43%, reduce = 0%
|
||||
2013-09-10 18:50:40,517 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:41,523 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:42,531 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:43,536 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:44,542 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:45,548 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:46,555 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:47,561 Stage-1 map = 96%, reduce = 17%, Cumulative CPU 21.42 sec
|
||||
2013-09-10 18:50:48,566 Stage-1 map = 97%, reduce = 17%, Cumulative CPU 31.8 sec
|
||||
2013-09-10 18:50:49,571 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:50,576 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:51,581 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:52,587 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:53,592 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:54,597 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:55,602 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:56,607 Stage-1 map = 100%, reduce = 92%, Cumulative CPU 42.95 sec
|
||||
2013-09-10 18:50:57,615 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.6 sec
|
||||
2013-09-10 18:50:58,642 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 50.6 sec
|
||||
MapReduce Total cumulative CPU time: 50 seconds 600 msec
|
||||
Ended Job = job_201309101627_0041
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 50.6 sec HDFS Read: 27820105 HDFS Write: 8 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 50 seconds 600 msec
|
||||
OK
|
||||
1110413
|
||||
Time taken: 37.04 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_26718@mturlrep13_201309101851_285967686.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT min(EventDate), max(EventDate) FROM hits_10m;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_27149@mturlrep13_201309101851_2135309314.txt
|
||||
hive> SELECT min(EventDate), max(EventDate) FROM hits_10m;;
|
||||
Total MapReduce jobs = 1
|
||||
Launching Job 1 out of 1
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0042
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 1
|
||||
2013-09-10 18:51:19,077 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:51:25,106 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.92 sec
|
||||
2013-09-10 18:51:26,114 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.92 sec
|
||||
2013-09-10 18:51:27,123 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.92 sec
|
||||
2013-09-10 18:51:28,129 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.92 sec
|
||||
2013-09-10 18:51:29,135 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.92 sec
|
||||
2013-09-10 18:51:30,141 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 13.92 sec
|
||||
2013-09-10 18:51:31,147 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 20.4 sec
|
||||
2013-09-10 18:51:32,152 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:33,158 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:34,163 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:35,168 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:36,173 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:37,179 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:38,184 Stage-1 map = 100%, reduce = 25%, Cumulative CPU 27.44 sec
|
||||
2013-09-10 18:51:39,192 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.39 sec
|
||||
2013-09-10 18:51:40,198 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 29.39 sec
|
||||
MapReduce Total cumulative CPU time: 29 seconds 390 msec
|
||||
Ended Job = job_201309101627_0042
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 1 Cumulative CPU: 29.39 sec HDFS Read: 597016 HDFS Write: 6 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 29 seconds 390 msec
|
||||
OK
|
||||
Time taken: 30.908 seconds, Fetched: 1 row(s)
|
||||
hive> quit;
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28401@mturlrep13_201309101851_891001725.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_28836@mturlrep13_201309101851_1054092389.txt
|
||||
hive> SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;;
|
||||
Total MapReduce jobs = 2
|
||||
Launching Job 1 out of 2
|
||||
Number of reduce tasks not specified. Estimated from input data size: 2
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0043
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
||||
2013-09-10 18:51:59,809 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:52:04,838 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.48 sec
|
||||
2013-09-10 18:52:05,847 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.48 sec
|
||||
2013-09-10 18:52:06,855 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.48 sec
|
||||
2013-09-10 18:52:07,861 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.48 sec
|
||||
2013-09-10 18:52:08,868 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 12.48 sec
|
||||
2013-09-10 18:52:09,875 Stage-1 map = 75%, reduce = 0%, Cumulative CPU 18.07 sec
|
||||
2013-09-10 18:52:10,881 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
|
||||
2013-09-10 18:52:11,887 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 23.92 sec
|
||||
2013-09-10 18:52:12,894 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 25.68 sec
|
||||
2013-09-10 18:52:13,901 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.53 sec
|
||||
2013-09-10 18:52:14,908 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 27.53 sec
|
||||
MapReduce Total cumulative CPU time: 27 seconds 530 msec
|
||||
Ended Job = job_201309101627_0043
|
||||
Launching Job 2 out of 2
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0044
|
||||
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
||||
2013-09-10 18:52:17,388 Stage-2 map = 0%, reduce = 0%
|
||||
2013-09-10 18:52:19,396 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:20,401 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:21,406 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:22,411 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:23,415 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:24,420 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:25,425 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:26,430 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 0.75 sec
|
||||
2013-09-10 18:52:27,436 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
||||
2013-09-10 18:52:28,442 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
||||
2013-09-10 18:52:29,448 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 2.14 sec
|
||||
MapReduce Total cumulative CPU time: 2 seconds 140 msec
|
||||
Ended Job = job_201309101627_0044
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 27.53 sec HDFS Read: 907716 HDFS Write: 384 SUCCESS
|
||||
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 2.14 sec HDFS Read: 1153 HDFS Write: 60 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 29 seconds 670 msec
|
||||
OK
|
||||
Time taken: 39.506 seconds, Fetched: 9 row(s)
|
||||
hive> quit;
|
||||
-- heavy filtering: almost nothing remains after the filter, but we still run an aggregation.;
|
||||
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_30667@mturlrep13_201309101852_966681525.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_31123@mturlrep13_201309101852_1252745596.txt
|
||||
hive> SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;;
|
||||
Total MapReduce jobs = 2
|
||||
Launching Job 1 out of 2
|
||||
Number of reduce tasks not specified. Estimated from input data size: 2
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0045
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
||||
2013-09-10 18:52:49,457 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:52:56,485 Stage-1 map = 43%, reduce = 0%
|
||||
2013-09-10 18:52:59,503 Stage-1 map = 46%, reduce = 0%, Cumulative CPU 14.56 sec
|
||||
2013-09-10 18:53:00,511 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:01,519 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:02,526 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:03,533 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:04,539 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:05,545 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:06,550 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:07,557 Stage-1 map = 92%, reduce = 17%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:08,563 Stage-1 map = 92%, reduce = 17%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:09,569 Stage-1 map = 92%, reduce = 17%, Cumulative CPU 29.73 sec
|
||||
2013-09-10 18:53:10,575 Stage-1 map = 97%, reduce = 17%, Cumulative CPU 44.01 sec
|
||||
2013-09-10 18:53:11,598 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 58.47 sec
|
||||
2013-09-10 18:53:12,604 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 58.47 sec
|
||||
2013-09-10 18:53:13,609 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 58.47 sec
|
||||
2013-09-10 18:53:14,615 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 58.47 sec
|
||||
2013-09-10 18:53:15,620 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 58.47 sec
|
||||
2013-09-10 18:53:16,627 Stage-1 map = 100%, reduce = 63%, Cumulative CPU 65.64 sec
|
||||
2013-09-10 18:53:17,634 Stage-1 map = 100%, reduce = 63%, Cumulative CPU 65.64 sec
|
||||
2013-09-10 18:53:18,640 Stage-1 map = 100%, reduce = 63%, Cumulative CPU 65.64 sec
|
||||
2013-09-10 18:53:19,646 Stage-1 map = 100%, reduce = 63%, Cumulative CPU 65.64 sec
|
||||
2013-09-10 18:53:20,653 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 71.27 sec
|
||||
2013-09-10 18:53:21,659 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 71.27 sec
|
||||
MapReduce Total cumulative CPU time: 1 minutes 11 seconds 270 msec
|
||||
Ended Job = job_201309101627_0045
|
||||
Launching Job 2 out of 2
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0046
|
||||
Hadoop job information for Stage-2: number of mappers: 1; number of reducers: 1
|
||||
2013-09-10 18:53:25,187 Stage-2 map = 0%, reduce = 0%
|
||||
2013-09-10 18:53:27,196 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:28,202 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:29,207 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:30,211 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:31,216 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:32,220 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:33,226 Stage-2 map = 100%, reduce = 0%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:34,231 Stage-2 map = 100%, reduce = 33%, Cumulative CPU 1.42 sec
|
||||
2013-09-10 18:53:35,237 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.16 sec
|
||||
2013-09-10 18:53:36,243 Stage-2 map = 100%, reduce = 100%, Cumulative CPU 3.16 sec
|
||||
MapReduce Total cumulative CPU time: 3 seconds 160 msec
|
||||
Ended Job = job_201309101627_0046
|
||||
MapReduce Jobs Launched:
|
||||
Job 0: Map: 4 Reduce: 2 Cumulative CPU: 71.27 sec HDFS Read: 67340015 HDFS Write: 100142 SUCCESS
|
||||
Job 1: Map: 1 Reduce: 1 Cumulative CPU: 3.16 sec HDFS Read: 100911 HDFS Write: 96 SUCCESS
|
||||
Total MapReduce CPU Time Spent: 1 minutes 14 seconds 430 msec
|
||||
OK
|
||||
Time taken: 56.439 seconds, Fetched: 10 row(s)
|
||||
hive> quit;
|
||||
-- aggregation with a moderate number of keys.;
|
||||
|
||||
status
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_609@mturlrep13_201309101853_355533849.txt
|
||||
hive> ;
|
||||
hive> quit;
|
||||
|
||||
times: 1
|
||||
query: SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;
|
||||
spawn hive
|
||||
|
||||
Logging initialized using configuration in file:/opt/hive/conf/hive-log4j.properties
|
||||
Hive history file=/tmp/kartavyy/hive_job_log_kartavyy_1183@mturlrep13_201309101853_289725544.txt
|
||||
hive> SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;;
|
||||
Total MapReduce jobs = 2
|
||||
Launching Job 1 out of 2
|
||||
Number of reduce tasks not specified. Estimated from input data size: 2
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0047
|
||||
Hadoop job information for Stage-1: number of mappers: 4; number of reducers: 2
|
||||
2013-09-10 18:53:55,838 Stage-1 map = 0%, reduce = 0%
|
||||
2013-09-10 18:54:02,865 Stage-1 map = 29%, reduce = 0%
|
||||
2013-09-10 18:54:05,876 Stage-1 map = 43%, reduce = 0%
|
||||
2013-09-10 18:54:08,894 Stage-1 map = 46%, reduce = 0%, Cumulative CPU 16.8 sec
|
||||
2013-09-10 18:54:09,901 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:10,909 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:11,915 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:12,921 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:13,927 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:14,932 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:15,938 Stage-1 map = 50%, reduce = 0%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:16,943 Stage-1 map = 80%, reduce = 17%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:17,949 Stage-1 map = 80%, reduce = 17%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:18,954 Stage-1 map = 80%, reduce = 17%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:19,959 Stage-1 map = 96%, reduce = 17%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:20,964 Stage-1 map = 96%, reduce = 17%, Cumulative CPU 34.85 sec
|
||||
2013-09-10 18:54:21,970 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:22,975 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:23,980 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:24,986 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:25,991 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:26,997 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:28,002 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:29,008 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:30,014 Stage-1 map = 100%, reduce = 17%, Cumulative CPU 67.35 sec
|
||||
2013-09-10 18:54:31,021 Stage-1 map = 100%, reduce = 58%, Cumulative CPU 74.39 sec
|
||||
2013-09-10 18:54:32,027 Stage-1 map = 100%, reduce = 96%, Cumulative CPU 74.39 sec
|
||||
2013-09-10 18:54:33,033 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 84.05 sec
|
||||
2013-09-10 18:54:34,038 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 84.05 sec
|
||||
2013-09-10 18:54:35,044 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 84.05 sec
|
||||
MapReduce Total cumulative CPU time: 1 minutes 24 seconds 50 msec
|
||||
Ended Job = job_201309101627_0047
|
||||
Launching Job 2 out of 2
|
||||
Number of reduce tasks determined at compile time: 1
|
||||
In order to change the average load for a reducer (in bytes):
|
||||
set hive.exec.reducers.bytes.per.reducer=<number>
|
||||
In order to limit the maximum number of reducers:
|
||||
set hive.exec.reducers.max=<number>
|
||||
In order to set a constant number of reducers:
|
||||
set mapred.reduce.tasks=<number>
|
||||
Kill Command = /usr/libexec/../bin/hadoop job -kill job_201309101627_0048
|
@ -1,109 +0,0 @@
SELECT count(*) FROM hits_10m;
SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
SELECT sum(UserID) FROM hits_10m;
SELECT count(DISTINCT UserID) FROM hits_10m;
SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
SELECT min(EventDate), max(EventDate) FROM hits_10m;

SELECT AdvEngineID, count(*) AS c FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY c DESC;
-- heavy filtering: almost nothing remains after the filter, but we still run an aggregation.;
SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
-- aggregation with a moderate number of keys.;
SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY c DESC LIMIT 10;
-- aggregation with a moderate number of keys and several aggregate functions.;
SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
-- heavy filtering on strings, then aggregation by a string key.;
SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
-- heavy filtering on strings, then aggregation by a pair of a number and a string.;
SELECT SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
-- moderate filtering on strings, then aggregation by a string key with a large number of keys.;
SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
-- slightly more complex aggregation.;
SELECT SearchEngineID, SearchPhrase, count(*) AS c FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
-- aggregation by a number and a string, a large number of keys.;
SELECT UserID, count(*) AS c FROM hits_10m GROUP BY UserID ORDER BY c DESC LIMIT 10;
-- aggregation over a very large number of keys; may run out of RAM.;
SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY c DESC LIMIT 10;
-- an even more complex aggregation.;
SELECT UserID, SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
-- the same, but without sorting.;
SELECT UserID, minute(EventTime), SearchPhrase, count(*) AS c FROM hits_10m GROUP BY UserID, minute(EventTime), SearchPhrase ORDER BY c DESC LIMIT 10;
-- an even more complex aggregation; not worth running on large tables.;
SELECT UserID FROM hits_10m WHERE UserID = 12345678901234567890;
-- heavy filtering on a UInt64 column.;
SELECT count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%';
-- filtering by a substring search within a string.;
SELECT SearchPhrase, MAX(URL), count(*) AS c FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
-- fetching large columns, with filtering by a string.;
SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
-- slightly larger columns.;
SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
-- a bad query: fetching all columns.;
SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
-- a big sort.;
SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
-- a big sort by strings.;
SELECT SearchPhrase, EventTime FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
-- a big sort by a tuple.;
SELECT CounterID, avg(length(URL)) AS l, count(*) AS c FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
-- computing average URL lengths for large counters.;
SELECT SUBSTRING(SUBSTRING(Referer, instr(Referer, '//') + 2), 1, if(0 < instr(SUBSTRING(Referer, instr(Referer, '//') + 2), '/') - 1, instr(SUBSTRING(Referer, instr(Referer, '//') + 2), '/' ) - 1, 0)), avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_100m WHERE Referer != '' GROUP BY SUBSTRING(SUBSTRING(Referer, instr(Referer, '//') + 2), 1, if(0 < instr(SUBSTRING(Referer, instr(Referer, '//') + 2), '/') - 1, instr(SUBSTRING(Referer, instr(Referer, '//') + 2), '/' ) - 1, 0)) HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
-- the same, but with a breakdown by domain.;
SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
-- lots of dumb aggregate functions.;
SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
-- complex aggregation; may run out of RAM on large tables.;
SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
-- aggregation by two fields that doesn't really aggregate anything; cannot be run on large tables.;
SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY c DESC LIMIT 10;
-- the same, but additionally without filtering.;
SELECT URL, count(*) AS c FROM hits_10m GROUP BY URL ORDER BY c DESC LIMIT 10;
-- aggregation by URL.;
SELECT 1, URL, count(*) AS c FROM hits_10m GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
-- aggregation by URL and a number.;
SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) AS c FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY c DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT DontCountHits != 0 AND NOT Refresh != 0 AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND IsLink != 0 AND NOT IsDownload != 0 GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, URL, count(*) as c, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') as src FROM hits_100m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, URL, if(SearchEngineID = 0 AND AdvEngineID = 0 , Referer, '') ORDER BY c DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-31') AND NOT Refresh != 0 AND NOT DontCountHits != 0 AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT unix_timestamp(EventTime) - SECOND(EventTime) AS m, count(*) FROM hits_10m WHERE CounterID = 62 AND EventDate >= TIMESTAMP('2013-07-01') AND EventDate <= TIMESTAMP('2013-07-02') AND NOT Refresh != 0 AND NOT DontCountHits != 0 GROUP BY unix_timestamp(EventTime) - SECOND(EventTime) ORDER BY m;
@ -1,2 +0,0 @@
cd /home/kartavyy/benchmark
./benchmark.sh -c hive/conf.sh -n $1 > hive/log/log_$1
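The wrapper above takes the dataset suffix as its only positional argument and redirects the whole transcript to hive/log/log_$1. A hypothetical invocation (the wrapper's own file name is not visible in this hunk, so run_hive.sh is only a placeholder):

    bash run_hive.sh 10m    # would write a transcript like the one above to hive/log/log_10m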
@ -1,4 +0,0 @@
CONF_DIR=/home/kartavyy/benchmark/mysql
expect_file=$CONF_DIR/expect.tcl
test_file=$CONF_DIR/queries.sql
etc_init_d_service=/etc/init.d/mysql
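conf.sh only sets variables; the benchmark.sh driver that reads them is not included in this hunk. A minimal sketch of how these variables are presumably consumed, assuming the driver restarts the server between queries and feeds each query line to the expect script:

    #!/usr/bin/env bash
    # Hypothetical driver loop; only the variable names come from conf.sh above.
    source /home/kartavyy/benchmark/mysql/conf.sh
    grep -v -e '^--' -e '^$' "$test_file" | while read -r query; do
        sudo "$etc_init_d_service" restart      # assumed: reset the server between queries
        expect -f "$expect_file" "$query"       # expect.tcl receives the query as its first argument
    done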
@ -1,7 +0,0 @@
create table hits_10m( WatchID BIGINT, JavaEnable TINYINT UNSIGNED, Title VARCHAR(1024), GoodEvent SMALLINT, EventTime DATETIME, EventDate DATE, CounterID INTEGER UNSIGNED, ClientIP INTEGER UNSIGNED, RegionID INTEGER UNSIGNED, UserID BIGINT, CounterClass TINYINT, OS SMALLINT, UserAgent SMALLINT, URL VARCHAR(6072), Referer VARCHAR(2048), Refresh TINYINT, RefererCategoryID SMALLINT UNSIGNED, RefererRegionID INTEGER UNSIGNED, URLCategoryID SMALLINT UNSIGNED, URLRegionID INTEGER UNSIGNED, ResolutionWidth SMALLINT UNSIGNED, ResolutionHeight SMALLINT UNSIGNED, ResolutionDepth TINYINT UNSIGNED, FlashMajor TINYINT UNSIGNED, FlashMinor TINYINT UNSIGNED, FlashMinor2 VARCHAR(256), NetMajor TINYINT UNSIGNED, NetMinor TINYINT UNSIGNED, UserAgentMajor SMALLINT UNSIGNED, UserAgentMinor CHAR(2), CookieEnable TINYINT UNSIGNED, JavascriptEnable TINYINT UNSIGNED, IsMobile TINYINT UNSIGNED, MobilePhone TINYINT UNSIGNED, MobilePhoneModel VARCHAR(80), Params VARCHAR(2048), IPNetworkID INT UNSIGNED, TraficSourceID SMALLINT, SearchEngineID SMALLINT UNSIGNED, SearchPhrase VARCHAR(1024), AdvEngineID TINYINT UNSIGNED, IsArtifical TINYINT UNSIGNED, WindowClientWidth SMALLINT UNSIGNED, WindowClientHeight SMALLINT UNSIGNED, ClientTimeZone INTEGER, ClientEventTime DATETIME, SilverlightVersion1 TINYINT UNSIGNED, SilverlightVersion2 TINYINT UNSIGNED, SilverlightVersion3 INT UNSIGNED, SilverlightVersion4 SMALLINT UNSIGNED, PageCharset VARCHAR(80), CodeVersion INT UNSIGNED, IsLink TINYINT UNSIGNED, IsDownload TINYINT UNSIGNED, IsNotBounce TINYINT UNSIGNED, FUniqID BIGINT, OriginalURL VARCHAR(6072), HID INT UNSIGNED, IsOldCounter TINYINT UNSIGNED, IsEvent TINYINT UNSIGNED, IsParameter TINYINT UNSIGNED, DontCountHits TINYINT UNSIGNED, WithHash TINYINT UNSIGNED, HitColor CHAR(1), LocalEventTime DATETIME, Age TINYINT UNSIGNED, Sex TINYINT UNSIGNED, Income TINYINT UNSIGNED, Interests SMALLINT UNSIGNED, Robotness TINYINT UNSIGNED, RemoteIP INT UNSIGNED, WindowName INT, OpenerName INT, HistoryLength SMALLINT, BrowserLanguage CHAR(2), BrowserCountry CHAR(2), SocialNetwork VARCHAR(128), SocialAction VARCHAR(128), HTTPError SMALLINT UNSIGNED, SendTiming INT UNSIGNED, DNSTiming INT UNSIGNED, ConnectTiming INTEGER UNSIGNED, ResponseStartTiming INTEGER UNSIGNED, ResponseEndTiming INTEGER UNSIGNED, FetchTiming INTEGER UNSIGNED, SocialSourceNetworkID TINYINT UNSIGNED, SocialSourcePage VARCHAR(128), ParamPrice BIGINT, ParamOrderID VARCHAR(80), ParamCurrency CHAR(3), ParamCurrencyID SMALLINT UNSIGNED, OpenstatServiceName VARCHAR(80), OpenstatCampaignID VARCHAR(80), OpenstatAdID VARCHAR(80), OpenstatSourceID VARCHAR(80), UTMSource VARCHAR(256), UTMMedium VARCHAR(256), UTMCampaign VARCHAR(256), UTMContent VARCHAR(256), UTMTerm VARCHAR(256), FromTag VARCHAR(256), HasGCLID TINYINT UNSIGNED, RefererHash BIGINT, URLHash BIGINT, CLID INTEGER UNSIGNED, UserIDHash BIGINT UNSIGNED) ENGINE=MYISAM;

CREATE INDEX hits_10m_ind on hits_10m (CounterID, EventDate, UserIDHash, EventTime) using BTREE;

load data infile '/opt/dump/dump_0.3/dump_hits_10m_meshed_utf8.tsv' into table hits_10m FIELDS TERMINATED BY '\t' ESCAPED BY '\\' ;

@ -1,23 +0,0 @@
#!/usr/bin/env bash
#!/bin/expect

# Set timeout
set timeout 600

# Get arguments
set query [lindex $argv 0]

spawn mysql -u root

expect "mysql>"
send "use hits\r"

expect "mysql>"

send "$query\r"

expect "mysql>"

send "quit\r"

expect eof
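Run standalone, the script above takes the SQL text as a single argument; because the first shebang line makes it a bash file, it is presumably always launched through expect explicitly, for example:

    expect -f mysql/expect.tcl "SELECT SQL_NO_CACHE count(*) FROM hits_10m;"

This prints the interactive mysql transcript, including the reported execution time, for that one query.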
@ -1,111 +0,0 @@
SELECT SQL_NO_CACHE count(*) FROM hits_10m;
SELECT SQL_NO_CACHE count(*) FROM hits_10m WHERE AdvEngineID != 0;
SELECT SQL_NO_CACHE sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
SELECT SQL_NO_CACHE sum(UserID) FROM hits_10m;
SELECT SQL_NO_CACHE count(DISTINCT UserID) FROM hits_10m;
SELECT SQL_NO_CACHE count(DISTINCT SearchPhrase) FROM hits_10m;
SELECT SQL_NO_CACHE min(EventDate), max(EventDate) FROM hits_10m;

SELECT SQL_NO_CACHE AdvEngineID, count(*) FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
-- heavy filtering: almost nothing remains after the filter, but we still run an aggregation.;
SELECT SQL_NO_CACHE RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
-- aggregation with a moderate number of keys.;
SELECT SQL_NO_CACHE RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY count(*) DESC LIMIT 10;
-- aggregation with a moderate number of keys and several aggregate functions.;
SELECT SQL_NO_CACHE MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
-- heavy filtering on strings, then aggregation by a string key.;
SELECT SQL_NO_CACHE MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
-- heavy filtering on strings, then aggregation by a pair of a number and a string.;
SELECT SQL_NO_CACHE SearchPhrase, count(*) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- moderate filtering on strings, then aggregation by a string key with a large number of keys.;
SELECT SQL_NO_CACHE SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
-- slightly more complex aggregation.;
SELECT SQL_NO_CACHE SearchEngineID, SearchPhrase, count(*) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- aggregation by a number and a string, a large number of keys.;
SELECT SQL_NO_CACHE UserID, count(*) FROM hits_10m GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
-- aggregation over a very large number of keys; may run out of RAM.;
SELECT SQL_NO_CACHE UserID, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- an even more complex aggregation.;
SELECT SQL_NO_CACHE UserID, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
-- the same, but without sorting.;
SELECT SQL_NO_CACHE UserID, Minute(EventTime) AS m, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- an even more complex aggregation; not worth running on large tables.;
SELECT SQL_NO_CACHE UserID FROM hits_10m WHERE UserID = 12345678901234567890;
-- heavy filtering on a UInt64 column.;
SELECT SQL_NO_CACHE count(*) FROM hits_10m WHERE URL LIKE '%metrika%';
-- filtering by a substring search within a string.;
SELECT SQL_NO_CACHE SearchPhrase, MAX(URL), count(*) FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- fetching large columns, with filtering by a string.;
SELECT SQL_NO_CACHE SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- slightly larger columns.;
SELECT SQL_NO_CACHE * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
-- a bad query: fetching all columns.;
SELECT SQL_NO_CACHE SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
-- a big sort.;
SELECT SQL_NO_CACHE SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
-- a big sort by strings.;
SELECT SQL_NO_CACHE SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
-- a big sort by a tuple.;
SELECT SQL_NO_CACHE CounterID, avg(length(URL)) AS l, count(*) FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
-- computing average URL lengths for large counters.;
SELECT SQL_NO_CACHE SUBSTRING(SUBSTRING(Referer, POSITION('//' IN Referer) + 2), 1, GREATEST(0, POSITION('/' IN SUBSTRING(Referer, POSITION('//' IN Referer) + 2)) - 1)) AS k, avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY k HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
-- the same, but with a breakdown by domain.;
SELECT SQL_NO_CACHE sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
-- lots of dumb aggregate functions.;
SELECT SQL_NO_CACHE SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY count(*) DESC LIMIT 10;
-- complex aggregation; may run out of RAM on large tables.;
SELECT SQL_NO_CACHE WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY count(*) DESC LIMIT 10;
-- aggregation by two fields that doesn't really aggregate anything; cannot be run on large tables.;
SELECT SQL_NO_CACHE WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY count(*) DESC LIMIT 10;
-- the same, but additionally without filtering.;
SELECT SQL_NO_CACHE URL, count(*) FROM hits_10m GROUP BY URL ORDER BY count(*) DESC LIMIT 10;
-- aggregation by URL.;
SELECT SQL_NO_CACHE 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY count(*) DESC LIMIT 10;
-- aggregation by URL and a number.;
SELECT SQL_NO_CACHE ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT SQL_NO_CACHE URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT SQL_NO_CACHE Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT SQL_NO_CACHE URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT SQL_NO_CACHE TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT SQL_NO_CACHE URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT SQL_NO_CACHE WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT SQL_NO_CACHE EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
@ -1,5 +0,0 @@
CONF_DIR=/home/kartavyy/benchmark/infobright

expect_file=$CONF_DIR/expect.tcl
test_file=$CONF_DIR/queries.sql
etc_init_d_service=/etc/init.d/mysqld-ib
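For orientation, a minimal driver sketch, assuming the variables above are saved as infobright.conf (the real file name and harness are not shown here); it restarts the Infobright service and feeds every query from queries.sql to the expect wrapper:

    #!/usr/bin/env bash
    # Hypothetical driver; infobright.conf is an assumed name for the settings above.
    source ./infobright.conf

    # Restart the Infobright MySQL service before the run.
    sudo "$etc_init_d_service" restart

    # Skip "--" comment lines and blank lines; run everything else through the expect wrapper.
    grep -v -e '^--' -e '^$' "$test_file" | while read -r query; do
        expect "$expect_file" "$query"
    done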
@ -1,111 +0,0 @@
create table hits_10m
(
    WatchID BIGINT,
    JavaEnable SMALLINT,
    Title VARCHAR(1400),
    GoodEvent SMALLINT,
    EventTime TIMESTAMP,
    EventDate DATE,
    CounterID BIGINT,
    ClientIP BIGINT,
    RegionID BIGINT,
    UserID BIGINT,
    CounterClass TINYINT,
    OS SMALLINT,
    UserAgent SMALLINT,
    URL VARCHAR(7800),
    Referer VARCHAR(3125),
    Refresh TINYINT,
    RefererCategoryID INT,
    RefererRegionID BIGINT,
    URLCategoryID INT,
    URLRegionID BIGINT,
    ResolutionWidth INT,
    ResolutionHeight INT,
    ResolutionDepth SMALLINT,
    FlashMajor SMALLINT,
    FlashMinor SMALLINT,
    FlashMinor2 VARCHAR(256),
    NetMajor SMALLINT,
    NetMinor SMALLINT,
    UserAgentMajor INT,
    UserAgentMinor CHAR(2),
    CookieEnable SMALLINT,
    JavascriptEnable SMALLINT,
    IsMobile SMALLINT,
    MobilePhone SMALLINT,
    MobilePhoneModel VARCHAR(80),
    Params VARCHAR(2925),
    IPNetworkID BIGINT,
    TraficSourceID SMALLINT,
    SearchEngineID INT,
    SearchPhrase VARCHAR(2008),
    AdvEngineID SMALLINT,
    IsArtifical SMALLINT,
    WindowClientWidth INT,
    WindowClientHeight INT,
    ClientTimeZone INTEGER,
    ClientEventTime TIMESTAMP,
    SilverlightVersion1 SMALLINT,
    SilverlightVersion2 SMALLINT,
    SilverlightVersion3 BIGINT,
    SilverlightVersion4 INT,
    PageCharset VARCHAR(80),
    CodeVersion BIGINT,
    IsLink SMALLINT,
    IsDownload SMALLINT,
    IsNotBounce SMALLINT,
    FUniqID BIGINT,
    OriginalURL VARCHAR(8181),
    HID BIGINT,
    IsOldCounter SMALLINT,
    IsEvent SMALLINT,
    IsParameter SMALLINT,
    DontCountHits SMALLINT,
    WithHash SMALLINT,
    HitColor CHAR(1),
    LocalEventTime TIMESTAMP,
    Age SMALLINT,
    Sex SMALLINT,
    Income SMALLINT,
    Interests INT,
    Robotness SMALLINT,
    RemoteIP BIGINT,
    WindowName INT,
    OpenerName INT,
    HistoryLength SMALLINT,
    BrowserLanguage CHAR(2),
    BrowserCountry CHAR(2),
    SocialNetwork VARCHAR(128),
    SocialAction VARCHAR(128),
    HTTPError INT,
    SendTiming BIGINT,
    DNSTiming BIGINT,
    ConnectTiming BIGINT,
    ResponseStartTiming BIGINT,
    ResponseEndTiming BIGINT,
    FetchTiming BIGINT,
    SocialSourceNetworkID SMALLINT,
    SocialSourcePage VARCHAR(256),
    ParamPrice BIGINT,
    ParamOrderID VARCHAR(80),
    ParamCurrency CHAR(3),
    ParamCurrencyID INT,
    OpenstatServiceName VARCHAR(80),
    OpenstatCampaignID VARCHAR(512),
    OpenstatAdID VARCHAR(80),
    OpenstatSourceID VARCHAR(256),
    UTMSource VARCHAR(256),
    UTMMedium VARCHAR(256),
    UTMCampaign VARCHAR(407),
    UTMContent VARCHAR(256),
    UTMTerm VARCHAR(437),
    FromTag VARCHAR(428),
    HasGCLID SMALLINT,
    RefererHash BIGINT,
    URLHash BIGINT,
    CLID BIGINT,
    UserIDHash BIGINT
);

LOAD DATA INFILE '/opt/dump/dump_0.3/dump_hits_10m_meshed_utf8.tsv' INTO TABLE hits_10m FIELDS TERMINATED BY '\t' ESCAPED BY '\\' ENCLOSED BY "NULL";
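After the load, a quick sanity check might look like the following (a hedged example reusing queries that appear elsewhere in these files, and assuming the mysql-ib client accepts the standard mysql options; it is not part of the original dump scripts):

    # Row count and date range of the freshly loaded table.
    mysql-ib -u root -D hits -e "SELECT count(*) FROM hits_10m;"
    mysql-ib -u root -D hits -e "SELECT min(EventDate), max(EventDate) FROM hits_10m;"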
@ -1,18 +0,0 @@
#!/usr/bin/env expect
# Runs a single query through the Infobright MySQL client and waits for it to finish.

# Set timeout
set timeout 600

# Get arguments
set query [lindex $argv 0]

spawn mysql-ib -u root -D hits

expect "mysql>"
send "$query\r"

expect "mysql>"
send "quit\r"

expect eof
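An invocation sketch, assuming the script is saved as expect.tcl as referenced in the conf above:

    # Run a single benchmark query through the Infobright client and wait for the prompt.
    expect expect.tcl "SELECT count(*) FROM hits_10m;"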
File diff suppressed because it is too large
@ -1,113 +0,0 @@
-- set GLOBAL max_length_for_sort_data = 8388608;

SELECT count(*) FROM hits_10m;
SELECT count(*) FROM hits_10m WHERE AdvEngineID != 0;
SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
SELECT sum(UserID) FROM hits_10m;
SELECT count(DISTINCT UserID) FROM hits_10m;
SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
SELECT min(EventDate), max(EventDate) FROM hits_10m;

SELECT AdvEngineID, count(*) FROM hits_10m WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
-- heavy filtering. Almost nothing is left after filtering, but we still do an aggregation.;

SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
-- aggregation, an average number of keys.;

SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY count(*) DESC LIMIT 10;
-- aggregation, an average number of keys, several aggregate functions.;

SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
-- heavy filtering on strings, then aggregation by strings.;

SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
-- heavy filtering on strings, then aggregation by a pair of a number and a string.;

SELECT SearchPhrase, count(*) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- moderate filtering on strings, then aggregation by strings, a large number of keys.;

SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
-- slightly more complex aggregation.;

SELECT SearchEngineID, SearchPhrase, count(*) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- aggregation by a number and a string, a large number of keys.;

SELECT UserID, count(*) FROM hits_10m GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
-- aggregation by a very large number of keys; may run out of RAM.;

SELECT UserID, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- an even more complex aggregation.;

SELECT UserID, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
-- the same, but without sorting.;

SELECT UserID, Minute(EventTime) AS m, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- an even more complex aggregation; not worth running on large tables.;

SELECT UserID FROM hits_10m WHERE UserID = 123456789;
-- heavy filtering on a UInt64 column.;

SELECT count(*) FROM hits_10m WHERE URL LIKE '%metrika%';
-- filtering by substring search within a string.;

SELECT SearchPhrase, MAX(URL), count(*) FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- pulling large columns, filtering by a string.;

SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
-- slightly larger columns.;

SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
-- a bad query: pulling all columns.;

SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10;
-- a big sort.;

SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10;
-- a big sort by strings.;

SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10;
-- a big sort by a tuple.;

SELECT CounterID, avg(length(URL)) AS l, count(*) FROM hits_10m WHERE URL != '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
-- computing average URL lengths for large counters.;

SELECT SUBSTRING(SUBSTRING(Referer, POSITION('//' IN Referer) + 2), 1, GREATEST(0, POSITION('/' IN SUBSTRING(Referer, POSITION('//' IN Referer) + 2)) - 1)) AS k, avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer != '' GROUP BY k HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
-- the same, but broken down by domain.;

SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
-- lots of trivial aggregate functions.;

SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY count(*) DESC LIMIT 10;
-- complex aggregation; there may not be enough RAM for large tables.;

SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY count(*) DESC LIMIT 10;
-- aggregation by two fields that does not actually aggregate anything. Cannot be executed on large tables.;

SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY count(*) DESC LIMIT 10;
-- the same, but additionally without filtering.;

SELECT URL, count(*) FROM hits_10m GROUP BY URL ORDER BY count(*) DESC LIMIT 10;
-- aggregation by URL.;

SELECT 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY count(*) DESC LIMIT 10;
-- aggregation by URL and a number.;

SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND URL != '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT DontCountHits AND NOT Refresh AND Title != '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-31') AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
SELECT EventTime - INTERVAL SECOND(EventTime) SECOND AS Minute, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= DATE('2013-07-01') AND EventDate <= DATE('2013-07-02') AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
@ -1,20 +0,0 @@
#!/usr/bin/env bash

QUERIES_FILE="queries.sql"
TABLE=$1
TRIES=3

cat "$QUERIES_FILE" | sed "s/{table}/${TABLE}/g" | while read -r query; do
    # Drop the page cache before each query so every run starts cold.
    sync
    echo 3 | sudo tee /proc/sys/vm/drop_caches >/dev/null

    echo -n "["
    for i in $(seq 1 $TRIES); do
        # Extract the wall-clock time from the client's "... rows in set (N.NN sec)" line.
        RES=$(mysql -u root -h 127.0.0.1 -P 3306 --database=test -t -vvv -e "$query" 2>&1 | grep ' set ' | grep -oP '\d+\.\d+')

        # If no timing could be parsed (e.g. the query failed), report null.
        [[ "$?" == "0" ]] && echo -n "$RES" || echo -n "null"
        [[ "$i" != $TRIES ]] && echo -n ", "
    done
    echo "],"
done
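A usage sketch, assuming the script above is saved as benchmark.sh and that queries.sql uses a {table} placeholder for the table name:

    ./benchmark.sh hits_10m
    # Each query produces one row of timings; the numbers below are illustrative only:
    # [0.25, 0.17, 0.16],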
Some files were not shown because too many files have changed in this diff.