Mirror of https://github.com/ClickHouse/ClickHouse.git
Commit 13a129bee7: Merge branch 'master' into remove-useless-line
.clang-tidy
@@ -1,6 +1,14 @@
 # To run clang-tidy from CMake, build ClickHouse with -DENABLE_CLANG_TIDY=1. To show all warnings, it is
 # recommended to pass "-k0" to Ninja.

 # Enable all checks + disable selected checks. Feel free to remove disabled checks from below list if
 # a) the new check is not controversial (this includes many checks in readability-* and google-*) or
 # b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).

 # TODO Let clang-tidy check headers in further directories
 # --> HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$'
 HeaderFilterRegex: '^.*/(base)/.*(h|hpp)$'

 Checks: '*,
     -abseil-*,
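Most of the C++ changes in this commit silence individual clang-tidy findings inline rather than disabling whole checks in the config above. A minimal illustrative sketch of the suppression comment syntax used throughout the diff (the class below is hypothetical, not from this commit):

```cpp
#include <string>

struct Path
{
    std::string value;
    // Implicit conversion is intentional here, so the finding is silenced
    // on this one line instead of disabling the check globally.
    Path(const std::string & v) : value(v) {} // NOLINT(google-explicit-constructor)
};

// NOLINTBEGIN(readability-identifier-naming)
using path_list = int; // legacy snake_case name kept on purpose
// NOLINTEND(readability-identifier-naming)

int main()
{
    Path p = std::string("/tmp");
    return p.value.empty();
}
```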
.github/workflows/backport_branches.yml (vendored, 4 changes)
@@ -437,7 +437,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 EOF
@@ -521,7 +521,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (thread)
+CHECK_NAME=Stress test (tsan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
.github/workflows/master.yml (vendored, 46 changes)
@@ -1287,7 +1287,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -1326,7 +1326,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -1365,7 +1365,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -1404,7 +1404,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -1443,7 +1443,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=2
@@ -1519,7 +1519,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -1558,7 +1558,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -1597,7 +1597,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=2
@@ -1830,7 +1830,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (address)
+CHECK_NAME=Stateful tests (asan)
 REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1867,7 +1867,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (thread)
+CHECK_NAME=Stateful tests (tsan)
 REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1904,7 +1904,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_msan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (memory)
+CHECK_NAME=Stateful tests (msan)
 REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -2018,7 +2018,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (address)
+CHECK_NAME=Stress test (asan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
@@ -2058,7 +2058,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (thread)
+CHECK_NAME=Stress test (tsan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
@@ -2094,7 +2094,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (memory)
+CHECK_NAME=Stress test (msan)
 REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
 EOF
 - name: Download json reports
@@ -2130,7 +2130,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_undefined
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (undefined)
+CHECK_NAME=Stress test (ubsan)
 REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
 EOF
 - name: Download json reports
@@ -2319,7 +2319,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=0
 RUN_BY_HASH_TOTAL=4
@@ -2357,7 +2357,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=1
 RUN_BY_HASH_TOTAL=4
@@ -2395,7 +2395,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=2
 RUN_BY_HASH_TOTAL=4
@@ -2433,7 +2433,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=3
 RUN_BY_HASH_TOTAL=4
@@ -2550,7 +2550,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (ASan)
+CHECK_NAME=AST fuzzer (asan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse
 EOF
 - name: Download json reports
@@ -2586,7 +2586,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (TSan)
+CHECK_NAME=AST fuzzer (tsan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse
 EOF
 - name: Download json reports
@@ -2622,7 +2622,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (UBSan)
+CHECK_NAME=AST fuzzer (ubsan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse
 EOF
 - name: Download json reports
@@ -2658,7 +2658,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (MSan)
+CHECK_NAME=AST fuzzer (msan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse
 EOF
 - name: Download json reports
.github/workflows/pull_request.yml (vendored, 48 changes)
@@ -1300,7 +1300,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -1339,7 +1339,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -1378,7 +1378,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -1417,7 +1417,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -1456,7 +1456,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=2
@@ -1532,7 +1532,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -1571,7 +1571,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -1610,7 +1610,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=2
@@ -1766,7 +1766,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_flaky_asan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests flaky check (address)
+CHECK_NAME=Stateless tests flaky check (asan)
 REPO_COPY=${{runner.temp}}/stateless_flaky_asan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1927,7 +1927,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (address)
+CHECK_NAME=Stateful tests (asan)
 REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1964,7 +1964,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (thread)
+CHECK_NAME=Stateful tests (tsan)
 REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -2001,7 +2001,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_msan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (memory)
+CHECK_NAME=Stateful tests (msan)
 REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -2115,7 +2115,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (address)
+CHECK_NAME=Stress test (asan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
@@ -2155,7 +2155,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (thread)
+CHECK_NAME=Stress test (tsan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
@@ -2191,7 +2191,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (memory)
+CHECK_NAME=Stress test (msan)
 REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
 EOF
 - name: Download json reports
@@ -2227,7 +2227,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_undefined
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (undefined)
+CHECK_NAME=Stress test (ubsan)
 REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
 EOF
 - name: Download json reports
@@ -2302,7 +2302,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (ASan)
+CHECK_NAME=AST fuzzer (asan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse
 EOF
 - name: Download json reports
@@ -2338,7 +2338,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (TSan)
+CHECK_NAME=AST fuzzer (tsan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse
 EOF
 - name: Download json reports
@@ -2374,7 +2374,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (UBSan)
+CHECK_NAME=AST fuzzer (ubsan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse
 EOF
 - name: Download json reports
@@ -2410,7 +2410,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=AST fuzzer (MSan)
+CHECK_NAME=AST fuzzer (msan)
 REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse
 EOF
 - name: Download json reports
@@ -2599,7 +2599,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=0
 RUN_BY_HASH_TOTAL=4
@@ -2637,7 +2637,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=1
 RUN_BY_HASH_TOTAL=4
@@ -2675,7 +2675,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=2
 RUN_BY_HASH_TOTAL=4
@@ -2713,7 +2713,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=3
 RUN_BY_HASH_TOTAL=4
.github/workflows/release.yml (vendored, 6 changes)
@@ -1,4 +1,4 @@
-name: ReleaseWorkflow
+name: PublishedReleaseCI
 # - Gets artifacts from S3
 # - Sends it to JFROG Artifactory
 # - Adds them to the release assets
@@ -15,7 +15,7 @@ jobs:
 - name: Set envs
   run: |
     cat >> "$GITHUB_ENV" << 'EOF'
-    JFROG_API_KEY=${{ secrets.JFROG_KEY_API_PACKAGES }}
+    JFROG_API_KEY=${{ secrets.JFROG_ARTIFACTORY_API_KEY }}
    TEMP_PATH=${{runner.temp}}/release_packages
     REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
     EOF
@@ -30,7 +30,7 @@ jobs:
     cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
     cd "$REPO_COPY"
     python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
-      --commit '${{ github.sha }}' --all
+      --commit '${{ github.sha }}' --artifactory-url "${{ secrets.JFROG_ARTIFACTORY_URL }}" --all
 - name: Upload packages to release assets
   uses: svenstaro/upload-release-action@v2
   with:
.github/workflows/release_branches.yml (vendored, 40 changes)
@@ -1,4 +1,4 @@
-name: ReleaseCI
+name: ReleaseBranchCI

 env:
   # Force the stdout and stderr streams to be unbuffered
@@ -591,7 +591,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -630,7 +630,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (address)
+CHECK_NAME=Stateless tests (asan)
 REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -669,7 +669,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -708,7 +708,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -747,7 +747,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (thread)
+CHECK_NAME=Stateless tests (tsan)
 REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=2
@@ -823,7 +823,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=0
@@ -862,7 +862,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=1
@@ -901,7 +901,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateless_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateless tests (memory)
+CHECK_NAME=Stateless tests (msan)
 REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
 KILL_TIMEOUT=10800
 RUN_BY_HASH_NUM=2
@@ -1134,7 +1134,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_debug
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (address)
+CHECK_NAME=Stateful tests (asan)
 REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1171,7 +1171,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (thread)
+CHECK_NAME=Stateful tests (tsan)
 REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1208,7 +1208,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stateful_msan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stateful tests (memory)
+CHECK_NAME=Stateful tests (msan)
 REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse
 KILL_TIMEOUT=3600
 EOF
@@ -1322,7 +1322,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (address)
+CHECK_NAME=Stress test (asan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
@@ -1362,7 +1362,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_thread
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (thread)
+CHECK_NAME=Stress test (tsan)
 REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
 EOF
 - name: Download json reports
@@ -1398,7 +1398,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_memory
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (memory)
+CHECK_NAME=Stress test (msan)
 REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
 EOF
 - name: Download json reports
@@ -1434,7 +1434,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/stress_undefined
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Stress test (undefined)
+CHECK_NAME=Stress test (ubsan)
 REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
 EOF
 - name: Download json reports
@@ -1623,7 +1623,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=0
 RUN_BY_HASH_TOTAL=4
@@ -1661,7 +1661,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=1
 RUN_BY_HASH_TOTAL=4
@@ -1699,7 +1699,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=2
 RUN_BY_HASH_TOTAL=4
@@ -1737,7 +1737,7 @@ jobs:
 cat >> "$GITHUB_ENV" << 'EOF'
 TEMP_PATH=${{runner.temp}}/integration_tests_tsan
 REPORTS_PATH=${{runner.temp}}/reports_dir
-CHECK_NAME=Integration tests (thread)
+CHECK_NAME=Integration tests (tsan)
 REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
 RUN_BY_HASH_NUM=3
 RUN_BY_HASH_TOTAL=4
@@ -199,8 +199,8 @@ endif ()
 option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")

 if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
-    # Can be lld or ld-lld.
-    if (LINKER_NAME MATCHES "lld$")
+    # Can be lld or ld-lld or lld-13 or /path/to/lld.
+    if (LINKER_NAME MATCHES "lld")
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
         set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index")
         message (STATUS "Adding .gdb-index via --gdb-index linker option.")
base/base/Decimal.h
@@ -52,15 +52,15 @@ struct Decimal
     constexpr Decimal(Decimal<T> &&) noexcept = default;
     constexpr Decimal(const Decimal<T> &) = default;

-    constexpr Decimal(const T & value_): value(value_) {}
+    constexpr Decimal(const T & value_): value(value_) {} // NOLINT(google-explicit-constructor)

     template <typename U>
-    constexpr Decimal(const Decimal<U> & x): value(x.value) {}
+    constexpr Decimal(const Decimal<U> & x): value(x.value) {} // NOLINT(google-explicit-constructor)

     constexpr Decimal<T> & operator=(Decimal<T> &&) noexcept = default;
     constexpr Decimal<T> & operator = (const Decimal<T> &) = default;

-    constexpr operator T () const { return value; }
+    constexpr operator T () const { return value; } // NOLINT(google-explicit-constructor)

     template <typename U>
     constexpr U convertTo() const
@@ -111,7 +111,7 @@ public:
     using Base::Base;
     using NativeType = Base::NativeType;

-    constexpr DateTime64(const Base & v): Base(v) {}
+    constexpr DateTime64(const Base & v): Base(v) {} // NOLINT(google-explicit-constructor)
 };
 }
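The NOLINT markers above keep Decimal's implicit conversions, which calling code relies on, while acknowledging the google-explicit-constructor finding instead of "fixing" it. A hedged, stand-alone sketch of that trade-off (a simplified type, not ClickHouse's actual Decimal):

```cpp
#include <cstdint>

template <typename T>
struct Decimal
{
    T value{};
    constexpr Decimal() = default;
    // Implicit by design: arithmetic code wraps and unwraps transparently.
    constexpr Decimal(const T & value_) : value(value_) {} // NOLINT(google-explicit-constructor)
};

int main()
{
    Decimal<int64_t> d = 42;          // compiles only because the ctor is implicit
    Decimal<int64_t> e{d.value + 1};
    return e.value == 43 ? 0 : 1;
}
```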
base/base/DecomposedFloat.h
@@ -36,14 +36,14 @@ struct DecomposedFloat
 {
     using Traits = FloatTraits<T>;

-    DecomposedFloat(T x)
+    explicit DecomposedFloat(T x)
     {
         memcpy(&x_uint, &x, sizeof(x));
     }

     typename Traits::UInt x_uint;

-    bool is_negative() const
+    bool isNegative() const
     {
         return x_uint >> (Traits::bits - 1);
     }
@@ -53,7 +53,7 @@ struct DecomposedFloat
     {
         return (exponent() == 0 && mantissa() == 0)
             ? 0
-            : (is_negative()
+            : (isNegative()
                 ? -1
                 : 1);
     }
@@ -63,7 +63,7 @@ struct DecomposedFloat
         return (x_uint >> (Traits::mantissa_bits)) & (((1ull << (Traits::exponent_bits + 1)) - 1) >> 1);
     }

-    int16_t normalized_exponent() const
+    int16_t normalizedExponent() const
     {
         return int16_t(exponent()) - ((1ull << (Traits::exponent_bits - 1)) - 1);
     }
@@ -73,20 +73,20 @@ struct DecomposedFloat
         return x_uint & ((1ull << Traits::mantissa_bits) - 1);
     }

-    int64_t mantissa_with_sign() const
+    int64_t mantissaWithSign() const
     {
-        return is_negative() ? -mantissa() : mantissa();
+        return isNegative() ? -mantissa() : mantissa();
     }

     /// NOTE Probably floating point instructions can be better.
-    bool is_integer_in_representable_range() const
+    bool isIntegerInRepresentableRange() const
     {
         return x_uint == 0
-            || (normalized_exponent() >= 0 /// The number is not less than one
+            || (normalizedExponent() >= 0 /// The number is not less than one
                 /// The number is inside the range where every integer has exact representation in float
-                && normalized_exponent() <= static_cast<int16_t>(Traits::mantissa_bits)
+                && normalizedExponent() <= static_cast<int16_t>(Traits::mantissa_bits)
                 /// After multiplying by 2^exp, the fractional part becomes zero, means the number is integer
-                && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0));
+                && ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
     }
@@ -102,15 +102,15 @@ struct DecomposedFloat
         return sign();

     /// Different signs
-    if (is_negative() && rhs > 0)
+    if (isNegative() && rhs > 0)
         return -1;
-    if (!is_negative() && rhs < 0)
+    if (!isNegative() && rhs < 0)
         return 1;

     /// Fractional number with magnitude less than one
-    if (normalized_exponent() < 0)
+    if (normalizedExponent() < 0)
     {
-        if (!is_negative())
+        if (!isNegative())
             return rhs > 0 ? -1 : 1;
         else
             return rhs >= 0 ? -1 : 1;
@@ -121,11 +121,11 @@ struct DecomposedFloat
     {
         if (rhs == std::numeric_limits<Int>::lowest())
         {
-            assert(is_negative());
+            assert(isNegative());

-            if (normalized_exponent() < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
+            if (normalizedExponent() < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
                 return 1;
-            if (normalized_exponent() > static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
+            if (normalizedExponent() > static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
                 return -1;

             if (mantissa() == 0)
@@ -136,44 +136,44 @@ struct DecomposedFloat
     }

     /// Too large number: abs(float) > abs(rhs). Also the case with infinities and NaN.
-    if (normalized_exponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
-        return is_negative() ? -1 : 1;
+    if (normalizedExponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
+        return isNegative() ? -1 : 1;

     using UInt = std::conditional_t<(sizeof(Int) > sizeof(typename Traits::UInt)), make_unsigned_t<Int>, typename Traits::UInt>;
     UInt uint_rhs = rhs < 0 ? -rhs : rhs;

     /// Smaller octave: abs(rhs) < abs(float)
     /// FYI, TIL: octave is also called "binade", https://en.wikipedia.org/wiki/Binade
-    if (uint_rhs < (static_cast<UInt>(1) << normalized_exponent()))
-        return is_negative() ? -1 : 1;
+    if (uint_rhs < (static_cast<UInt>(1) << normalizedExponent()))
+        return isNegative() ? -1 : 1;

     /// Larger octave: abs(rhs) > abs(float)
-    if (normalized_exponent() + 1 < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>)
-        && uint_rhs >= (static_cast<UInt>(1) << (normalized_exponent() + 1)))
-        return is_negative() ? 1 : -1;
+    if (normalizedExponent() + 1 < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>)
+        && uint_rhs >= (static_cast<UInt>(1) << (normalizedExponent() + 1)))
+        return isNegative() ? 1 : -1;

     /// The same octave
-    /// uint_rhs == 2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits)
+    /// uint_rhs == 2 ^ normalizedExponent + mantissa * 2 ^ (normalizedExponent - mantissa_bits)

-    bool large_and_always_integer = normalized_exponent() >= static_cast<int16_t>(Traits::mantissa_bits);
+    bool large_and_always_integer = normalizedExponent() >= static_cast<int16_t>(Traits::mantissa_bits);

     UInt a = large_and_always_integer
-        ? static_cast<UInt>(mantissa()) << (normalized_exponent() - Traits::mantissa_bits)
-        : static_cast<UInt>(mantissa()) >> (Traits::mantissa_bits - normalized_exponent());
+        ? static_cast<UInt>(mantissa()) << (normalizedExponent() - Traits::mantissa_bits)
+        : static_cast<UInt>(mantissa()) >> (Traits::mantissa_bits - normalizedExponent());

-    UInt b = uint_rhs - (static_cast<UInt>(1) << normalized_exponent());
+    UInt b = uint_rhs - (static_cast<UInt>(1) << normalizedExponent());

     if (a < b)
-        return is_negative() ? 1 : -1;
+        return isNegative() ? 1 : -1;
     if (a > b)
-        return is_negative() ? -1 : 1;
+        return isNegative() ? -1 : 1;

     /// Float has no fractional part means that the numbers are equal.
-    if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0)
+    if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
         return 0;
     else
         /// Float has fractional part means its abs value is larger.
-        return is_negative() ? -1 : 1;
+        return isNegative() ? -1 : 1;
 }
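DecomposedFloat's whole approach is to reinterpret the float's bits and compare integer fields exactly. A self-contained sketch of that decomposition for an IEEE-754 binary64 value (field widths assumed per the standard):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

int main()
{
    double x = -6.5;
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(x));              // well-defined, unlike a reinterpret_cast

    bool negative = bits >> 63;                      // sign bit
    uint16_t exponent = (bits >> 52) & 0x7FF;        // 11 biased exponent bits
    uint64_t mantissa = bits & ((1ULL << 52) - 1);   // 52 mantissa bits
    int normalized_exponent = int(exponent) - 1023;  // remove the IEEE-754 bias

    // -6.5 = -1.625 * 2^2, so this prints negative=1 exponent=2.
    std::printf("negative=%d exponent=%d mantissa=%llx\n",
                negative, normalized_exponent, (unsigned long long)mantissa);
}
```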
base/base/JSON.h
@@ -38,6 +38,7 @@
  */

+// NOLINTBEGIN(google-explicit-constructor)
 #ifdef __clang__
 # pragma clang diagnostic push
 # pragma clang diagnostic ignored "-Wdeprecated-dynamic-exception-spec"
@@ -46,6 +47,7 @@ POCO_DECLARE_EXCEPTION(Foundation_API, JSONException, Poco::Exception)
 #ifdef __clang__
 # pragma clang diagnostic pop
 #endif
+// NOLINTEND(google-explicit-constructor)

 class JSON
 {
@@ -61,7 +63,7 @@ public:
         checkInit();
     }

-    JSON(const std::string & s) : ptr_begin(s.data()), ptr_end(s.data() + s.size()), level(0)
+    explicit JSON(std::string_view s) : ptr_begin(s.data()), ptr_end(s.data() + s.size()), level(0)
     {
         checkInit();
     }
@@ -71,13 +73,7 @@ public:
         *this = rhs;
     }

-    JSON & operator=(const JSON & rhs)
-    {
-        ptr_begin = rhs.ptr_begin;
-        ptr_end = rhs.ptr_end;
-        level = rhs.level;
-        return *this;
-    }
+    JSON & operator=(const JSON & rhs) = default;

     const char * data() const { return ptr_begin; }
     const char * dataEnd() const { return ptr_end; }
@@ -169,7 +165,7 @@ public:

     /// Go to the next array element or the next name-value pair of an object.
     iterator & operator++();
-    iterator operator++(int);
+    iterator operator++(int); // NOLINT(cert-dcl21-cpp)

     /// Are there escape sequences in the string
     bool hasEscapes() const;
base/base/arithmeticOverflow.h
@@ -3,6 +3,7 @@
 #include <base/extended_types.h>
 #include <base/defines.h>

+// NOLINTBEGIN(google-runtime-int)

 namespace common
 {
@@ -206,3 +207,5 @@ namespace common
     return false;
 }
 }
+
+// NOLINTEND(google-runtime-int)
@@ -1,6 +1,6 @@
 #pragma once

-#include <string.h>
+#include <cstring>
 #include <algorithm>
 #include <type_traits>
base/base/defines.h
@@ -143,8 +143,8 @@

 /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
 /// Consider adding a comment before using these macros.
-# define TSA_SUPPRESS_WARNING_FOR_READ(x) [&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }()
-# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) [&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }()
+# define TSA_SUPPRESS_WARNING_FOR_READ(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> const auto & { return (x); }())
+# define TSA_SUPPRESS_WARNING_FOR_WRITE(x) ([&]() TSA_NO_THREAD_SAFETY_ANALYSIS -> auto & { return (x); }())

 /// This macro is useful when only one thread writes to a member
 /// and you want to read this member from the same thread without locking a mutex.
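The only change here is wrapping each expansion in outer parentheses: standard macro hygiene, so the immediately-invoked lambda is a single primary expression and composes safely inside larger expressions. A simplified sketch of the pattern with the TSA attribute omitted (the macro names below are stand-ins, not the real ones):

```cpp
#include <cassert>

// Immediately-invoked lambda returning a reference to the wrapped expression;
// the outer parentheses keep the whole expansion a single sub-expression.
#define SUPPRESS_FOR_READ(x) ([&]() -> const auto & { return (x); }())
#define SUPPRESS_FOR_WRITE(x) ([&]() -> auto & { return (x); }())

int main()
{
    int counter = 0;
    SUPPRESS_FOR_WRITE(counter) = 5;            // lambda returns int&, assignable
    int copy = SUPPRESS_FOR_READ(counter) + 1;  // usable inside a larger expression
    assert(copy == 6);
}
```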
base/base/extended_types.h
@@ -5,7 +5,6 @@
 #include <base/types.h>
 #include <base/wide_integer.h>

 using Int128 = wide::integer<128, signed>;
 using UInt128 = wide::integer<128, unsigned>;
 using Int256 = wide::integer<256, signed>;
@@ -18,7 +17,7 @@ static_assert(sizeof(UInt256) == 32);
 /// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior.
 /// So instead of using the std type_traits, we use our own version which allows extension.
 template <typename T>
-struct is_signed
+struct is_signed // NOLINT(readability-identifier-naming)
 {
     static constexpr bool value = std::is_signed_v<T>;
 };
@@ -30,7 +29,7 @@ template <typename T>
 inline constexpr bool is_signed_v = is_signed<T>::value;

 template <typename T>
-struct is_unsigned
+struct is_unsigned // NOLINT(readability-identifier-naming)
 {
     static constexpr bool value = std::is_unsigned_v<T>;
 };
@@ -51,7 +50,7 @@ template <class T> concept is_integer =
 template <class T> concept is_floating_point = std::is_floating_point_v<T>;

 template <typename T>
-struct is_arithmetic
+struct is_arithmetic // NOLINT(readability-identifier-naming)
 {
     static constexpr bool value = std::is_arithmetic_v<T>;
 };
@@ -66,9 +65,9 @@ template <typename T>
 inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;

 template <typename T>
-struct make_unsigned
+struct make_unsigned // NOLINT(readability-identifier-naming)
 {
-    typedef std::make_unsigned_t<T> type;
+    using type = std::make_unsigned_t<T>;
 };

 template <> struct make_unsigned<Int128> { using type = UInt128; };
@@ -79,9 +78,9 @@ template <> struct make_unsigned<UInt256> { using type = UInt256; };
 template <typename T> using make_unsigned_t = typename make_unsigned<T>::type;

 template <typename T>
-struct make_signed
+struct make_signed // NOLINT(readability-identifier-naming)
 {
-    typedef std::make_signed_t<T> type;
+    using type = std::make_signed_t<T>;
 };

 template <> struct make_signed<Int128> { using type = Int128; };
@@ -92,7 +91,7 @@ template <> struct make_signed<UInt256> { using type = Int256; };
 template <typename T> using make_signed_t = typename make_signed<T>::type;

 template <typename T>
-struct is_big_int
+struct is_big_int // NOLINT(readability-identifier-naming)
 {
     static constexpr bool value = false;
 };
@@ -104,4 +103,3 @@ template <> struct is_big_int<UInt256> { static constexpr bool value = true; };

 template <typename T>
 inline constexpr bool is_big_int_v = is_big_int<T>::value;
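The comment in the hunk above carries the key point: specializing std:: type traits for user-defined types is undefined behavior, whereas a project-local trait template can be extended freely. A hedged sketch of that extension mechanism, using the GCC/Clang builtin __int128 as a stand-in for wide::integer:

```cpp
#include <type_traits>

using Int128 = __int128;             // assumption: stand-in for wide::integer<128, signed>
using UInt128 = unsigned __int128;

template <typename T>
struct make_unsigned { using type = std::make_unsigned_t<T>; };     // default: defer to std

template <> struct make_unsigned<Int128> { using type = UInt128; }; // legal extension point

template <typename T> using make_unsigned_t = typename make_unsigned<T>::type;

static_assert(std::is_same_v<make_unsigned_t<int>, unsigned int>);
static_assert(std::is_same_v<make_unsigned_t<Int128>, UInt128>);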
base/base/find_symbols.h
@@ -15,7 +15,7 @@
  *
  * Allow to search for next character from the set of 'symbols...' in a string.
  * It is similar to 'strpbrk', 'strcspn' (and 'strchr', 'memchr' in the case of one symbol and '\0'),
- * but with the following differencies:
+ * but with the following differences:
  * - works with any memory ranges, including containing zero bytes;
  * - doesn't require terminating zero byte: end of memory range is passed explicitly;
  * - if not found, returns pointer to end instead of nullptr;
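A usage sketch of the API this comment describes, assuming the find_first_symbols<symbols...>(begin, end) shape (treat the exact signature as an assumption rather than a quote from the header):

```cpp
#include <base/find_symbols.h>
#include <string_view>
#include <cstdio>

int main()
{
    std::string_view s = "key=value,next";
    // Find the first '=' or ','; no terminating zero byte is required,
    // and a miss returns the end pointer instead of nullptr.
    const char * pos = find_first_symbols<'=', ','>(s.data(), s.data() + s.size());
    if (pos != s.data() + s.size())
        std::printf("separator at offset %zd\n", pos - s.data());
}
```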
base/base/iostream_debug_helpers.h
@@ -120,6 +120,7 @@ Out & dumpDispatchPriorities(Out & out, T && x, std::decay_t<decltype(dumpImpl<p
 {
     return dumpImpl<priority>(out, x);
 }

+// NOLINTNEXTLINE(google-explicit-constructor)
 struct LowPriority { LowPriority(void *) {} };

 template <int priority, typename Out, typename T>
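LowPriority's implicit constructor exists precisely for overload ranking, which is why the check is suppressed rather than the constructor made explicit: a literal 0 argument prefers an int parameter (exact match) and falls back to LowPriority only when the better overload is removed by SFINAE. A self-contained sketch of that dispatch trick:

```cpp
#include <iostream>

struct LowPriority { LowPriority(void *) {} }; // NOLINT(google-explicit-constructor)

// Preferred overload: exact match for the `int` tag, viable only when
// `out << x` compiles (expression SFINAE).
template <typename T>
auto dump(std::ostream & out, const T & x, int) -> decltype(out << x, void())
{
    out << x;
}

// Fallback: 0 converts to LowPriority via the implicit ctor, so this is
// chosen only when the overload above drops out.
template <typename T>
void dump(std::ostream & out, const T &, LowPriority)
{
    out << "<unprintable>";
}

struct Opaque {};

int main()
{
    dump(std::cout, 42, 0);       // prints 42 via operator<<
    dump(std::cout, Opaque{}, 0); // prints <unprintable>
}
```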
base/base/itoa.h
@@ -91,10 +91,10 @@ template <size_t N>
 using DivisionBy10PowN = typename SelectType
 <
     N,
-    Division<uint8_t, 0, 205U, 11>,  /// divide by 10
-    Division<uint16_t, 1, 41943U, 22>,  /// divide by 100
-    Division<uint32_t, 0, 3518437209U, 45>,  /// divide by 10000
-    Division<uint64_t, 0, 12379400392853802749ULL, 90>  /// divide by 100000000
+    Division<uint8_t, false, 205U, 11>,  /// divide by 10
+    Division<uint16_t, true, 41943U, 22>,  /// divide by 100
+    Division<uint32_t, false, 3518437209U, 45>,  /// divide by 10000
+    Division<uint64_t, false, 12379400392853802749ULL, 90>  /// divide by 100000000
 >::Result;

 template <size_t N>
@@ -352,7 +352,7 @@ static inline char * writeUIntText(T x, char * p)
     static_assert(is_unsigned_v<T>);

     int len = digits10(x);
-    auto pp = p + len;
+    auto * pp = p + len;
     while (x >= 100)
     {
         const auto i = x % 100;
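Those Division<> parameters encode division by a constant as a multiply plus a shift, which is cheaper than hardware division. A small verification sketch for the first entry: (x * 205) >> 11 equals x / 10 for every uint8_t value, because 205 / 2^11 approximates 1/10 closely enough over that range.

```cpp
#include <cassert>

int main()
{
    // Exhaustive check over the uint8_t domain the table entry covers.
    for (unsigned x = 0; x <= 255; ++x)
        assert(((x * 205U) >> 11) == x / 10);
}
```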
base/base/scope_guard.h
@@ -5,13 +5,13 @@
 #include <utility>

 template <class F>
-class [[nodiscard]] basic_scope_guard
+class [[nodiscard]] BasicScopeGuard
 {
 public:
-    constexpr basic_scope_guard() = default;
-    constexpr basic_scope_guard(basic_scope_guard && src) : function{src.release()} {}
+    constexpr BasicScopeGuard() = default;
+    constexpr BasicScopeGuard(BasicScopeGuard && src) : function{src.release()} {} // NOLINT(hicpp-noexcept-move, performance-noexcept-move-constructor)

-    constexpr basic_scope_guard & operator=(basic_scope_guard && src)
+    constexpr BasicScopeGuard & operator=(BasicScopeGuard && src) // NOLINT(hicpp-noexcept-move, performance-noexcept-move-constructor)
     {
         if (this != &src)
         {
@@ -23,11 +23,11 @@ public:

     template <typename G>
     requires std::is_convertible_v<G, F>
-    constexpr basic_scope_guard(basic_scope_guard<G> && src) : function{src.release()} {}
+    constexpr BasicScopeGuard(BasicScopeGuard<G> && src) : function{src.release()} {} // NOLINT(google-explicit-constructor)

     template <typename G>
     requires std::is_convertible_v<G, F>
-    constexpr basic_scope_guard & operator=(basic_scope_guard<G> && src)
+    constexpr BasicScopeGuard & operator=(BasicScopeGuard<G> && src)
     {
         if (this != &src)
         {
@@ -39,13 +39,13 @@ public:

     template <typename G>
     requires std::is_convertible_v<G, F>
-    constexpr basic_scope_guard(const G & function_) : function{function_} {}
+    constexpr BasicScopeGuard(const G & function_) : function{function_} {} // NOLINT(google-explicit-constructor)

     template <typename G>
     requires std::is_convertible_v<G, F>
-    constexpr basic_scope_guard(G && function_) : function{std::move(function_)} {}
+    constexpr BasicScopeGuard(G && function_) : function{std::move(function_)} {} // NOLINT(google-explicit-constructor, bugprone-forwarding-reference-overload, bugprone-move-forwarding-reference)

-    ~basic_scope_guard() { invoke(); }
+    ~BasicScopeGuard() { invoke(); }

     static constexpr bool is_nullable = std::is_constructible_v<bool, F>;

@@ -70,7 +70,7 @@ public:

     template <typename G>
     requires std::is_convertible_v<G, F>
-    basic_scope_guard<F> & join(basic_scope_guard<G> && other)
+    BasicScopeGuard<F> & join(BasicScopeGuard<G> && other)
     {
         if (other.function)
         {
@@ -102,14 +102,13 @@ private:
     F function = F{};
 };

-using scope_guard = basic_scope_guard<std::function<void(void)>>;
+using scope_guard = BasicScopeGuard<std::function<void(void)>>;

 template <class F>
-inline basic_scope_guard<F> make_scope_guard(F && function_) { return std::forward<F>(function_); }
+inline BasicScopeGuard<F> make_scope_guard(F && function_) { return std::forward<F>(function_); }

 #define SCOPE_EXIT_CONCAT(n, ...) \
     const auto scope_exit##n = make_scope_guard([&] { __VA_ARGS__; })
 #define SCOPE_EXIT_FWD(n, ...) SCOPE_EXIT_CONCAT(n, __VA_ARGS__)
 #define SCOPE_EXIT(...) SCOPE_EXIT_FWD(__LINE__, __VA_ARGS__)
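A usage sketch of the scope-guard idiom defined above. The real header provides SCOPE_EXIT and make_scope_guard; a tiny local equivalent is used here so the example stands alone:

```cpp
#include <cstdio>
#include <utility>

template <class F>
class [[nodiscard]] ScopeGuard
{
public:
    explicit ScopeGuard(F f) : function(std::move(f)) {}
    ~ScopeGuard() { function(); }  // runs on every exit path, including exceptions
private:
    F function;
};

int main()
{
    std::FILE * f = std::fopen("/tmp/example.txt", "w");
    if (!f)
        return 1;
    ScopeGuard close_file([&] { std::fclose(f); }); // cleanup tied to this scope
    std::fputs("hello\n", f);
    return 0;  // close_file's destructor closes the file here
}
```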
base/base/sort.h
@@ -14,7 +14,7 @@ template <typename Comparator>
 class DebugLessComparator
 {
 public:
-    constexpr DebugLessComparator(Comparator & cmp_)
+    constexpr DebugLessComparator(Comparator & cmp_) // NOLINT(google-explicit-constructor)
         : cmp(cmp_)
     {}
base/base/strong_typedef.h
@@ -34,8 +34,10 @@ public:
     template <class Enable = typename std::is_move_assignable<T>::type>
     Self & operator=(T && rhs) { t = std::move(rhs); return *this;}

+    // NOLINTBEGIN(google-explicit-constructor)
     operator const T & () const { return t; }
     operator T & () { return t; }
+    // NOLINTEND(google-explicit-constructor)

     bool operator==(const Self & rhs) const { return t == rhs.t; }
     bool operator<(const Self & rhs) const { return t < rhs.t; }
@@ -58,7 +60,10 @@ namespace std
     };
 }

+// NOLINTBEGIN(bugprone-macro-parentheses)
+
 #define STRONG_TYPEDEF(T, D) \
     struct D ## Tag {}; \
     using D = StrongTypedef<T, D ## Tag>; \
+// NOLINTEND(bugprone-macro-parentheses)
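bugprone-macro-parentheses has to be suppressed here because the macro arguments are a type and an identifier, not expressions: `struct (D) ## Tag` would not compile, so the usual fix of parenthesizing arguments is impossible. A stand-alone sketch with a simplified StrongTypedef (not the real template):

```cpp
#include <cstdint>

template <typename T, typename Tag>
struct StrongTypedef
{
    T t{};
    explicit StrongTypedef(const T & t_) : t(t_) {}
};

// Token-pasting D forbids parenthesizing it, hence the NOLINT in the header.
#define STRONG_TYPEDEF(T, D) \
    struct D ## Tag {}; \
    using D = StrongTypedef<T, D ## Tag>;

STRONG_TYPEDEF(uint64_t, UserId)   // a distinct type, not just an alias
STRONG_TYPEDEF(uint64_t, OrderId)

int main()
{
    UserId user{1};
    OrderId order{1};
    // user = order;  // would not compile: the tags make the types distinct
    return user.t == order.t ? 0 : 1;
}
```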
base/base/unit.h
@@ -10,9 +10,11 @@ constexpr size_t GiB = 1024 * MiB;
 # pragma clang diagnostic ignored "-Wreserved-identifier"
 #endif

+// NOLINTBEGIN(google-runtime-int)
 constexpr size_t operator"" _KiB(unsigned long long val) { return val * KiB; }
 constexpr size_t operator"" _MiB(unsigned long long val) { return val * MiB; }
 constexpr size_t operator"" _GiB(unsigned long long val) { return val * GiB; }
+// NOLINTEND(google-runtime-int)

 #ifdef HAS_RESERVED_IDENTIFIER
 # pragma clang diagnostic pop
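These literal operators must take unsigned long long because that is the parameter type the standard mandates for integer literal operators, which is exactly why google-runtime-int is silenced around them. A compile-time usage sketch (local copies of the constants, so it stands alone):

```cpp
#include <cstddef>

constexpr size_t KiB = 1024;
constexpr size_t MiB = 1024 * KiB;

// `unsigned long long` is required by the standard for integer literal operators.
constexpr size_t operator"" _KiB(unsigned long long val) { return val * KiB; }
constexpr size_t operator"" _MiB(unsigned long long val) { return val * MiB; }

static_assert(8_KiB == 8192);
static_assert(1_MiB == 1048576);
```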
base/base/wide_integer_to_string.h
@@ -51,8 +51,8 @@ struct fmt::formatter<wide::integer<Bits, Signed>>
 {
     constexpr auto parse(format_parse_context & ctx)
     {
-        auto it = ctx.begin();
-        auto end = ctx.end();
+        const auto * it = ctx.begin();
+        const auto * end = ctx.end();

         /// Only support {}.
         if (it != end && *it != '}')
base/glibc-compatibility/memcpy/memcpy.h
@@ -63,7 +63,7 @@
  * Very large size of memcpy typically indicates suboptimal (not cache friendly) algorithms in code or unrealistic scenarios,
  * so we don't pay attention to using non-temporary stores.
  *
- * On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most benefitial,
+ * On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
  * even comparing to non-temporary aligned unrolled stores even with the most wide registers.
  *
  * memcpy can be written in asm, C or C++. The latter can also use inline asm.
base/glibc-compatibility/musl/dup3.c (new file, 22 lines)
@@ -0,0 +1,22 @@
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include "syscall.h"
+
+int dup3(int old, int new, int flags)
+{
+    int r;
+#ifdef SYS_dup2
+    if (old==new) return __syscall_ret(-EINVAL);
+    if (flags & O_CLOEXEC) {
+        while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
+        if (r!=-ENOSYS) return __syscall_ret(r);
+    }
+    while ((r=__syscall(SYS_dup2, old, new))==-EBUSY);
+    if (flags & O_CLOEXEC) __syscall(SYS_fcntl, new, F_SETFD, FD_CLOEXEC);
+#else
+    while ((r=__syscall(SYS_dup3, old, new, flags))==-EBUSY);
+#endif
+    return __syscall_ret(r);
+}
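dup3's value over dup2 followed by fcntl is that O_CLOEXEC is set atomically, closing the fork/exec window in which a duplicated descriptor could leak into a child process; the SYS_dup2 fallback above can only approximate that. A hedged usage sketch (the target slot 100 is arbitrary, chosen for the example):

```cpp
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int duplicate_with_cloexec(int source_fd)
{
    // One syscall: duplicate source_fd onto slot 100 with close-on-exec set,
    // with no window in which a concurrent fork+exec could inherit the fd.
    return dup3(source_fd, 100, O_CLOEXEC);
}
```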
base/glibc-compatibility/musl/inotify.c (new file, 26 lines)
@@ -0,0 +1,26 @@
+#include <sys/inotify.h>
+#include <errno.h>
+#include "syscall.h"
+
+int inotify_init()
+{
+    return inotify_init1(0);
+}
+int inotify_init1(int flags)
+{
+    int r = __syscall(SYS_inotify_init1, flags);
+#ifdef SYS_inotify_init
+    if (r==-ENOSYS && !flags) r = __syscall(SYS_inotify_init);
+#endif
+    return __syscall_ret(r);
+}
+
+int inotify_add_watch(int fd, const char *pathname, uint32_t mask)
+{
+    return syscall(SYS_inotify_add_watch, fd, pathname, mask);
+}
+
+int inotify_rm_watch(int fd, int wd)
+{
+    return syscall(SYS_inotify_rm_watch, fd, wd);
+}
base/pcg-random/pcg_extras.hpp
@@ -49,6 +49,8 @@
 #include <cxxabi.h>
 #endif

+// NOLINTBEGIN(readability-identifier-naming, modernize-use-using, bugprone-macro-parentheses, google-explicit-constructor)
+
 /*
  * Abstractions for compiler-specific directives
  */
@@ -90,8 +92,6 @@
 #define PCG_EMULATED_128BIT_MATH 1
 #endif

-// NOLINTBEGIN(*)
-
 namespace pcg_extras {

 /*
@@ -553,6 +553,6 @@ std::ostream& operator<<(std::ostream& out, printable_typename<T>) {

 } // namespace pcg_extras

-// NOLINTEND(*)
+// NOLINTEND(readability-identifier-naming, modernize-use-using, bugprone-macro-parentheses, google-explicit-constructor)

 #endif // PCG_EXTRAS_HPP_INCLUDED
@@ -101,7 +101,7 @@
 #endif

 /*
- * The pcg_extras namespace contains some support code that is likley to
+ * The pcg_extras namespace contains some support code that is likely to
  * be useful for a variety of RNGs, including:
  * - 128-bit int support for platforms where it isn't available natively
  * - bit twiddling operations
base/pcg-random/pcg_uint128.hpp
@@ -22,7 +22,7 @@
 /*
  * This code provides a a C++ class that can provide 128-bit (or higher)
  * integers. To produce 2K-bit integers, it uses two K-bit integers,
- * placed in a union that allowes the code to also see them as four K/2 bit
+ * placed in a union that allows the code to also see them as four K/2 bit
  * integers (and access them either directly name, or by index).
  *
  * It may seem like we're reinventing the wheel here, because several
contrib/NuRaft (vendored submodule)
@@ -1 +1 @@
-Subproject commit 33f60f961d4914441b684af43e9e5535078ba54b
+Subproject commit bdba298189e29995892de78dcecf64d127444e81
contrib/libuv (vendored submodule)
@@ -1 +1 @@
-Subproject commit 95081e7c16c9857babe6d4e2bc1c779198ea89ae
+Subproject commit 3a85b2eb3d83f369b8a8cafd329d7e9dc28f60cf
@@ -15,6 +15,7 @@ set(uv_sources
     src/inet.c
     src/random.c
     src/strscpy.c
+    src/strtok.c
     src/threadpool.c
     src/timer.c
     src/uv-common.c
@@ -75,13 +76,13 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
     list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
     list(APPEND uv_libraries rt)
     list(APPEND uv_sources
+        src/unix/epoll.c
         src/unix/linux-core.c
         src/unix/linux-inotify.c
         src/unix/linux-syscalls.c
         src/unix/procfs-exepath.c
         src/unix/random-getrandom.c
-        src/unix/random-sysctl-linux.c
-        src/unix/sysinfo-loadavg.c)
+        src/unix/random-sysctl-linux.c)
 endif()

 if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
@@ -111,6 +112,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS/390")
         src/unix/pthread-fixes.c
         src/unix/pthread-barrier.c
         src/unix/os390.c
+        src/unix/os390-proctitle.c
         src/unix/os390-syscalls.c)
 endif()
docs/changelogs/v22.3.12.19-lts.md (new file, 25 lines)
@@ -0,0 +1,25 @@
+---
+sidebar_position: 1
+sidebar_label: 2022
+---
+
+# 2022 Changelog
+
+### ClickHouse release v22.3.12.19-lts (4a08f8a073b) FIXME as compared to v22.3.11.12-lts (137c5f72657)
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#40695](https://github.com/ClickHouse/ClickHouse/issues/40695): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in official stable or prestable release)
+
+* Backported in [#40160](https://github.com/ClickHouse/ClickHouse/issues/40160): fix HashMethodOneNumber get wrong key value when column is const. [#40020](https://github.com/ClickHouse/ClickHouse/pull/40020) ([Duc Canh Le](https://github.com/canhld94)).
+* Backported in [#40122](https://github.com/ClickHouse/ClickHouse/issues/40122): Fix bug in collectFilesToSkip() by adding correct file extension(.idx or idx2) for indexes to be recalculated, avoid wrong hard links. Fixed [#39896](https://github.com/ClickHouse/ClickHouse/issues/39896). [#40095](https://github.com/ClickHouse/ClickHouse/pull/40095) ([Jianmei Zhang](https://github.com/zhangjmruc)).
+* Backported in [#40207](https://github.com/ClickHouse/ClickHouse/issues/40207): Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Backported in [#40270](https://github.com/ClickHouse/ClickHouse/issues/40270): Fix possible segfault in CapnProto input format. This bug was found and send through ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* fix heap buffer overflow by limiting http chunk size [#40292](https://github.com/ClickHouse/ClickHouse/pull/40292) ([Sema Checherinda](https://github.com/CheSema)).
+* Reduce changelog verbosity in CI [#40360](https://github.com/ClickHouse/ClickHouse/pull/40360) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backport the upstream clickhouse_helper.py [#40490](https://github.com/ClickHouse/ClickHouse/pull/40490) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
docs/changelogs/v22.6.7.7-stable.md (new file, 17 lines)
@@ -0,0 +1,17 @@
+---
+sidebar_position: 1
+sidebar_label: 2022
+---
+
+# 2022 Changelog
+
+### ClickHouse release v22.6.7.7-stable (8eae2af3b9a) FIXME as compared to v22.6.6.16-stable (d2a33ebc822)
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#40692](https://github.com/ClickHouse/ClickHouse/issues/40692): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in official stable or prestable release)
+
+* Backported in [#40531](https://github.com/ClickHouse/ClickHouse/issues/40531): Proxy resolver stop on first successful request to endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
+* Backported in [#40623](https://github.com/ClickHouse/ClickHouse/issues/40623): Fix potential dataloss due to a bug in AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). Bug can be triggered only when clickhouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).
23
docs/changelogs/v22.7.5.13-stable.md
Normal file
@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.7.5.13-stable (6f48d2d1f59) FIXME as compared to v22.7.4.16-stable (0b9272f8fdc)

#### Build/Testing/Packaging Improvement
* Backported in [#40693](https://github.com/ClickHouse/ClickHouse/issues/40693): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40542](https://github.com/ClickHouse/ClickHouse/issues/40542): Fix potential deadlock in WriteBufferFromS3 during task scheduling failure. [#40070](https://github.com/ClickHouse/ClickHouse/pull/40070) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40450](https://github.com/ClickHouse/ClickHouse/issues/40450): Fix rare bug with column TTL for MergeTree engines family: in case of repeated vertical merge the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40532](https://github.com/ClickHouse/ClickHouse/issues/40532): Proxy resolver stops on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40624](https://github.com/ClickHouse/ClickHouse/issues/40624): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).

25
docs/changelogs/v22.8.3.13-lts.md
Normal file
@ -0,0 +1,25 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.3.13-lts (6a15b73faea) FIXME as compared to v22.8.2.11-lts (b4ed6d744ff)

#### Improvement
* Backported in [#40550](https://github.com/ClickHouse/ClickHouse/issues/40550): Improve schema inference cache, respect format settings that can change the schema. [#40414](https://github.com/ClickHouse/ClickHouse/pull/40414) ([Kruglov Pavel](https://github.com/Avogar)).

#### Build/Testing/Packaging Improvement
* Backported in [#40694](https://github.com/ClickHouse/ClickHouse/issues/40694): Fix TGZ packages. [#40681](https://github.com/ClickHouse/ClickHouse/pull/40681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40451](https://github.com/ClickHouse/ClickHouse/issues/40451): Fix rare bug with column TTL for MergeTree engines family: in case of repeated vertical merge the error `Cannot unlink file ColumnName.bin ... No such file or directory.` could happen. [#40346](https://github.com/ClickHouse/ClickHouse/pull/40346) ([alesapin](https://github.com/alesapin)).
* Backported in [#40533](https://github.com/ClickHouse/ClickHouse/issues/40533): Proxy resolver stops on the first successful request to the endpoint. [#40353](https://github.com/ClickHouse/ClickHouse/pull/40353) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#40625](https://github.com/ClickHouse/ClickHouse/issues/40625): Fix potential data loss due to a bug in the AWS SDK (https://github.com/aws/aws-sdk-cpp/issues/658). The bug can be triggered only when ClickHouse is used over S3. [#40506](https://github.com/ClickHouse/ClickHouse/pull/40506) ([alesapin](https://github.com/alesapin)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* DNSResolver remove AI_V4MAPPED, AI_ALL hints [#40502](https://github.com/ClickHouse/ClickHouse/pull/40502) ([Maksim Kita](https://github.com/kitaisreal)).

18
docs/changelogs/v22.8.4.7-lts.md
Normal file
@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2022
---

# 2022 Changelog

### ClickHouse release v22.8.4.7-lts (baad27bcd2f) FIXME as compared to v22.8.3.13-lts (6a15b73faea)

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#40760](https://github.com/ClickHouse/ClickHouse/issues/40760): Fix possible error 'Decimal math overflow' while parsing DateTime64. [#40546](https://github.com/ClickHouse/ClickHouse/pull/40546) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#40811](https://github.com/ClickHouse/ClickHouse/issues/40811): In [#40595](https://github.com/ClickHouse/ClickHouse/issues/40595) it was reported that the `host_regexp` functionality was not working properly with name-to-address resolution in `/etc/hosts`. It's fixed. [#40769](https://github.com/ClickHouse/ClickHouse/pull/40769) ([Arthur Passos](https://github.com/arthurpassos)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Migrate artifactory [#40831](https://github.com/ClickHouse/ClickHouse/pull/40831) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

@ -99,7 +99,7 @@ mysql> select * from mysql_table;
Database in ClickHouse, exchanging data with the MySQL server:

``` sql
CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password')
CREATE DATABASE mysql_db ENGINE = MySQL('localhost:3306', 'test', 'my_user', 'user_password') SETTINGS read_write_timeout=10000, connect_timeout=100;
```

``` sql
@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/ExternalDistributed
sidebar_position: 12
sidebar_label: ExternalDistributed
title: ExternalDistributed
---

# ExternalDistributed

The `ExternalDistributed` engine allows performing `SELECT` queries on data stored on remote MySQL or PostgreSQL servers. It accepts [MySQL](../../../engines/table-engines/integrations/mysql.md) or [PostgreSQL](../../../engines/table-engines/integrations/postgresql.md) engines as an argument, so sharding is possible.

## Creating a Table {#creating-a-table}
@ -2,10 +2,9 @@
slug: /en/engines/table-engines/integrations/materialized-postgresql
sidebar_position: 12
sidebar_label: MaterializedPostgreSQL
title: MaterializedPostgreSQL
---

# MaterializedPostgreSQL

Creates a ClickHouse table with an initial data dump of a PostgreSQL table and starts the replication process, i.e. it executes a background job to apply new changes as they happen on the PostgreSQL table in the remote PostgreSQL database.

If more than one table is required, it is highly recommended to use the [MaterializedPostgreSQL](../../../engines/database-engines/materialized-postgresql.md) database engine instead of the table engine and use the `materialized_postgresql_tables_list` setting, which specifies the tables to be replicated (it will also be possible to add a database `schema`). It is much better in terms of CPU usage and results in fewer connections and fewer replication slots inside the remote PostgreSQL database.
@ -2,10 +2,9 @@
slug: /en/engines/table-engines/special/generate
sidebar_position: 140
sidebar_label: GenerateRandom
title: "GenerateRandom Table Engine"
---

# GenerateRandom Table Engine

The GenerateRandom table engine produces random data for a given table schema.

Usage examples:
@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/brown-benchmark
sidebar_label: Brown University Benchmark
description: A new analytical benchmark for machine-generated log data
title: "Brown University Benchmark"
---

# Brown University Benchmark

`MgBench` is a new analytical benchmark for machine-generated log data, created by [Andrew Crotty](http://cs.brown.edu/people/acrotty/).

Download the data:
@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/cell-towers
sidebar_label: Cell Towers
title: "Cell Towers"
---

# Cell Towers

This dataset is from [OpenCellid](https://www.opencellid.org/) - the world's largest open database of cell towers.

As of 2021, it contains more than 40 million records about cell towers (GSM, LTE, UMTS, etc.) around the world with their geographical coordinates and metadata (country code, network, etc.).
@ -14,7 +13,7 @@ OpenCelliD Project is licensed under a Creative Commons Attribution-ShareAlike 4

## Get the Dataset {#get-the-dataset}

1. Download the snapshot of the dataset from February 2021: [https://datasets.clickhouse.com/cell_towers.csv.xz] (729 MB).
1. Download the snapshot of the dataset from February 2021: [cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (729 MB).

2. Validate the integrity (optional step):
```
@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/github-events
sidebar_label: GitHub Events
title: "GitHub Events Dataset"
---

# GitHub Events Dataset

The dataset contains all events on GitHub from 2011 to December 6, 2020; its size is 3.1 billion records. The download size is 75 GB, and it will require up to 200 GB of space on disk if stored in a table with lz4 compression.

The full dataset description, insights, download instructions and interactive queries are posted [here](https://ghe.clickhouse.tech/).
@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/menus
sidebar_label: New York Public Library "What's on the Menu?" Dataset
title: "New York Public Library \"What's on the Menu?\" Dataset"
---

# New York Public Library "What's on the Menu?" Dataset

The dataset is created by the New York Public Library. It contains historical data on the menus of hotels, restaurants and cafes with the dishes and their prices.

Source: http://menus.nypl.org/data
@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/opensky
sidebar_label: Air Traffic Data
description: The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic.
title: "Crowdsourced air traffic data from The OpenSky Network 2020"
---

# Crowdsourced air traffic data from The OpenSky Network 2020

The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic. It spans all flights seen by the network's more than 2500 members since 1 January 2019. More data will be periodically included in the dataset until the end of the COVID-19 pandemic.

Source: https://zenodo.org/record/5092942#.YRBCyTpRXYd
@ -1,10 +1,9 @@
---
slug: /en/getting-started/example-datasets/recipes
sidebar_label: Recipes Dataset
title: "Recipes Dataset"
---

# Recipes Dataset

The RecipeNLG dataset is available for download [here](https://recipenlg.cs.put.poznan.pl/dataset). It contains 2.2 million recipes. The size is slightly less than 1 GB.

## Download and Unpack the Dataset
@ -2,10 +2,9 @@
slug: /en/getting-started/example-datasets/uk-price-paid
sidebar_label: UK Property Price Paid
sidebar_position: 1
title: "UK Property Price Paid"
---

# UK Property Price Paid

The dataset contains data about prices paid for real-estate property in England and Wales. The data is available since 1995.
The size of the dataset in uncompressed form is about 4 GiB and it will take about 278 MiB in ClickHouse.

@ -175,6 +175,10 @@ You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP
- `br`
- `deflate`
- `xz`
- `zstd`
- `lz4`
- `bz2`
- `snappy`

To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`.
In order for ClickHouse to compress the response, enable compression with the [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting and append the `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level) setting for all compression methods.
@ -151,4 +151,3 @@ Management queries:

By default, SQL-driven access control and account management is disabled for all users. You need to configure at least one user in the `users.xml` configuration file and set the value of the [access_management](../operations/settings/settings-users.md#access_management-user-setting) setting to 1.

[Original article](https://clickhouse.com/docs/en/operations/access_rights/) <!--hide-->
@ -1,10 +1,10 @@
---
slug: /en/operations/backup
sidebar_position: 49
sidebar_label: Data Backup
sidebar_label: Data backup and restore
---

# Data Backup
# Data backup and restore

While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.

@ -16,21 +16,181 @@ Each company has different resources available and business requirements, so the
Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
:::

## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}
## Configure a backup destination

In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination, add a file to `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines a disk named `backups` and then adds that disk to the **backups > allowed_disk** list:

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <!--highlight-next-line -->
            <backups>
                <type>local</type>
                <path>/backups/</path>
            </backups>
        </disks>
    </storage_configuration>
    <!--highlight-start -->
    <backups>
        <allowed_disk>backups</allowed_disk>
        <allowed_path>/backups/</allowed_path>
    </backups>
    <!--highlight-end -->
</clickhouse>
```

## Parameters

Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries) as well as databases. Backups can be synchronous (default) or asynchronous. They can be compressed. Backups can be password protected.

The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a destination (or source), options and settings:
- The destination for the backup, or the source for the restore. This is based on the disk defined earlier. For example `Disk('backups', 'filename.zip')`
- ASYNC: backup or restore asynchronously
- PARTITIONS: a list of partitions to restore
- SETTINGS:
    - [`compression_method`](en/sql-reference/statements/create/table/#column-compression-codecs) and compression_level
    - `password` for the file on disk
    - `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
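
For orientation, here is a sketch combining several of these settings in one statement; the file name is illustrative, and each setting is shown separately in the sections below:

```sql
BACKUP TABLE test.table TO Disk('backups', 'combined.zip')
SETTINGS compression_method = 'lzma', compression_level = 3, password = 'qwerty'
```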

## Usage examples

Backup and then restore a table:
```
BACKUP TABLE test.table TO Disk('backups', '1.zip')
```

Corresponding restore:
```
RESTORE TABLE test.table FROM Disk('backups', '1.zip')
```

:::note
The above RESTORE would fail if the table `test.table` contains data; you would have to drop the table in order to test the RESTORE, or use the setting `allow_non_empty_tables=true`:
```
RESTORE TABLE test.table FROM Disk('backups', '1.zip')
SETTINGS allow_non_empty_tables=true
```
:::

Tables can be restored, or backed up, with new names:
```
RESTORE TABLE test.table AS test.table2 FROM Disk('backups', '1.zip')
```

```
BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip')
```

## Incremental backups

Incremental backups can be taken by specifying the `base_backup`.
:::note
Incremental backups depend on the base backup. The base backup must be kept available in order to be able to restore from an incremental backup.
:::

Incrementally store new data. The setting `base_backup` causes data added since the previous backup to `Disk('backups', 'd.zip')` to be stored to `Disk('backups', 'incremental-a.zip')`:
```
BACKUP TABLE test.table TO Disk('backups', 'incremental-a.zip')
SETTINGS base_backup = Disk('backups', 'd.zip')
```

Restore all data from the incremental backup and the base_backup into a new table `test.table2`:
```
RESTORE TABLE test.table AS test.table2
FROM Disk('backups', 'incremental-a.zip');
```

## Assign a password to the backup

Backups written to disk can have a password applied to the file:
```
BACKUP TABLE test.table
TO Disk('backups', 'password-protected.zip')
SETTINGS password='qwerty'
```

Restore:
```
RESTORE TABLE test.table
FROM Disk('backups', 'password-protected.zip')
SETTINGS password='qwerty'
```

## Compression settings

If you would like to specify the compression method or level:
```
BACKUP TABLE test.table
TO Disk('backups', 'filename.zip')
SETTINGS compression_method='lzma', compression_level=3
```

## Restore specific partitions
If specific partitions associated with a table need to be restored, these can be specified. To restore partitions 2 and 3 from backup:
```
RESTORE TABLE test.table PARTITIONS '2', '3'
FROM Disk('backups', 'filename.zip')
```

## Check the status of backups

The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful for checking the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file:
```sql
BACKUP TABLE helloworld.my_first_table TO Disk('backups', '1.zip') ASYNC
```
```response
┌─id───────────────────────────────────┬─status──────────┐
│ 7678b0b3-f519-4e6e-811f-5a0781a4eb52 │ CREATING_BACKUP │
└──────────────────────────────────────┴─────────────────┘

1 row in set. Elapsed: 0.001 sec.
```

```
SELECT
    *
FROM system.backups
WHERE id = '7678b0b3-f519-4e6e-811f-5a0781a4eb52'
FORMAT Vertical
```
```response
Row 1:
──────
id: 7678b0b3-f519-4e6e-811f-5a0781a4eb52
name: Disk('backups', '1.zip')
#highlight-next-line
status: BACKUP_FAILED
num_files: 0
uncompressed_size: 0
compressed_size: 0
#highlight-next-line
error: Code: 598. DB::Exception: Backup Disk('backups', '1.zip') already exists. (BACKUP_ALREADY_EXISTS) (version 22.8.2.11 (official build))
start_time: 2022-08-30 09:21:46
end_time: 2022-08-30 09:21:46

1 row in set. Elapsed: 0.002 sec.
```

## Alternatives

ClickHouse stores data on disk, and there are many ways to back up disks. These are some alternatives that have been used in the past, and that may fit well into your environment.

### Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}

Often data that is ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage somewhere. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).

## Filesystem Snapshots {#filesystem-snapshots}
### Filesystem Snapshots {#filesystem-snapshots}

Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they might not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](../engines/table-engines/special/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas might have special hardware configurations with more disks attached per server, which would be cost-effective.

## clickhouse-copier {#clickhouse-copier}
### clickhouse-copier {#clickhouse-copier}

[clickhouse-copier](../operations/utilities/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters.

For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to remote tables might work as well.

## Manipulations with Parts {#manipulations-with-parts}
### Manipulations with Parts {#manipulations-with-parts}

ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hardlinks to the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created copies of files are not handled by the ClickHouse server, so you can just leave them there: you will have a simple backup that does not require any additional external system, but it will still be prone to hardware issues. For this reason, it’s better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity might work as well (in this case the transfer will occur via the network filesystem or maybe [rsync](https://en.wikipedia.org/wiki/Rsync)).
Data can be restored from backup using the `ALTER TABLE ... ATTACH PARTITION ...` query.
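
A minimal sketch of that flow; the partition ID below is illustrative:

```sql
-- Create a local snapshot; hardlinked copies appear under /var/lib/clickhouse/shadow/
ALTER TABLE test.table FREEZE PARTITION 201901;

-- After copying the frozen files back into the table's detached/ directory:
ALTER TABLE test.table ATTACH PARTITION 201901;
```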
@ -39,4 +199,3 @@ For more information about queries related to partition manipulations, see the [

A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).

[Original article](https://clickhouse.com/docs/en/operations/backup/) <!--hide-->
@ -2,10 +2,9 @@
slug: /en/operations/caches
sidebar_position: 65
sidebar_label: Caches
title: "Cache Types"
---

# Cache Types

When performing queries, ClickHouse uses different caches.

Main cache types:
@ -2,10 +2,9 @@
slug: /en/operations/external-authenticators/
sidebar_position: 48
sidebar_label: External User Authenticators and Directories
title: "External User Authenticators and Directories"
---

# External User Authenticators and Directories

ClickHouse supports authenticating and managing users using external services.

The following external authenticators and directories are supported:
@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ldap
title: "LDAP"
---
# LDAP

An LDAP server can be used to authenticate ClickHouse users. There are two different approaches for doing this:

@ -1,7 +1,7 @@
---
slug: /en/operations/external-authenticators/ssl-x509
title: "SSL X.509 certificate authentication"
---
# SSL X.509 certificate authentication

The [SSL 'strict' option](../server-configuration-parameters/settings.md#server_configuration_parameters-openssl) enables mandatory certificate validation for incoming connections. In this case, only connections with trusted certificates can be established. Connections with untrusted certificates will be rejected. Thus, certificate validation allows an incoming connection to be uniquely authenticated. The `Common Name` field of the certificate is used to identify the connected user. This allows multiple certificates to be associated with the same user. Additionally, reissuing and revoking certificates does not affect the ClickHouse configuration.

@ -2,10 +2,9 @@
slug: /en/operations/opentelemetry
sidebar_position: 62
sidebar_label: OpenTelemetry Support
title: "[experimental] OpenTelemetry Support"
---

# [experimental] OpenTelemetry Support

[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from a distributed application. ClickHouse has some support for OpenTelemetry.

:::warning
@ -2,10 +2,9 @@
slug: /en/operations/performance-test
sidebar_position: 54
sidebar_label: Testing Hardware
title: "How to Test Your Hardware with ClickHouse"
---

# How to Test Your Hardware with ClickHouse

You can run a basic ClickHouse performance test on any server without installation of ClickHouse packages.


@ -2,10 +2,9 @@
slug: /en/operations/quotas
sidebar_position: 51
sidebar_label: Quotas
title: Quotas
---

# Quotas

Quotas allow you to limit resource usage over a period of time or track the use of resources.
Quotas are set up in the user config, which is usually ‘users.xml’.

@ -118,4 +117,3 @@ For distributed query processing, the accumulated amounts are stored on the requ

When the server is restarted, quotas are reset.

[Original article](https://clickhouse.com/docs/en/operations/quotas/) <!--hide-->
@ -1,7 +1,7 @@
---
slug: /en/operations/settings/merge-tree-settings
title: "MergeTree tables settings"
---
# MergeTree tables settings

The values of `merge_tree` settings (for all MergeTree tables) can be viewed in the table `system.merge_tree_settings`; they can be overridden in `config.xml` in the `merge_tree` section, or set in the `SETTINGS` section of each table.
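
For example, a per-table override in the `SETTINGS` clause might look like this (the table name and value are illustrative):

```sql
CREATE TABLE tab
(
    `a` UInt64
)
ENGINE = MergeTree
ORDER BY a
SETTINGS max_suspicious_broken_parts = 500;
```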

@ -966,7 +966,13 @@ Default value: 5000.

See also:

- [Apache Kafka](https://kafka.apache.org/)
- [Apache Kafka](https://kafka.apache.org/)

## kafka_disable_num_consumers_limit {#kafka-disable-num-consumers-limit}

Disables the limit on `kafka_num_consumers` that depends on the number of available CPU cores.

Default value: false.

## use_uncompressed_cache {#setting-use_uncompressed_cache}

@ -1,11 +1,10 @@
---
slug: /en/operations/storing-data
sidebar_position: 68
sidebar_label: External Disks for Storing Data
sidebar_label: "External Disks for Storing Data"
title: "External Disks for Storing Data"
---

# External Disks for Storing Data

Data processed in ClickHouse is usually stored in the local file system, on the same machine as the ClickHouse server. That requires large-capacity disks, which can be quite expensive. To avoid that, you can store the data remotely: on [Amazon S3](https://aws.amazon.com/s3/) disks or in the Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html)).

To work with data stored on `Amazon S3` disks, use the [S3](../engines/table-engines/integrations/s3.md) table engine; to work with data in the Hadoop Distributed File System, use the [HDFS](../engines/table-engines/integrations/hdfs.md) table engine.
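
As a sketch, a table reading from S3 can be declared like this (the bucket URL is illustrative):

```sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3('https://my-bucket.s3.amazonaws.com/test-data.csv.gz', 'CSV', 'gzip');
```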

@ -1,7 +1,7 @@
---
slug: /en/operations/system-tables/merge_tree_settings
title: merge_tree_settings
---
# merge_tree_settings

Contains information about settings for `MergeTree` tables.

@ -1,7 +1,7 @@
---
slug: /en/operations/system-tables/time_zones
title: time_zones
---
# time_zones

Contains a list of time zones that are supported by the ClickHouse server. This list of timezones might vary depending on the version of ClickHouse.
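
For example:

```sql
SELECT * FROM system.time_zones LIMIT 5
```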

@ -2,10 +2,9 @@
slug: /en/operations/tips
sidebar_position: 58
sidebar_label: Usage Recommendations
title: "Usage Recommendations"
---

# Usage Recommendations

## CPU Scaling Governor

Always use the `performance` scaling governor. The `on-demand` scaling governor works much worse with constantly high demand.
@ -1,9 +1,8 @@
---
slug: /en/operations/utilities/clickhouse-compressor
title: clickhouse-compressor
---

# clickhouse-compressor

Simple program for data compression and decompression.

### Examples
@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/clickhouse-format
title: clickhouse-format
---
# clickhouse-format

Allows formatting input queries.

@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/clickhouse-obfuscator
title: clickhouse-obfuscator
---
# clickhouse-obfuscator

A simple tool for table data obfuscation.

@ -1,7 +1,7 @@
---
slug: /en/operations/utilities/odbc-bridge
title: clickhouse-odbc-bridge
---
# clickhouse-odbc-bridge

A simple HTTP server which works like a proxy for an ODBC driver. The main motivation
was possible segfaults or other faults in ODBC implementations, which can
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/categoricalinformationvalue
sidebar_position: 250
title: categoricalInformationValue
---

# categoricalInformationValue

Calculates the value of `(P(tag = 1) - P(tag = 0))(log(P(tag = 1)) - log(P(tag = 0)))` for each category.

``` sql
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/deltasumtimestamp
sidebar_position: 141
title: deltaSumTimestamp
---

# deltaSumTimestamp

Adds the difference between consecutive rows. If the difference is negative, it is ignored.

This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that are ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will all have the same timestamp, it is impossible for them to be merged in the "right" order. This function keeps track of the `timestamp` of the values it's seen, so it's possible to order the states correctly during merging.
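
A small sketch of a call; the input here is synthetic, and negative differences between consecutive values are ignored:

```sql
SELECT deltaSumTimestamp(value, timestamp)
FROM (SELECT number AS timestamp, [0, 4, 8, 3][number % 4 + 1] AS value FROM numbers(5));
```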

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmapand
sidebar_position: 129
title: groupBitmapAnd
---

# groupBitmapAnd

Calculates the AND of a bitmap column and returns the cardinality as a UInt64 value. If the suffix -State is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

``` sql
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmapor
sidebar_position: 130
title: groupBitmapOr
---

# groupBitmapOr

Calculates the OR of a bitmap column and returns the cardinality as a UInt64 value. If the suffix -State is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md). This is equivalent to `groupBitmapMerge`.

``` sql
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmapxor
sidebar_position: 131
title: groupBitmapXor
---

# groupBitmapXor

Calculates the XOR of a bitmap column and returns the cardinality as a UInt64 value. If the suffix -State is added, it returns a [bitmap object](../../../sql-reference/functions/bitmap-functions.md).

``` sql
@ -2,10 +2,9 @@
slug: /en/sql-reference/aggregate-functions/reference/intervalLengthSum
sidebar_position: 146
sidebar_label: intervalLengthSum
title: intervalLengthSum
---

# intervalLengthSum

Calculates the total length of the union of all ranges (segments on a numeric axis).

**Syntax**
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/max
sidebar_position: 3
title: max
---

# max

Aggregate function that calculates the maximum across a group of values.

Example:
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/min
sidebar_position: 2
title: min
---

## min

Aggregate function that calculates the minimum across a group of values.

Example:
@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantilebfloat16
sidebar_position: 209
title: quantileBFloat16
---

# quantileBFloat16

Computes an approximate [quantile](https://en.wikipedia.org/wiki/Quantile) of a sample consisting of [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) numbers. `bfloat16` is a floating-point data type with 1 sign bit, 8 exponent bits and 7 fraction bits.
The function converts input values to 32-bit floats and takes the most significant 16 bits. Then it calculates the `bfloat16` quantile value and converts the result to a 64-bit float by appending zero bits.
The function is a fast quantile estimator with a relative error of no more than 0.390625%.
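
A minimal sketch:

```sql
SELECT quantileBFloat16(0.75)(number) FROM numbers(100)
```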

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumcount
sidebar_position: 144
title: sumCount
---

# sumCount

Calculates the sum of the numbers and counts the number of rows at the same time. The function is used by the ClickHouse query optimizer: if there are multiple `sum`, `count` or `avg` functions in a query, they can be replaced with a single `sumCount` function to reuse the calculations. The function rarely needs to be used explicitly.

**Syntax**

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumkahan
sidebar_position: 145
title: sumKahan
---

# sumKahan

Calculates the sum of the numbers with the [Kahan compensated summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm).
It is slower than the [sum](./sum.md) function.
The compensation works only for [Float](../../../sql-reference/data-types/float.md) types.
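
A small comparison sketch; summing `0.1` ten times with plain `sum` typically shows a floating-point artifact, while `sumKahan` compensates for it:

```sql
SELECT sum(0.1) AS plain, sumKahan(0.1) AS compensated FROM numbers(10)
```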

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniqthetasketch
sidebar_position: 195
title: uniqTheta
---

# uniqTheta

Calculates the approximate number of different argument values, using the [Theta Sketch Framework](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html).

``` sql
@ -2,10 +2,9 @@
slug: /en/sql-reference/ansi
sidebar_position: 40
sidebar_label: ANSI Compatibility
title: "ANSI SQL Compatibility of ClickHouse SQL Dialect"
---

# ANSI SQL Compatibility of ClickHouse SQL Dialect

:::note
This article relies on Table 38, “Feature taxonomy and definition for mandatory features”, Annex F of [ISO/IEC CD 9075-2:2011](https://www.iso.org/obp/ui/#iso:std:iso-iec:9075:-2:ed-4:v1:en:sec:8).
:::
@ -46,7 +46,7 @@ Binary operations on Decimal result in wider result type (with any order of argu
Rules for scale:

- add, subtract: S = max(S1, S2).
- multuply: S = S1 + S2.
- multiply: S = S1 + S2.
- divide: S = S1.
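
To see the scale rules in action, here is a sketch; the multiplication below yields scale S = S1 + S2 = 3, and the result type also widens:

```sql
SELECT toDecimal32(2.5, 1) * toDecimal32(1.25, 2) AS product, toTypeName(product)
```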

For similar operations between Decimal and integers, the result is Decimal of the same size as an argument.

@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/geo
sidebar_position: 62
sidebar_label: Geo
title: "Geo Data Types"
---

# Geo Data Types

ClickHouse supports data types for representing geographical objects — locations, lands, etc.

:::warning
@ -2,10 +2,9 @@
slug: /en/sql-reference/data-types/multiword-types
sidebar_position: 61
sidebar_label: Multiword Type Names
title: "Multiword Types"
---

# Multiword Types

When creating tables, you can use data types with a name consisting of several words. This is implemented for better SQL compatibility.
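
For instance, a minimal sketch using a multiword alias:

```sql
CREATE TABLE t
(
    `id` DOUBLE PRECISION -- alias for Float64
)
ENGINE = Memory;
```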

## Multiword Types Support

@ -411,7 +411,7 @@ If setting `allow_read_expired_keys` is set to 1, by default 0. Then dictionary

To improve cache performance, use a subquery with `LIMIT`, and call the function with the dictionary externally.
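
A sketch of that pattern; the dictionary, attribute and table names are illustrative:

```sql
SELECT dictGet('cache_dict', 'value_column', toUInt64(id))
FROM (SELECT id FROM source_table LIMIT 100)
```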

Supported [sources](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md): MySQL, ClickHouse, executable, HTTP.
All types of sources are supported.

Example of settings:

@ -2,10 +2,9 @@
slug: /en/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-polygon
sidebar_position: 46
sidebar_label: Polygon Dictionaries With Grids
title: "Polygon dictionaries"
---

# Polygon dictionaries

Polygon dictionaries allow you to efficiently search for the polygon containing specified points.
For example: defining a city area by geographical coordinates.

@ -1069,7 +1069,7 @@ Formats a Time according to the given Format string. Format is a constant expres
**Syntax**

``` sql
formatDateTime(Time, Format\[, Timezone\])
formatDateTime(Time, Format[, Timezone])
```

**Returned value(s)**
@ -1105,6 +1105,7 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %w | weekday as a decimal number with Sunday as 0 (0-6) | 2 |
| %y | Year, last two digits (00-99) | 18 |
| %Y | Year | 2018 |
| %z | Time offset from UTC as +HHMM or -HHMM | -0500 |
| %% | a % sign | % |
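
For instance, the newly added `%z` field can be exercised like this (the timezone argument is illustrative):

```sql
SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y-%m-%d %z', 'Europe/Moscow')
```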

**Example**

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/encryption-functions
sidebar_position: 67
sidebar_label: Encryption
title: "Encryption functions"
---

# Encryption functions

These functions implement encryption and decryption of data with the AES (Advanced Encryption Standard) algorithm.

Key length depends on encryption mode. It is 16, 24, and 32 bytes long for `-128-`, `-192-`, and `-256-` modes respectively.
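
As a sketch, encrypting in `aes-256-cbc` mode with a 32-byte key and a 16-byte IV (both values are illustrative):

```sql
SELECT hex(encrypt('aes-256-cbc', 'secret text', '32-byte-long-key-1234567890abcde', 'example-16-bytes'))
```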

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/files
sidebar_position: 43
sidebar_label: Files
title: "Functions for Working with Files"
---

# Functions for Working with Files

## file

Reads a file as a String. The file content is not parsed, so any information is read as one string and placed into the specified column.
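
A sketch of typical usage; the file must be located under `user_files_path`, and the names here are illustrative:

```sql
INSERT INTO table_with_one_string_column SELECT file('example.txt')
```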

@ -2,11 +2,9 @@
slug: /en/sql-reference/functions/geo/coordinates
sidebar_label: Geographical Coordinates
sidebar_position: 62
title: "Functions for Working with Geographical Coordinates"
---


# Functions for Working with Geographical Coordinates

## greatCircleDistance

Calculates the distance between two points on the Earth’s surface using [the great-circle formula](https://en.wikipedia.org/wiki/Great-circle_distance).
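
For example:

```sql
SELECT greatCircleDistance(55.755831, 37.617673, -55.755831, -37.617673)
```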

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/geohash
sidebar_label: Geohash
title: "Functions for Working with Geohash"
---

# Functions for Working with Geohash

[Geohash](https://en.wikipedia.org/wiki/Geohash) is a geocode system that subdivides the Earth’s surface into grid-shaped buckets and encodes each cell into a short string of letters and digits. It is a hierarchical data structure, so the longer the geohash string is, the more precise the geographic location will be.

If you need to manually convert geographic coordinates to geohash strings, you can use [geohash.org](http://geohash.org/).
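
For example, encoding coordinates with `geohashEncode` (longitude first, then latitude):

```sql
SELECT geohashEncode(-5.60302734375, 42.593994140625, 0) AS encoded
```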

@ -1,10 +1,9 @@
---
slug: /en/sql-reference/functions/geo/h3
sidebar_label: H3 Indexes
title: "Functions for Working with H3 Indexes"
---

# Functions for Working with H3 Indexes

[H3](https://eng.uber.com/h3/) is a geographical indexing system where the Earth’s surface is divided into a grid of even hexagonal cells. This system is hierarchical, i.e. each hexagon on the top level ("parent") can be split into seven even but smaller ones ("children"), and so on.

The level of the hierarchy is called `resolution` and can receive a value from `0` to `15`, where `0` is the `base` level with the largest and coarsest cells.
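
For example, obtaining an index at the finest resolution:

```sql
SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index
```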

@ -2,9 +2,9 @@
slug: /en/sql-reference/functions/geo/
sidebar_label: Geo
sidebar_position: 62
title: "Geo Functions"
---

# Geo Functions

## Geographical Coordinates Functions

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/nlp-functions
sidebar_position: 67
sidebar_label: NLP
title: "[experimental] Natural Language Processing functions"
---

# [experimental] Natural Language Processing functions

:::warning
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
:::

@ -495,25 +495,23 @@ If the ‘s’ string is non-empty and does not contain the ‘c’ character at

Returns the string ‘s’ that was converted from the encoding in ‘from’ to the encoding in ‘to’.

## Base58Encode(plaintext), Base58Decode(encoded_text)
## base58Encode(plaintext)

Accepts a String and encodes/decodes it using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the "Bitcoin" alphabet.
Accepts a String and encodes it using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the "Bitcoin" alphabet.

**Syntax**

```sql
base58Encode(decoded)
base58Decode(encoded)
base58Encode(plaintext)
```

**Arguments**

- `decoded` — [String](../../sql-reference/data-types/string.md) column or constant.
- `encoded` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.
- `plaintext` — [String](../../sql-reference/data-types/string.md) column or constant.

**Returned value**

- A string containing the encoded/decoded value of the 1st argument.
- A string containing the encoded value of the 1st argument.

Type: [String](../../sql-reference/data-types/string.md).

@ -523,17 +521,48 @@ Query:

``` sql
SELECT base58Encode('Encoded');
SELECT base58Encode('3dc8KtHrwM');
```

Result:
```text
┌─encodeBase58('Encoded')─┐
│ 3dc8KtHrwM │
└──────────────────────────────────┘
┌─decodeBase58('3dc8KtHrwM')─┐
│ Encoded │
└────────────────────────────────────┘
┌─base58Encode('Encoded')─┐
│ 3dc8KtHrwM │
└─────────────────────────┘
```

## base58Decode(encoded_text)

Accepts a String and decodes it using the [Base58](https://tools.ietf.org/id/draft-msporny-base58-01.html) encoding scheme with the "Bitcoin" alphabet.

**Syntax**

```sql
base58Decode(encoded_text)
```

**Arguments**

- `encoded_text` — [String](../../sql-reference/data-types/string.md) column or constant. If the string is not a valid base58-encoded value, an exception is thrown.

**Returned value**

- A string containing the decoded value of the 1st argument.

Type: [String](../../sql-reference/data-types/string.md).

**Example**

Query:

``` sql
SELECT base58Decode('3dc8KtHrwM');
```

Result:
```text
┌─base58Decode('3dc8KtHrwM')─┐
│ Encoded │
└────────────────────────────┘
```

## base64Encode(s)

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-functions
sidebar_position: 66
sidebar_label: Tuples
title: "Functions for Working with Tuples"
---

# Functions for Working with Tuples

## tuple

A function that allows grouping multiple columns.

@ -2,10 +2,9 @@
slug: /en/sql-reference/functions/tuple-map-functions
sidebar_position: 46
sidebar_label: Working with maps
title: "Functions for maps"
---

# Functions for maps

## map

Arranges `key:value` pairs into [Map(key, value)](../../sql-reference/data-types/map.md) data type.
@ -431,5 +430,119 @@ Result:
└────────────────────────────┘
```

## mapApply

Applies a function to each element of a map.

**Syntax**

```sql
mapApply(func, map)
```

**Parameters**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map obtained from the original map by application of `func(map1[i], …, mapN[i])` for each element.

**Example**

Query:

```sql
SELECT mapApply((k, v) -> (k, v * 10), _map) AS r
FROM
(
    SELECT map('key1', number, 'key2', number * 2) AS _map
    FROM numbers(3)
)
```

Result:

```text
┌─r─────────────────────┐
│ {'key1':0,'key2':0}   │
│ {'key1':10,'key2':20} │
│ {'key1':20,'key2':40} │
└───────────────────────┘
```

## mapFilter

Filters a map, keeping only the elements for which the function returns a non-zero value.

**Syntax**

```sql
mapFilter(func, map)
```

**Parameters**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns a map containing only the elements in `map` for which `func(map1[i], …, mapN[i])` returns something other than 0.

**Example**

Query:

```sql
SELECT mapFilter((k, v) -> ((v % 2) = 0), _map) AS r
FROM
(
    SELECT map('key1', number, 'key2', number * 2) AS _map
    FROM numbers(3)
)
```

Result:

```text
┌─r───────────────────┐
│ {'key1':0,'key2':0} │
│ {'key2':2}          │
│ {'key1':2,'key2':4} │
└─────────────────────┘
```

## mapUpdate

Updates `map1` with the key-value pairs from `map2`.

**Syntax**

```sql
mapUpdate(map1, map2)
```

**Parameters**

- `map1` — [Map](../../sql-reference/data-types/map.md).
- `map2` — [Map](../../sql-reference/data-types/map.md).

**Returned value**

- Returns `map1` with values updated for the keys that are also present in `map2`.

**Example**

Query:

```sql
SELECT mapUpdate(map('key1', 0, 'key3', 0), map('key1', 10, 'key2', 10)) AS map;
```

Result:

```text
┌─map────────────────────────────┐
│ {'key3':0,'key1':10,'key2':10} │
└────────────────────────────────┘
```

[Original article](https://clickhouse.com/docs/en/sql-reference/functions/tuple-map-functions/) <!--hide-->
@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/column
sidebar_position: 37
sidebar_label: COLUMN
title: "Column Manipulations"
---

# Column Manipulations

A set of queries that allow changing the table structure.

Syntax:
@ -9,8 +9,8 @@ sidebar_label: CONSTRAINT
Constraints can be added or deleted using the following syntax:

``` sql
ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;
```
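
A sketch of usage (the table, cluster, and constraint names are illustrative):

``` sql
ALTER TABLE db.users ON CLUSTER my_cluster ADD CONSTRAINT age_is_valid CHECK age >= 0;
ALTER TABLE db.users ON CLUSTER my_cluster DROP CONSTRAINT age_is_valid;
```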

See more on [constraints](../../../sql-reference/statements/create/table.md#constraints).

@ -2,10 +2,9 @@
slug: /en/sql-reference/statements/alter/partition
sidebar_position: 38
sidebar_label: PARTITION
title: "Manipulating Partitions and Parts"
---

# Manipulating Partitions and Parts

The following operations with [partitions](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md) are available:

- [DETACH PARTITION\|PART](#detach-partitionpart) — Moves a partition or part to the `detached` directory and forgets it.
|
@ -2,10 +2,9 @@
|
||||
slug: /en/sql-reference/statements/alter/projection
|
||||
sidebar_position: 49
|
||||
sidebar_label: PROJECTION
|
||||
title: "Manipulating Projections"
|
||||
---
|
||||
|
||||
# Manipulating Projections
|
||||
|
||||
The following operations with [projections](../../../engines/table-engines/mergetree-family/mergetree.md#projections) are available:
|
||||
|
||||
- `ALTER TABLE [db].name ADD PROJECTION name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
|
||||
|
Some files were not shown because too many files have changed in this diff