Merge remote-tracking branch 'ClickHouse/master' into change_date

Robert Schulze committed 2024-05-22 12:23:36 +00:00
commit fdb63fc7ed
No known key found for this signature in database
GPG Key ID: 26703B55FB13728A
316 changed files with 2307 additions and 881 deletions

View File

@ -22,6 +22,7 @@ Checks: [
'-bugprone-exception-escape',
'-bugprone-forward-declaration-namespace',
'-bugprone-implicit-widening-of-multiplication-result',
'-bugprone-multi-level-implicit-pointer-conversion',
'-bugprone-narrowing-conversions',
'-bugprone-not-null-terminated-result',
'-bugprone-reserved-identifier', # useful but too slow, TODO retry when https://reviews.llvm.org/rG1c282052624f9d0bd273bde0b47b30c96699c6c7 is merged
@ -98,6 +99,7 @@ Checks: [
'-modernize-use-nodiscard',
'-modernize-use-trailing-return-type',
'-performance-enum-size',
'-performance-inefficient-string-concatenation',
'-performance-no-int-to-ptr',
'-performance-avoid-endl',
@ -105,6 +107,7 @@ Checks: [
'-portability-simd-intrinsics',
'-readability-avoid-nested-conditional-operator',
'-readability-avoid-unconditional-preprocessor-if',
'-readability-braces-around-statements',
'-readability-convert-member-functions-to-static',
@ -118,6 +121,12 @@ Checks: [
'-readability-magic-numbers',
'-readability-named-parameter',
'-readability-redundant-declaration',
'-readability-redundant-inline-specifier',
'-readability-redundant-member-init', # Useful but triggers another problem. Imagine a struct S with multiple String members. Structs are often instantiated via designated
# initializer S s{.s1 = [...], .s2 = [...], [...]}. In this case, compiler warning `missing-field-initializers` requires specifying all members which are not in-struct
# initialized (example: s1 in struct S { String s1; String s2{}; }; is not in-struct initialized, therefore it must be specified at instantiation time). As explicitly
# specifying all members is tedious for large structs, `missing-field-initializers` pushes programmers to initialize as many members as possible in-struct. The clang-tidy
# warning `readability-redundant-member-init` pushes in the opposite direction, so the two checks are incompatible (see the sketch after this hunk).
'-readability-simplify-boolean-expr',
'-readability-suspicious-call-argument',
'-readability-uppercase-literal-suffix',
@ -125,17 +134,6 @@ Checks: [
'-zircon-*',
# These are new in clang-18, and we have to sort them out:
'-readability-avoid-nested-conditional-operator',
'-modernize-use-designated-initializers',
'-performance-enum-size',
'-readability-redundant-inline-specifier',
'-readability-redundant-member-init',
'-bugprone-crtp-constructor-accessibility',
'-bugprone-suspicious-stringview-data-usage',
'-bugprone-multi-level-implicit-pointer-conversion',
'-cert-err33-c',
# This is a good check, but clang-tidy crashes, see https://github.com/llvm/llvm-project/issues/91872
'-modernize-use-constraints',
# https://github.com/abseil/abseil-cpp/issues/1667
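To make the conflict between `readability-redundant-member-init` and `-Wmissing-field-initializers` described above concrete, here is a minimal sketch (the struct and field names are illustrative, not taken from the ClickHouse sources):

```cpp
#include <string>

using String = std::string;

struct S
{
    String s1;   // no in-struct initializer: must appear in the designated initializer below,
                 // otherwise -Wmissing-field-initializers fires
    String s2{}; // in-struct initialized, so it may be omitted below, but
                 // readability-redundant-member-init flags this "{}" as redundant
};

int main()
{
    S s{.s1 = "x"}; // OK only because s2 has an in-struct initializer
    return s.s2.empty() ? 0 : 1;
}
```

Removing the `{}` from `s2` satisfies clang-tidy but makes the instantiation warn; keeping it satisfies the compiler but trips clang-tidy, which is why the check is disabled.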

View File

@ -28,3 +28,10 @@ runs:
run: |
# to remove any leftovers
sudo rm -fr "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
- name: Tune vm.mmap_rnd_bits for sanitizers
shell: bash
run: |
sudo sysctl vm.mmap_rnd_bits
# https://github.com/google/sanitizers/issues/856
echo "Tune vm.mmap_rnd_bits for sanitizers"
sudo sysctl vm.mmap_rnd_bits=28

View File

@ -130,15 +130,21 @@ jobs:
with:
stage: Tests_2
data: ${{ needs.RunConfig.outputs.data }}
# stage for jobs that do not prohibit merge
Tests_3:
needs: [RunConfig, Tests_1, Tests_2]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_3
data: ${{ needs.RunConfig.outputs.data }}
################################# Reports #################################
# Reports should by run even if Builds_1/2 fail, so put them separatly in wf (not in Tests_1/2)
# Reports should be run even if Builds_1/2 fail, so put them separately in wf (not in Tests_1/2)
Builds_1_Report:
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
needs:
- RunConfig
- Builds_1
if: ${{ !cancelled() && needs.StyleCheck.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
needs: [RunConfig, StyleCheck, Builds_1]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse build check
@ -146,25 +152,39 @@ jobs:
data: ${{ needs.RunConfig.outputs.data }}
Builds_2_Report:
# run report check for failed builds to indicate the CI error
if: ${{ !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
needs:
- RunConfig
- Builds_2
if: ${{ !cancelled() && needs.StyleCheck.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
needs: [RunConfig, StyleCheck, Builds_2]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse special build check
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
CheckReadyForMerge:
if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
filter: tree:0
- name: Check and set merge status
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
################################# Stage Final #################################
#
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2]
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2, Tests_3]
runs-on: [self-hosted, style-checker]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
filter: tree:0
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"

View File

@ -61,13 +61,16 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
# set CPU time limit to 1000 seconds
set (RLIMIT_CPU 1000)
# Sanitizers are too heavy
if (SANITIZE OR SANITIZE_COVERAGE OR WITH_COVERAGE)
set (RLIMIT_DATA 10000000000) # 10G
# Sanitizers are too heavy. Some architectures too.
if (SANITIZE OR SANITIZE_COVERAGE OR WITH_COVERAGE OR ARCH_RISCV64 OR ARCH_LOONGARCH64)
# Twice as large
set (RLIMIT_DATA 10000000000)
set (RLIMIT_AS 20000000000)
endif()
# For some files currently building RISCV64 might be too slow. TODO: Improve compilation times per file
if (ARCH_RISCV64)
# For some files currently building RISCV64/LOONGARCH64 might be too slow.
# TODO: Improve compilation times per file
if (ARCH_RISCV64 OR ARCH_LOONGARCH64)
set (RLIMIT_CPU 1800)
endif()

View File

@ -9,11 +9,18 @@
bool cgroupsV2Enabled()
{
#if defined(OS_LINUX)
/// This file exists iff the host has cgroups v2 enabled.
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
if (!std::filesystem::exists(controllers_file))
return false;
return true;
try
{
/// This file exists iff the host has cgroups v2 enabled.
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
if (!std::filesystem::exists(controllers_file))
return false;
return true;
}
catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
{
return false; /// not logging the exception as most callers fall back to cgroups v1
}
#else
return false;
#endif
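The comment about falling back is the key contract here: callers treat `false` as "use the cgroups v1 code path", and the new `try`/`catch` extends that to hosts where the mount point is unreadable. A hypothetical caller sketch (both helper functions are illustrative, not part of this diff):

```cpp
#include <cstdint>

uint64_t readCgroupsV2MemoryMax();   // hypothetical: would parse /sys/fs/cgroup/memory.max
uint64_t readCgroupsV1MemoryLimit(); // hypothetical: would parse memory.limit_in_bytes

uint64_t readMemoryLimit()
{
    // cgroupsV2Enabled() now returns false instead of throwing on e.g.
    // permission denied, so the v1 fallback also covers that case.
    if (cgroupsV2Enabled())
        return readCgroupsV2MemoryMax();
    return readCgroupsV1MemoryLimit();
}
```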

View File

@ -1,11 +1,23 @@
set (DEFAULT_LIBS "-nodefaultlibs")
if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "amd64")
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-x86_64.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
set(system_processor "x86_64")
else ()
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
set(system_processor "${CMAKE_SYSTEM_PROCESSOR}")
endif ()
file(GLOB bprefix "/usr/local/llvm${COMPILER_VERSION_MAJOR}/lib/clang/${COMPILER_VERSION_MAJOR}/lib/${system_processor}-portbld-freebsd*/")
message(STATUS "-Bprefix: ${bprefix}")
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins-${system_processor}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
# --print-file-name simply prints back what you passed if nothing was resolved, so let's try one other possible option
if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins-${system_processor}.a")
execute_process(COMMAND ${CMAKE_CXX_COMPILER} -Bprefix=${bprefix} --print-file-name=libclang_rt.builtins.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
if (BUILTINS_LIBRARY STREQUAL "libclang_rt.builtins.a")
message(FATAL_ERROR "libclang_rt.builtins was not found")
endif()
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread")
message(STATUS "Default libraries: ${DEFAULT_LIBS}")

View File

@ -7,7 +7,7 @@ endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libbcrypt")
set(SRCS
"${LIBRARY_DIR}/bcrypt.c"
"${LIBRARY_DIR}/crypt_blowfish/crypt_blowfish.c"
"${LIBRARY_DIR}/crypt_blowfish/crypt_gensalt.c"
@ -16,4 +16,13 @@ set(SRCS
add_library(_bcrypt ${SRCS})
target_include_directories(_bcrypt SYSTEM PUBLIC "${LIBRARY_DIR}")
# Avoid conflicts for crypt_r on FreeBSD [1]:
#
# - char *crypt_r(__const char *key, __const char *setting, void *data);
# - char *crypt_r(const char *, const char *, struct crypt_data *);
#
# [1]: https://github.com/freebsd/freebsd-src/commit/5f521d7ba72145092ea23ff6081d8791ad6c1f9d
#
# NOTE: ow-crypt.h is used only internally, so PRIVATE is enough
target_compile_definitions(_bcrypt PRIVATE -D__SKIP_GNU)
add_library(ch_contrib::bcrypt ALIAS _bcrypt)

contrib/libunwind vendored

@ -1 +1 @@
Subproject commit 854538ce337d631b619010528adff22cd58f9dce
Subproject commit d6a01c46327e56fd86beb8aaa31591fcd9a6b7df

View File

@ -160,10 +160,17 @@ function clone_submodules
git submodule sync
git submodule init
# --jobs does not work as fast as real parallel running
printf '%s\0' "${SUBMODULES_TO_UPDATE[@]}" | \
xargs --max-procs=100 --null --no-run-if-empty --max-args=1 \
git submodule update --depth 1 --single-branch
# Network is unreliable
for _ in {1..10}
do
# --jobs does not work as fast as real parallel running
printf '%s\0' "${SUBMODULES_TO_UPDATE[@]}" | \
xargs --max-procs=100 --null --no-run-if-empty --max-args=1 \
git submodule update --depth 1 --single-branch && break
sleep 1
done
git submodule foreach git reset --hard
git submodule foreach git checkout @ -f
git submodule foreach git clean -xfd

View File

@ -36,6 +36,11 @@
<allow_experimental_object_type>
<readonly/>
</allow_experimental_object_type>
<!-- Prevent stack overflow -->
<max_ast_depth>
<readonly/>
</max_ast_depth>
</constraints>
</default>
</profiles>

View File

@ -154,6 +154,11 @@ EOL
<allow_experimental_object_type>
<readonly/>
</allow_experimental_object_type>
<!-- Prevent stack overflow -->
<max_ast_depth>
<readonly/>
</max_ast_depth>
</constraints>
</default>
</profiles>

View File

@ -58,8 +58,14 @@ echo "ATTACH DATABASE system ENGINE=Ordinary" > /var/lib/clickhouse/metadata/sys
# Install previous release packages
install_packages previous_release_package_folder
# Save old settings from system table for settings changes check
clickhouse-local -q "select * from system.settings format Native" > old_settings.native
# NOTE: we need to run clickhouse-local under `script` to get settings without the adjustments clickhouse-local makes when stdout is not a tty
function save_settings_clean()
{
local out=$1 && shift
script -q -c "clickhouse-local -q \"select * from system.settings into outfile '$out'\"" --log-out /dev/null
}
save_settings_clean 'old_settings.native'
# Initial run without S3 to create system.*_log on local file system to make it
# available for dump via clickhouse-local
@ -183,7 +189,7 @@ configure
IS_SANITIZED=$(clickhouse-local --query "SELECT value LIKE '%-fsanitize=%' FROM system.build_options WHERE name = 'CXX_FLAGS'")
if [ "${IS_SANITIZED}" -eq "0" ]
then
clickhouse-local -q "select * from system.settings format Native" > new_settings.native
save_settings_clean 'new_settings.native'
clickhouse-local -nmq "
CREATE TABLE old_settings AS file('old_settings.native');
CREATE TABLE new_settings AS file('new_settings.native');

View File

@ -22,7 +22,7 @@ description: In order to effectively mitigate possible human errors, you should
TEMPORARY TABLE table_name [AS table_name_in_backup] |
VIEW view_name [AS view_name_in_backup]
ALL TEMPORARY TABLES [EXCEPT ...] |
ALL DATABASES [EXCEPT ...] } [,...]
ALL [EXCEPT ...] } [,...]
[ON CLUSTER 'cluster_name']
TO|FROM File('<path>/<filename>') | Disk('<disk_name>', '<path>/') | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')
[SETTINGS base_backup = File('<path>/<filename>') | Disk(...) | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')]

View File

@ -7,27 +7,27 @@ title: "External Disks for Storing Data"
Data processed in ClickHouse is usually stored in the local file system of the machine running the ClickHouse server. That requires large-capacity disks, which can be expensive. To avoid that, you can store the data remotely. Various storage types are supported:
1. [Amazon S3](https://aws.amazon.com/s3/) object storage.
2. The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html))
3. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs).
2. [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs).
3. Unsupported: The Hadoop Distributed File System ([HDFS](https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html))
:::note ClickHouse also has support for external table engines, which are different from the external storage options described on this page: table engines allow reading data stored in some general file format (like Parquet), while this page describes storage configuration for ClickHouse `MergeTree` family or `Log` family tables.
1. to work with data stored on `Amazon S3` disks, use [S3](/docs/en/engines/table-engines/integrations/s3.md) table engine.
2. to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine.
3. to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) table engine.
2. to work with data stored in Azure Blob Storage use [AzureBlobStorage](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) table engine.
3. Unsupported: to work with data in the Hadoop Distributed File System — [HDFS](/docs/en/engines/table-engines/integrations/hdfs.md) table engine.
:::
## Configuring external storage {#configuring-external-storage}
[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data to `S3`, `AzureBlobStorage`, `HDFS` using a disk with types `s3`, `azure_blob_storage`, `hdfs` accordingly.
[MergeTree](/docs/en/engines/table-engines/mergetree-family/mergetree.md) and [Log](/docs/en/engines/table-engines/log-family/log.md) family table engines can store data in `S3`, `AzureBlobStorage`, or `HDFS` (unsupported) using disks of type `s3`, `azure_blob_storage`, or `hdfs` (unsupported), respectively.
Disk configuration requires:
1. `type` section, equal to one of `s3`, `azure_blob_storage`, `hdfs`, `local_blob_storage`, `web`.
1. `type` section, equal to one of `s3`, `azure_blob_storage`, `hdfs` (unsupported), `local_blob_storage`, `web`.
2. Configuration of a specific external storage type.
Starting from ClickHouse version 24.1, it is possible to use a new configuration option.
It requires specifying:
1. `type` equal to `object_storage`
2. `object_storage_type`, equal to one of `s3`, `azure_blob_storage` (or just `azure` from `24.3`), `hdfs`, `local_blob_storage` (or just `local` from `24.3`), `web`.
2. `object_storage_type`, equal to one of `s3`, `azure_blob_storage` (or just `azure` from `24.3`), `hdfs` (unsupported), `local_blob_storage` (or just `local` from `24.3`), `web`.
Optionally, `metadata_type` can be specified (it is equal to `local` by default), but it can also be set to `plain`, `web` and, starting from `24.4`, `plain_rewritable`.
Usage of the `plain` metadata type is described in the [plain storage section](/docs/en/operations/storing-data.md/#storing-data-on-webserver); the `web` metadata type can be used only with the `web` object storage type; the `local` metadata type stores metadata files locally (each metadata file contains a mapping to files in object storage and some additional meta information about them).
@ -328,7 +328,7 @@ Configuration:
</s3_plain>
```
Starting from `24.1` it is possible configure any object storage disk (`s3`, `azure`, `hdfs`, `local`) using `plain` metadata type.
Starting from `24.1` it is possible to configure any object storage disk (`s3`, `azure`, `hdfs` (unsupported), `local`) using the `plain` metadata type.
Configuration:
``` xml
@ -428,12 +428,14 @@ Examples of working configurations can be found in integration tests directory (
Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.
:::
## Using HDFS storage {#hdfs-storage}
## Using HDFS storage (Unsupported)
In this sample configuration:
- the disk is of type `hdfs`
- the disk is of type `hdfs` (unsupported)
- the data is hosted at `hdfs://hdfs1:9000/clickhouse/`
By the way, HDFS is unsupported and therefore there might be issues when using it. Feel free to make a pull request with the fix if any issue arises.
```xml
<clickhouse>
<storage_configuration>
@ -464,9 +466,11 @@ In this sample configuration:
</clickhouse>
```
Keep in mind that HDFS may not work in corner cases.
### Using Data Encryption {#encrypted-virtual-file-system}
You can encrypt the data stored on [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one.
You can encrypt the data stored on [S3](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-s3), or [HDFS](#configuring-hdfs) (unsupported) external disks, or on a local disk. To turn on the encryption mode, in the configuration file you must define a disk with the type `encrypted` and choose a disk on which the data will be saved. An `encrypted` disk ciphers all written files on the fly, and when you read files from an `encrypted` disk it deciphers them automatically. So you can work with an `encrypted` disk like with a normal one.
Example of disk configuration:
@ -529,7 +533,7 @@ Example of disk configuration:
It is possible to configure local cache over disks in storage configuration starting from version 22.3.
For versions 22.3 - 22.7 cache is supported only for `s3` disk type. For versions >= 22.8 cache is supported for any disk type: S3, Azure, Local, Encrypted, etc.
For versions >= 23.5 cache is supported only for remote disk types: S3, Azure, HDFS.
For versions >= 23.5 cache is supported only for remote disk types: S3, Azure, HDFS (unsupported).
Cache uses `LRU` cache policy.
@ -971,7 +975,7 @@ Use [http_max_single_read_retries](/docs/en/operations/settings/settings.md/#htt
### Zero-copy Replication (not ready for production) {#zero-copy}
Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` disks. Zero-copy replication means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself.
Zero-copy replication is possible, but not recommended, with `S3` and `HDFS` (unsupported) disks. Zero-copy replication means that if the data is stored remotely on several machines and needs to be synchronized, then only the metadata is replicated (paths to the data parts), but not the data itself.
:::note Zero-copy replication is not ready for production
Zero-copy replication is disabled by default in ClickHouse version 22.8 and higher. This feature is not recommended for production use.

View File

@ -108,7 +108,7 @@ Columns:
- `used_aggregate_function_combinators` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions combinators`, which were used during query execution.
- `used_database_engines` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `database engines`, which were used during query execution.
- `used_data_type_families` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `data type families`, which were used during query execution.
- `used_dictionaries` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `dictionaries`, which were used during query execution.
- `used_dictionaries` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `dictionaries`, which were used during query execution. For dictionaries configured using an XML file, this is the name of the dictionary; for dictionaries created by an SQL statement, the canonical name is the fully qualified object name.
- `used_formats` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `formats`, which were used during query execution.
- `used_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `functions`, which were used during query execution.
- `used_storages` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `storages`, which were used during query execution.

View File

@ -0,0 +1,45 @@
---
slug: /en/sql-reference/aggregate-functions/reference/analysis_of_variance
sidebar_position: 6
---
# analysisOfVariance
Provides a statistical test for one-way analysis of variance (ANOVA test). It is a test over several groups of normally distributed observations to find out whether all groups have the same mean or not.
**Syntax**
```sql
analysisOfVariance(val, group_no)
```
Aliases: `anova`
**Parameters**
- `val`: value.
- `group_no` : group number that `val` belongs to.
:::note
Groups are enumerated starting from 0 and there should be at least two groups to perform a test.
There should be at least one group with the number of observations greater than one.
:::
**Returned value**
- `(f_statistic, p_value)`. [Tuple](../../data-types/tuple.md)([Float64](../../data-types/float.md), [Float64](../../data-types/float.md)).
**Example**
Query:
```sql
SELECT analysisOfVariance(number, number % 2) FROM numbers(1048575);
```
Result:
```response
┌─analysisOfVariance(number, modulo(number, 2))─┐
│ (0,1) │
└───────────────────────────────────────────────┘
```

View File

@ -37,6 +37,7 @@ Standard aggregate functions:
ClickHouse-specific aggregate functions:
- [analysisOfVariance](/docs/en/sql-reference/aggregate-functions/reference/analysis_of_variance.md)
- [any](/docs/en/sql-reference/aggregate-functions/reference/any_respect_nulls.md)
- [anyHeavy](/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md)
- [anyLast](/docs/en/sql-reference/aggregate-functions/reference/anylast.md)

View File

@ -998,17 +998,170 @@ SELECT version()
Returns the build ID generated by a compiler for the running ClickHouse server binary.
If executed in the context of a distributed table, this function generates a normal column with values relevant to each shard. Otherwise it produces a constant value.
## blockNumber()
## blockNumber
Returns the sequence number of the data block where the row is located.
Returns a monotonically increasing sequence number of the [block](../../development/architecture.md#block) containing the row.
The returned block number is updated on a best-effort basis, i.e. it may not be fully accurate.
## rowNumberInBlock() {#rowNumberInBlock}
**Syntax**
```sql
blockNumber()
```
**Returned value**
- Sequence number of the data block where the row is located. [UInt64](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT blockNumber()
FROM
(
SELECT *
FROM system.numbers
LIMIT 10
) SETTINGS max_block_size = 2
```
Result:
```response
┌─blockNumber()─┐
│ 7 │
│ 7 │
└───────────────┘
┌─blockNumber()─┐
│ 8 │
│ 8 │
└───────────────┘
┌─blockNumber()─┐
│ 9 │
│ 9 │
└───────────────┘
┌─blockNumber()─┐
│ 10 │
│ 10 │
└───────────────┘
┌─blockNumber()─┐
│ 11 │
│ 11 │
└───────────────┘
```
## rowNumberInBlock {#rowNumberInBlock}
For each [block](../../development/architecture.md#block) processed by `rowNumberInBlock`, returns the number of the current row.
The returned number starts at 0 for each block.
**Syntax**
```sql
rowNumberInBlock()
```
**Returned value**
- Ordinal number of the row in the data block starting from 0. [UInt64](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT rowNumberInBlock()
FROM
(
SELECT *
FROM system.numbers_mt
LIMIT 10
) SETTINGS max_block_size = 2
```
Result:
```response
┌─rowNumberInBlock()─┐
│ 0 │
│ 1 │
└────────────────────┘
┌─rowNumberInBlock()─┐
│ 0 │
│ 1 │
└────────────────────┘
┌─rowNumberInBlock()─┐
│ 0 │
│ 1 │
└────────────────────┘
┌─rowNumberInBlock()─┐
│ 0 │
│ 1 │
└────────────────────┘
┌─rowNumberInBlock()─┐
│ 0 │
│ 1 │
└────────────────────┘
```
## rowNumberInAllBlocks
Returns a unique row number for each row processed by `rowNumberInAllBlocks`. The returned numbers start at 0.
**Syntax**
```sql
rowNumberInAllBlocks()
```
**Returned value**
- Ordinal number of the row in the data block starting from 0. [UInt64](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT rowNumberInAllBlocks()
FROM
(
SELECT *
FROM system.numbers_mt
LIMIT 10
)
SETTINGS max_block_size = 2
```
Result:
```response
┌─rowNumberInAllBlocks()─┐
│ 0 │
│ 1 │
└────────────────────────┘
┌─rowNumberInAllBlocks()─┐
│ 4 │
│ 5 │
└────────────────────────┘
┌─rowNumberInAllBlocks()─┐
│ 2 │
│ 3 │
└────────────────────────┘
┌─rowNumberInAllBlocks()─┐
│ 6 │
│ 7 │
└────────────────────────┘
┌─rowNumberInAllBlocks()─┐
│ 8 │
│ 9 │
└────────────────────────┘
```
Returns the ordinal number of the row in the data block. Different data blocks are always recalculated.
## rowNumberInAllBlocks()
Returns the ordinal number of the row in the data block. This function only considers the affected data blocks.
## neighbor

View File

@ -162,7 +162,7 @@ if (ARCH_AMD64 AND OS_LINUX AND NOT OS_ANDROID)
set (HARMFUL_LIB harmful)
endif ()
target_link_libraries (clickhouse PRIVATE clickhouse_common_io string_utils ${HARMFUL_LIB})
target_link_libraries (clickhouse PRIVATE clickhouse_common_io ${HARMFUL_LIB})
target_include_directories (clickhouse PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
if (ENABLE_CLICKHOUSE_KEEPER)

View File

@ -10,7 +10,6 @@ set (CLICKHOUSE_CLIENT_LINK
clickhouse_common_io
clickhouse_functions
clickhouse_parsers
string_utils
)
if (TARGET ch_rust::skim)

View File

@ -15,7 +15,7 @@
#include <Parsers/obfuscateQueries.h>
#include <Parsers/parseQuery.h>
#include <Common/ErrorCodes.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/TerminalSize.h>
#include <Core/BaseSettingsProgramOptions.h>

View File

@ -14,7 +14,7 @@
#include <Common/TerminalSize.h>
#include <Common/Exception.h>
#include <Common/SipHash.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/ShellCommand.h>
#include <Common/re2.h>
#include <base/find_symbols.h>

View File

@ -148,6 +148,7 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/createReadBufferFromFileBase.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ReadBufferFromRemoteFSGather.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/IOUringReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/getIOUringReader.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/WriteBufferFromTemporaryFile.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/WriteBufferWithFinalizeCallback.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/AsynchronousBoundedReadBuffer.cpp

View File

@ -1,4 +1,4 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include "config_tools.h"

View File

@ -1,6 +1,6 @@
#pragma once
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Core/Block.h>
#include <base/range.h>

View File

@ -15,7 +15,7 @@
#include "config_tools.h"
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/IO.h>

View File

@ -19,7 +19,7 @@
#include <Processors/LimitTransform.h>
#include <Common/SipHash.h>
#include <Common/UTF8Helpers.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/HashTable/HashMap.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>

View File

@ -4,7 +4,7 @@
#include <Poco/String.h>
#include <base/find_symbols.h>
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include "validateODBCConnectionString.h"

View File

@ -13,7 +13,6 @@ set (CLICKHOUSE_SERVER_LINK
clickhouse_parsers
clickhouse_storages_system
clickhouse_table_functions
string_utils
${LINK_RESOURCE_LIB}

View File

@ -1,5 +1,5 @@
#include <Access/User.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Core/Protocol.h>
#include <base/insertAtEnd.h>

View File

@ -10,7 +10,7 @@
#include <Dictionaries/IDictionary.h>
#include <Common/Config/ConfigReloader.h>
#include <Common/SSHWrapper.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/quoteString.h>
#include <Common/transformEndianness.h>
#include <Core/Settings.h>

View File

@ -14,7 +14,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int CORRUPTED_DATA;
extern const int INCORRECT_DATA;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int LOGICAL_ERROR;
}
@ -198,7 +198,7 @@ public:
this->data(place).value().read(buf, *serialization_val, arena);
if (unlikely(this->data(place).value().has() != this->data(place).result().has()))
throw Exception(
ErrorCodes::CORRUPTED_DATA,
ErrorCodes::INCORRECT_DATA,
"Invalid state of the aggregate function {}: has_value ({}) != has_result ({})",
getName(),
this->data(place).value().has(),

View File

@ -1,6 +1,6 @@
#include "AggregateFunctionCombinatorFactory.h"
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
namespace DB
{

View File

@ -42,7 +42,7 @@ private:
return;
const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage();
bool is_final_supported = storage && storage->supportsFinal();
bool is_final_supported = storage && !storage->isRemote() && storage->supportsFinal();
if (!is_final_supported)
return;

View File

@ -192,7 +192,7 @@ void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node)
void QueryTreePassManager::runOnlyResolve(QueryTreeNodePtr query_tree_node)
{
// Run only QueryAnalysisPass, GroupingFunctionsResolvePass and AutoFinalOnQueryPass passes.
run(query_tree_node, 2);
run(query_tree_node, 3);
}
void QueryTreePassManager::run(QueryTreeNodePtr query_tree_node, size_t up_to_pass_index)
@ -249,6 +249,7 @@ void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze)
{
manager.addPass(std::make_unique<QueryAnalysisPass>(only_analyze));
manager.addPass(std::make_unique<GroupingFunctionsResolvePass>());
manager.addPass(std::make_unique<AutoFinalOnQueryPass>());
manager.addPass(std::make_unique<RemoveUnusedProjectionColumnsPass>());
manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());
@ -294,7 +295,6 @@ void addQueryTreePasses(QueryTreePassManager & manager, bool only_analyze)
manager.addPass(std::make_unique<LogicalExpressionOptimizerPass>());
manager.addPass(std::make_unique<AutoFinalOnQueryPass>());
manager.addPass(std::make_unique<CrossToInnerJoinPass>());
manager.addPass(std::make_unique<ShardNumColumnToFunctionPass>());

View File

@ -39,6 +39,7 @@ public:
std::optional<UUID> backup_uuid;
bool deduplicate_files = true;
bool allow_s3_native_copy = true;
bool allow_azure_native_copy = true;
bool use_same_s3_credentials_for_base_backup = false;
bool azure_attempt_to_create_container = true;
ReadSettings read_settings;

View File

@ -31,22 +31,28 @@ namespace ErrorCodes
BackupReaderAzureBlobStorage::BackupReaderAzureBlobStorage(
StorageAzureBlob::Configuration configuration_,
bool allow_azure_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_)
: BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderAzureBlobStorage"))
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.container, false, false}
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.getConnectionURL().toString(), false, false}
, configuration(configuration_)
{
auto client_ptr = StorageAzureBlob::createClient(configuration, /* is_read_only */ false);
client_ptr->SetClickhouseOptions(Azure::Storage::Blobs::ClickhouseClientOptions{.IsClientForDisk=true});
object_storage = std::make_unique<AzureObjectStorage>("BackupReaderAzureBlobStorage",
std::move(client_ptr),
StorageAzureBlob::createSettings(context_),
configuration_.container);
object_storage = std::make_unique<AzureObjectStorage>(
"BackupReaderAzureBlobStorage",
std::move(client_ptr),
StorageAzureBlob::createSettings(context_),
configuration.container,
configuration.getConnectionURL().toString());
client = object_storage->getAzureBlobStorageClient();
settings = object_storage->getSettings();
auto settings_copy = *object_storage->getSettings();
settings_copy.use_native_copy = allow_azure_native_copy;
settings = std::make_unique<const AzureObjectStorageSettings>(settings_copy);
}
BackupReaderAzureBlobStorage::~BackupReaderAzureBlobStorage() = default;
@ -76,9 +82,9 @@ void BackupReaderAzureBlobStorage::copyFileToDisk(const String & path_in_backup,
DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
{
auto destination_data_source_description = destination_disk->getDataSourceDescription();
if ((destination_data_source_description.type == DataSourceType::ObjectStorage)
&& (destination_data_source_description.object_storage_type == ObjectStorageType::Azure)
&& (destination_data_source_description.is_encrypted == encrypted_in_backup))
LOG_TRACE(log, "Source description {}, destination description {}", data_source_description.description, destination_data_source_description.description);
if (destination_data_source_description.sameKind(data_source_description)
&& destination_data_source_description.is_encrypted == encrypted_in_backup)
{
LOG_TRACE(log, "Copying {} from AzureBlobStorage to disk {}", path_in_backup, destination_disk->getName());
auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> &) -> size_t
@ -116,12 +122,13 @@ void BackupReaderAzureBlobStorage::copyFileToDisk(const String & path_in_backup,
BackupWriterAzureBlobStorage::BackupWriterAzureBlobStorage(
StorageAzureBlob::Configuration configuration_,
bool allow_azure_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_,
bool attempt_to_create_container)
: BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterAzureBlobStorage"))
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.container, false, false}
, data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.getConnectionURL().toString(), false, false}
, configuration(configuration_)
{
auto client_ptr = StorageAzureBlob::createClient(configuration, /* is_read_only */ false, attempt_to_create_container);
@ -130,9 +137,12 @@ BackupWriterAzureBlobStorage::BackupWriterAzureBlobStorage(
object_storage = std::make_unique<AzureObjectStorage>("BackupWriterAzureBlobStorage",
std::move(client_ptr),
StorageAzureBlob::createSettings(context_),
configuration_.container);
configuration_.container,
configuration_.getConnectionURL().toString());
client = object_storage->getAzureBlobStorageClient();
settings = object_storage->getSettings();
auto settings_copy = *object_storage->getSettings();
settings_copy.use_native_copy = allow_azure_native_copy;
settings = std::make_unique<const AzureObjectStorageSettings>(settings_copy);
}
void BackupWriterAzureBlobStorage::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
@ -140,7 +150,9 @@ void BackupWriterAzureBlobStorage::copyFileFromDisk(const String & path_in_backu
{
/// Use the native copy as a more optimal way to copy a file from AzureBlobStorage to AzureBlobStorage if it's possible.
auto source_data_source_description = src_disk->getDataSourceDescription();
if (source_data_source_description.sameKind(data_source_description) && (source_data_source_description.is_encrypted == copy_encrypted))
LOG_TRACE(log, "Source description {}, destination description {}", source_data_source_description.description, data_source_description.description);
if (source_data_source_description.sameKind(data_source_description)
&& source_data_source_description.is_encrypted == copy_encrypted)
{
/// getBlobPath() can return more than 3 elements if the file is stored as multiple objects in AzureBlobStorage container.
/// In this case we can't use the native copy.

View File

@ -16,7 +16,12 @@ namespace DB
class BackupReaderAzureBlobStorage : public BackupReaderDefault
{
public:
BackupReaderAzureBlobStorage(StorageAzureBlob::Configuration configuration_, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
BackupReaderAzureBlobStorage(
StorageAzureBlob::Configuration configuration_,
bool allow_azure_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_);
~BackupReaderAzureBlobStorage() override;
bool fileExists(const String & file_name) override;
@ -37,7 +42,13 @@ private:
class BackupWriterAzureBlobStorage : public BackupWriterDefault
{
public:
BackupWriterAzureBlobStorage(StorageAzureBlob::Configuration configuration_, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_, bool attempt_to_create_container);
BackupWriterAzureBlobStorage(
StorageAzureBlob::Configuration configuration_,
bool allow_azure_native_copy,
const ReadSettings & read_settings_,
const WriteSettings & write_settings_,
const ContextPtr & context_,
bool attempt_to_create_container);
~BackupWriterAzureBlobStorage() override;
bool fileExists(const String & file_name) override;

View File

@ -4,7 +4,7 @@
#include <Backups/BackupIO.h>
#include <Backups/IBackupEntry.h>
#include <Common/ProfileEvents.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <base/hex.h>
#include <Common/logger_useful.h>
#include <Common/quoteString.h>

View File

@ -27,6 +27,7 @@ namespace ErrorCodes
M(Bool, decrypt_files_from_encrypted_disks) \
M(Bool, deduplicate_files) \
M(Bool, allow_s3_native_copy) \
M(Bool, allow_azure_native_copy) \
M(Bool, use_same_s3_credentials_for_base_backup) \
M(Bool, azure_attempt_to_create_container) \
M(Bool, read_from_filesystem_cache) \

View File

@ -44,6 +44,9 @@ struct BackupSettings
/// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
bool allow_s3_native_copy = true;
/// Whether native copy is allowed (optimization for cloud storages, that sometimes could have bugs)
bool allow_azure_native_copy = true;
/// Whether base backup to S3 should inherit credentials from the BACKUP query.
bool use_same_s3_credentials_for_base_backup = false;

View File

@ -598,6 +598,7 @@ void BackupsWorker::doBackup(
backup_create_params.backup_uuid = backup_settings.backup_uuid;
backup_create_params.deduplicate_files = backup_settings.deduplicate_files;
backup_create_params.allow_s3_native_copy = backup_settings.allow_s3_native_copy;
backup_create_params.allow_azure_native_copy = backup_settings.allow_azure_native_copy;
backup_create_params.use_same_s3_credentials_for_base_backup = backup_settings.use_same_s3_credentials_for_base_backup;
backup_create_params.azure_attempt_to_create_container = backup_settings.azure_attempt_to_create_container;
backup_create_params.read_settings = getReadSettingsForBackup(context, backup_settings);

View File

@ -135,10 +135,12 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
if (params.open_mode == IBackup::OpenMode::READ)
{
auto reader = std::make_shared<BackupReaderAzureBlobStorage>(configuration,
params.read_settings,
params.write_settings,
params.context);
auto reader = std::make_shared<BackupReaderAzureBlobStorage>(
configuration,
params.allow_azure_native_copy,
params.read_settings,
params.write_settings,
params.context);
return std::make_unique<BackupImpl>(
params.backup_info,
@ -150,11 +152,13 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
}
else
{
auto writer = std::make_shared<BackupWriterAzureBlobStorage>(configuration,
params.read_settings,
params.write_settings,
params.context,
params.azure_attempt_to_create_container);
auto writer = std::make_shared<BackupWriterAzureBlobStorage>(
configuration,
params.allow_azure_native_copy,
params.read_settings,
params.write_settings,
params.context,
params.azure_attempt_to_create_container);
return std::make_unique<BackupImpl>(
params.backup_info,

View File

@ -6,7 +6,7 @@
#include <Common/ErrorHandlers.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/logger_useful.h>
#include <Formats/registerFormats.h>
#include <IO/ReadHelpers.h>

View File

@ -307,7 +307,6 @@ endif()
target_link_libraries (clickhouse_common_io
PRIVATE
string_utils
widechar_width
${LINK_LIBRARIES_ONLY_ON_X86_64}
PUBLIC
@ -320,7 +319,6 @@ target_link_libraries (clickhouse_common_io
target_link_libraries (clickhouse_compression
PUBLIC
string_utils
pcg_random
clickhouse_parsers
PRIVATE
@ -410,7 +408,6 @@ dbms_target_link_libraries (
clickhouse_parsers
ch_contrib::lz4
Poco::JSON
string_utils
PUBLIC
boost::system
clickhouse_common_io
@ -645,7 +642,6 @@ if (ENABLE_TESTS)
dbms
clickhouse_common_config
clickhouse_common_zookeeper
string_utils
hilite_comparator)
if (TARGET ch_contrib::simdjson)

View File

@ -18,7 +18,7 @@
#include <Common/typeid_cast.h>
#include <Common/TerminalSize.h>
#include <Common/clearPasswordFromCommandLine.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/filesystemHelpers.h>
#include <Common/NetException.h>
#include <Columns/ColumnString.h>
@ -710,8 +710,8 @@ void ClientBase::adjustSettings()
settings.input_format_values_allow_data_after_semicolon.changed = false;
}
/// Do not limit pretty format output in case of --pager specified.
if (!pager.empty())
/// Do not limit pretty format output if --pager is specified or stdout is not a tty.
if (!pager.empty() || !stdout_is_a_tty)
{
if (!global_context->getSettingsRef().output_format_pretty_max_rows.changed)
{

View File

@ -20,7 +20,7 @@
#include <Common/NetException.h>
#include <Common/CurrentMetrics.h>
#include <Common/DNSResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/OpenSSLHelpers.h>
#include <Common/randomSeed.h>
#include <Common/logger_useful.h>

View File

@ -22,6 +22,7 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int ILLEGAL_COLUMN;
extern const int NOT_IMPLEMENTED;
extern const int BAD_ARGUMENTS;
}
@ -116,6 +117,38 @@ void ColumnNullable::get(size_t n, Field & res) const
getNestedColumn().get(n, res);
}
Float64 ColumnNullable::getFloat64(size_t n) const
{
if (isNullAt(n))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of {} at {} is NULL while calling method getFloat64", getName(), n);
else
return getNestedColumn().getFloat64(n);
}
Float32 ColumnNullable::getFloat32(size_t n) const
{
if (isNullAt(n))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of {} at {} is NULL while calling method getFloat32", getName(), n);
else
return getNestedColumn().getFloat32(n);
}
UInt64 ColumnNullable::getUInt(size_t n) const
{
if (isNullAt(n))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of {} at {} is NULL while calling method getUInt", getName(), n);
else
return getNestedColumn().getUInt(n);
}
Int64 ColumnNullable::getInt(size_t n) const
{
if (isNullAt(n))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The value of {} at {} is NULL while calling method getInt", getName(), n);
else
return getNestedColumn().getInt(n);
}
void ColumnNullable::insertData(const char * pos, size_t length)
{
if (pos == nullptr)

View File

@ -57,6 +57,10 @@ public:
void get(size_t n, Field & res) const override;
bool getBool(size_t n) const override { return isNullAt(n) ? false : nested_column->getBool(n); }
UInt64 get64(size_t n) const override { return nested_column->get64(n); }
Float64 getFloat64(size_t n) const override;
Float32 getFloat32(size_t n) const override;
UInt64 getUInt(size_t n) const override;
Int64 getInt(size_t n) const override;
bool isDefaultAt(size_t n) const override { return isNullAt(n); }
StringRef getDataAt(size_t) const override;
/// Will insert null value if pos=nullptr
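A hypothetical sketch of what the new overrides buy callers: numeric accessors on a `Nullable` column now fail loudly on NULL rows instead of silently delegating to the nested column, so call sites are expected to check for NULL first (the helper below is illustrative, not from this diff):

```cpp
#include <Columns/ColumnNullable.h>

// Illustrative helper: read a Float64 from a Nullable(Float64) column,
// substituting a fallback for NULL rows. Calling getFloat64() directly on a
// NULL row now throws BAD_ARGUMENTS.
double readOrDefault(const DB::ColumnNullable & col, size_t row, double fallback)
{
    return col.isNullAt(row) ? fallback : col.getFloat64(row);
}
```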

View File

@ -1,5 +1,3 @@
add_subdirectory(StringUtils)
if (ENABLE_BENCHMARKS)
add_subdirectory(benchmarks)
endif()

View File

@ -13,8 +13,6 @@ target_link_libraries(clickhouse_common_config
clickhouse_common_zookeeper
common
Poco::XML
PRIVATE
string_utils
)
add_library(clickhouse_common_config_no_zookeeper_log ${SRCS})
@ -23,8 +21,6 @@ target_link_libraries(clickhouse_common_config_no_zookeeper_log
clickhouse_common_zookeeper_no_log
common
Poco::XML
PRIVATE
string_utils
)
if (TARGET ch_contrib::yaml_cpp)

View File

@ -18,7 +18,7 @@
#include <Poco/NumberParser.h>
#include <Common/ZooKeeper/ZooKeeperNodeCache.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/Exception.h>
#include <Common/XMLUtils.h>
#include <Common/logger_useful.h>

View File

@ -0,0 +1,39 @@
#pragma once
#include <atomic>
#include <utility>
namespace DB
{
template <typename T>
struct CopyableAtomic
{
CopyableAtomic(const CopyableAtomic & other)
: value(other.value.load())
{}
explicit CopyableAtomic(T && value_)
: value(std::forward<T>(value_))
{}
CopyableAtomic & operator=(const CopyableAtomic & other)
{
value = other.value.load();
return *this;
}
CopyableAtomic & operator=(bool value_)
{
value = value_;
return *this;
}
explicit operator T() const { return value; }
const T & getValue() const { return value; }
std::atomic<T> value;
};
}
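A minimal usage sketch for the new `CopyableAtomic` (assuming the header path from this commit; `TaskState` and `main` are illustrative). `std::atomic<T>` is neither copyable nor movable, so a struct holding one cannot live in a `std::vector` or be copied; the wrapper restores copyability by copying the loaded value rather than the atomic object itself:

```cpp
#include <vector>
#include <Common/CopyableAtomic.h> // added by this commit

struct TaskState // hypothetical
{
    DB::CopyableAtomic<bool> cancelled{false};
};

int main()
{
    std::vector<TaskState> tasks(4); // OK: TaskState is copyable
    tasks[1].cancelled = true;       // operator=(bool) stores atomically
    TaskState snapshot = tasks[1];   // copy constructor copies the loaded value
    return static_cast<bool>(snapshot.cancelled) ? 0 : 1; // explicit operator T()
}
```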

View File

@ -288,8 +288,10 @@
M(HTTPConnectionsTotal, "Total count of all sessions: stored in the pool and actively used right now for http hosts") \
\
M(AddressesActive, "Total count of addresses which are used for creation connections with connection pools") \
M(AddressesBanned, "Total count of addresses which are banned as faulty for creation connections with connection pools") \
\
M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \
M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \
#ifdef APPLY_FOR_EXTERNAL_METRICS
#define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M)

View File

@ -12,7 +12,7 @@
#include <Common/Arena.h>
#include <Common/HashTable/HashMap.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>

View File

@ -1,5 +1,5 @@
#include <Common/HTTPHeaderFilter.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/Exception.h>
#include <Common/re2.h>

View File

@ -360,6 +360,7 @@ The server successfully detected this situation and will download merged part fr
M(QueryProfilerSignalOverruns, "Number of times we drop processing of a query profiler signal due to overrun plus the number of signals that OS has not delivered due to overrun.") \
M(QueryProfilerConcurrencyOverruns, "Number of times we drop processing of a query profiler signal due to too many concurrent query profilers in other threads, which may indicate overload.") \
M(QueryProfilerRuns, "Number of times QueryProfiler had been run.") \
M(QueryProfilerErrors, "Invalid memory accesses during asynchronous stack unwinding.") \
\
M(CreatedLogEntryForMerge, "Successfully created log entry to merge parts in ReplicatedMergeTree.") \
M(NotCreatedLogEntryForMerge, "Log entry to merge parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \

View File

@ -4,7 +4,7 @@
#include <Common/Exception.h>
#include <Common/ProxyListConfigurationResolver.h>
#include <Common/RemoteProxyConfigurationResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/logger_useful.h>
namespace DB

View File

@ -1,6 +1,6 @@
#include <Common/ProxyListConfigurationResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/logger_useful.h>
#include <Poco/URI.h>

View File

@ -12,7 +12,6 @@
#include <Common/logger_useful.h>
#include <Common/thread_local_rng.h>
#include <random>
namespace CurrentMetrics
{
@ -25,6 +24,7 @@ namespace ProfileEvents
extern const Event QueryProfilerSignalOverruns;
extern const Event QueryProfilerConcurrencyOverruns;
extern const Event QueryProfilerRuns;
extern const Event QueryProfilerErrors;
}
namespace DB
@ -84,11 +84,29 @@ namespace
#endif
const auto signal_context = *reinterpret_cast<ucontext_t *>(context);
const StackTrace stack_trace(signal_context);
std::optional<StackTrace> stack_trace;
#if defined(SANITIZER)
constexpr bool sanitizer = true;
#else
constexpr bool sanitizer = false;
#endif
asynchronous_stack_unwinding = true;
if (sanitizer || 0 == sigsetjmp(asynchronous_stack_unwinding_signal_jump_buffer, 1))
{
stack_trace.emplace(signal_context);
}
else
{
ProfileEvents::incrementNoTrace(ProfileEvents::QueryProfilerErrors);
}
asynchronous_stack_unwinding = false;
if (stack_trace)
TraceSender::send(trace_type, *stack_trace, {});
TraceSender::send(trace_type, stack_trace, {});
ProfileEvents::incrementNoTrace(ProfileEvents::QueryProfilerRuns);
errno = saved_errno;
}

View File

@ -1,7 +1,7 @@
#include <Poco/URI.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/RemoteHostFilter.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/Exception.h>
#include <Common/re2.h>
#include <IO/WriteHelpers.h>

View File

@ -5,7 +5,7 @@
#include <Common/Scheduler/ISchedulerQueue.h>
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <map>
#include <tuple>

View File

@ -10,7 +10,7 @@
#include <Common/re2.h>
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/ProfileEvents.h>
#ifndef NDEBUG

View File

@ -560,3 +560,7 @@ void StackTrace::dropCache()
std::lock_guard lock{stacktrace_cache_mutex};
cacheInstance().clear();
}
thread_local bool asynchronous_stack_unwinding = false;
thread_local sigjmp_buf asynchronous_stack_unwinding_signal_jump_buffer;

View File

@ -8,6 +8,7 @@
#include <optional>
#include <functional>
#include <csignal>
#include <csetjmp>
#ifdef OS_DARWIN
// ucontext is not available without _XOPEN_SOURCE
@ -87,3 +88,8 @@ protected:
};
std::string signalToErrorMessage(int sig, const siginfo_t & info, const ucontext_t & context);
/// Special handling for errors during asynchronous stack unwinding,
/// which is used in the Query Profiler.
extern thread_local bool asynchronous_stack_unwinding;
extern thread_local sigjmp_buf asynchronous_stack_unwinding_signal_jump_buffer;
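These two thread-locals only make sense together with a fault handler that jumps back out of a failed unwind. A sketch of that counterpart (a hypothetical simplification; the real handler lives in the common signal-handling code):

```cpp
#include <csetjmp>
#include <csignal>
#include <Common/StackTrace.h> // for the two thread-local declarations above

// Hypothetical simplification of the fault-handler side: if SIGSEGV/SIGBUS
// arrives while the query profiler is unwinding a stack, jump back to the
// sigsetjmp() in the profiler's signal handler instead of treating the fault
// as a fatal crash.
void onFaultSignal(int /*sig*/, siginfo_t * /*info*/, void * /*context*/)
{
    if (asynchronous_stack_unwinding)
        siglongjmp(asynchronous_stack_unwinding_signal_jump_buffer, 1);
    // ... otherwise fall through to the normal fatal-signal handling ...
}
```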

View File

@ -2,7 +2,7 @@
#include <base/getPageSize.h>
#include <Common/Exception.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/UTF8Helpers.h>
#include <Core/Defines.h>
#include <Poco/Unicode.h>

View File

@ -0,0 +1,87 @@
#include <Common/StringUtils.h>
#include <Common/TargetSpecific.h>
#if USE_MULTITARGET_CODE
#include <immintrin.h>
#endif
namespace impl
{
bool startsWith(const std::string & s, const char * prefix, size_t prefix_size)
{
return s.size() >= prefix_size && 0 == memcmp(s.data(), prefix, prefix_size);
}
bool endsWith(const std::string & s, const char * suffix, size_t suffix_size)
{
return s.size() >= suffix_size && 0 == memcmp(s.data() + s.size() - suffix_size, suffix, suffix_size);
}
}
DECLARE_DEFAULT_CODE(
bool isAllASCII(const UInt8 * data, size_t size)
{
UInt8 mask = 0;
for (size_t i = 0; i < size; ++i)
mask |= data[i];
return !(mask & 0x80);
})
DECLARE_SSE42_SPECIFIC_CODE(
/// Copy from https://github.com/lemire/fastvalidate-utf-8/blob/master/include/simdasciicheck.h
bool isAllASCII(const UInt8 * data, size_t size)
{
__m128i masks = _mm_setzero_si128();
size_t i = 0;
for (; i + 16 <= size; i += 16)
{
__m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data + i));
masks = _mm_or_si128(masks, bytes);
}
int mask = _mm_movemask_epi8(masks);
UInt8 tail_mask = 0;
for (; i < size; i++)
tail_mask |= data[i];
mask |= (tail_mask & 0x80);
return !mask;
})
DECLARE_AVX2_SPECIFIC_CODE(
bool isAllASCII(const UInt8 * data, size_t size)
{
__m256i masks = _mm256_setzero_si256();
size_t i = 0;
for (; i + 32 <= size; i += 32)
{
__m256i bytes = _mm256_loadu_si256(reinterpret_cast<const __m256i *>(data + i));
masks = _mm256_or_si256(masks, bytes);
}
int mask = _mm256_movemask_epi8(masks);
UInt8 tail_mask = 0;
for (; i < size; i++)
tail_mask |= data[i];
mask |= (tail_mask & 0x80);
return !mask;
})
bool isAllASCII(const UInt8 * data, size_t size)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(DB::TargetArch::AVX2))
return TargetSpecific::AVX2::isAllASCII(data, size);
if (isArchSupported(DB::TargetArch::SSE42))
return TargetSpecific::SSE42::isAllASCII(data, size);
#endif
return TargetSpecific::Default::isAllASCII(data, size);
}
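A small caller sketch for the runtime-dispatched `isAllASCII` (the wrapper function is illustrative): each call dispatches to the widest SIMD variant the CPU supports, and callers use the result to skip UTF-8-aware processing for pure-ASCII input:

```cpp
#include <string_view>
#include <base/types.h>         // UInt8
#include <Common/StringUtils.h> // declares isAllASCII after this commit

// Illustrative: take a cheap byte-wise path when no byte has the high bit set.
bool needsUTF8Handling(std::string_view s)
{
    return !isAllASCII(reinterpret_cast<const UInt8 *>(s.data()), s.size());
}
```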

View File

@ -7,8 +7,10 @@
#include <cstdint>
#include <type_traits>
#include <base/types.h>
namespace detail
namespace impl
{
bool startsWith(const std::string & s, const char * prefix, size_t prefix_size);
bool endsWith(const std::string & s, const char * suffix, size_t suffix_size);
@ -17,12 +19,12 @@ namespace detail
inline bool startsWith(const std::string & s, const std::string & prefix)
{
return detail::startsWith(s, prefix.data(), prefix.size());
return impl::startsWith(s, prefix.data(), prefix.size());
}
inline bool endsWith(const std::string & s, const std::string & suffix)
{
return detail::endsWith(s, suffix.data(), suffix.size());
return impl::endsWith(s, suffix.data(), suffix.size());
}
@ -30,12 +32,12 @@ inline bool endsWith(const std::string & s, const std::string & suffix)
/// string that is known at compile time.
inline bool startsWith(const std::string & s, const char * prefix)
{
return detail::startsWith(s, prefix, strlen(prefix));
return impl::startsWith(s, prefix, strlen(prefix));
}
inline bool endsWith(const std::string & s, const char * suffix)
{
return detail::endsWith(s, suffix, strlen(suffix));
return impl::endsWith(s, suffix, strlen(suffix));
}
/// Given an integer, return the adequate suffix for
@ -315,6 +317,9 @@ inline void trim(std::string & str, char c = ' ')
trimLeft(str, c);
}
/// If all characters in the string are ASCII, return true
bool isAllASCII(const UInt8 * data, size_t size);
constexpr bool containsGlobs(const std::string & str)
{
return str.find_first_of("*?{") != std::string::npos;

View File

@ -1,8 +0,0 @@
# These files are located in separate library, because they are used by separate products
# in places when no dependency on whole "dbms" library is possible.
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
add_headers_and_sources(clickhouse_common_stringutils .)
add_library(string_utils ${clickhouse_common_stringutils_headers} ${clickhouse_common_stringutils_sources})

View File

@ -1,17 +0,0 @@
#include "StringUtils.h"
namespace detail
{
bool startsWith(const std::string & s, const char * prefix, size_t prefix_size)
{
return s.size() >= prefix_size && 0 == memcmp(s.data(), prefix, prefix_size);
}
bool endsWith(const std::string & s, const char * suffix, size_t suffix_size)
{
return s.size() >= suffix_size && 0 == memcmp(s.data() + s.size() - suffix_size, suffix, suffix_size);
}
}

View File

@ -1,5 +1,5 @@
#include <Common/TLDListsHolder.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/logger_useful.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>

View File

@ -498,8 +498,10 @@ void ThreadPoolImpl<Thread>::worker(typename std::list<Thread>::iterator thread_
template class ThreadPoolImpl<std::thread>;
template class ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>;
template class ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, false>>;
template class ThreadFromGlobalPoolImpl<true, true>;
template class ThreadFromGlobalPoolImpl<true, false>;
template class ThreadFromGlobalPoolImpl<false, false>;
std::unique_ptr<GlobalThreadPool> GlobalThreadPool::the_instance;
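
A minimal sketch of the explicit-instantiation technique behind the lines above (PoolImpl is an illustrative stand-in, not the real ThreadPoolImpl): the template definition stays in one translation unit and only the listed specializations are emitted there, which keeps the header light and build times down.

/// Normally the class would be declared in a header and defined in a .cpp file;
/// everything is inlined into one file here so the sketch compiles standalone.
template <typename Thread>
class PoolImpl
{
public:
    void schedule();
};

template <typename Thread>
void PoolImpl<Thread>::schedule() {}

/// Only these specializations are emitted in this translation unit,
/// mirroring the `template class ThreadPoolImpl<...>` lines above.
template class PoolImpl<int>;
template class PoolImpl<long>;

int main()
{
    PoolImpl<int> pool;
    pool.schedule();
}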

View File

@ -242,6 +242,11 @@ public:
if (unlikely(global_profiler_real_time_period != 0 || global_profiler_cpu_time_period != 0))
thread_status.initGlobalProfiler(global_profiler_real_time_period, global_profiler_cpu_time_period);
}
else
{
UNUSED(global_profiler_real_time_period);
UNUSED(global_profiler_cpu_time_period);
}
std::apply(function, arguments);
},
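
A tiny sketch of the UNUSED pattern above (the macro body and the profiler hook are assumptions for illustration; ClickHouse provides its own UNUSED): a parameter consumed only in some build configurations is referenced in the other branch so unused-parameter warnings stay clean.

#define UNUSED(x) (void)(x)

void maybeStartProfiler(unsigned period)
{
#if defined(PROFILER_ENABLED)
    startProfiler(period); /// hypothetical hook, only compiled when the profiler is enabled
#else
    UNUSED(period);
#endif
}

int main()
{
    maybeStartProfiler(42);
}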

View File

@ -23,6 +23,9 @@ thread_local ThreadStatus constinit * current_thread = nullptr;
namespace
{
/// For aarch64, 16K is not enough (likely due to the large number of registers)
constexpr size_t UNWIND_MINSIGSTKSZ = 32 << 10;
/// Alternative stack for signal handling.
///
/// This stack should not be located in the TLS (thread local storage), since:
@ -50,7 +53,7 @@ struct ThreadStack
free(data);
}
static size_t getSize() { return std::max<size_t>(16 << 10, MINSIGSTKSZ); }
static size_t getSize() { return std::max<size_t>(UNWIND_MINSIGSTKSZ, MINSIGSTKSZ); }
void * getData() const { return data; }
private:
@ -124,26 +127,6 @@ ThreadStatus::ThreadStatus(bool check_current_thread_on_destruction_)
#endif
}
void ThreadStatus::initGlobalProfiler([[maybe_unused]] UInt64 global_profiler_real_time_period, [[maybe_unused]] UInt64 global_profiler_cpu_time_period)
{
#if !defined(SANITIZER) && !defined(CLICKHOUSE_KEEPER_STANDALONE_BUILD) && !defined(__APPLE__)
try
{
if (global_profiler_real_time_period > 0)
query_profiler_real = std::make_unique<QueryProfilerReal>(thread_id,
/* period= */ static_cast<UInt32>(global_profiler_real_time_period));
if (global_profiler_cpu_time_period > 0)
query_profiler_cpu = std::make_unique<QueryProfilerCPU>(thread_id,
/* period= */ static_cast<UInt32>(global_profiler_cpu_time_period));
}
catch (...)
{
tryLogCurrentException("ThreadStatus", "Cannot initialize GlobalProfiler");
}
#endif
}
ThreadGroupPtr ThreadStatus::getThreadGroup() const
{
chassert(current_thread == this);
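
A minimal POSIX sketch of the alternative-signal-stack mechanism that ThreadStack supports (the size mirrors getSize() above; everything else is an illustrative assumption). A handler installed with SA_ONSTACK would then run on this stack even if the thread's normal stack is exhausted:

#include <algorithm>
#include <csignal>
#include <cstdlib>
#include <iostream>

int main()
{
    /// At least 32 KiB, or more if the platform's MINSIGSTKSZ demands it,
    /// matching ThreadStack::getSize() above.
    const size_t stack_size = std::max<size_t>(32 << 10, MINSIGSTKSZ);

    stack_t ss = {};
    ss.ss_sp = std::malloc(stack_size);
    ss.ss_size = stack_size;
    ss.ss_flags = 0;
    if (ss.ss_sp == nullptr || sigaltstack(&ss, nullptr) != 0)
        return 1;
    std::cout << "installed a " << stack_size << "-byte alternative signal stack\n";

    /// Disable the alternative stack before freeing its memory.
    stack_t disable = {};
    disable.ss_flags = SS_DISABLE;
    sigaltstack(&disable, nullptr);
    std::free(ss.ss_sp);
}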

View File

@ -1,14 +1,9 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/TargetSpecific.h>
#include <Common/UTF8Helpers.h>
#include <Common/StringUtils.h>
#include <widechar_width.h>
#include <bit>
#if USE_MULTITARGET_CODE
#include <immintrin.h>
#endif
namespace DB
{
namespace UTF8
@ -208,7 +203,6 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
}
size_t computeWidth(const UInt8 * data, size_t size, size_t prefix) noexcept
{
return computeWidthImpl<Width>(data, size, prefix, 0);
@ -219,71 +213,5 @@ size_t computeBytesBeforeWidth(const UInt8 * data, size_t size, size_t prefix, s
return computeWidthImpl<BytesBeforeLimit>(data, size, prefix, limit);
}
DECLARE_DEFAULT_CODE(
bool isAllASCII(const UInt8 * data, size_t size)
{
UInt8 mask = 0;
for (size_t i = 0; i < size; ++i)
mask |= data[i];
return !(mask & 0x80);
})
DECLARE_SSE42_SPECIFIC_CODE(
/// Copy from https://github.com/lemire/fastvalidate-utf-8/blob/master/include/simdasciicheck.h
bool isAllASCII(const UInt8 * data, size_t size)
{
__m128i masks = _mm_setzero_si128();
size_t i = 0;
for (; i + 16 <= size; i += 16)
{
__m128i bytes = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data + i));
masks = _mm_or_si128(masks, bytes);
}
int mask = _mm_movemask_epi8(masks);
UInt8 tail_mask = 0;
for (; i < size; i++)
tail_mask |= data[i];
mask |= (tail_mask & 0x80);
return !mask;
})
DECLARE_AVX2_SPECIFIC_CODE(
bool isAllASCII(const UInt8 * data, size_t size)
{
__m256i masks = _mm256_setzero_si256();
size_t i = 0;
for (; i + 32 <= size; i += 32)
{
__m256i bytes = _mm256_loadu_si256(reinterpret_cast<const __m256i *>(data + i));
masks = _mm256_or_si256(masks, bytes);
}
int mask = _mm256_movemask_epi8(masks);
UInt8 tail_mask = 0;
for (; i < size; i++)
tail_mask |= data[i];
mask |= (tail_mask & 0x80);
return !mask;
})
bool isAllASCII(const UInt8* data, size_t size)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
return TargetSpecific::AVX2::isAllASCII(data, size);
if (isArchSupported(TargetArch::SSE42))
return TargetSpecific::SSE42::isAllASCII(data, size);
#endif
return TargetSpecific::Default::isAllASCII(data, size);
}
}
}

View File

@ -136,10 +136,6 @@ size_t computeWidth(const UInt8 * data, size_t size, size_t prefix = 0) noexcept
*/
size_t computeBytesBeforeWidth(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept;
/// If all the characters in the string are ASCII, return true.
bool isAllASCII(const UInt8* data, size_t size);
}
}

View File

@ -7,7 +7,7 @@
#include <base/types.h>
#include <Poco/Unicode.h>
#include <Common/StringSearcher.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/UTF8Helpers.h>
#include <base/unaligned.h>

View File

@ -12,8 +12,6 @@ target_link_libraries (clickhouse_common_zookeeper
clickhouse_common_io
clickhouse_compression
common
PRIVATE
string_utils
)
# for examples -- no logging (to avoid extra dependencies)
@ -23,8 +21,6 @@ target_link_libraries (clickhouse_common_zookeeper_no_log
clickhouse_common_io
clickhouse_compression
common
PRIVATE
string_utils
)
if (ENABLE_EXAMPLES)
add_subdirectory(examples)

View File

@ -1,7 +1,7 @@
#include "Common/ZooKeeper/IKeeper.h"
#include <Common/ZooKeeper/TestKeeper.h>
#include <Common/setThreadName.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <base/types.h>
#include <functional>

View File

@ -19,7 +19,7 @@
#include <Core/ServerUUID.h>
#include "Common/ZooKeeper/IKeeper.h"
#include <Common/DNSResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/Exception.h>
#include <Poco/Net/NetException.h>

View File

@ -4,7 +4,7 @@
#include <base/getFQDNOrHostName.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/isLocalAddress.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Poco/String.h>
namespace DB

View File

@ -1,8 +1,15 @@
clickhouse_add_executable(zkutil_test_commands zkutil_test_commands.cpp)
target_link_libraries(zkutil_test_commands PRIVATE clickhouse_common_zookeeper_no_log)
target_link_libraries(zkutil_test_commands PRIVATE
clickhouse_common_zookeeper_no_log
dbms)
clickhouse_add_executable(zkutil_test_commands_new_lib zkutil_test_commands_new_lib.cpp)
target_link_libraries(zkutil_test_commands_new_lib PRIVATE clickhouse_common_zookeeper_no_log clickhouse_compression string_utils)
target_link_libraries(zkutil_test_commands_new_lib PRIVATE
clickhouse_common_zookeeper_no_log
clickhouse_compression
dbms)
clickhouse_add_executable(zkutil_test_async zkutil_test_async.cpp)
target_link_libraries(zkutil_test_async PRIVATE clickhouse_common_zookeeper_no_log)
target_link_libraries(zkutil_test_async PRIVATE
clickhouse_common_zookeeper_no_log
dbms)

View File

@ -1,7 +1,7 @@
#include <Poco/ConsoleChannel.h>
#include <Poco/Logger.h>
#include <Poco/Event.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/ZooKeeper/ZooKeeperImpl.h>
#include <Common/typeid_cast.h>
#include <iostream>

View File

@ -1,5 +1,5 @@
#include <base/hex.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/escapeForFileName.h>
namespace DB

View File

@ -87,3 +87,6 @@ if (ENABLE_SSL)
clickhouse_add_executable (encrypt_decrypt encrypt_decrypt.cpp)
target_link_libraries (encrypt_decrypt PRIVATE dbms)
endif()
clickhouse_add_executable (check_pointer_valid check_pointer_valid.cpp)
target_link_libraries (check_pointer_valid PRIVATE clickhouse_common_io)

View File

@ -0,0 +1,53 @@
#include <csetjmp>
#include <csignal>
#include <cstring>
#include <iostream>
/// This example demonstrates how to check whether a pointer to memory is readable using a signal handler.
thread_local bool checking_pointer = false;
thread_local jmp_buf signal_jump_buffer;
void signalHandler(int sig, siginfo_t *, void *)
{
if (checking_pointer && sig == SIGSEGV)
siglongjmp(signal_jump_buffer, 1);
}
bool isPointerValid(const void * ptr)
{
checking_pointer = true;
if (0 == sigsetjmp(signal_jump_buffer, 1))
{
char res;
memcpy(&res, ptr, 1);
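/// The empty asm statement consumes `res` and acts as a compiler barrier,
/// so the one-byte read above cannot be optimized away.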
__asm__ __volatile__("" :: "r"(res) : "memory");
checking_pointer = false;
return true;
}
else
{
checking_pointer = false;
return false;
}
}
int main(int, char **)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_sigaction = signalHandler;
sa.sa_flags = SA_SIGINFO;
if (sigemptyset(&sa.sa_mask)
|| sigaddset(&sa.sa_mask, SIGSEGV)
|| sigaction(SIGSEGV, &sa, nullptr))
return 1;
std::cerr << isPointerValid(reinterpret_cast<const void *>(0x123456789)) << "\n";
std::cerr << isPointerValid(&sa) << "\n";
return 0;
}

View File

@ -20,6 +20,9 @@
#include <Common/CurrentMetrics.h>
using ThreadFromGlobalPoolSimple = ThreadFromGlobalPoolImpl</* propagate_opentelemetry_context= */ false, /* global_trace_collector_allowed= */ false>;
using SimpleThreadPool = ThreadPoolImpl<ThreadFromGlobalPoolSimple>;
using Key = UInt64;
using Value = UInt64;
@ -255,7 +258,7 @@ int main(int argc, char ** argv)
std::cerr << std::fixed << std::setprecision(2);
ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
SimpleThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
Source data(n);

View File

@ -20,6 +20,9 @@
#include <Common/CurrentMetrics.h>
using ThreadFromGlobalPoolSimple = ThreadFromGlobalPoolImpl</* propagate_opentelemetry_context= */ false, /* global_trace_collector_allowed= */ false>;
using SimpleThreadPool = ThreadPoolImpl<ThreadFromGlobalPoolSimple>;
using Key = UInt64;
using Value = UInt64;
using Source = std::vector<Key>;
@ -38,7 +41,7 @@ struct AggregateIndependent
template <typename Creator, typename Updater>
static void NO_INLINE execute(const Source & data, size_t num_threads, std::vector<std::unique_ptr<Map>> & results,
Creator && creator, Updater && updater,
ThreadPool & pool)
SimpleThreadPool & pool)
{
results.reserve(num_threads);
for (size_t i = 0; i < num_threads; ++i)
@ -76,7 +79,7 @@ struct AggregateIndependentWithSequentialKeysOptimization
template <typename Creator, typename Updater>
static void NO_INLINE execute(const Source & data, size_t num_threads, std::vector<std::unique_ptr<Map>> & results,
Creator && creator, Updater && updater,
ThreadPool & pool)
SimpleThreadPool & pool)
{
results.reserve(num_threads);
for (size_t i = 0; i < num_threads; ++i)
@ -124,7 +127,7 @@ struct MergeSequential
template <typename Merger>
static void NO_INLINE execute(Map ** source_maps, size_t num_maps, Map *& result_map,
Merger && merger,
ThreadPool &)
SimpleThreadPool &)
{
for (size_t i = 1; i < num_maps; ++i)
{
@ -144,7 +147,7 @@ struct MergeSequentialTransposed /// In practice not better than usual.
template <typename Merger>
static void NO_INLINE execute(Map ** source_maps, size_t num_maps, Map *& result_map,
Merger && merger,
ThreadPool &)
SimpleThreadPool &)
{
std::vector<typename Map::iterator> iterators(num_maps);
for (size_t i = 1; i < num_maps; ++i)
@ -177,7 +180,7 @@ struct MergeParallelForTwoLevelTable
template <typename Merger>
static void NO_INLINE execute(Map ** source_maps, size_t num_maps, Map *& result_map,
Merger && merger,
ThreadPool & pool)
SimpleThreadPool & pool)
{
for (size_t bucket = 0; bucket < Map::NUM_BUCKETS; ++bucket)
pool.scheduleOrThrowOnError([&, bucket, num_maps]
@ -202,7 +205,7 @@ struct Work
template <typename Creator, typename Updater, typename Merger>
static void NO_INLINE execute(const Source & data, size_t num_threads,
Creator && creator, Updater && updater, Merger && merger,
ThreadPool & pool)
SimpleThreadPool & pool)
{
std::vector<std::unique_ptr<Map>> intermediate_results;
@ -282,7 +285,7 @@ int main(int argc, char ** argv)
std::cerr << std::fixed << std::setprecision(2);
ThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
SimpleThreadPool pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, num_threads);
Source data(n);

View File

@ -14,6 +14,8 @@ int value = 0;
static void f() { ++value; }
static void * g(void *) { f(); return {}; }
using ThreadFromGlobalPoolSimple = ThreadFromGlobalPoolImpl</* propagate_opentelemetry_context= */ false, /* global_trace_collector_allowed= */ false>;
using SimpleThreadPool = ThreadPoolImpl<ThreadFromGlobalPoolSimple>;
namespace CurrentMetrics
{
@ -72,7 +74,7 @@ int main(int argc, char ** argv)
test(n, "Create and destroy ThreadPool each iteration", []
{
ThreadPool tp(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, 1);
SimpleThreadPool tp(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, 1);
tp.scheduleOrThrowOnError(f);
tp.wait();
});
@ -93,7 +95,7 @@ int main(int argc, char ** argv)
});
{
ThreadPool tp(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, 1);
SimpleThreadPool tp(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, 1);
test(n, "Schedule job for Threadpool each iteration", [&tp]
{
@ -103,7 +105,7 @@ int main(int argc, char ** argv)
}
{
ThreadPool tp(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, 128);
SimpleThreadPool tp(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, CurrentMetrics::LocalThreadScheduled, 128);
test(n, "Schedule job for Threadpool with 128 threads each iteration", [&tp]
{

View File

@ -3,7 +3,7 @@
#include <base/types.h>
#include <Common/Exception.h>
#include <Common/PODArray.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
namespace DB
{

View File

@ -1,6 +1,6 @@
#include <Common/formatIPv6.h>
#include <base/hex.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <base/range.h>
#include <array>

View File

@ -7,7 +7,7 @@
#include <base/hex.h>
#include <base/types.h>
#include <base/unaligned.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
constexpr size_t IPV4_BINARY_LENGTH = 4;
constexpr size_t IPV6_BINARY_LENGTH = 16;

View File

@ -3,7 +3,7 @@
#if defined(OS_LINUX)
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <base/hex.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>

View File

@ -1,7 +1,7 @@
#include <Common/getMultipleKeysFromConfig.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
namespace DB
{

View File

@ -3,8 +3,8 @@
#include <Common/Exception.h>
#include <Common/RWLock.h>
#include <Common/Stopwatch.h>
#include <Core/Types.h>
#include <base/types.h>
#include <Common/ThreadPool.h>
#include <base/phdr_cache.h>
#include <random>
#include <pcg_random.hpp>
@ -541,7 +541,7 @@ TEST(Common, RWLockWriteLockTimeoutDuringWriteWithWaitingRead)
events.add(wc ? "Locked wb" : "Failed to lock wb");
EXPECT_EQ(wc, nullptr);
});
std::thread rc_thread([&] ()
{
std::this_thread::sleep_for(std::chrono::duration<int, std::milli>(200));

View File

@ -8,7 +8,7 @@
#include <Poco/Path.h>
#include <Common/getCurrentProcessFDCount.h>
#include <Common/getMaxFileDescriptorCount.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/config_version.h>
#include "Coordination/KeeperFeatureFlags.h"
#include <Coordination/Keeper4LWInfo.h>

View File

@ -11,7 +11,7 @@
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <Common/SipHash.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/StringUtils.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <base/hex.h>
#include <base/scope_guard.h>

View File

@ -5,6 +5,7 @@
#include <Common/ThreadPool.h>
#include <Common/callOnce.h>
#include <Disks/IO/IOUringReader.h>
#include <Disks/IO/getIOUringReader.h>
#include <Core/ServerSettings.h>
@ -303,10 +304,10 @@ IAsynchronousReader & Context::getThreadPoolReader(FilesystemReaderType type) co
}
#if USE_LIBURING
IOUringReader & Context::getIOURingReader() const
IOUringReader & Context::getIOUringReader() const
{
callOnce(shared->io_uring_reader_initialized, [&] {
shared->io_uring_reader = std::make_unique<IOUringReader>(512);
shared->io_uring_reader = createIOUringReader();
});
return *shared->io_uring_reader;
@ -457,4 +458,9 @@ const ServerSettings & Context::getServerSettings() const
return shared->server_settings;
}
bool Context::hasTraceCollector() const
{
return false;
}
}
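
A compact sketch of the lazy, thread-safe initialization pattern used by getIOUringReader() above (std::once_flag and the Reader type stand in for ClickHouse's callOnce helper and IOUringReader; the 512 mirrors the queue size passed above):

#include <iostream>
#include <memory>
#include <mutex>

struct Reader
{
    explicit Reader(int entries) { std::cout << "initialized with " << entries << " entries\n"; }
};

struct ContextShared
{
    std::once_flag reader_initialized;
    std::unique_ptr<Reader> reader;
};

Reader & getReader(ContextShared & shared)
{
    /// The lambda runs exactly once, even under concurrent callers;
    /// later calls fall through and return the already-built instance.
    std::call_once(shared.reader_initialized, [&]
    {
        shared.reader = std::make_unique<Reader>(512);
    });
    return *shared.reader;
}

int main()
{
    ContextShared shared;
    getReader(shared);
    getReader(shared); /// no second initialization
}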

View File

@ -137,7 +137,7 @@ public:
IAsynchronousReader & getThreadPoolReader(FilesystemReaderType type) const;
#if USE_LIBURING
IOUringReader & getIOURingReader() const;
IOUringReader & getIOUringReader() const;
#endif
std::shared_ptr<AsyncReadCounters> getAsyncReadCounters() const;
ThreadPool & getThreadPoolWriter() const;
@ -163,6 +163,8 @@ public:
zkutil::ZooKeeperPtr getZooKeeper() const;
const ServerSettings & getServerSettings() const;
bool hasTraceCollector() const;
};
}

View File

@ -1,4 +1,5 @@
#include <Common/CurrentThread.h>
#include <Common/ThreadStatus.h>
namespace DB
{
@ -11,4 +12,8 @@ void CurrentThread::attachToGroup(const ThreadGroupPtr &)
{
}
void ThreadStatus::initGlobalProfiler(UInt64 /*global_profiler_real_time_period*/, UInt64 /*global_profiler_cpu_time_period*/)
{
}
}
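
A minimal sketch of the no-op stub pattern above (the type is illustrative, not the real ClickHouse interface): a stripped-down build satisfies the linker with empty definitions, so shared code can call the profiler hooks unconditionally:

#include <cstdint>

struct ThreadStatusSketch
{
    void initGlobalProfiler(uint64_t real_period, uint64_t cpu_period);
};

/// The full server build would start QueryProfiler instances here;
/// the standalone build links an empty body instead.
void ThreadStatusSketch::initGlobalProfiler(uint64_t /*real_period*/, uint64_t /*cpu_period*/)
{
}

int main()
{
    ThreadStatusSketch{}.initGlobalProfiler(0, 0);
}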

Some files were not shown because too many files have changed in this diff.