Merge https://github.com/ClickHouse/ClickHouse into group_array_intersect_merge_fix

This commit is contained in:
yariks5s 2024-07-16 17:37:16 +00:00
commit e82443aa5e
566 changed files with 3247 additions and 1087 deletions

View File

@ -13,3 +13,6 @@
# dbms/ → src/
# (though it is unlikely that you will see it in blame)
06446b4f08a142d6f1bc30664c47ded88ab51782
# Applied Black formatter for Python code
e6f5a3f98b21ba99cf274a9833797889e020a2b3

View File

@ -62,7 +62,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:

View File

@ -94,7 +94,7 @@ jobs:
echo "Generate Security"
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Generate ChangeLog
- name: Create ChangeLog PR
if: ${{ inputs.type == 'patch' && ! inputs.dry-run }}
uses: peter-evans/create-pull-request@v6
with:

View File

@ -58,7 +58,7 @@ jobs:
# BuildDockers:
# needs: [RunConfig]
# if: ${{ !failure() && !cancelled() }}
# uses: ./.github/workflows/reusable_docker.yml
# uses: ./.github/workflows/docker_test_images.yml
# with:
# data: ${{ needs.RunConfig.outputs.data }}
# StyleCheck:

View File

@ -51,7 +51,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:

View File

@ -40,7 +40,7 @@ jobs:
} >> "$GITHUB_OUTPUT"
BuildDockers:
needs: [RunConfig]
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: "${{ needs.RunConfig.outputs.data }}"
set_latest: true

View File

@ -72,7 +72,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
StyleCheck:

View File

@ -57,7 +57,7 @@ jobs:
BuildDockers:
needs: [RunConfig]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_docker.yml
uses: ./.github/workflows/docker_test_images.yml
with:
data: ${{ needs.RunConfig.outputs.data }}
CompatibilityCheckX86:

View File

@ -102,6 +102,8 @@ jobs:
--job-name '${{inputs.test_name}}' \
--run \
--run-command '''${{inputs.run_command}}'''
# shellcheck disable=SC2319
echo "JOB_EXIT_CODE=$?" >> "$GITHUB_ENV"
- name: Post run
if: ${{ !cancelled() }}
run: |

View File

@ -3,8 +3,9 @@
#include <base/defines.h>
#include <fstream>
#include <sstream>
#include <string>
namespace fs = std::filesystem;
bool cgroupsV2Enabled()
{
@ -13,11 +14,11 @@ bool cgroupsV2Enabled()
{
/// This file exists iff the host has cgroups v2 enabled.
auto controllers_file = default_cgroups_mount / "cgroup.controllers";
if (!std::filesystem::exists(controllers_file))
if (!fs::exists(controllers_file))
return false;
return true;
}
catch (const std::filesystem::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
catch (const fs::filesystem_error &) /// all "underlying OS API errors", typically: permission denied
{
return false; /// not logging the exception as most callers fall back to cgroups v1
}
@ -33,8 +34,9 @@ bool cgroupsV2MemoryControllerEnabled()
/// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available
/// for the current + child cgroups. The set of available controllers can be restricted from level to level using file
/// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file.
std::string cgroup = cgroupV2OfProcess();
auto cgroup_dir = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
fs::path cgroup_dir = cgroupV2PathOfProcess();
if (cgroup_dir.empty())
return false;
std::ifstream controllers_file(cgroup_dir / "cgroup.controllers");
if (!controllers_file.is_open())
return false;
@ -46,7 +48,7 @@ bool cgroupsV2MemoryControllerEnabled()
#endif
}
std::string cgroupV2OfProcess()
fs::path cgroupV2PathOfProcess()
{
#if defined(OS_LINUX)
chassert(cgroupsV2Enabled());
@ -54,17 +56,18 @@ std::string cgroupV2OfProcess()
/// A simpler way to get the membership is:
std::ifstream cgroup_name_file("/proc/self/cgroup");
if (!cgroup_name_file.is_open())
return "";
return {};
/// With cgroups v2, there will be a *single* line with prefix "0::/"
/// (see https://docs.kernel.org/admin-guide/cgroup-v2.html)
std::string cgroup;
std::getline(cgroup_name_file, cgroup);
static const std::string v2_prefix = "0::/";
if (!cgroup.starts_with(v2_prefix))
return "";
return {};
cgroup = cgroup.substr(v2_prefix.length());
return cgroup;
/// Note: the 'root' cgroup can have an empty cgroup name; this is valid
return default_cgroups_mount / cgroup;
#else
return "";
return {};
#endif
}

View File

@ -1,7 +1,6 @@
#pragma once
#include <filesystem>
#include <string>
#if defined(OS_LINUX)
/// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers).
@ -16,7 +15,7 @@ bool cgroupsV2Enabled();
/// Assumes that cgroupsV2Enabled() is enabled.
bool cgroupsV2MemoryControllerEnabled();
/// Which cgroup does the process belong to?
/// Returns an empty string if the cgroup cannot be determined.
/// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup.
/// Returns an empty path if the cgroup cannot be determined.
/// Assumes that cgroupsV2Enabled() is enabled.
std::string cgroupV2OfProcess();
std::filesystem::path cgroupV2PathOfProcess();

View File

@ -23,8 +23,9 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
if (!cgroupsV2MemoryControllerEnabled())
return {};
std::string cgroup = cgroupV2OfProcess();
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
std::filesystem::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Open the bottom-most nested memory limit setting file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.

View File

@ -42,9 +42,19 @@ endif ()
# But use 2 parallel jobs, since:
# - this is what llvm does
# - and I've verified that lld-11 does not use all available CPU time (at peak) while linking one binary
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLEL_LINK_JOBS GREATER 2)
message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
set (PARALLEL_LINK_JOBS 2)
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO)
if (ARCH_AARCH64)
# aarch64 builds often start to fail with OOMs (the reason is not yet clear); for now, limit the concurrency
message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 1.")
set (PARALLEL_LINK_JOBS 1)
if (LINKER_NAME MATCHES "lld")
math(EXPR LTO_JOBS ${NUMBER_OF_LOGICAL_CORES}/4)
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -Wl,--thinlto-jobs=${LTO_JOBS}")
endif()
elseif (PARALLEL_LINK_JOBS GREATER 2)
message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
set (PARALLEL_LINK_JOBS 2)
endif ()
endif()
message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB RAM, 'OFF' means the native core count).")

View File

@ -34,11 +34,7 @@ if (OS_LINUX)
# avoid spurious latencies and additional work associated with
# MADV_DONTNEED. See
# https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
else()
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
endif()
set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
else()
set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
endif()

View File

@ -4,3 +4,14 @@ It allows to integrate JEMalloc into CMake project.
- Added JEMALLOC_CONFIG_MALLOC_CONF substitution
- Add musl support (USE_MUSL)
- Also note that the darwin build requires JEMALLOC_PREFIX, while others do not
- JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE should be disabled
CLOCK_MONOTONIC_COARSE can go backwards after clock_adjtime(ADJ_FREQUENCY)
Let's disable it for now; this means that CLOCK_MONOTONIC will be used instead,
which should not be a problem, since:
- jemalloc does not call clock_gettime() that frequently
- the difference is small (CLOCK_MONOTONIC is about 20ns vs. CLOCK_MONOTONIC_COARSE at about 4ns)
This can be done with the following command:
gg JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE | cut -d: -f1 | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'
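For reference, here is a portable sketch of the same substitution using standard tools, assuming `gg` above is a recursive-grep alias (it is not defined in this note):

```bash
# Hypothetical equivalent of the `gg`-based one-liner above:
# find the generated jemalloc headers that define the macro and comment it out in place.
grep -rl --include='*.h' 'JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE' . \
  | xargs sed -i 's@#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE@/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */@'
```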

View File

@ -96,7 +96,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

View File

@ -98,7 +98,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

View File

@ -98,7 +98,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

View File

@ -98,7 +98,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

View File

@ -96,7 +96,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

View File

@ -98,7 +98,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

View File

@ -99,7 +99,7 @@
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.

contrib/openssl vendored

@ -1 +1 @@
Subproject commit ee2bb8513b28bf86b35404dd17a0e29305ca9e08
Subproject commit 66deddc1e53cda8706604a019777259372d1bd62

View File

@ -33,13 +33,9 @@ RUN apt-get update \
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt
COPY * /
ENV FUZZER_ARGS="-max_total_time=60"
SHELL ["/bin/bash", "-c"]
CMD set -o pipefail \
&& timeout -s 9 1h /run_libfuzzer.py 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
# docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer

View File

@ -23,7 +23,10 @@ source /utils.lib
/usr/share/clickhouse-test/config/install.sh
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --silent --inMemoryPersistence &
./setup_minio.sh stateful
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@ -254,6 +257,8 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi
# Kill minio admin client to stop collecting logs
kill $MC_ADMIN_PID
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst ||:

View File

@ -12,7 +12,7 @@ MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
@ -54,6 +54,9 @@ source /utils.lib
/usr/share/clickhouse-test/config/install.sh
./setup_minio.sh stateless
./mc admin trace clickminio > /test_output/minio.log &
MC_ADMIN_PID=$!
./setup_hdfs_minicluster.sh
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
@ -383,6 +386,9 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
fi
# Kill minio admin client to stop collecting logs
kill $MC_ADMIN_PID
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &

View File

@ -10,7 +10,7 @@ cd hadoop-3.3.1
export JAVA_HOME=/usr
mkdir -p target/test/data
chown clickhouse ./target/test/data
sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 &
sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/hdfs_minicluster.log 2>&1 &
while ! nc -z localhost 12222; do
sleep 1

View File

@ -226,15 +226,59 @@ Other IDEs you can use are [Sublime Text](https://www.sublimetext.com/), [Visual
## Writing Code {#writing-code}
The description of ClickHouse architecture can be found here: https://clickhouse.com/docs/en/development/architecture/
Below you can find some quick links which may be useful when writing code for ClickHouse:
The Code Style Guide: https://clickhouse.com/docs/en/development/style/
- [ClickHouse architecture description](https://clickhouse.com/docs/en/development/architecture/).
- [The code style guide](https://clickhouse.com/docs/en/development/style/).
- [Adding third-party libraries](https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries)
- [Writing tests](https://clickhouse.com/docs/en/development/tests/)
- [List of open issues](https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest)
Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib/#adding-third-party-libraries
## Writing Documentation {#writing-documentation}
Writing tests: https://clickhouse.com/docs/en/development/tests/
As part of every pull request that adds a new feature, it is necessary to write documentation for it. If you'd like to preview your documentation changes, the instructions for building the documentation page locally are available in the README.md file [here](https://github.com/ClickHouse/clickhouse-docs). When adding a new function to ClickHouse, you can use the template below as a guide:
List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest
```markdown
# newFunctionName
A short description of the function goes here. It should describe briefly what it does and a typical usage case.
**Syntax**
\```sql
newFunctionName(arg1, arg2[, arg3])
\```
**Arguments**
- `arg1` — Description of the argument. [DataType](../data-types/float.md)
- `arg2` — Description of the argument. [DataType](../data-types/float.md)
- `arg3` — Description of optional argument (optional). [DataType](../data-types/float.md)
**Implementation Details**
A description of implementation details if relevant.
**Returned value**
- Returns {insert what the function returns here}. [DataType](../data-types/float.md)
**Example**
Query:
\```sql
SELECT 'write your example query here';
\```
Response:
\```response
┌───────────────────────────────────┐
│ the result of the query │
└───────────────────────────────────┘
\```
```
## Test Data {#test-data}

View File

@ -15,7 +15,7 @@ You have four options for getting up and running with ClickHouse:
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, built by, maintained, and supported by the creators of ClickHouse
- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, ARM, or PowerPC64LE CPU architecture
- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A and up), or PowerPC64LE CPU architecture
- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub
## ClickHouse Cloud

View File

@ -6,7 +6,20 @@ sidebar_label: MySQL Interface
# MySQL Interface
ClickHouse supports the MySQL wire protocol. This allow tools that are MySQL-compatible to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).
ClickHouse supports the MySQL wire protocol. This allows certain clients that do not have native ClickHouse connectors to leverage the MySQL protocol instead, and it has been validated with the following BI tools:
- [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)
- [Tableau Online](../integrations/tableau-online)
- [QuickSight](../integrations/quicksight)
If you are trying other untested clients or integrations, keep in mind that the following limitations may apply:
- The SSL implementation might not be fully compatible; there could be potential [TLS SNI](https://www.cloudflare.com/learning/ssl/what-is-sni/) issues.
- A particular tool might require dialect features (e.g., MySQL-specific functions or settings) that are not implemented yet.
If there is a native driver available (e.g., [DBeaver](../integrations/dbeaver)), it is always preferable to use it instead of the MySQL interface. Additionally, while most MySQL language clients should work fine, the MySQL interface is not guaranteed to be a drop-in replacement for a codebase with existing MySQL queries.
If your use case involves a particular tool that does not have a native ClickHouse driver, you would like to use it via the MySQL interface, and you have found certain incompatibilities, please [create an issue](https://github.com/ClickHouse/ClickHouse/issues) in the ClickHouse repository.
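For a quick connectivity check, the standard `mysql` command-line client can be pointed at ClickHouse's MySQL-compatibility endpoint. This is a minimal sketch, assuming the interface is enabled on the default `mysql_port` (9004); the host, user, and password are placeholders for your own setup:

```bash
# Hypothetical connectivity check over the MySQL wire protocol.
# Assumes the MySQL interface listens on the default port 9004.
mysql --protocol tcp --host 127.0.0.1 --port 9004 --user default --password
```

If the handshake succeeds, regular SQL statements can be issued from the MySQL client session.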
## Enabling the MySQL Interface On ClickHouse Cloud

View File

@ -1030,7 +1030,7 @@ A table with no primary key represents the extreme case of a single equivalence
The fewer and the larger the equivalence classes are, the higher the degree of freedom when re-shuffling rows.
The heuristics applied to find the best row order within each equivalence class is suggested by D. Lemir, O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and based on sorting the rows within each equivalence class by ascending cardinality of the non-primary key columns.
The heuristic applied to find the best row order within each equivalence class was suggested by D. Lemire and O. Kaser in [Reordering columns for smaller indexes](https://doi.org/10.1016/j.ins.2011.02.002) and is based on sorting the rows within each equivalence class by ascending cardinality of the non-primary-key columns.
It performs three steps:
1. Find all equivalence classes based on the row values in primary key columns.
2. For each equivalence class, calculate (usually estimate) the cardinalities of the non-primary-key columns.

View File

@ -96,3 +96,22 @@ Result:
│ 1 │ [2] │ [[4,1]] │
└───────────┴───────────┴───────────┘
```
## Reading nested subcolumns from Array
If the nested type `T` inside `Array` has subcolumns (for example, if it's a [named tuple](./tuple.md)), you can read its subcolumns from an `Array(T)` type with the same subcolumn names. The type of such a subcolumn will be `Array` of the type of the original subcolumn.
**Example**
```sql
CREATE TABLE t_arr (arr Array(Tuple(field1 UInt32, field2 String))) ENGINE = MergeTree ORDER BY tuple();
INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]);
SELECT arr.field1, toTypeName(arr.field1), arr.field2, toTypeName(arr.field2) from t_arr;
```
```text
┌─arr.field1─┬─toTypeName(arr.field1)─┬─arr.field2────────────────┬─toTypeName(arr.field2)─┐
│ [1,2] │ Array(UInt32) │ ['Hello','World'] │ Array(String) │
│ [3,4,5] │ Array(UInt32) │ ['This','is','subcolumn'] │ Array(String) │
└────────────┴────────────────────────┴───────────────────────────┴────────────────────────┘
```

View File

@ -2698,6 +2698,204 @@ Like function `YYYYMMDDhhmmssToDate()` but produces a [DateTime64](../data-types
Accepts an additional, optional `precision` parameter after the `timezone` parameter.
## changeYear
Changes the year component of a date or date time.
**Syntax**
``` sql
changeYear(date_or_datetime, value)
```
**Arguments**
- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the year. [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- The same type as `date_or_datetime`.
**Example**
``` sql
SELECT changeYear(toDate('1999-01-01'), 2000), changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000);
```
Result:
```
┌─changeYear(toDate('1999-01-01'), 2000)─┬─changeYear(toDateTime64('1999-01-01 00:00:00.000', 3), 2000)─┐
│ 2000-01-01 │ 2000-01-01 00:00:00.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```
## changeMonth
Changes the month component of a date or date time.
**Syntax**
``` sql
changeMonth(date_or_datetime, value)
```
**Arguments**
- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the month. [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Returns a value of the same type as `date_or_datetime`.
**Example**
``` sql
SELECT changeMonth(toDate('1999-01-01'), 2), changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2);
```
Result:
```
┌─changeMonth(toDate('1999-01-01'), 2)─┬─changeMonth(toDateTime64('1999-01-01 00:00:00.000', 3), 2)─┐
│ 1999-02-01 │ 1999-02-01 00:00:00.000 │
└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```
## changeDay
Changes the day component of a date or date time.
**Syntax**
``` sql
changeDay(date_or_datetime, value)
```
**Arguments**
- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the day. [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Returns a value of the same type as `date_or_datetime`.
**Example**
``` sql
SELECT changeDay(toDate('1999-01-01'), 5), changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5);
```
Result:
```
┌─changeDay(toDate('1999-01-01'), 5)─┬─changeDay(toDateTime64('1999-01-01 00:00:00.000', 3), 5)─┐
│ 1999-01-05 │ 1999-01-05 00:00:00.000 │
└────────────────────────────────────┴──────────────────────────────────────────────────────────┘
```
## changeHour
Changes the hour component of a date or date time.
**Syntax**
``` sql
changeHour(date_or_datetime, value)
```
**Arguments**
- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the hour. [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), the return type is [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), the return type is [DateTime64](../data-types/datetime64.md).
**Example**
``` sql
SELECT changeHour(toDate('1999-01-01'), 14), changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14);
```
Result:
```
┌─changeHour(toDate('1999-01-01'), 14)─┬─changeHour(toDateTime64('1999-01-01 00:00:00.000', 3), 14)─┐
│ 1999-01-01 14:00:00 │ 1999-01-01 14:00:00.000 │
└──────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```
## changeMinute
Changes the minute component of a date or date time.
**Syntax**
``` sql
changeMinute(date_or_datetime, value)
```
**Arguments**
- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the minute. [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), the return type is [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), the return type is [DateTime64](../data-types/datetime64.md).
**Example**
``` sql
SELECT changeMinute(toDate('1999-01-01'), 15), changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
```
Result:
```
┌─changeMinute(toDate('1999-01-01'), 15)─┬─changeMinute(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
│ 1999-01-01 00:15:00 │ 1999-01-01 00:15:00.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```
## changeSecond
Changes the second component of a date or date time.
**Syntax**
``` sql
changeSecond(date_or_datetime, value)
```
**Arguments**
- `date_or_datetime` - a [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)
- `value` - a new value of the second. [Integer](../../sql-reference/data-types/int-uint.md).
**Returned value**
- Returns a value of the same type as `date_or_datetime`. If the input is a [Date](../data-types/date.md), the return type is [DateTime](../data-types/datetime.md). If the input is a [Date32](../data-types/date32.md), the return type is [DateTime64](../data-types/datetime64.md).
**Example**
``` sql
SELECT changeSecond(toDate('1999-01-01'), 15), changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15);
```
Result:
```
┌─changeSecond(toDate('1999-01-01'), 15)─┬─changeSecond(toDateTime64('1999-01-01 00:00:00.000', 3), 15)─┐
│ 1999-01-01 00:00:15 │ 1999-01-01 00:00:15.000 │
└────────────────────────────────────────┴──────────────────────────────────────────────────────────────┘
```
## addYears
Adds a specified number of years to a date, a date with time or a string-encoded date / date with time.
@ -2714,6 +2912,7 @@ addYears(date, num)
- `num`: Number of years to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2751,6 +2950,7 @@ addQuarters(date, num)
- `num`: Number of quarters to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2788,6 +2988,7 @@ addMonths(date, num)
- `num`: Number of months to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2825,6 +3026,7 @@ addWeeks(date, num)
- `num`: Number of weeks to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2862,6 +3064,7 @@ addDays(date, num)
- `num`: Number of days to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2899,6 +3102,7 @@ addHours(date, num)
- `num`: Number of hours to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2936,6 +3140,7 @@ addMinutes(date, num)
- `num`: Number of minutes to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -2973,6 +3178,7 @@ addSeconds(date, num)
- `num`: Number of seconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` plus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3010,6 +3216,7 @@ addMilliseconds(date_time, num)
- `num`: Number of milliseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` plus `num` milliseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3045,6 +3252,7 @@ addMicroseconds(date_time, num)
- `num`: Number of microseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` plus `num` microseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3080,6 +3288,7 @@ addNanoseconds(date_time, num)
- `num`: Number of nanoseconds to add. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` plus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3115,6 +3324,7 @@ addInterval(interval_1, interval_2)
- `interval_2`: Second interval to be added. [interval](../data-types/special-data-types/interval.md).
**Returned value**
- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
:::note
@ -3161,6 +3371,7 @@ addTupleOfIntervals(interval_1, interval_2)
- `intervals`: Tuple of intervals to add to `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
**Returned value**
- Returns `date` with added `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
**Example**
@ -3195,6 +3406,7 @@ subtractYears(date, num)
- `num`: Number of years to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3232,6 +3444,7 @@ subtractQuarters(date, num)
- `num`: Number of quarters to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3269,6 +3482,7 @@ subtractMonths(date, num)
- `num`: Number of months to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3306,6 +3520,7 @@ subtractWeeks(date, num)
- `num`: Number of weeks to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3343,6 +3558,7 @@ subtractDays(date, num)
- `num`: Number of days to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3380,6 +3596,7 @@ subtractHours(date, num)
- `num`: Number of hours to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3417,6 +3634,7 @@ subtractMinutes(date, num)
- `num`: Number of minutes to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3454,6 +3672,7 @@ subtractSeconds(date, num)
- `num`: Number of seconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@ -3491,6 +3710,7 @@ subtractMilliseconds(date_time, num)
- `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` milliseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3526,6 +3746,7 @@ subtractMicroseconds(date_time, num)
- `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` microseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3561,6 +3782,7 @@ subtractNanoseconds(date_time, num)
- `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` nanoseconds. [DateTime64](../data-types/datetime64.md).
**Example**
@ -3596,6 +3818,7 @@ subtractInterval(interval_1, interval_2)
- `interval_2`: Second interval to be negated. [interval](../data-types/special-data-types/interval.md).
**Returned value**
- Returns a tuple of intervals. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
:::note
@ -3642,6 +3865,7 @@ subtractTupleOfIntervals(interval_1, interval_2)
- `intervals`: Tuple of intervals to subtract from `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
**Returned value**
- Returns `date` with subtracted `intervals`. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**

View File

@ -2984,6 +2984,66 @@ Result:
└─────────┘
```
## partitionID
Computes the [partition ID](../../engines/table-engines/mergetree-family/custom-partitioning-key.md).
:::note
This function is slow and should not be called for a large number of rows.
:::
**Syntax**
```sql
partitionID(x[, y, ...]);
```
**Arguments**
- `x` — Column for which to return the partition ID.
- `y, ...` — Remaining N columns for which to return the partition ID (optional).
**Returned Value**
- Partition ID that the row would belong to. [String](../data-types/string.md).
**Example**
Query:
```sql
DROP TABLE IF EXISTS tab;
CREATE TABLE tab
(
i int,
j int
)
ENGINE = MergeTree
PARTITION BY i
ORDER BY tuple();
INSERT INTO tab VALUES (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6);
SELECT i, j, partitionID(i), _partition_id FROM tab ORDER BY i, j;
```
Result:
```response
┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
│ 1 │ 1 │ 1 │ 1 │
│ 1 │ 2 │ 1 │ 1 │
│ 1 │ 3 │ 1 │ 1 │
└───┴───┴────────────────┴───────────────┘
┌─i─┬─j─┬─partitionID(i)─┬─_partition_id─┐
│ 2 │ 4 │ 2 │ 2 │
│ 2 │ 5 │ 2 │ 2 │
│ 2 │ 6 │ 2 │ 2 │
└───┴───┴────────────────┴───────────────┘
```
## shardNum
Returns the index of a shard which processes a part of the data in a distributed query. Indices start from `1`.

View File

@ -24,9 +24,8 @@
#include <Common/TerminalSize.h>
#include <Common/config_version.h>
#include <Common/formatReadable.h>
#include <Core/Settings.h>
#include <Columns/ColumnString.h>
#include <Poco/Util/Application.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
@ -49,6 +48,8 @@
#include <Formats/registerFormats.h>
#include <Formats/FormatFactory.h>
#include <Poco/Util/Application.h>
namespace fs = std::filesystem;
using namespace std::literals;

View File

@ -1,5 +1,7 @@
#include "LibraryBridge.h"
#include <iostream>
int mainEntryClickHouseLibraryBridge(int argc, char ** argv)
{
DB::LibraryBridge app;

View File

@ -6,6 +6,7 @@
#include "ExternalDictionaryLibraryHandlerFactory.h"
#include <Formats/FormatFactory.h>
#include <IO/Operators.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <Common/BridgeProtocolVersion.h>

View File

@ -376,6 +376,7 @@ void LocalServer::setupUsers()
" </networks>"
" <profile>default</profile>"
" <quota>default</quota>"
" <named_collection_control>1</named_collection_control>"
" </default>"
" </users>"
" <quotas>"

View File

@ -3,11 +3,12 @@
#include <Client/ClientBase.h>
#include <Client/LocalConnection.h>
#include <Common/StatusFile.h>
#include <Common/InterruptListener.h>
#include <Loggers/Loggers.h>
#include <Core/ServerSettings.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <Loggers/Loggers.h>
#include <Common/InterruptListener.h>
#include <Common/StatusFile.h>
#include <filesystem>
#include <memory>

View File

@ -2,6 +2,7 @@
#if USE_ODBC
#include <Core/Settings.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>

View File

@ -11,6 +11,7 @@
#include <Poco/Net/HTTPServerResponse.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <Core/Settings.h>
#include "getIdentifierQuote.h"
#include "validateODBCConnectionString.h"
#include "ODBCPooledConnectionFactory.h"

View File

@ -9,6 +9,7 @@
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Core/Settings.h>
#include <IO/ReadBufferFromIStream.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>

View File

@ -1,5 +1,7 @@
#include "ODBCBridge.h"
#include <iostream>
int mainEntryClickHouseODBCBridge(int argc, char ** argv)
{
DB::ODBCBridge app;

View File

@ -2,8 +2,10 @@
#if USE_ODBC
#include <Core/Settings.h>
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/Operators.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Poco/Net/HTTPServerRequest.h>

View File

@ -68,6 +68,7 @@
#include <Interpreters/registerInterpreters.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControl.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/System/attachSystemTables.h>
#include <Storages/System/attachInformationSchemaTables.h>

View File

@ -516,6 +516,9 @@
/// Save query in history only if it is different.
let previous_query = '';
/// Start of the last query
let last_query_start = 0;
const current_url = new URL(window.location);
const opened_locally = location.protocol == 'file:';
@ -567,6 +570,8 @@
'&password=' + encodeURIComponent(password)
}
last_query_start = performance.now();
const xhr = new XMLHttpRequest;
xhr.open('POST', url, true);
@ -579,7 +584,8 @@
if (posted_request_num != request_num) {
return;
} else if (this.readyState === XMLHttpRequest.DONE) {
renderResponse(this.status, this.response);
const elapsed_msec = performance.now() - last_query_start;
renderResponse(this.status, this.response, elapsed_msec);
/// The query is saved in browser history (in state JSON object)
/// as well as in URL fragment identifier.
@ -587,7 +593,8 @@
const state = {
query: query,
status: this.status,
response: this.response.length > 100000 ? null : this.response /// Lower than the browser's limit.
response: this.response.length > 100000 ? null : this.response, /// Lower than the browser's limit.
elapsed_msec: elapsed_msec,
};
const title = "ClickHouse Query: " + query;
@ -617,7 +624,7 @@
xhr.send(query);
}
function renderResponse(status, response) {
function renderResponse(status, response, elapsed_msec) {
document.getElementById('hourglass').style.display = 'none';
if (status === 200) {
@ -632,6 +639,7 @@
renderChart(json);
} else {
renderUnparsedResult(response);
stats.innerText = `Elapsed (client-side): ${(elapsed_msec / 1000).toFixed(3)} sec.`;
}
document.getElementById('check-mark').style.display = 'inline';
} else {
@ -651,7 +659,7 @@
clear();
return;
}
renderResponse(event.state.status, event.state.response);
renderResponse(event.state.status, event.state.response, event.state.elapsed_msec);
};
if (window.location.hash) {

View File

@ -21,7 +21,6 @@
#include <Backups/BackupEntriesCollector.h>
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <base/defines.h>
#include <IO/Operators.h>
#include <Common/re2.h>

View File

@ -6,6 +6,7 @@
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Common/CurrentThread.h>
#include <Core/Settings.h>
static constexpr size_t MAX_AGGREGATE_FUNCTION_NAME_LENGTH = 1000;

View File

@ -3,7 +3,6 @@
#include <base/scope_guard.h>
#include <Common/Exception.h>
#include <Core/Settings.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/QueryNode.h>

View File

@ -10,6 +10,8 @@
#include <Analyzer/TableNode.h>
#include <Analyzer/UnionNode.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -11,6 +11,8 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -11,6 +11,8 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/LambdaNode.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -8,6 +8,8 @@
#include <Analyzer/TableExpressionModifiers.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -22,6 +22,8 @@
#include <Analyzer/HashUtils.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -10,6 +10,8 @@
#include <Analyzer/Utils.h>
#include <Analyzer/HashUtils.h>
#include <Core/Settings.h>
#include <Storages/IStorage.h>
#include <Functions/FunctionFactory.h>

View File

@ -11,6 +11,8 @@
#include <Analyzer/QueryNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -9,13 +9,14 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/Utils.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Functions/logical.h>
#include <Common/logger_useful.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
namespace DB

View File

@ -21,6 +21,8 @@
#include <Analyzer/Utils.h>
#include <Analyzer/JoinNode.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -1,6 +1,7 @@
#include <Analyzer/Passes/FuseFunctionsPass.h>
#include <Common/iota.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeTuple.h>

View File

@ -1,6 +1,7 @@
#include <Analyzer/Passes/GroupingFunctionsResolvePass.h>
#include <Core/ColumnNumbers.h>
#include <Core/Settings.h>
#include <Functions/grouping.h>

View File

@ -4,6 +4,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Core/Settings.h>
#include <Functions/FunctionFactory.h>
#include <Functions/multiIf.h>

View File

@ -5,6 +5,7 @@
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeEnum.h>

View File

@ -8,6 +8,7 @@
#include <Analyzer/JoinNode.h>
#include <Analyzer/HashUtils.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesNumber.h>

View File

@ -2,6 +2,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/FunctionNode.h>
#include <Core/Settings.h>
#include <Functions/FunctionFactory.h>
#include <Functions/if.h>

View File

@ -7,6 +7,7 @@
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <DataTypes/DataTypesNumber.h>

View File

@ -9,6 +9,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Common/DateLUT.h>
#include <Common/DateLUTImpl.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -8,6 +8,7 @@
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -3,6 +3,7 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Core/Settings.h>
#include <DataTypes/IDataType.h>
#include <Interpreters/ExternalDictionariesLoader.h>

View File

@ -8,6 +8,7 @@
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/SortNode.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -6,6 +6,8 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Core/Settings.h>
#include <Functions/FunctionFactory.h>
#include <Interpreters/Context.h>

View File

@ -6,6 +6,7 @@
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
#include <Functions/FunctionFactory.h>
namespace DB

View File

@ -1,19 +1,16 @@
#include <Analyzer/Passes/SumIfToCountIfPass.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeNullable.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Analyzer/Utils.h>
#include <Functions/FunctionFactory.h>
#include <Interpreters/Context.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeNullable.h>
#include <Functions/FunctionFactory.h>
#include <Interpreters/Context.h>
namespace DB
{

View File

@ -9,6 +9,8 @@
#include <Analyzer/FunctionNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -9,6 +9,8 @@
#include <Analyzer/QueryNode.h>
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -40,6 +40,8 @@
#include <Analyzer/JoinNode.h>
#include <Analyzer/UnionNode.h>
#include <Core/Settings.h>
#include <Databases/IDatabase.h>
#include <Interpreters/StorageID.h>

View File

@ -1,8 +1,9 @@
#include <Analyzer/Resolve/IdentifierResolveScope.h>
#include <Interpreters/Context.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/UnionNode.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
namespace DB
{

View File

@ -1,6 +1,7 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeObject.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/NestedUtils.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionHelpers.h>
@ -25,6 +26,8 @@
#include <Analyzer/Resolve/IdentifierResolveScope.h>
#include <Analyzer/Resolve/ReplaceColumnsVisitor.h>
#include <Core/Settings.h>
namespace DB
{
namespace ErrorCodes
@ -678,9 +681,33 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage(
bool match_full_identifier = false;
const auto & identifier_full_name = identifier_without_column_qualifier.getFullName();
auto it = table_expression_data.column_name_to_column_node.find(identifier_full_name);
bool can_resolve_directly_from_storage = it != table_expression_data.column_name_to_column_node.end();
if (can_resolve_directly_from_storage && table_expression_data.subcolumn_names.contains(identifier_full_name))
ColumnNodePtr result_column_node;
bool can_resolve_directly_from_storage = false;
bool is_subcolumn = false;
if (auto it = table_expression_data.column_name_to_column_node.find(identifier_full_name); it != table_expression_data.column_name_to_column_node.end())
{
can_resolve_directly_from_storage = true;
is_subcolumn = table_expression_data.subcolumn_names.contains(identifier_full_name);
result_column_node = it->second;
}
/// Check if it's a dynamic subcolumn
else
{
auto [column_name, dynamic_subcolumn_name] = Nested::splitName(identifier_full_name);
auto jt = table_expression_data.column_name_to_column_node.find(column_name);
if (jt != table_expression_data.column_name_to_column_node.end() && jt->second->getColumnType()->hasDynamicSubcolumns())
{
if (auto dynamic_subcolumn_type = jt->second->getColumnType()->tryGetSubcolumnType(dynamic_subcolumn_name))
{
result_column_node = std::make_shared<ColumnNode>(NameAndTypePair{identifier_full_name, dynamic_subcolumn_type}, jt->second->getColumnSource());
can_resolve_directly_from_storage = true;
is_subcolumn = true;
}
}
}
if (can_resolve_directly_from_storage && is_subcolumn)
{
/** In the case when we have an ARRAY JOIN, we should not resolve subcolumns directly from storage.
* For example, consider the following SQL query:
@ -696,11 +723,11 @@ QueryTreeNodePtr IdentifierResolver::tryResolveIdentifierFromStorage(
if (can_resolve_directly_from_storage)
{
match_full_identifier = true;
result_expression = it->second;
result_expression = result_column_node;
}
else
{
it = table_expression_data.column_name_to_column_node.find(identifier_without_column_qualifier.at(0));
auto it = table_expression_data.column_name_to_column_node.find(identifier_without_column_qualifier.at(0));
if (it != table_expression_data.column_name_to_column_node.end())
result_expression = it->second;
}

View File

@ -64,6 +64,8 @@
#include <Analyzer/Resolve/TableExpressionsAliasVisitor.h>
#include <Analyzer/Resolve/ReplaceColumnsVisitor.h>
#include <Core/Settings.h>
namespace ProfileEvents
{
extern const Event ScalarSubqueriesGlobalCacheHit;
@ -1490,6 +1492,10 @@ void QueryAnalyzer::qualifyColumnNodesWithProjectionNames(const QueryTreeNodes &
additional_column_qualification_parts = {table_expression_node->getAlias()};
else if (auto * table_node = table_expression_node->as<TableNode>())
additional_column_qualification_parts = {table_node->getStorageID().getDatabaseName(), table_node->getStorageID().getTableName()};
else if (auto * query_node = table_expression_node->as<QueryNode>(); query_node && query_node->isCTE())
additional_column_qualification_parts = {query_node->getCTEName()};
else if (auto * union_node = table_expression_node->as<UnionNode>(); union_node && union_node->isCTE())
additional_column_qualification_parts = {union_node->getCTEName()};
size_t additional_column_qualification_parts_size = additional_column_qualification_parts.size();
const auto & table_expression_data = scope.getTableExpressionDataOrThrow(table_expression_node);
@ -3410,14 +3416,14 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
function_base = function->build(argument_columns);
/// Do not constant fold get scalar functions
bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";
// bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
// function_name == "shardCount" || function_name == "hostName" || function_name == "tcpPort";
/** If function is suitable for constant folding try to convert it to constant.
* Example: SELECT plus(1, 1);
* Result: SELECT 2;
*/
if (function_base->isSuitableForConstantFolding() && !disable_constant_folding)
if (function_base->isSuitableForConstantFolding()) // && !disable_constant_folding)
{
auto result_type = function_base->getResultType();
auto executable_function = function_base->prepare(argument_columns);
@ -3826,6 +3832,10 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(
node->convertToNullable();
break;
}
/// Check parent scopes until we find the current query scope.
if (scope_ptr->scope_node->getNodeType() == QueryTreeNodeType::QUERY)
break;
}
}
@ -4455,9 +4465,8 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
{
auto left_table_expression = extractLeftTableExpression(scope_query_node->getJoinTree());
if (table_expression_node.get() == left_table_expression.get() &&
scope.joins_count == 1 &&
scope.context->getSettingsRef().single_join_prefer_left_table)
table_expression_data.should_qualify_columns = false;
scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
table_expression_data.should_qualify_columns = false;
}
scope.table_expression_node_to_data.emplace(table_expression_node, std::move(table_expression_data));

View File

@ -10,6 +10,8 @@
#include <Interpreters/Context.h>
#include <Core/Settings.h>
namespace DB
{

View File

@ -1,5 +1,7 @@
#include <Analyzer/Utils.h>
#include <Core/Settings.h>
#include <Parsers/ASTTablesInSelectQuery.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSubquery.h>

View File

@ -17,6 +17,7 @@
#include <base/scope_guard.h>
#include <base/sleep.h>
#include <Common/escapeForFileName.h>
#include <Core/Settings.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>

View File

@ -1,6 +1,7 @@
#include <Backups/BackupIO_S3.h>
#if USE_AWS_S3
#include <Core/Settings.h>
#include <Common/quoteString.h>
#include <Common/threadPoolCallbackRunner.h>
#include <Interpreters/Context.h>

View File

@ -26,6 +26,7 @@
#include <Common/setThreadName.h>
#include <Common/scope_guard_safe.h>
#include <Common/ThreadPool.h>
#include <Core/Settings.h>
#include <boost/range/adaptor/map.hpp>

View File

@ -1,11 +1,11 @@
#include <Backups/DDLAdjustingForBackupVisitor.h>
#include <Core/ServerSettings.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Interpreters/Context.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Parsers/formatAST.h>
#include <Storages/StorageReplicatedMergeTree.h>
namespace DB

View File

@ -23,8 +23,9 @@
#include <Common/quoteString.h>
#include <Common/escapeForFileName.h>
#include <base/insertAtEnd.h>
#include <boost/algorithm/string/join.hpp>
#include <Core/Settings.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/range/adaptor/map.hpp>
#include <filesystem>

View File

@ -1,5 +1,7 @@
#include <mutex>
#include <Backups/WithRetries.h>
#include <Core/Settings.h>
#include <mutex>
namespace DB
{

View File

@ -8,6 +8,7 @@
#include <Common/SensitiveDataMasker.h>
#include <Common/StringUtils.h>
#include <Common/logger_useful.h>
#include <Core/Settings.h>
#include <Formats/registerFormats.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFile.h>

View File

@ -1,5 +1,7 @@
#include "LibraryBridgeHelper.h"
#include <Core/ServerSettings.h>
#include <Core/Settings.h>
#include <IO/ConnectionTimeouts.h>
namespace DB

View File

@ -10,6 +10,7 @@
#include <Common/Stopwatch.h>
#include <Common/DNSResolver.h>
#include <Core/ExternalTable.h>
#include <Core/Settings.h>
#include <Poco/Util/Application.h>
#include <Interpreters/Context.h>
#include <Client/Suggest.h>

View File

@ -1,6 +1,7 @@
#include <Client/ConnectionEstablisher.h>
#include <Common/quoteString.h>
#include <Common/ProfileEvents.h>
#include <Core/Settings.h>
namespace ProfileEvents
{

View File

@ -1,4 +1,5 @@
#include <Client/ConnectionPool.h>
#include <Core/Settings.h>
#include <boost/functional/hash.hpp>
@ -85,4 +86,15 @@ ConnectionPoolFactory & ConnectionPoolFactory::instance()
return ret;
}
IConnectionPool::Entry ConnectionPool::get(const DB::ConnectionTimeouts& timeouts, const DB::Settings& settings,
bool force_connected)
{
Entry entry = Base::get(settings.connection_pool_max_wait_ms.totalMilliseconds());
if (force_connected)
entry->forceConnected(timeouts);
return entry;
}
}
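The hunk above gives ConnectionPool::get an out-of-line definition, and the companion header hunk below keeps only the declaration next to a `struct Settings;` forward declaration, so the full Settings definition is needed in the .cpp alone. A minimal sketch of that header/source split, with hypothetical names rather than the real ClickHouse headers:

    /// connection_pool.h -- only a declaration, so a forward declaration of Settings is enough.
    struct Settings;

    class Pool
    {
    public:
        int get(const Settings & settings, bool force_connected);   /// declaration only
    };

    /// connection_pool.cpp -- the single translation unit that needs the full Settings definition.
    struct Settings
    {
        int connection_pool_max_wait_ms = 100;
    };

    int Pool::get(const Settings & settings, bool force_connected)
    {
        /// The body can read Settings members freely because the full type is visible here.
        return force_connected ? settings.connection_pool_max_wait_ms : 0;
    }

Callers are unaffected: the signature still takes the timeouts, the settings and the force_connected flag; only the place where settings.connection_pool_max_wait_ms is read has moved.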

View File

@ -4,12 +4,13 @@
#include <Common/Priority.h>
#include <Client/Connection.h>
#include <IO/ConnectionTimeouts.h>
#include <Core/Settings.h>
#include <base/defines.h>
namespace DB
{
struct Settings;
/** Interface for connection pools.
*
* Usage (using the usual `ConnectionPool` example)
@ -102,15 +103,7 @@ public:
Entry get(const ConnectionTimeouts & timeouts, /// NOLINT
const Settings & settings,
bool force_connected = true) override
{
Entry entry = Base::get(settings.connection_pool_max_wait_ms.totalMilliseconds());
if (force_connected)
entry->forceConnected(timeouts);
return entry;
}
bool force_connected) override;
std::string getDescription() const
{

View File

@ -3,6 +3,7 @@
#include <Client/HedgedConnections.h>
#include <Common/ProfileEvents.h>
#include <Core/Settings.h>
#include <Interpreters/ClientInfo.h>
#include <Interpreters/Context.h>

View File

@ -8,13 +8,14 @@
#include <Common/Fiber.h>
#include <Client/ConnectionEstablisher.h>
#include <Client/ConnectionPoolWithFailover.h>
#include <Core/Settings.h>
#include <unordered_map>
#include <memory>
namespace DB
{
struct Settings;
/** Class for establishing hedged connections with replicas.
* The process of establishing a connection is divided into stages; on each stage, if a
* replica doesn't respond for a long time, we start establishing a connection with

View File

@ -2,6 +2,7 @@
#include <Common/thread_local_rng.h>
#include <Core/Protocol.h>
#include <Core/Settings.h>
#include <Interpreters/Context.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/Operators.h>

View File

@ -49,6 +49,7 @@ void logAboutProgress(LoggerPtr log, size_t processed, size_t total, AtomicStopw
AsyncLoader::Pool::Pool(const AsyncLoader::PoolInitializer & init)
: name(init.name)
, priority(init.priority)
, max_threads(init.max_threads > 0 ? init.max_threads : getNumberOfPhysicalCPUCores())
, thread_pool(std::make_unique<ThreadPool>(
init.metric_threads,
init.metric_active_threads,
@ -56,17 +57,16 @@ AsyncLoader::Pool::Pool(const AsyncLoader::PoolInitializer & init)
/* max_threads = */ std::numeric_limits<size_t>::max(), // Unlimited number of threads, we do worker management ourselves
/* max_free_threads = */ 0, // We do not require free threads
/* queue_size = */0)) // Unlimited queue to avoid blocking during worker spawning
, max_threads(init.max_threads > 0 ? init.max_threads : getNumberOfPhysicalCPUCores())
{}
AsyncLoader::Pool::Pool(Pool&& o) noexcept
: name(o.name)
, priority(o.priority)
, thread_pool(std::move(o.thread_pool))
, ready_queue(std::move(o.ready_queue))
, max_threads(o.max_threads)
, workers(o.workers)
, suspended_workers(o.suspended_workers.load()) // All these constructors are needed because std::atomic is neither copy-constructible, nor move-constructible. We never move pools after init, so it is safe.
, thread_pool(std::move(o.thread_pool))
{}
void cancelOnDependencyFailure(const LoadJobPtr & self, const LoadJobPtr & dependency, std::exception_ptr & cancel)

View File

@ -365,11 +365,11 @@ private:
{
const String name;
const Priority priority;
std::unique_ptr<ThreadPool> thread_pool; // NOTE: we avoid using a `ThreadPool` queue to be able to move jobs between pools.
std::map<UInt64, LoadJobPtr> ready_queue; // FIFO queue of jobs to be executed in this pool. Map is used for faster erasing. Key is `ready_seqno`
size_t max_threads; // Max number of workers to be spawned
size_t workers = 0; // Number of currently executing workers
std::atomic<size_t> suspended_workers{0}; // Number of workers that are blocked by `wait()` call on a job executing in the same pool (for deadlock resolution)
std::unique_ptr<ThreadPool> thread_pool; // NOTE: we avoid using a `ThreadPool` queue to be able to move jobs between pools.
explicit Pool(const PoolInitializer & init);
Pool(Pool&& o) noexcept;

View File

@ -25,6 +25,7 @@
#endif
using namespace DB;
namespace fs = std::filesystem;
namespace DB
{
@ -69,7 +70,7 @@ uint64_t readMetricFromStatFile(ReadBufferFromFile & buf, const std::string & ke
struct CgroupsV1Reader : ICgroupsReader
{
explicit CgroupsV1Reader(const std::filesystem::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
explicit CgroupsV1Reader(const fs::path & stat_file_dir) : buf(stat_file_dir / "memory.stat") { }
uint64_t readMemoryUsage() override
{
@ -85,7 +86,7 @@ private:
struct CgroupsV2Reader : ICgroupsReader
{
explicit CgroupsV2Reader(const std::filesystem::path & stat_file_dir)
explicit CgroupsV2Reader(const fs::path & stat_file_dir)
: current_buf(stat_file_dir / "memory.current"), stat_buf(stat_file_dir / "memory.stat")
{
}
@ -129,8 +130,9 @@ std::optional<std::string> getCgroupsV2Path()
if (!cgroupsV2MemoryControllerEnabled())
return {};
String cgroup = cgroupV2OfProcess();
auto current_cgroup = cgroup.empty() ? default_cgroups_mount : (default_cgroups_mount / cgroup);
fs::path current_cgroup = cgroupV2PathOfProcess();
if (current_cgroup.empty())
return {};
/// Return the bottom-most nested current memory file. If there is no such file at the current
/// level, try again at the parent level as memory settings are inherited.
@ -138,7 +140,7 @@ std::optional<std::string> getCgroupsV2Path()
{
const auto current_path = current_cgroup / "memory.current";
const auto stat_path = current_cgroup / "memory.stat";
if (std::filesystem::exists(current_path) && std::filesystem::exists(stat_path))
if (fs::exists(current_path) && fs::exists(stat_path))
return {current_cgroup};
current_cgroup = current_cgroup.parent_path();
}
@ -148,7 +150,7 @@ std::optional<std::string> getCgroupsV2Path()
std::optional<std::string> getCgroupsV1Path()
{
auto path = default_cgroups_mount / "memory/memory.stat";
if (!std::filesystem::exists(path))
if (!fs::exists(path))
return {};
return {default_cgroups_mount / "memory"};
}
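The loop above starts at the cgroup directory of the current process and walks towards the cgroups v2 mount point, returning the first level that has both memory.current and memory.stat, since memory settings are inherited from parent cgroups. A standalone sketch of the same walk; the mount point constant and the stopping condition are assumptions for this sketch, not the exact ClickHouse code:

    #include <filesystem>
    #include <optional>

    namespace fs = std::filesystem;

    /// Stand-in for the real mount point constant used above.
    static const fs::path default_cgroups_mount = "/sys/fs/cgroup";

    /// Walk from the cgroup of the current process towards the mount point and return
    /// the first level that exposes both memory files.
    std::optional<fs::path> findMemoryCgroupDir(fs::path current_cgroup)
    {
        if (current_cgroup.empty())
            return std::nullopt;
        while (current_cgroup != default_cgroups_mount.parent_path())
        {
            if (fs::exists(current_cgroup / "memory.current") && fs::exists(current_cgroup / "memory.stat"))
                return current_cgroup;
            auto parent = current_cgroup.parent_path();
            if (parent == current_cgroup)   /// reached the filesystem root: give up
                return std::nullopt;
            current_cgroup = parent;
        }
        return std::nullopt;
    }

A caller would pass the directory returned by cgroupV2PathOfProcess(), as in the hunk above, and then read memory.current and memory.stat under the returned path.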

View File

@ -2,7 +2,9 @@
#include <Interpreters/Context.h>
#include <Common/CurrentThread.h>
#include <Common/DateLUTImpl.h>
#include <Common/filesystemHelpers.h>
#include <Core/Settings.h>
#include <Poco/DigestStream.h>
#include <Poco/Exception.h>

Some files were not shown because too many files have changed in this diff Show More