diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index f0741b5465f..0fbcb95fc12 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -850,6 +850,48 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+          fetch-depth: 0 # otherwise we will have no info about contributors
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -932,6 +974,7 @@ jobs:
- BuilderBinDarwinAarch64
- BuilderBinFreeBSD
- BuilderBinPPC64
+ - BuilderBinRISCV64
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index afc08f3e637..f898e764915 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -911,6 +911,47 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
+  BuilderBinRISCV64:
+    needs: [DockerHubPush, FastTest, StyleCheck]
+    runs-on: [self-hosted, builder]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/build_check
+          IMAGES_PATH=${{runner.temp}}/images_path
+          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
+          CACHES_PATH=${{runner.temp}}/../ccaches
+          BUILD_NAME=binary_riscv64
+          EOF
+      - name: Download changed images
+        uses: actions/download-artifact@v3
+        with:
+          name: changed_images
+          path: ${{ env.IMAGES_PATH }}
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+        with:
+          clear-repository: true
+          submodules: true
+      - name: Build
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
+      - name: Upload build URLs to artifacts
+        if: ${{ success() || failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: ${{ env.BUILD_URLS }}
+          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -992,6 +1033,7 @@ jobs:
- BuilderBinDarwinAarch64
- BuilderBinFreeBSD
- BuilderBinPPC64
+ - BuilderBinRISCV64
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
diff --git a/cmake/target.cmake b/cmake/target.cmake
index 5ef45576fb7..0791da87bf0 100644
--- a/cmake/target.cmake
+++ b/cmake/target.cmake
@@ -33,6 +33,19 @@ if (CMAKE_CROSSCOMPILING)
elseif (ARCH_PPC64LE)
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
+ elseif (ARCH_RISCV64)
+ # RISC-V support is preliminary
+ set (GLIBC_COMPATIBILITY OFF CACHE INTERNAL "")
+ set (ENABLE_LDAP OFF CACHE INTERNAL "")
+ set (OPENSSL_NO_ASM ON CACHE INTERNAL "")
+ set (ENABLE_JEMALLOC ON CACHE INTERNAL "")
+ set (ENABLE_PARQUET OFF CACHE INTERNAL "")
+ set (USE_UNWIND OFF CACHE INTERNAL "")
+ set (ENABLE_GRPC OFF CACHE INTERNAL "")
+ set (ENABLE_HDFS OFF CACHE INTERNAL "")
+ set (ENABLE_MYSQL OFF CACHE INTERNAL "")
+ # It might be ok, but we need to update 'sysroot'
+ set (ENABLE_RUST OFF CACHE INTERNAL "")
elseif (ARCH_S390X)
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
diff --git a/docker/packager/packager b/docker/packager/packager
index 1b3df858cd2..e12bd55dde3 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -138,6 +138,7 @@ def parse_env_variables(
ARM_V80COMPAT_SUFFIX = "-aarch64-v80compat"
FREEBSD_SUFFIX = "-freebsd"
PPC_SUFFIX = "-ppc64le"
+ RISCV_SUFFIX = "-riscv64"
AMD64_COMPAT_SUFFIX = "-amd64-compat"
result = []
@@ -150,6 +151,7 @@ def parse_env_variables(
is_cross_arm = compiler.endswith(ARM_SUFFIX)
is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
+ is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
@@ -206,6 +208,11 @@ def parse_env_variables(
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-ppc64le.cmake"
)
+ elif is_cross_riscv:
+ cc = compiler[: -len(RISCV_SUFFIX)]
+ cmake_flags.append(
+ "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
+ )
elif is_amd64_compat:
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
result.append("DEB_ARCH=amd64")
@@ -370,6 +377,7 @@ def parse_args() -> argparse.Namespace:
"clang-16-aarch64",
"clang-16-aarch64-v80compat",
"clang-16-ppc64le",
+ "clang-16-riscv64",
"clang-16-amd64-compat",
"clang-16-freebsd",
),
diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh
index 82a88272df9..b8061309342 100644
--- a/docker/test/upgrade/run.sh
+++ b/docker/test/upgrade/run.sh
@@ -67,6 +67,13 @@ start
stop
mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.initial.log
+# Start server from previous release
+# Let's enable S3 storage by default
+export USE_S3_STORAGE_FOR_MERGE_TREE=1
+# Previous version may not be ready for fault injections
+export ZOOKEEPER_FAULT_INJECTION=0
+configure
+
# force_sync=false doesn't work correctly on some older versions
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
| sed "s|false|true|" \
@@ -81,13 +88,6 @@ mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/cli
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
-# Start server from previous release
-# Let's enable S3 storage by default
-export USE_S3_STORAGE_FOR_MERGE_TREE=1
-# Previous version may not be ready for fault injections
-export ZOOKEEPER_FAULT_INJECTION=0
-configure
-
# it contains some new settings, but we can safely remove it
rm /etc/clickhouse-server/config.d/merge_tree.xml
rm /etc/clickhouse-server/users.d/nonconst_timezone.xml
diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile
index 85e888f1df7..b255a2cc23d 100644
--- a/docker/test/util/Dockerfile
+++ b/docker/test/util/Dockerfile
@@ -94,7 +94,10 @@ RUN mkdir /tmp/ccache \
&& rm -rf /tmp/ccache
ARG TARGETARCH
-ARG SCCACHE_VERSION=v0.4.1
+ARG SCCACHE_VERSION=v0.5.4
+ENV SCCACHE_IGNORE_SERVER_IO_ERROR=1
+# sccache requires a region to be set, so use the default AWS region (us-east-1) unless overridden
+ENV SCCACHE_REGION=us-east-1
RUN arch=${TARGETARCH:-amd64} \
&& case $arch in \
amd64) rarch=x86_64 ;; \
diff --git a/docs/_includes/install/universal.sh b/docs/_includes/install/universal.sh
index 1699be138c8..5d4571aed9e 100755
--- a/docs/_includes/install/universal.sh
+++ b/docs/_includes/install/universal.sh
@@ -33,6 +33,9 @@ then
elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ]
then
DIR="powerpc64le"
+ elif [ "${ARCH}" = "riscv64" ]
+ then
+ DIR="riscv64"
fi
elif [ "${OS}" = "FreeBSD" ]
then
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index cff13302cdc..5f6cf98646b 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -3201,6 +3201,40 @@ ENGINE = Log
└──────────────────────────────────────────────────────────────────────────┘
```
+## default_temporary_table_engine {#default_temporary_table_engine}
+
+Same as [default_table_engine](#default_table_engine) but for temporary tables.
+
+Default value: `Memory`.
+
+In this example, any new temporary table that does not specify an `Engine` will use the `Log` table engine:
+
+Query:
+
+```sql
+SET default_temporary_table_engine = 'Log';
+
+CREATE TEMPORARY TABLE my_table (
+ x UInt32,
+ y UInt32
+);
+
+SHOW CREATE TEMPORARY TABLE my_table;
+```
+
+Result:
+
+```response
+┌─statement────────────────────────────────────────────────────────────────┐
+│ CREATE TEMPORARY TABLE default.my_table
+(
+ `x` UInt32,
+ `y` UInt32
+)
+ENGINE = Log
+└──────────────────────────────────────────────────────────────────────────┘
+```
+
## data_type_default_nullable {#data_type_default_nullable}
Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md/#null-modifiers) in column definition to be [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable).
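
The new `default_temporary_table_engine` section above documents the `Memory` default but only demonstrates the `Log` override. A minimal complementary check, not taken from this patch, assuming a server where the setting has not been changed:

```sql
-- With the setting left at its default, temporary tables fall back to the Memory engine.
CREATE TEMPORARY TABLE t_default (x UInt32);
SHOW CREATE TEMPORARY TABLE t_default;  -- expected to show ENGINE = Memory
```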
diff --git a/docs/en/operations/system-tables/asynchronous_metric_log.md b/docs/en/operations/system-tables/asynchronous_metric_log.md
index 4290799b6bc..efe57a202d8 100644
--- a/docs/en/operations/system-tables/asynchronous_metric_log.md
+++ b/docs/en/operations/system-tables/asynchronous_metric_log.md
@@ -9,7 +9,6 @@ Columns:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time with microseconds resolution.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
@@ -20,18 +19,18 @@ SELECT * FROM system.asynchronous_metric_log LIMIT 10
```
``` text
-┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0 │ 2120.9 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy │ 743 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty │ 26288 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │ 0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs │ 0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained │ 60694528 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped │ 303161344 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident │ 260931584 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata │ 12079488 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated │ 133756128 │
-└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0 │ 2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy │ 743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty │ 26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained │ 60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata │ 12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated │ 133756128 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
```
**See Also**
diff --git a/docs/ru/operations/system-tables/asynchronous_metric_log.md b/docs/ru/operations/system-tables/asynchronous_metric_log.md
index 886fbb6cab0..5145889c95f 100644
--- a/docs/ru/operations/system-tables/asynchronous_metric_log.md
+++ b/docs/ru/operations/system-tables/asynchronous_metric_log.md
@@ -8,7 +8,6 @@ slug: /ru/operations/system-tables/asynchronous_metric_log
Столбцы:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — дата события.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — время события.
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — время события в микросекундах.
- `name` ([String](../../sql-reference/data-types/string.md)) — название метрики.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — значение метрики.
diff --git a/docs/zh/operations/system-tables/asynchronous_metric_log.md b/docs/zh/operations/system-tables/asynchronous_metric_log.md
index 419ad2a7ed6..9fa399f1aed 100644
--- a/docs/zh/operations/system-tables/asynchronous_metric_log.md
+++ b/docs/zh/operations/system-tables/asynchronous_metric_log.md
@@ -8,7 +8,6 @@ slug: /zh/operations/system-tables/asynchronous_metric_log
列:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — 事件日期。
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — 事件时间。
-- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — 事件时间(微秒)。
- `name` ([String](../../sql-reference/data-types/string.md)) — 指标名。
- `value` ([Float64](../../sql-reference/data-types/float.md)) — 指标值。
@@ -17,18 +16,18 @@ slug: /zh/operations/system-tables/asynchronous_metric_log
SELECT * FROM system.asynchronous_metric_log LIMIT 10
```
``` text
-┌─event_date─┬──────────event_time─┬────event_time_microseconds─┬─name─────────────────────────────────────┬─────value─┐
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ CPUFrequencyMHz_0 │ 2120.9 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pmuzzy │ 743 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.arenas.all.pdirty │ 26288 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.run_intervals │ 0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.background_thread.num_runs │ 0 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.retained │ 60694528 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.mapped │ 303161344 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.resident │ 260931584 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.metadata │ 12079488 │
-│ 2020-09-05 │ 2020-09-05 15:56:30 │ 2020-09-05 15:56:30.025227 │ jemalloc.allocated │ 133756128 │
-└────────────┴─────────────────────┴────────────────────────────┴──────────────────────────────────────────┴───────────┘
+┌─event_date─┬──────────event_time─┬─name─────────────────────────────────────┬─────value─┐
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ CPUFrequencyMHz_0 │ 2120.9 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pmuzzy │ 743 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.arenas.all.pdirty │ 26288 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.run_intervals │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.background_thread.num_runs │ 0 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.retained │ 60694528 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.mapped │ 303161344 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.resident │ 260931584 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.metadata │ 12079488 │
+│ 2020-09-05 │ 2020-09-05 15:56:30 │ jemalloc.allocated │ 133756128 │
+└────────────┴─────────────────────┴──────────────────────────────────────────┴───────────┘
```
**另请参阅**
diff --git a/programs/disks/CommandCopy.cpp b/programs/disks/CommandCopy.cpp
index 1cfce7fc022..5228b582d25 100644
--- a/programs/disks/CommandCopy.cpp
+++ b/programs/disks/CommandCopy.cpp
@@ -59,7 +59,7 @@ public:
String relative_path_from = validatePathAndGetAsRelative(path_from);
String relative_path_to = validatePathAndGetAsRelative(path_to);
- disk_from->copy(relative_path_from, disk_to, relative_path_to);
+ disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to);
}
};
}
diff --git a/programs/keeper-converter/KeeperConverter.cpp b/programs/keeper-converter/KeeperConverter.cpp
index a049e6bc2b3..20448aafa2f 100644
--- a/programs/keeper-converter/KeeperConverter.cpp
+++ b/programs/keeper-converter/KeeperConverter.cpp
@@ -42,7 +42,7 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv)
{
auto keeper_context = std::make_shared<DB::KeeperContext>(true);
keeper_context->setDigestEnabled(true);
- keeper_context->setSnapshotDisk(std::make_shared<DB::DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>(), 0));
+ keeper_context->setSnapshotDisk(std::make_shared<DB::DiskLocal>("Keeper-snapshots", options["output-dir"].as<std::string>()));
DB::KeeperStorage storage(/* tick_time_ms */ 500, /* superdigest */ "", keeper_context, /* initialize_system_nodes */ false);
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index d2d8a0d07fb..755b7f17d98 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -1581,6 +1581,15 @@ try
/// After attaching system databases we can initialize system log.
global_context->initializeSystemLogs();
global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
+ /// Build loggers before tables startup so that log messages produced while attaching tables
+ /// are available in system.text_log
+ {
+ String level_str = config().getString("text_log.level", "");
+ int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
+ setTextLog(global_context->getTextLog(), level);
+
+ buildLoggers(config(), logger());
+ }
/// After the system database is created, attach virtual system tables (in addition to query_log and part_log)
attachSystemTablesServer(global_context, *database_catalog.getSystemDatabase(), has_zookeeper);
attachInformationSchema(global_context, *database_catalog.getDatabase(DatabaseCatalog::INFORMATION_SCHEMA));
@@ -1707,14 +1716,6 @@ try
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
async_metrics.start();
- {
- String level_str = config().getString("text_log.level", "");
- int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
- setTextLog(global_context->getTextLog(), level);
- }
-
- buildLoggers(config(), logger());
-
main_config_reloader->start();
access_control.startPeriodicReloading();
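
Moving `setTextLog` and `buildLoggers` ahead of table startup means messages emitted while tables are attached should now reach `system.text_log`. A hedged way to verify this after a restart, using only documented `system.text_log` columns (the filter pattern is illustrative, not from this patch):

```sql
-- Early startup messages that previously predated logger initialization should now be captured.
SELECT event_time, level, logger_name, message
FROM system.text_log
WHERE event_date = today() AND message ILIKE '%attach%'
ORDER BY event_time ASC
LIMIT 10;
```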
diff --git a/src/Access/Common/AccessRightsElement.cpp b/src/Access/Common/AccessRightsElement.cpp
index e11d43634ec..835f414df37 100644
--- a/src/Access/Common/AccessRightsElement.cpp
+++ b/src/Access/Common/AccessRightsElement.cpp
@@ -155,7 +155,7 @@ namespace
AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, std::string_view database_)
- : access_flags(access_flags_), database(database_), any_database(false)
+ : access_flags(access_flags_), database(database_), parameter(database_), any_database(false), any_parameter(false)
{
}
diff --git a/src/Access/Common/AccessType.h b/src/Access/Common/AccessType.h
index f65a77c1d6a..0b66a1b9578 100644
--- a/src/Access/Common/AccessType.h
+++ b/src/Access/Common/AccessType.h
@@ -70,7 +70,7 @@ enum class AccessType
M(ALTER_FREEZE_PARTITION, "FREEZE PARTITION, UNFREEZE", TABLE, ALTER_TABLE) \
\
M(ALTER_DATABASE_SETTINGS, "ALTER DATABASE SETTING, ALTER MODIFY DATABASE SETTING, MODIFY DATABASE SETTING", DATABASE, ALTER_DATABASE) /* allows to execute ALTER MODIFY SETTING */\
- M(ALTER_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute ALTER NAMED COLLECTION */\
+ M(ALTER_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute ALTER NAMED COLLECTION */\
\
M(ALTER_TABLE, "", GROUP, ALTER) \
M(ALTER_DATABASE, "", GROUP, ALTER) \
@@ -92,7 +92,7 @@ enum class AccessType
M(CREATE_ARBITRARY_TEMPORARY_TABLE, "", GLOBAL, CREATE) /* allows to create and manipulate temporary tables
with arbitrary table engine */\
M(CREATE_FUNCTION, "", GLOBAL, CREATE) /* allows to execute CREATE FUNCTION */ \
- M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute CREATE NAMED COLLECTION */ \
+ M(CREATE_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute CREATE NAMED COLLECTION */ \
M(CREATE, "", GROUP, ALL) /* allows to execute {CREATE|ATTACH} */ \
\
M(DROP_DATABASE, "", DATABASE, DROP) /* allows to execute {DROP|DETACH} DATABASE */\
@@ -101,7 +101,7 @@ enum class AccessType
implicitly enabled by the grant DROP_TABLE */\
M(DROP_DICTIONARY, "", DICTIONARY, DROP) /* allows to execute {DROP|DETACH} DICTIONARY */\
M(DROP_FUNCTION, "", GLOBAL, DROP) /* allows to execute DROP FUNCTION */\
- M(DROP_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) /* allows to execute DROP NAMED COLLECTION */\
+ M(DROP_NAMED_COLLECTION, "", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) /* allows to execute DROP NAMED COLLECTION */\
M(DROP, "", GROUP, ALL) /* allows to execute {DROP|DETACH} */\
\
M(UNDROP_TABLE, "", TABLE, ALL) /* allows to execute {UNDROP} TABLE */\
@@ -140,9 +140,10 @@ enum class AccessType
M(SHOW_SETTINGS_PROFILES, "SHOW PROFILES, SHOW CREATE SETTINGS PROFILE, SHOW CREATE PROFILE", GLOBAL, SHOW_ACCESS) \
M(SHOW_ACCESS, "", GROUP, ACCESS_MANAGEMENT) \
M(ACCESS_MANAGEMENT, "", GROUP, ALL) \
- M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) \
- M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_CONTROL) \
- M(NAMED_COLLECTION_CONTROL, "", NAMED_COLLECTION, ALL) \
+ M(SHOW_NAMED_COLLECTIONS, "SHOW NAMED COLLECTIONS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+ M(SHOW_NAMED_COLLECTIONS_SECRETS, "SHOW NAMED COLLECTIONS SECRETS", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+ M(NAMED_COLLECTION, "NAMED COLLECTION USAGE, USE NAMED COLLECTION", NAMED_COLLECTION, NAMED_COLLECTION_ADMIN) \
+ M(NAMED_COLLECTION_ADMIN, "NAMED COLLECTION CONTROL", NAMED_COLLECTION, ALL) \
\
M(SYSTEM_SHUTDOWN, "SYSTEM KILL, SHUTDOWN", GLOBAL, SYSTEM) \
M(SYSTEM_DROP_DNS_CACHE, "SYSTEM DROP DNS, DROP DNS CACHE, DROP DNS", GLOBAL, SYSTEM_DROP_CACHE) \
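
The hunk above renames `NAMED_COLLECTION_CONTROL` to `NAMED_COLLECTION_ADMIN` and adds a usage-level `NAMED_COLLECTION` access type beneath it. A sketch of how the resulting hierarchy might be granted, assuming the SQL keywords follow the aliases declared in the macros (collection and user names are illustrative):

```sql
-- Full administrative rights over one named collection (alias: NAMED COLLECTION CONTROL).
GRANT NAMED COLLECTION ADMIN ON mycollection TO admin_user;

-- Usage-only access (aliases: NAMED COLLECTION USAGE, USE NAMED COLLECTION).
GRANT NAMED COLLECTION ON mycollection TO app_user;
```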
diff --git a/src/Access/UsersConfigAccessStorage.cpp b/src/Access/UsersConfigAccessStorage.cpp
index 187258d0fcd..15765045c97 100644
--- a/src/Access/UsersConfigAccessStorage.cpp
+++ b/src/Access/UsersConfigAccessStorage.cpp
@@ -328,7 +328,7 @@ namespace
if (!named_collection_control)
{
- user->access.revoke(AccessType::NAMED_COLLECTION_CONTROL);
+ user->access.revoke(AccessType::NAMED_COLLECTION_ADMIN);
}
if (!show_named_collections_secrets)
diff --git a/src/Access/tests/gtest_access_rights_ops.cpp b/src/Access/tests/gtest_access_rights_ops.cpp
index 5f1f13ca5a2..c2e9501f58c 100644
--- a/src/Access/tests/gtest_access_rights_ops.cpp
+++ b/src/Access/tests/gtest_access_rights_ops.cpp
@@ -53,7 +53,7 @@ TEST(AccessRights, Union)
"SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, "
"SYSTEM MOVES, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, "
"SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, "
- "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION CONTROL ON db1");
+ "SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM SYNC DATABASE REPLICA, SYSTEM FLUSH DISTRIBUTED, dictGet ON db1.*, GRANT NAMED COLLECTION ADMIN ON db1");
}
diff --git a/src/Backups/tests/gtest_backup_entries.cpp b/src/Backups/tests/gtest_backup_entries.cpp
index ca603d20787..75972b35ba4 100644
--- a/src/Backups/tests/gtest_backup_entries.cpp
+++ b/src/Backups/tests/gtest_backup_entries.cpp
@@ -24,7 +24,7 @@ protected:
/// Make local disk.
temp_dir = std::make_unique<Poco::TemporaryFile>();
temp_dir->createDirectories();
- local_disk = std::make_shared<DiskLocal>("local_disk", temp_dir->path() + "/", 0);
+ local_disk = std::make_shared<DiskLocal>("local_disk", temp_dir->path() + "/");
/// Make encrypted disk.
auto settings = std::make_unique<DiskEncryptedSettings>();
@@ -38,7 +38,7 @@ protected:
settings->current_key = key;
settings->current_key_fingerprint = fingerprint;
- encrypted_disk = std::make_shared<DiskEncrypted>("encrypted_disk", std::move(settings), true);
+ encrypted_disk = std::make_shared<DiskEncrypted>("encrypted_disk", std::move(settings));
}
void TearDown() override
diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h
index bf790423d1d..b8ebff2a5d5 100644
--- a/src/Columns/ColumnVector.h
+++ b/src/Columns/ColumnVector.h
@@ -107,8 +107,8 @@ struct FloatCompareHelper
}
};
-template <> struct CompareHelper<Float32> : public FloatCompareHelper<Float32> {};
-template <> struct CompareHelper<Float64> : public FloatCompareHelper<Float64> {};
+template <typename U> struct CompareHelper<Float32, U> : public FloatCompareHelper<Float32> {};
+template <typename U> struct CompareHelper<Float64, U> : public FloatCompareHelper<Float64> {};
/** A template for columns that use a simple array to store.
diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp
index 105a7c0548f..f61cfae566d 100644
--- a/src/Common/CurrentMetrics.cpp
+++ b/src/Common/CurrentMetrics.cpp
@@ -93,8 +93,8 @@
M(ThreadPoolFSReaderThreadsActive, "Number of threads in the thread pool for local_filesystem_read_method=threadpool running a task.") \
M(BackupsIOThreads, "Number of threads in the BackupsIO thread pool.") \
M(BackupsIOThreadsActive, "Number of threads in the BackupsIO thread pool running a task.") \
- M(DiskObjectStorageAsyncThreads, "Number of threads in the async thread pool for DiskObjectStorage.") \
- M(DiskObjectStorageAsyncThreadsActive, "Number of threads in the async thread pool for DiskObjectStorage running a task.") \
+ M(DiskObjectStorageAsyncThreads, "Obsolete metric, shows nothing.") \
+ M(DiskObjectStorageAsyncThreadsActive, "Obsolete metric, shows nothing.") \
M(StorageHiveThreads, "Number of threads in the StorageHive thread pool.") \
M(StorageHiveThreadsActive, "Number of threads in the StorageHive thread pool running a task.") \
M(TablesLoaderThreads, "Number of threads in the tables loader thread pool.") \
@@ -141,6 +141,8 @@
M(MergeTreeOutdatedPartsLoaderThreadsActive, "Number of active threads in the threadpool for loading Outdated data parts.") \
M(MergeTreePartsCleanerThreads, "Number of threads in the MergeTree parts cleaner thread pool.") \
M(MergeTreePartsCleanerThreadsActive, "Number of threads in the MergeTree parts cleaner thread pool running a task.") \
+ M(IDiskCopierThreads, "Number of threads for copying data between disks of different types.") \
+ M(IDiskCopierThreadsActive, "Number of threads for copying data between disks of different types running a task.") \
M(SystemReplicasThreads, "Number of threads in the system.replicas thread pool.") \
M(SystemReplicasThreadsActive, "Number of threads in the system.replicas thread pool running a task.") \
M(RestartReplicaThreads, "Number of threads in the RESTART REPLICA thread pool.") \
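
With the per-disk async executor replaced by a shared copying thread pool later in this patch, the two metrics added above are the runtime view into it. A quick check against the documented `system.metrics` table (nothing assumed beyond the metric names introduced here):

```sql
SELECT metric, value, description
FROM system.metrics
WHERE metric IN ('IDiskCopierThreads', 'IDiskCopierThreadsActive');
```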
diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp
index ee268be45f6..054a60cb91d 100644
--- a/src/Common/Exception.cpp
+++ b/src/Common/Exception.cpp
@@ -418,6 +418,18 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
<< " (version " << VERSION_STRING << VERSION_OFFICIAL << ")";
}
catch (...) {}
+
+// #ifdef ABORT_ON_LOGICAL_ERROR
+// try
+// {
+// throw;
+// }
+// catch (const std::logic_error &)
+// {
+// abortOnFailedAssertion(stream.str());
+// }
+// catch (...) {}
+// #endif
}
catch (...)
{
diff --git a/src/Coordination/KeeperContext.cpp b/src/Coordination/KeeperContext.cpp
index 3c3c0500540..408344ee67f 100644
--- a/src/Coordination/KeeperContext.cpp
+++ b/src/Coordination/KeeperContext.cpp
@@ -220,7 +220,7 @@ KeeperContext::Storage KeeperContext::getLogsPathFromConfig(const Poco::Util::Ab
if (!fs::exists(path))
fs::create_directories(path);
- return std::make_shared<DiskLocal>("LocalLogDisk", path, 0);
+ return std::make_shared<DiskLocal>("LocalLogDisk", path);
};
/// the most specialized path
@@ -246,7 +246,7 @@ KeeperContext::Storage KeeperContext::getSnapshotsPathFromConfig(const Poco::Uti
if (!fs::exists(path))
fs::create_directories(path);
- return std::make_shared<DiskLocal>("LocalSnapshotDisk", path, 0);
+ return std::make_shared<DiskLocal>("LocalSnapshotDisk", path);
};
/// the most specialized path
@@ -272,7 +272,7 @@ KeeperContext::Storage KeeperContext::getStatePathFromConfig(const Poco::Util::A
if (!fs::exists(path))
fs::create_directories(path);
- return std::make_shared<DiskLocal>("LocalStateFileDisk", path, 0);
+ return std::make_shared<DiskLocal>("LocalStateFileDisk", path);
};
if (config.has("keeper_server.state_storage_disk"))
diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp
index 0f60c960b8b..6df149bbfbe 100644
--- a/src/Coordination/tests/gtest_coordination.cpp
+++ b/src/Coordination/tests/gtest_coordination.cpp
@@ -71,16 +71,16 @@ protected:
DB::KeeperContextPtr keeper_context = std::make_shared<DB::KeeperContext>(true);
Poco::Logger * log{&Poco::Logger::get("CoordinationTest")};
- void setLogDirectory(const std::string & path) { keeper_context->setLogDisk(std::make_shared<DB::DiskLocal>("LogDisk", path, 0)); }
+ void setLogDirectory(const std::string & path) { keeper_context->setLogDisk(std::make_shared<DB::DiskLocal>("LogDisk", path)); }
void setSnapshotDirectory(const std::string & path)
{
- keeper_context->setSnapshotDisk(std::make_shared<DB::DiskLocal>("SnapshotDisk", path, 0));
+ keeper_context->setSnapshotDisk(std::make_shared<DB::DiskLocal>("SnapshotDisk", path));
}
void setStateFileDirectory(const std::string & path)
{
- keeper_context->setStateFileDisk(std::make_shared<DB::DiskLocal>("StateFile", path, 0));
+ keeper_context->setStateFileDisk(std::make_shared<DB::DiskLocal>("StateFile", path));
}
};
@@ -1503,9 +1503,9 @@ void testLogAndStateMachine(
using namespace DB;
ChangelogDirTest snapshots("./snapshots");
- keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("SnapshotDisk", "./snapshots", 0));
+ keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("SnapshotDisk", "./snapshots"));
ChangelogDirTest logs("./logs");
- keeper_context->setLogDisk(std::make_shared<DiskLocal>("LogDisk", "./logs", 0));
+ keeper_context->setLogDisk(std::make_shared<DiskLocal>("LogDisk", "./logs"));
ResponsesQueue queue(std::numeric_limits<size_t>::max());
SnapshotsQueue snapshots_queue{1};
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index b7d12a518c8..59373df3ece 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -517,6 +517,7 @@ class IColumn;
M(Seconds, wait_for_window_view_fire_signal_timeout, 10, "Timeout for waiting for window view fire signal in event time processing", 0) \
M(UInt64, min_free_disk_space_for_temporary_data, 0, "The minimum disk space to keep while writing temporary data used in external sorting and aggregation.", 0) \
\
+ M(DefaultTableEngine, default_temporary_table_engine, DefaultTableEngine::Memory, "Default table engine used when ENGINE is not set in CREATE TEMPORARY statement.",0) \
M(DefaultTableEngine, default_table_engine, DefaultTableEngine::None, "Default table engine used when ENGINE is not set in CREATE statement.",0) \
M(Bool, show_table_uuid_in_table_create_query_if_not_nil, false, "For tables in databases with Engine=Atomic show UUID of the table in its CREATE query.", 0) \
M(Bool, database_atomic_wait_for_drop_and_detach_synchronously, false, "When executing DROP or DETACH TABLE in Atomic database, wait for table data to be finally dropped or detached.", 0) \
diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp
index 65147ee664e..2dc7f6145b3 100644
--- a/src/Dictionaries/ClickHouseDictionarySource.cpp
+++ b/src/Dictionaries/ClickHouseDictionarySource.cpp
@@ -217,7 +217,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
std::optional configuration;
std::string settings_config_prefix = config_prefix + ".clickhouse";
- auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix) : nullptr;
+ auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, global_context) : nullptr;
if (named_collection)
{
diff --git a/src/Dictionaries/MySQLDictionarySource.cpp b/src/Dictionaries/MySQLDictionarySource.cpp
index 730217f96b7..e61409e2b54 100644
--- a/src/Dictionaries/MySQLDictionarySource.cpp
+++ b/src/Dictionaries/MySQLDictionarySource.cpp
@@ -71,7 +71,7 @@ void registerDictionarySourceMysql(DictionarySourceFactory & factory)
MySQLSettings mysql_settings;
std::optional dictionary_configuration;
- auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix) : nullptr;
+ auto named_collection = created_from_ddl ? tryGetNamedCollectionWithOverrides(config, settings_config_prefix, global_context) : nullptr;
if (named_collection)
{
auto allowed_arguments{dictionary_allowed_keys};
diff --git a/src/Disks/DiskEncrypted.cpp b/src/Disks/DiskEncrypted.cpp
index 6b515b100c9..677dd73cc00 100644
--- a/src/Disks/DiskEncrypted.cpp
+++ b/src/Disks/DiskEncrypted.cpp
@@ -285,19 +285,32 @@ private:
};
DiskEncrypted::DiskEncrypted(
- const String & name_, const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_, const DisksMap & map_, bool use_fake_transaction_)
- : DiskEncrypted(name_, parseDiskEncryptedSettings(name_, config_, config_prefix_, map_), use_fake_transaction_)
+ const String & name_, const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_, const DisksMap & map_)
+ : DiskEncrypted(name_, parseDiskEncryptedSettings(name_, config_, config_prefix_, map_), config_, config_prefix_)
{
}
-DiskEncrypted::DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_, bool use_fake_transaction_)
+DiskEncrypted::DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_,
+ const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_)
+ : IDisk(name_, config_, config_prefix_)
+ , delegate(settings_->wrapped_disk)
+ , encrypted_name(name_)
+ , disk_path(settings_->disk_path)
+ , disk_absolute_path(settings_->wrapped_disk->getPath() + settings_->disk_path)
+ , current_settings(std::move(settings_))
+ , use_fake_transaction(config_.getBool(config_prefix_ + ".use_fake_transaction", true))
+{
+ delegate->createDirectories(disk_path);
+}
+
+DiskEncrypted::DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_)
: IDisk(name_)
, delegate(settings_->wrapped_disk)
, encrypted_name(name_)
, disk_path(settings_->disk_path)
, disk_absolute_path(settings_->wrapped_disk->getPath() + settings_->disk_path)
, current_settings(std::move(settings_))
- , use_fake_transaction(use_fake_transaction_)
+ , use_fake_transaction(true)
{
delegate->createDirectories(disk_path);
}
@@ -310,32 +323,6 @@ ReservationPtr DiskEncrypted::reserve(UInt64 bytes)
return std::make_unique<DiskEncryptedReservation>(std::static_pointer_cast<DiskEncrypted>(shared_from_this()), std::move(reservation));
}
-void DiskEncrypted::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
-{
- /// Check if we can copy the file without deciphering.
- if (isSameDiskType(*this, *to_disk))
- {
- /// Disk type is the same, check if the key is the same too.
- if (auto * to_disk_enc = typeid_cast<DiskEncrypted *>(to_disk.get()))
- {
- auto from_settings = current_settings.get();
- auto to_settings = to_disk_enc->current_settings.get();
- if (from_settings->all_keys == to_settings->all_keys)
- {
- /// Keys are the same so we can simply copy the encrypted file.
- auto wrapped_from_path = wrappedPath(from_path);
- auto to_delegate = to_disk_enc->delegate;
- auto wrapped_to_path = to_disk_enc->wrappedPath(to_path);
- delegate->copy(wrapped_from_path, to_delegate, wrapped_to_path);
- return;
- }
- }
- }
-
- /// Copy the file through buffers with deciphering.
- copyThroughBuffers(from_path, to_disk, to_path);
-}
-
void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
{
@@ -359,11 +346,8 @@ void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::sha
}
}
- if (!to_disk->exists(to_dir))
- to_disk->createDirectories(to_dir);
-
/// Copy the file through buffers with deciphering.
- copyThroughBuffers(from_dir, to_disk, to_dir);
+ IDisk::copyDirectoryContent(from_dir, to_disk, to_dir);
}
std::unique_ptr<ReadBufferFromFileBase> DiskEncrypted::readFile(
@@ -443,7 +427,7 @@ std::unordered_map DiskEncrypted::getSerializedMetadata(const st
void DiskEncrypted::applyNewSettings(
const Poco::Util::AbstractConfiguration & config,
- ContextPtr /*context*/,
+ ContextPtr context,
const String & config_prefix,
const DisksMap & disk_map)
{
@@ -455,6 +439,7 @@ void DiskEncrypted::applyNewSettings(
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Сhanging disk path on the fly is not supported. Disk {}", name);
current_settings.set(std::move(new_settings));
+ IDisk::applyNewSettings(config, context, config_prefix, disk_map);
}
void registerDiskEncrypted(DiskFactory & factory, bool global_skip_access_check)
@@ -467,7 +452,7 @@ void registerDiskEncrypted(DiskFactory & factory, bool global_skip_access_check)
const DisksMap & map) -> DiskPtr
{
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
- DiskPtr disk = std::make_shared<DiskEncrypted>(name, config, config_prefix, map, config.getBool(config_prefix + ".use_fake_transaction", true));
+ DiskPtr disk = std::make_shared<DiskEncrypted>(name, config, config_prefix, map);
disk->startup(context, skip_access_check);
return disk;
};
diff --git a/src/Disks/DiskEncrypted.h b/src/Disks/DiskEncrypted.h
index 69d051a9537..9963770bd1c 100644
--- a/src/Disks/DiskEncrypted.h
+++ b/src/Disks/DiskEncrypted.h
@@ -21,8 +21,10 @@ class WriteBufferFromFileBase;
class DiskEncrypted : public IDisk
{
public:
- DiskEncrypted(const String & name_, const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_, const DisksMap & map_, bool use_fake_transaction_);
- DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_, bool use_fake_transaction_);
+ DiskEncrypted(const String & name_, const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_, const DisksMap & map_);
+ DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_,
+ const Poco::Util::AbstractConfiguration & config_, const String & config_prefix_);
+ DiskEncrypted(const String & name_, std::unique_ptr<const DiskEncryptedSettings> settings_);
const String & getName() const override { return encrypted_name; }
const String & getPath() const override { return disk_absolute_path; }
@@ -110,8 +112,6 @@ public:
delegate->listFiles(wrapped_path, file_names);
}
- void copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path) override;
-
void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir) override;
std::unique_ptr<ReadBufferFromFileBase> readFile(
diff --git a/src/Disks/DiskLocal.cpp b/src/Disks/DiskLocal.cpp
index c76ea289101..9a61c176cf6 100644
--- a/src/Disks/DiskLocal.cpp
+++ b/src/Disks/DiskLocal.cpp
@@ -417,29 +417,12 @@ bool inline isSameDiskType(const IDisk & one, const IDisk & another)
return typeid(one) == typeid(another);
}
-void DiskLocal::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
-{
- if (isSameDiskType(*this, *to_disk))
- {
- fs::path to = fs::path(to_disk->getPath()) / to_path;
- fs::path from = fs::path(disk_path) / from_path;
- if (from_path.ends_with('/'))
- from = from.parent_path();
- if (fs::is_directory(from))
- to /= from.filename();
-
- fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
- }
- else
- copyThroughBuffers(from_path, to_disk, to_path, /* copy_root_dir */ true); /// Base implementation.
-}
-
void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
{
if (isSameDiskType(*this, *to_disk))
- fs::copy(from_dir, to_dir, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
+ fs::copy(fs::path(disk_path) / from_dir, fs::path(to_disk->getPath()) / to_dir, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
else
- copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir */ false); /// Base implementation.
+ IDisk::copyDirectoryContent(from_dir, to_disk, to_dir);
}
SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
@@ -448,7 +431,7 @@ SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
}
-void DiskLocal::applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap &)
+void DiskLocal::applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & disk_map)
{
String new_disk_path;
UInt64 new_keep_free_space_bytes;
@@ -460,10 +443,13 @@ void DiskLocal::applyNewSettings(const Poco::Util::AbstractConfiguration & confi
if (keep_free_space_bytes != new_keep_free_space_bytes)
keep_free_space_bytes = new_keep_free_space_bytes;
+
+ IDisk::applyNewSettings(config, context, config_prefix, disk_map);
}
-DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_)
- : IDisk(name_)
+DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_,
+ const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
+ : IDisk(name_, config, config_prefix)
, disk_path(path_)
, keep_free_space_bytes(keep_free_space_bytes_)
, logger(&Poco::Logger::get("DiskLocal"))
@@ -472,13 +458,24 @@ DiskLocal::DiskLocal(const String & name_, const String & path_, UInt64 keep_fre
}
DiskLocal::DiskLocal(
- const String & name_, const String & path_, UInt64 keep_free_space_bytes_, ContextPtr context, UInt64 local_disk_check_period_ms)
- : DiskLocal(name_, path_, keep_free_space_bytes_)
+ const String & name_, const String & path_, UInt64 keep_free_space_bytes_, ContextPtr context,
+ const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
+ : DiskLocal(name_, path_, keep_free_space_bytes_, config, config_prefix)
{
+ auto local_disk_check_period_ms = config.getUInt("local_disk_check_period_ms", 0);
if (local_disk_check_period_ms > 0)
disk_checker = std::make_unique<DiskLocalCheckThread>(this, context, local_disk_check_period_ms);
}
+DiskLocal::DiskLocal(const String & name_, const String & path_)
+ : IDisk(name_)
+ , disk_path(path_)
+ , keep_free_space_bytes(0)
+ , logger(&Poco::Logger::get("DiskLocal"))
+ , data_source_description(getLocalDataSourceDescription(disk_path))
+{
+}
+
DataSourceDescription DiskLocal::getDataSourceDescription() const
{
return data_source_description;
@@ -720,7 +717,7 @@ void registerDiskLocal(DiskFactory & factory, bool global_skip_access_check)
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
std::shared_ptr<IDisk> disk
- = std::make_shared<DiskLocal>(name, path, keep_free_space_bytes, context, config.getUInt("local_disk_check_period_ms", 0));
+ = std::make_shared<DiskLocal>(name, path, keep_free_space_bytes, context, config, config_prefix);
disk->startup(context, skip_access_check);
return disk;
};
diff --git a/src/Disks/DiskLocal.h b/src/Disks/DiskLocal.h
index 3d340ae40b7..b30732b67fd 100644
--- a/src/Disks/DiskLocal.h
+++ b/src/Disks/DiskLocal.h
@@ -19,13 +19,17 @@ public:
friend class DiskLocalCheckThread;
friend class DiskLocalReservation;
- DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_);
+ DiskLocal(const String & name_, const String & path_, UInt64 keep_free_space_bytes_,
+ const Poco::Util::AbstractConfiguration & config, const String & config_prefix);
DiskLocal(
const String & name_,
const String & path_,
UInt64 keep_free_space_bytes_,
ContextPtr context,
- UInt64 local_disk_check_period_ms);
+ const Poco::Util::AbstractConfiguration & config,
+ const String & config_prefix);
+
+ DiskLocal(const String & name_, const String & path_);
const String & getPath() const override { return disk_path; }
@@ -63,8 +67,6 @@ public:
void replaceFile(const String & from_path, const String & to_path) override;
- void copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path) override;
-
void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir) override;
void listFiles(const String & path, std::vector<String> & file_names) const override;
diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp
index 9894e4251a2..e51f79867b5 100644
--- a/src/Disks/DiskSelector.cpp
+++ b/src/Disks/DiskSelector.cpp
@@ -53,7 +53,7 @@ void DiskSelector::initialize(const Poco::Util::AbstractConfiguration & config,
disks.emplace(
default_disk_name,
std::make_shared<DiskLocal>(
- default_disk_name, context->getPath(), 0, context, config.getUInt("local_disk_check_period_ms", 0)));
+ default_disk_name, context->getPath(), 0, context, config, config_prefix));
}
is_initialized = true;
diff --git a/src/Disks/Executor.h b/src/Disks/Executor.h
deleted file mode 100644
index 7330bcdd559..00000000000
--- a/src/Disks/Executor.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#pragma once
-
-#include <functional>
-#include <future>
-
-namespace DB
-{
-
-/// Interface to run task asynchronously with possibility to wait for execution.
-class Executor
-{
-public:
- virtual ~Executor() = default;
- virtual std::future<void> execute(std::function<void()> task) = 0;
-};
-
-/// Executes task synchronously in case when disk doesn't support async operations.
-class SyncExecutor : public Executor
-{
-public:
- SyncExecutor() = default;
- std::future<void> execute(std::function<void()> task) override
- {
- auto promise = std::make_shared<std::promise<void>>();
- try
- {
- task();
- promise->set_value();
- }
- catch (...)
- {
- try
- {
- promise->set_exception(std::current_exception());
- }
- catch (...) { }
- }
- return promise->get_future();
- }
-};
-
-}
diff --git a/src/Disks/IDisk.cpp b/src/Disks/IDisk.cpp
index bca867fec76..544ba014fde 100644
--- a/src/Disks/IDisk.cpp
+++ b/src/Disks/IDisk.cpp
@@ -1,5 +1,4 @@
#include "IDisk.h"
-#include "Disks/Executor.h"
#include
#include
#include
@@ -80,18 +79,33 @@ UInt128 IDisk::getEncryptedFileIV(const String &) const
using ResultsCollector = std::vector<std::future<void>>;
-void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_path, Executor & exec, ResultsCollector & results, bool copy_root_dir, const WriteSettings & settings)
+void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_path, ThreadPool & pool, ResultsCollector & results, bool copy_root_dir, const WriteSettings & settings)
{
if (from_disk.isFile(from_path))
{
- auto result = exec.execute(
- [&from_disk, from_path, &to_disk, to_path, &settings]()
+ auto promise = std::make_shared<std::promise<void>>();
+ auto future = promise->get_future();
+
+ pool.scheduleOrThrowOnError(
+ [&from_disk, from_path, &to_disk, to_path, &settings, promise, thread_group = CurrentThread::getGroup()]()
{
- setThreadName("DiskCopier");
- from_disk.copyFile(from_path, to_disk, fs::path(to_path) / fileName(from_path), settings);
+ try
+ {
+ SCOPE_EXIT_SAFE(if (thread_group) CurrentThread::detachFromGroupIfNotDetached(););
+
+ if (thread_group)
+ CurrentThread::attachToGroup(thread_group);
+
+ from_disk.copyFile(from_path, to_disk, fs::path(to_path) / fileName(from_path), settings);
+ promise->set_value();
+ }
+ catch (...)
+ {
+ promise->set_exception(std::current_exception());
+ }
});
- results.push_back(std::move(result));
+ results.push_back(std::move(future));
}
else
{
@@ -104,13 +118,12 @@ void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_p
}
for (auto it = from_disk.iterateDirectory(from_path); it->isValid(); it->next())
- asyncCopy(from_disk, it->path(), to_disk, dest, exec, results, true, settings);
+ asyncCopy(from_disk, it->path(), to_disk, dest, pool, results, true, settings);
}
}
void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir)
{
- auto & exec = to_disk->getExecutor();
ResultsCollector results;
WriteSettings settings;
@@ -118,17 +131,12 @@ void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
-void IDisk::copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path)
-{
-    copyThroughBuffers(from_path, to_disk, to_path, true);
+ result.get(); /// May rethrow an exception
}
@@ -137,7 +145,7 @@ void IDisk::copyDirectoryContent(const String & from_dir, const std::shared_ptr<
if (!to_disk->exists(to_dir))
to_disk->createDirectories(to_dir);
- copyThroughBuffers(from_dir, to_disk, to_dir, false);
+ copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir */ false);
}
void IDisk::truncateFile(const String &, size_t)
@@ -233,4 +241,9 @@ catch (Exception & e)
throw;
}
+void IDisk::applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr /*context*/, const String & config_prefix, const DisksMap & /*map*/)
+{
+ copying_thread_pool.setMaxThreads(config.getInt(config_prefix + ".thread_pool_size", 16));
+}
+
}
diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h
index 5d75f3b70e5..ccef3db2dac 100644
--- a/src/Disks/IDisk.h
+++ b/src/Disks/IDisk.h
@@ -6,7 +6,6 @@
#include
#include
#include
-#include <Disks/Executor.h>
#include
#include
#include
@@ -35,6 +34,12 @@ namespace Poco
}
}
+namespace CurrentMetrics
+{
+ extern const Metric IDiskCopierThreads;
+ extern const Metric IDiskCopierThreadsActive;
+}
+
namespace DB
{
@@ -110,9 +115,15 @@ class IDisk : public Space
{
public:
/// Default constructor.
- explicit IDisk(const String & name_, std::shared_ptr<Executor> executor_ = std::make_shared<SyncExecutor>())
+ IDisk(const String & name_, const Poco::Util::AbstractConfiguration & config, const String & config_prefix)
: name(name_)
- , executor(executor_)
+ , copying_thread_pool(CurrentMetrics::IDiskCopierThreads, CurrentMetrics::IDiskCopierThreadsActive, config.getUInt(config_prefix + ".thread_pool_size", 16))
+ {
+ }
+
+ explicit IDisk(const String & name_)
+ : name(name_)
+ , copying_thread_pool(CurrentMetrics::IDiskCopierThreads, CurrentMetrics::IDiskCopierThreadsActive, 16)
{
}
@@ -181,9 +192,6 @@ public:
/// If a file with `to_path` path already exists, it will be replaced.
virtual void replaceFile(const String & from_path, const String & to_path) = 0;
- /// Recursively copy data containing at `from_path` to `to_path` located at `to_disk`.
- virtual void copy(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path);
-
/// Recursively copy files from from_dir to to_dir. Create to_dir if not exists.
virtual void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir);
@@ -379,7 +387,7 @@ public:
virtual SyncGuardPtr getDirectorySyncGuard(const String & path) const;
/// Applies new settings for disk in runtime.
- virtual void applyNewSettings(const Poco::Util::AbstractConfiguration &, ContextPtr, const String &, const DisksMap &) {}
+ virtual void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map);
/// Quite leaky abstraction. Some disks can use additional disk to store
/// some parts of metadata. In general case we have only one disk itself and
@@ -459,9 +467,6 @@ protected:
const String name;
- /// Returns executor to perform asynchronous operations.
- virtual Executor & getExecutor() { return *executor; }
-
/// Base implementation of the function copy().
/// It just opens two files, reads data by portions from the first file, and writes it to the second one.
/// A derived class may override copy() to provide a faster implementation.
@@ -470,7 +475,7 @@ protected:
virtual void checkAccessImpl(const String & path);
private:
- std::shared_ptr<Executor> executor;
+ ThreadPool copying_thread_pool;
bool is_custom_disk = false;
/// Check access to the disk.
diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp
index 562b2b2fec0..a09befe84a8 100644
--- a/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp
+++ b/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp
@@ -31,9 +31,6 @@ void registerDiskAzureBlobStorage(DiskFactory & factory, bool global_skip_access
getAzureBlobContainerClient(config, config_prefix),
getAzureBlobStorageSettings(config, config_prefix, context));
- uint64_t copy_thread_pool_size = config.getUInt(config_prefix + ".thread_pool_size", 16);
- bool send_metadata = config.getBool(config_prefix + ".send_metadata", false);
-
auto metadata_storage = std::make_shared(metadata_disk, "");
std::shared_ptr azure_blob_storage_disk = std::make_shared(
@@ -42,8 +39,8 @@ void registerDiskAzureBlobStorage(DiskFactory & factory, bool global_skip_access
"DiskAzureBlobStorage",
std::move(metadata_storage),
std::move(azure_object_storage),
- send_metadata,
- copy_thread_pool_size
+ config,
+ config_prefix
);
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp
index e5bbd2ca0c6..8553b479149 100644
--- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp
@@ -18,12 +18,6 @@
#include
#include
-namespace CurrentMetrics
-{
- extern const Metric DiskObjectStorageAsyncThreads;
- extern const Metric DiskObjectStorageAsyncThreadsActive;
-}
-
namespace DB
{
@@ -37,55 +31,6 @@ namespace ErrorCodes
extern const int DIRECTORY_DOESNT_EXIST;
}
-namespace
-{
-
-/// Runs tasks asynchronously using thread pool.
-class AsyncThreadPoolExecutor : public Executor
-{
-public:
- AsyncThreadPoolExecutor(const String & name_, int thread_pool_size)
- : name(name_)
- , pool(CurrentMetrics::DiskObjectStorageAsyncThreads, CurrentMetrics::DiskObjectStorageAsyncThreadsActive, thread_pool_size)
- {}
-
- std::future<void> execute(std::function<void()> task) override
- {
- auto promise = std::make_shared<std::promise<void>>();
- pool.scheduleOrThrowOnError(
- [promise, task]()
- {
- try
- {
- task();
- promise->set_value();
- }
- catch (...)
- {
- tryLogCurrentException("Failed to run async task");
-
- try
- {
- promise->set_exception(std::current_exception());
- }
- catch (...) {}
- }
- });
-
- return promise->get_future();
- }
-
- void setMaxThreads(size_t threads)
- {
- pool.setMaxThreads(threads);
- }
-
-private:
- String name;
- ThreadPool pool;
-};
-
-}
DiskTransactionPtr DiskObjectStorage::createTransaction()
{
@@ -105,27 +50,20 @@ DiskTransactionPtr DiskObjectStorage::createObjectStorageTransaction()
send_metadata ? metadata_helper.get() : nullptr);
}
-std::shared_ptr<Executor> DiskObjectStorage::getAsyncExecutor(const std::string & log_name, size_t size)
-{
- static auto reader = std::make_shared<AsyncThreadPoolExecutor>(log_name, size);
- return reader;
-}
-
DiskObjectStorage::DiskObjectStorage(
const String & name_,
const String & object_storage_root_path_,
const String & log_name,
MetadataStoragePtr metadata_storage_,
ObjectStoragePtr object_storage_,
- bool send_metadata_,
- uint64_t thread_pool_size_)
- : IDisk(name_, getAsyncExecutor(log_name, thread_pool_size_))
+ const Poco::Util::AbstractConfiguration & config,
+ const String & config_prefix)
+ : IDisk(name_, config, config_prefix)
, object_storage_root_path(object_storage_root_path_)
, log (&Poco::Logger::get("DiskObjectStorage(" + log_name + ")"))
, metadata_storage(std::move(metadata_storage_))
, object_storage(std::move(object_storage_))
- , send_metadata(send_metadata_)
- , threadpool_size(thread_pool_size_)
+ , send_metadata(config.getBool(config_prefix + ".send_metadata", false))
, metadata_helper(std::make_unique(this, ReadSettings{}))
{}
@@ -234,19 +172,23 @@ void DiskObjectStorage::moveFile(const String & from_path, const String & to_pat
transaction->commit();
}
-
-void DiskObjectStorage::copy(const String & from_path, const std::shared_ptr & to_disk, const String & to_path)
+void DiskObjectStorage::copyFile( /// NOLINT
+ const String & from_file_path,
+ IDisk & to_disk,
+ const String & to_file_path,
+ const WriteSettings & settings)
{
- /// It's the same object storage disk
- if (this == to_disk.get())
+ if (this == &to_disk)
{
+ /// It may use S3 server-side copy
auto transaction = createObjectStorageTransaction();
- transaction->copyFile(from_path, to_path);
+ transaction->copyFile(from_file_path, to_file_path);
transaction->commit();
}
else
{
- IDisk::copy(from_path, to_disk, to_path);
+ /// Copy through buffers
+ IDisk::copyFile(from_file_path, to_disk, to_file_path, settings);
}
}
@@ -519,14 +461,15 @@ bool DiskObjectStorage::isWriteOnce() const
DiskObjectStoragePtr DiskObjectStorage::createDiskObjectStorage()
{
+ const auto config_prefix = "storage_configuration.disks." + name;
return std::make_shared(
getName(),
object_storage_root_path,
getName(),
metadata_storage,
object_storage,
- send_metadata,
- threadpool_size);
+ Context::getGlobalContextInstance()->getConfigRef(),
+ config_prefix);
}
std::unique_ptr DiskObjectStorage::readFile(
@@ -582,13 +525,12 @@ void DiskObjectStorage::writeFileUsingBlobWritingFunction(const String & path, W
}
void DiskObjectStorage::applyNewSettings(
- const Poco::Util::AbstractConfiguration & config, ContextPtr context_, const String &, const DisksMap &)
+ const Poco::Util::AbstractConfiguration & config, ContextPtr context_, const String & /*config_prefix*/, const DisksMap & disk_map)
{
+ /// FIXME: we cannot use the config_prefix passed in the arguments because the disk may be wrapped with a cache and we need another name
const auto config_prefix = "storage_configuration.disks." + name;
object_storage->applyNewSettings(config, config_prefix, context_);
-
- if (AsyncThreadPoolExecutor * exec = dynamic_cast<AsyncThreadPoolExecutor *>(&getExecutor()))
- exec->setMaxThreads(config.getInt(config_prefix + ".thread_pool_size", 16));
+ IDisk::applyNewSettings(config, context_, config_prefix, disk_map);
}
void DiskObjectStorage::restoreMetadataIfNeeded(
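
With this change DiskObjectStorage no longer receives send_metadata and thread_pool_size as constructor arguments; it resolves them itself from the server configuration under the disk's config_prefix. A minimal standalone sketch of that lookup pattern, assuming Poco is available; the disk name `s3` and the XML layout are illustrative:

    #include <Poco/AutoPtr.h>
    #include <Poco/Util/XMLConfiguration.h>
    #include <iostream>
    #include <sstream>
    #include <string>

    int main()
    {
        std::istringstream xml(
            "<clickhouse><storage_configuration><disks><s3>"
            "<send_metadata>true</send_metadata>"
            "<thread_pool_size>8</thread_pool_size>"
            "</s3></disks></storage_configuration></clickhouse>");

        Poco::AutoPtr<Poco::Util::XMLConfiguration> config(new Poco::Util::XMLConfiguration(xml));

        /// The same keys the disk now resolves internally from its config_prefix.
        const std::string config_prefix = "storage_configuration.disks.s3";
        const bool send_metadata = config->getBool(config_prefix + ".send_metadata", false);
        const unsigned thread_pool_size = config->getUInt(config_prefix + ".thread_pool_size", 16);

        std::cout << "send_metadata=" << send_metadata
                  << " thread_pool_size=" << thread_pool_size << "\n";
    }
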
diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.h b/src/Disks/ObjectStorages/DiskObjectStorage.h
index c9820956a4d..d22d1310413 100644
--- a/src/Disks/ObjectStorages/DiskObjectStorage.h
+++ b/src/Disks/ObjectStorages/DiskObjectStorage.h
@@ -33,8 +33,8 @@ public:
const String & log_name,
MetadataStoragePtr metadata_storage_,
ObjectStoragePtr object_storage_,
- bool send_metadata_,
- uint64_t thread_pool_size_);
+ const Poco::Util::AbstractConfiguration & config,
+ const String & config_prefix);
/// Create fake transaction
DiskTransactionPtr createTransaction() override;
@@ -152,7 +152,11 @@ public:
Strings getBlobPath(const String & path) const override;
void writeFileUsingBlobWritingFunction(const String & path, WriteMode mode, WriteBlobFunction && write_blob_function) override;
- void copy(const String & from_path, const std::shared_ptr & to_disk, const String & to_path) override;
+ void copyFile( /// NOLINT
+ const String & from_file_path,
+ IDisk & to_disk,
+ const String & to_file_path,
+ const WriteSettings & settings = {}) override;
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context_, const String &, const DisksMap &) override;
@@ -198,8 +202,6 @@ public:
NameSet getCacheLayersNames() const override;
#endif
- static std::shared_ptr getAsyncExecutor(const std::string & log_name, size_t size);
-
bool supportsStat() const override { return metadata_storage->supportsStat(); }
struct stat stat(const String & path) const override;
@@ -225,7 +227,6 @@ private:
std::optional tryReserve(UInt64 bytes);
const bool send_metadata;
- size_t threadpool_size;
std::unique_ptr metadata_helper;
};
diff --git a/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp b/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp
index 5ac6128c3c0..cc9e4b0b712 100644
--- a/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp
+++ b/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp
@@ -25,7 +25,7 @@ std::pair prepareForLocalMetadata(
/// where the metadata files are stored locally
auto metadata_path = getDiskMetadataPath(name, config, config_prefix, context);
fs::create_directories(metadata_path);
- auto metadata_disk = std::make_shared(name + "-metadata", metadata_path, 0);
+ auto metadata_disk = std::make_shared(name + "-metadata", metadata_path, 0, config, config_prefix);
return std::make_pair(metadata_path, metadata_disk);
}
diff --git a/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp b/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp
index 74d1698bf01..bbcdd40d85f 100644
--- a/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp
+++ b/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.cpp
@@ -8,6 +8,14 @@
#include
#include
#include
+#include
+
+
+namespace CurrentMetrics
+{
+ extern const Metric LocalThread;
+ extern const Metric LocalThreadActive;
+}
namespace DB
{
@@ -101,7 +109,7 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::migrateFileToRestorableSchema
updateObjectMetadata(object.remote_path, metadata);
}
}
-void DiskObjectStorageRemoteMetadataRestoreHelper::migrateToRestorableSchemaRecursive(const String & path, Futures & results)
+void DiskObjectStorageRemoteMetadataRestoreHelper::migrateToRestorableSchemaRecursive(const String & path, ThreadPool & pool)
{
checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks.
@@ -120,29 +128,26 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::migrateToRestorableSchemaRecu
/// The whole directory can be migrated asynchronously.
if (dir_contains_only_files)
{
- auto result = disk->getExecutor().execute([this, path]
+ pool.scheduleOrThrowOnError([this, path]
{
for (auto it = disk->iterateDirectory(path); it->isValid(); it->next())
migrateFileToRestorableSchema(it->path());
});
-
- results.push_back(std::move(result));
}
else
{
for (auto it = disk->iterateDirectory(path); it->isValid(); it->next())
- if (!disk->isDirectory(it->path()))
+ {
+ if (disk->isDirectory(it->path()))
{
- auto source_path = it->path();
- auto result = disk->getExecutor().execute([this, source_path]
- {
- migrateFileToRestorableSchema(source_path);
- });
-
- results.push_back(std::move(result));
+ migrateToRestorableSchemaRecursive(it->path(), pool);
}
else
- migrateToRestorableSchemaRecursive(it->path(), results);
+ {
+ auto source_path = it->path();
+ pool.scheduleOrThrowOnError([this, source_path] { migrateFileToRestorableSchema(source_path); });
+ }
+ }
}
}
@@ -153,16 +158,13 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::migrateToRestorableSchema()
{
LOG_INFO(disk->log, "Start migration to restorable schema for disk {}", disk->name);
- Futures results;
+ ThreadPool pool{CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive};
for (const auto & root : data_roots)
if (disk->exists(root))
- migrateToRestorableSchemaRecursive(root + '/', results);
+ migrateToRestorableSchemaRecursive(root + '/', pool);
- for (auto & result : results)
- result.wait();
- for (auto & result : results)
- result.get();
+ pool.wait();
saveSchemaVersion(RESTORABLE_SCHEMA_VERSION);
}
@@ -355,8 +357,8 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::restoreFiles(IObjectStorage *
{
LOG_INFO(disk->log, "Starting restore files for disk {}", disk->name);
- std::vector<std::future<void>> results;
- auto restore_files = [this, &source_object_storage, &restore_information, &results](const RelativePathsWithMetadata & objects)
+ ThreadPool pool{CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive};
+ auto restore_files = [this, &source_object_storage, &restore_information, &pool](const RelativePathsWithMetadata & objects)
{
std::vector keys_names;
for (const auto & object : objects)
@@ -378,12 +380,10 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::restoreFiles(IObjectStorage *
if (!keys_names.empty())
{
- auto result = disk->getExecutor().execute([this, &source_object_storage, &restore_information, keys_names]()
+ pool.scheduleOrThrowOnError([this, &source_object_storage, &restore_information, keys_names]()
{
processRestoreFiles(source_object_storage, restore_information.source_path, keys_names);
});
-
- results.push_back(std::move(result));
}
return true;
@@ -394,10 +394,7 @@ void DiskObjectStorageRemoteMetadataRestoreHelper::restoreFiles(IObjectStorage *
restore_files(children);
- for (auto & result : results)
- result.wait();
- for (auto & result : results)
- result.get();
+ pool.wait();
LOG_INFO(disk->log, "Files are restored for disk {}", disk->name);
diff --git a/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.h b/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.h
index cb8d9b8a5af..e7de4afcaf3 100644
--- a/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.h
+++ b/src/Disks/ObjectStorages/DiskObjectStorageRemoteMetadataRestoreHelper.h
@@ -75,7 +75,7 @@ private:
void saveSchemaVersion(const int & version) const;
void updateObjectMetadata(const String & key, const ObjectAttributes & metadata) const;
void migrateFileToRestorableSchema(const String & path) const;
- void migrateToRestorableSchemaRecursive(const String & path, Futures & results);
+ void migrateToRestorableSchemaRecursive(const String & path, ThreadPool & pool);
void readRestoreInformation(RestoreInformation & restore_information);
void restoreFiles(IObjectStorage * source_object_storage, const RestoreInformation & restore_information);
diff --git a/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp b/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp
index 693b966caf2..e72e7028c4b 100644
--- a/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp
+++ b/src/Disks/ObjectStorages/HDFS/registerDiskHDFS.cpp
@@ -44,7 +44,6 @@ void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check)
auto [_, metadata_disk] = prepareForLocalMetadata(name, config, config_prefix, context);
auto metadata_storage = std::make_shared(metadata_disk, uri);
- uint64_t copy_thread_pool_size = config.getUInt(config_prefix + ".thread_pool_size", 16);
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
DiskPtr disk = std::make_shared(
@@ -53,8 +52,8 @@ void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check)
"DiskHDFS",
std::move(metadata_storage),
std::move(hdfs_storage),
- /* send_metadata = */ false,
- copy_thread_pool_size);
+ config,
+ config_prefix);
disk->startup(context, skip_access_check);
return disk;
diff --git a/src/Disks/ObjectStorages/Local/registerLocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/registerLocalObjectStorage.cpp
index 251fc77d1f8..eb9039fed44 100644
--- a/src/Disks/ObjectStorages/Local/registerLocalObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/Local/registerLocalObjectStorage.cpp
@@ -34,7 +34,7 @@ void registerDiskLocalObjectStorage(DiskFactory & factory, bool global_skip_acce
metadata_storage = std::make_shared(metadata_disk, path);
auto disk = std::make_shared(
- name, path, "Local", metadata_storage, local_storage, false, /* threadpool_size */16);
+ name, path, "Local", metadata_storage, local_storage, config, config_prefix);
disk->startup(context, global_skip_access_check);
return disk;
};
diff --git a/src/Disks/ObjectStorages/S3/registerDiskS3.cpp b/src/Disks/ObjectStorages/S3/registerDiskS3.cpp
index f3a57069a30..fb125ae8517 100644
--- a/src/Disks/ObjectStorages/S3/registerDiskS3.cpp
+++ b/src/Disks/ObjectStorages/S3/registerDiskS3.cpp
@@ -150,17 +150,14 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
}
}
- bool send_metadata = config.getBool(config_prefix + ".send_metadata", false);
- uint64_t copy_thread_pool_size = config.getUInt(config_prefix + ".thread_pool_size", 16);
-
DiskObjectStoragePtr s3disk = std::make_shared(
name,
uri.key,
type == "s3" ? "DiskS3" : "DiskS3Plain",
std::move(metadata_storage),
std::move(s3_storage),
- send_metadata,
- copy_thread_pool_size);
+ config,
+ config_prefix);
s3disk->startup(context, skip_access_check);
diff --git a/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp b/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp
index 8a54de81815..bc6c17863ef 100644
--- a/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp
+++ b/src/Disks/ObjectStorages/Web/registerDiskWebServer.cpp
@@ -52,8 +52,8 @@ void registerDiskWebServer(DiskFactory & factory, bool global_skip_access_check)
"DiskWebServer",
metadata_storage,
object_storage,
- /* send_metadata */false,
- /* threadpool_size */16);
+ config,
+ config_prefix);
disk->startup(context, skip_access_check);
return disk;
};
diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp
index f4be8b8fe86..e6551ad16b5 100644
--- a/src/Disks/StoragePolicy.cpp
+++ b/src/Disks/StoragePolicy.cpp
@@ -302,7 +302,11 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & volume : getVolumes())
{
if (!new_volume_names.contains(volume->getName()))
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "New storage policy {} shall contain volumes of old one", backQuote(name));
+ throw Exception(
+ ErrorCodes::BAD_ARGUMENTS,
+ "New storage policy {} shall contain volumes of the old storage policy {}",
+ backQuote(new_storage_policy->getName()),
+ backQuote(name));
std::unordered_set new_disk_names;
for (const auto & disk : new_storage_policy->getVolumeByName(volume->getName())->getDisks())
@@ -310,7 +314,11 @@ void StoragePolicy::checkCompatibleWith(const StoragePolicyPtr & new_storage_pol
for (const auto & disk : volume->getDisks())
if (!new_disk_names.contains(disk->getName()))
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "New storage policy {} shall contain disks of old one", backQuote(name));
+ throw Exception(
+ ErrorCodes::BAD_ARGUMENTS,
+ "New storage policy {} shall contain disks of the old storage policy {}",
+ backQuote(new_storage_policy->getName()),
+ backQuote(name));
}
}
diff --git a/src/Disks/loadLocalDiskConfig.cpp b/src/Disks/loadLocalDiskConfig.cpp
index 0e5eca17ca7..0a9cdae1ae3 100644
--- a/src/Disks/loadLocalDiskConfig.cpp
+++ b/src/Disks/loadLocalDiskConfig.cpp
@@ -56,7 +56,7 @@ void loadDiskLocalConfig(const String & name,
tmp_path = context->getPath();
// Create tmp disk for getting total disk space.
- keep_free_space_bytes = static_cast(DiskLocal("tmp", tmp_path, 0).getTotalSpace() * ratio);
+ keep_free_space_bytes = static_cast(DiskLocal("tmp", tmp_path, 0, config, config_prefix).getTotalSpace() * ratio);
}
}
diff --git a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp
index 16acd109c27..d65808f5b6b 100644
--- a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp
+++ b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp
@@ -33,7 +33,7 @@ public:
void SetUp() override
{
fs::create_directories(tmp_root);
- disk = std::make_shared("local_disk", tmp_root, 0);
+ disk = std::make_shared("local_disk", tmp_root);
}
void TearDown() override
diff --git a/src/Disks/tests/gtest_disk.cpp b/src/Disks/tests/gtest_disk.cpp
index 1f33f536399..d57ca7bd81b 100644
--- a/src/Disks/tests/gtest_disk.cpp
+++ b/src/Disks/tests/gtest_disk.cpp
@@ -10,7 +10,7 @@ namespace fs = std::filesystem;
DB::DiskPtr createDisk()
{
fs::create_directory("tmp/");
- return std::make_shared("local_disk", "tmp/", 0);
+ return std::make_shared("local_disk", "tmp/");
}
void destroyDisk(DB::DiskPtr & disk)
diff --git a/src/Disks/tests/gtest_disk_encrypted.cpp b/src/Disks/tests/gtest_disk_encrypted.cpp
index ee9e284d409..b61b6140b0c 100644
--- a/src/Disks/tests/gtest_disk_encrypted.cpp
+++ b/src/Disks/tests/gtest_disk_encrypted.cpp
@@ -23,7 +23,7 @@ protected:
/// Make local disk.
temp_dir = std::make_unique();
temp_dir->createDirectories();
- local_disk = std::make_shared("local_disk", getDirectory(), 0);
+ local_disk = std::make_shared("local_disk", getDirectory());
}
void TearDown() override
@@ -42,7 +42,7 @@ protected:
settings->current_key = key;
settings->current_key_fingerprint = fingerprint;
settings->disk_path = path;
- encrypted_disk = std::make_shared("encrypted_disk", std::move(settings), true);
+ encrypted_disk = std::make_shared("encrypted_disk", std::move(settings));
}
String getFileNames()
diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h
index db0ff976d63..279294b367c 100644
--- a/src/Functions/FunctionsHashing.h
+++ b/src/Functions/FunctionsHashing.h
@@ -81,7 +81,7 @@ namespace impl
static SipHashKey parseSipHashKey(const ColumnWithTypeAndName & key)
{
- SipHashKey ret;
+ SipHashKey ret{};
const auto * tuple = checkAndGetColumn(key.column.get());
if (!tuple)
@@ -90,6 +90,9 @@ namespace impl
if (tuple->tupleSize() != 2)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "wrong tuple size: key must be a tuple of 2 UInt64");
+ if (tuple->empty())
+ return ret;
+
if (const auto * key0col = checkAndGetColumn(&(tuple->getColumn(0))))
ret.key0 = key0col->get64(0);
else
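
The `{}` added above value-initializes the key, which matters now that the function can return early for an empty tuple column. A tiny standalone illustration of the difference (SipHashKeyLike is a stand-in struct, not the real type):

    #include <cstdint>
    #include <iostream>

    struct SipHashKeyLike { uint64_t key0; uint64_t key1; };  /// stand-in for the real struct

    int main()
    {
        SipHashKeyLike uninitialized;   /// members are indeterminate; reading them is UB
        SipHashKeyLike zeroed{};        /// value-initialized: key0 == 0, key1 == 0
        std::cout << zeroed.key0 << " " << zeroed.key1 << "\n";
        (void)uninitialized;
    }
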
diff --git a/src/Functions/transform.cpp b/src/Functions/transform.cpp
index 8d6e53c491e..1fc0e3adf96 100644
--- a/src/Functions/transform.cpp
+++ b/src/Functions/transform.cpp
@@ -698,6 +698,8 @@ namespace
const DataTypePtr & from_type = arguments[0].type;
+ std::lock_guard lock(cache.mutex);
+
if (from_type->onlyNull())
{
cache.is_empty = true;
@@ -711,8 +713,6 @@ namespace
throw Exception(
ErrorCodes::ILLEGAL_COLUMN, "Second and third arguments of function {} must be constant arrays.", getName());
- std::lock_guard lock(cache.mutex);
-
const ColumnPtr & from_column_uncasted = array_from->getDataPtr();
cache.from_column = castColumn(
diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp
index fc24f0ae029..1a8e0ad96fa 100644
--- a/src/Interpreters/ConcurrentHashJoin.cpp
+++ b/src/Interpreters/ConcurrentHashJoin.cpp
@@ -49,7 +49,7 @@ ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptrgetOnlyClause().key_names_right, right_block);
@@ -77,7 +77,7 @@ bool ConcurrentHashJoin::addJoinedBlock(const Block & right_block, bool check_li
if (!lock.owns_lock())
continue;
- bool limit_exceeded = !hash_join->data->addJoinedBlock(dispatched_block, check_limits);
+ bool limit_exceeded = !hash_join->data->addBlockToJoin(dispatched_block, check_limits);
dispatched_block = {};
blocks_left--;
diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h
index 5e53f9845aa..1283879971d 100644
--- a/src/Interpreters/ConcurrentHashJoin.h
+++ b/src/Interpreters/ConcurrentHashJoin.h
@@ -16,13 +16,13 @@ namespace DB
{
/**
- * Can run addJoinedBlock() parallelly to speedup the join process. On test, it almose linear speedup by
+ * Can run addBlockToJoin() in parallel to speed up the join process. In tests, it scales almost linearly with
* the degree of parallelism.
*
* The default HashJoin is not thread safe for inserting right table's rows and run it in a single thread. When
* the right table is large, the join process is too slow.
*
- * We create multiple HashJoin instances here. In addJoinedBlock(), one input block is split into multiple blocks
+ * We create multiple HashJoin instances here. In addBlockToJoin(), one input block is split into multiple blocks
* corresponding to the HashJoin instances by hashing every row on the join keys. And make a guarantee that every HashJoin
* instance is written by only one thread.
*
@@ -37,7 +37,7 @@ public:
~ConcurrentHashJoin() override = default;
const TableJoin & getTableJoin() const override { return *table_join; }
- bool addJoinedBlock(const Block & block, bool check_limits) override;
+ bool addBlockToJoin(const Block & block, bool check_limits) override;
void checkTypesOfKeys(const Block & block) const override;
void joinBlock(Block & block, std::shared_ptr & not_processed) override;
void setTotals(const Block & block) override;
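
The comment block above describes the dispatch step: every right-table block is split across the HashJoin instances by hashing the join keys, so each instance is written by a single thread. A toy sketch of that routing (string keys, the shard count, and the power-of-two masking are illustrative; the real code hashes actual join-key columns):

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        const size_t num_shards = 4;   /// a power of two keeps the modulo a cheap mask
        std::vector<std::vector<std::string>> shards(num_shards);

        std::vector<std::string> right_rows = {"k1", "k2", "k3", "k4", "k5"};
        for (const auto & key : right_rows)
            shards[std::hash<std::string>{}(key) & (num_shards - 1)].push_back(key);

        for (size_t i = 0; i < num_shards; ++i)
            std::cout << "shard " << i << " got " << shards[i].size() << " rows\n";
    }
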
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index 7482450d529..442f1b913f1 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -875,9 +875,9 @@ catch (...)
"It is ok to skip this exception as cleaning old temporary files is not necessary", path));
}
-static VolumePtr createLocalSingleDiskVolume(const std::string & path)
+static VolumePtr createLocalSingleDiskVolume(const std::string & path, const Poco::Util::AbstractConfiguration & config_)
{
- auto disk = std::make_shared("_tmp_default", path, 0);
+ auto disk = std::make_shared("_tmp_default", path, 0, config_, "storage_configuration.disks._tmp_default");
VolumePtr volume = std::make_shared("_tmp_default", disk, 0);
return volume;
}
@@ -893,7 +893,7 @@ void Context::setTemporaryStoragePath(const String & path, size_t max_size)
if (!shared->tmp_path.ends_with('/'))
shared->tmp_path += '/';
- VolumePtr volume = createLocalSingleDiskVolume(shared->tmp_path);
+ VolumePtr volume = createLocalSingleDiskVolume(shared->tmp_path, getConfigRef());
for (const auto & disk : volume->getDisks())
{
@@ -966,7 +966,7 @@ void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t
LOG_DEBUG(shared->log, "Using file cache ({}) for temporary files", file_cache->getBasePath());
shared->tmp_path = file_cache->getBasePath();
- VolumePtr volume = createLocalSingleDiskVolume(shared->tmp_path);
+ VolumePtr volume = createLocalSingleDiskVolume(shared->tmp_path, getConfigRef());
shared->root_temp_data_on_disk = std::make_shared(volume, file_cache.get(), max_size);
}
diff --git a/src/Interpreters/DirectJoin.cpp b/src/Interpreters/DirectJoin.cpp
index cfefd7c5a91..431f216436d 100644
--- a/src/Interpreters/DirectJoin.cpp
+++ b/src/Interpreters/DirectJoin.cpp
@@ -103,7 +103,7 @@ DirectKeyValueJoin::DirectKeyValueJoin(
right_sample_block_with_storage_column_names = right_sample_block_with_storage_column_names_;
}
-bool DirectKeyValueJoin::addJoinedBlock(const Block &, bool)
+bool DirectKeyValueJoin::addBlockToJoin(const Block &, bool)
{
throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Unreachable code reached");
}
diff --git a/src/Interpreters/DirectJoin.h b/src/Interpreters/DirectJoin.h
index 644b66a9d99..e55ac278705 100644
--- a/src/Interpreters/DirectJoin.h
+++ b/src/Interpreters/DirectJoin.h
@@ -32,10 +32,10 @@ public:
virtual const TableJoin & getTableJoin() const override { return *table_join; }
- virtual bool addJoinedBlock(const Block &, bool) override;
+ virtual bool addBlockToJoin(const Block &, bool) override;
virtual void checkTypesOfKeys(const Block &) const override;
- /// Join the block with data from left hand of JOIN to the right hand data (that was previously built by calls to addJoinedBlock).
+ /// Join the block with data from left hand of JOIN to the right hand data (that was previously built by calls to addBlockToJoin).
/// Could be called from different threads in parallel.
virtual void joinBlock(Block & block, std::shared_ptr &) override;
diff --git a/src/Interpreters/FilesystemCacheLog.h b/src/Interpreters/FilesystemCacheLog.h
index d6dd00e5463..0d088a922e0 100644
--- a/src/Interpreters/FilesystemCacheLog.h
+++ b/src/Interpreters/FilesystemCacheLog.h
@@ -11,16 +11,7 @@
namespace DB
{
-///
-/// -------- Column --------- Type ------
-/// | event_date | DateTime |
-/// | event_time | UInt64 |
-/// | query_id | String |
-/// | remote_file_path | String |
-/// | segment_range | Tuple |
-/// | read_type | String |
-/// -------------------------------------
-///
+
struct FilesystemCacheLogElement
{
enum class CacheType
diff --git a/src/Interpreters/FullSortingMergeJoin.h b/src/Interpreters/FullSortingMergeJoin.h
index 7318d1d24a1..a6b53a51c04 100644
--- a/src/Interpreters/FullSortingMergeJoin.h
+++ b/src/Interpreters/FullSortingMergeJoin.h
@@ -30,9 +30,9 @@ public:
const TableJoin & getTableJoin() const override { return *table_join; }
- bool addJoinedBlock(const Block & /* block */, bool /* check_limits */) override
+ bool addBlockToJoin(const Block & /* block */, bool /* check_limits */) override
{
- throw Exception(ErrorCodes::LOGICAL_ERROR, "FullSortingMergeJoin::addJoinedBlock should not be called");
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "FullSortingMergeJoin::addBlockToJoin should not be called");
}
static bool isSupported(const std::shared_ptr & table_join)
diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp
index 4218a8ea4e1..5b9521f3774 100644
--- a/src/Interpreters/GraceHashJoin.cpp
+++ b/src/Interpreters/GraceHashJoin.cpp
@@ -288,10 +288,7 @@ void GraceHashJoin::initBuckets()
size_t initial_num_buckets = roundUpToPowerOfTwoOrZero(std::clamp(settings.grace_hash_join_initial_buckets, 1, settings.grace_hash_join_max_buckets));
- for (size_t i = 0; i < initial_num_buckets; ++i)
- {
- addBucket(buckets);
- }
+ addBuckets(initial_num_buckets);
if (buckets.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "No buckets created");
@@ -310,13 +307,13 @@ bool GraceHashJoin::isSupported(const std::shared_ptr & table_join)
GraceHashJoin::~GraceHashJoin() = default;
-bool GraceHashJoin::addJoinedBlock(const Block & block, bool /*check_limits*/)
+bool GraceHashJoin::addBlockToJoin(const Block & block, bool /*check_limits*/)
{
if (current_bucket == nullptr)
throw Exception(ErrorCodes::LOGICAL_ERROR, "GraceHashJoin is not initialized");
Block materialized = materializeBlock(block);
- addJoinedBlockImpl(std::move(materialized));
+ addBlockToJoinImpl(std::move(materialized));
return true;
}
@@ -356,52 +353,66 @@ bool GraceHashJoin::hasMemoryOverflow(const InMemoryJoinPtr & hash_join_) const
return hasMemoryOverflow(total_rows, total_bytes);
}
-GraceHashJoin::Buckets GraceHashJoin::rehashBuckets(size_t to_size)
+GraceHashJoin::Buckets GraceHashJoin::rehashBuckets()
{
std::unique_lock lock(rehash_mutex);
+
+ if (!isPowerOf2(buckets.size())) [[unlikely]]
+ throw Exception(ErrorCodes::LOGICAL_ERROR, "Number of buckets should be power of 2 but it's {}", buckets.size());
+
+ const size_t to_size = buckets.size() * 2;
size_t current_size = buckets.size();
- if (to_size <= current_size)
- return buckets;
-
- chassert(isPowerOf2(to_size));
-
if (to_size > max_num_buckets)
{
- throw Exception(ErrorCodes::LIMIT_EXCEEDED,
+ throw Exception(
+ ErrorCodes::LIMIT_EXCEEDED,
"Too many grace hash join buckets ({} > {}), "
"consider increasing grace_hash_join_max_buckets or max_rows_in_join/max_bytes_in_join",
- to_size, max_num_buckets);
+ to_size,
+ max_num_buckets);
}
LOG_TRACE(log, "Rehashing from {} to {}", current_size, to_size);
- buckets.reserve(to_size);
- for (size_t i = current_size; i < to_size; ++i)
- addBucket(buckets);
+ addBuckets(to_size - current_size);
return buckets;
}
-void GraceHashJoin::addBucket(Buckets & destination)
+void GraceHashJoin::addBuckets(const size_t bucket_count)
{
- // There could be exceptions from createStream, In ci tests
- // there is a certain probability of failure in allocating memory, see memory_tracker_fault_probability.
- // It may terminate this thread and leave a broken hash_join, and another thread cores when it tries to
- // use the broken hash_join. So we print an exception message here to help debug.
- try
- {
- auto & left_file = tmp_data->createStream(left_sample_block);
- auto & right_file = tmp_data->createStream(prepareRightBlock(right_sample_block));
+ // An exception can be thrown in a number of cases:
+ // - during creation of temporary files for buckets
+ // - in CI tests, there is a certain probability of failure in allocating memory, see memory_tracker_fault_probability
+ // Therefore, new buckets are added only after all of them have been created successfully;
+ // otherwise we could end up with an unexpected number of buckets
- BucketPtr new_bucket = std::make_shared(destination.size(), left_file, right_file, log);
- destination.emplace_back(std::move(new_bucket));
- }
- catch (...)
- {
- LOG_ERROR(&Poco::Logger::get("GraceHashJoin"), "Can't create bucket. current buckets size: {}", destination.size());
- throw;
- }
+ const size_t current_size = buckets.size();
+ Buckets tmp_buckets;
+ tmp_buckets.reserve(bucket_count);
+ for (size_t i = 0; i < bucket_count; ++i)
+ try
+ {
+ auto & left_file = tmp_data->createStream(left_sample_block);
+ auto & right_file = tmp_data->createStream(prepareRightBlock(right_sample_block));
+
+ BucketPtr new_bucket = std::make_shared(current_size + i, left_file, right_file, log);
+ tmp_buckets.emplace_back(std::move(new_bucket));
+ }
+ catch (...)
+ {
+ LOG_ERROR(
+ &Poco::Logger::get("GraceHashJoin"),
+ "Can't create bucket {} due to error: {}",
+ current_size + i,
+ getCurrentExceptionMessage(false));
+ throw;
+ }
+
+ buckets.reserve(buckets.size() + bucket_count);
+ for (auto & bucket : tmp_buckets)
+ buckets.emplace_back(std::move(bucket));
}
void GraceHashJoin::checkTypesOfKeys(const Block & block) const
@@ -596,7 +607,7 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks()
while (Block block = right_reader.read())
{
num_rows += block.rows();
- addJoinedBlockImpl(std::move(block));
+ addBlockToJoinImpl(std::move(block));
}
LOG_TRACE(log, "Loaded bucket {} with {}(/{}) rows",
@@ -621,7 +632,7 @@ Block GraceHashJoin::prepareRightBlock(const Block & block)
return HashJoin::prepareRightBlock(block, hash_join_sample_block);
}
-void GraceHashJoin::addJoinedBlockImpl(Block block)
+void GraceHashJoin::addBlockToJoinImpl(Block block)
{
block = prepareRightBlock(block);
Buckets buckets_snapshot = getCurrentBuckets();
@@ -638,15 +649,10 @@ void GraceHashJoin::addJoinedBlockImpl(Block block)
if (current_block.rows() > 0)
{
std::lock_guard lock(hash_join_mutex);
- auto current_buckets = getCurrentBuckets();
- if (!isPowerOf2(current_buckets.size())) [[unlikely]]
- {
- throw Exception(ErrorCodes::LOGICAL_ERROR, "Broken buckets. its size({}) is not power of 2", current_buckets.size());
- }
if (!hash_join)
hash_join = makeInMemoryJoin();
- hash_join->addJoinedBlock(current_block, /* check_limits = */ false);
+ hash_join->addBlockToJoin(current_block, /* check_limits = */ false);
if (!hasMemoryOverflow(hash_join))
return;
@@ -654,7 +660,7 @@ void GraceHashJoin::addJoinedBlockImpl(Block block)
current_block = {};
// Must use the latest buckets snapshot in case that it has been rehashed by other threads.
- buckets_snapshot = rehashBuckets(current_buckets.size() * 2);
+ buckets_snapshot = rehashBuckets();
auto right_blocks = hash_join->releaseJoinedBlocks(/* restructure */ false);
hash_join = nullptr;
@@ -677,7 +683,7 @@ void GraceHashJoin::addJoinedBlockImpl(Block block)
hash_join = makeInMemoryJoin();
if (current_block.rows() > 0)
- hash_join->addJoinedBlock(current_block, /* check_limits = */ false);
+ hash_join->addBlockToJoin(current_block, /* check_limits = */ false);
}
}
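
addBuckets now builds the new buckets into a temporary vector and appends them to the shared list only after every creation succeeded, so a failed createStream cannot leave the join with a partially grown bucket set. A simplified standalone sketch of that create-all-then-commit pattern (Bucket and the failure flag are stand-ins):

    #include <iostream>
    #include <memory>
    #include <stdexcept>
    #include <vector>

    struct Bucket { size_t index; };

    static std::vector<std::unique_ptr<Bucket>> buckets;

    void addBuckets(size_t bucket_count, bool fail_midway)
    {
        std::vector<std::unique_ptr<Bucket>> tmp;
        tmp.reserve(bucket_count);
        for (size_t i = 0; i < bucket_count; ++i)
        {
            if (fail_midway && i == bucket_count / 2)
                throw std::runtime_error("cannot create bucket");  /// simulates a stream-creation failure
            tmp.push_back(std::make_unique<Bucket>(Bucket{buckets.size() + i}));
        }
        /// Commit only after every bucket was created successfully.
        for (auto & bucket : tmp)
            buckets.push_back(std::move(bucket));
    }

    int main()
    {
        addBuckets(2, false);
        try { addBuckets(4, true); }
        catch (const std::exception & e) { std::cout << e.what() << "\n"; }
        std::cout << "buckets: " << buckets.size() << "\n";  /// still 2: the partial batch was discarded
    }
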
diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h
index b8d83f4cad0..3736e9c5e1e 100644
--- a/src/Interpreters/GraceHashJoin.h
+++ b/src/Interpreters/GraceHashJoin.h
@@ -23,11 +23,11 @@ class HashJoin;
*
* The joining algorithm consists of three stages:
*
- * 1) During the first stage we accumulate blocks of the right table via @addJoinedBlock.
+ * 1) During the first stage we accumulate blocks of the right table via @addBlockToJoin.
* Each input block is split into multiple buckets based on the hash of the row join keys.
* The first bucket is added to the in-memory HashJoin, and the remaining buckets are written to disk for further processing.
* When the size of HashJoin exceeds the limits, we double the number of buckets.
- * There can be multiple threads calling addJoinedBlock, just like @ConcurrentHashJoin.
+ * There can be multiple threads calling addBlockToJoin, just like @ConcurrentHashJoin.
*
* 2) At the second stage we process left table blocks via @joinBlock.
* Again, each input block is split into multiple buckets by hash.
@@ -65,7 +65,7 @@ public:
void initialize(const Block & sample_block) override;
- bool addJoinedBlock(const Block & block, bool check_limits) override;
+ bool addBlockToJoin(const Block & block, bool check_limits) override;
void checkTypesOfKeys(const Block & block) const override;
void joinBlock(Block & block, std::shared_ptr & not_processed) override;
@@ -94,22 +94,23 @@ private:
InMemoryJoinPtr makeInMemoryJoin();
/// Add right table block to the @join. Calls @rehash on overflow.
- void addJoinedBlockImpl(Block block);
+ void addBlockToJoinImpl(Block block);
/// Check that join satisfies limits on rows/bytes in table_join.
bool hasMemoryOverflow(size_t total_rows, size_t total_bytes) const;
bool hasMemoryOverflow(const InMemoryJoinPtr & hash_join_) const;
bool hasMemoryOverflow(const BlocksList & blocks) const;
- /// Create new bucket at the end of @destination.
- void addBucket(Buckets & destination);
+ /// Add bucket_count new buckets
+ /// Throws if a bucket creation fails
+ void addBuckets(size_t bucket_count);
 /// Double the number of buckets.
/// Called when HashJoin in-memory table for one bucket exceeds the limits.
///
/// NB: after @rehashBuckets there may be rows that are written to the buckets that they do not belong to.
/// It is fine; these rows will be written to the corresponding buckets during the third stage.
- Buckets rehashBuckets(size_t to_size);
+ Buckets rehashBuckets();
/// Perform some bookkeeping after all calls to @joinBlock.
void startReadingDelayedBlocks();
diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp
index 6fe2b8464f5..3a464de0f12 100644
--- a/src/Interpreters/HashJoin.cpp
+++ b/src/Interpreters/HashJoin.cpp
@@ -79,8 +79,8 @@ namespace JoinStuff
{
assert(flags[nullptr].size() <= size);
need_flags = true;
- // For one disjunct clause case, we don't need to reinit each time we call addJoinedBlock.
- // and there is no value inserted in this JoinUsedFlags before addJoinedBlock finish.
+ // For the single disjunct clause case, we don't need to reinit each time we call addBlockToJoin,
+ // and no value is inserted into this JoinUsedFlags before addBlockToJoin finishes.
// So we reinit only when the hash table is rehashed to a larger size.
if (flags.empty() || flags[nullptr].size() < size) [[unlikely]]
{
@@ -581,7 +581,7 @@ namespace
};
- template
+ template
size_t NO_INLINE insertFromBlockImplTypeCase(
HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns,
const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted)
@@ -600,7 +600,7 @@ namespace
for (size_t i = 0; i < rows; ++i)
{
- if (has_null_map && (*null_map)[i])
+ if (null_map && (*null_map)[i])
{
/// nulls are not inserted into hash table,
/// keep them for RIGHT and FULL joins
@@ -622,21 +622,6 @@ namespace
return map.getBufferSizeInCells();
}
-
- template
- size_t insertFromBlockImplType(
- HashJoin & join, Map & map, size_t rows, const ColumnRawPtrs & key_columns,
- const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted)
- {
- if (null_map)
- return insertFromBlockImplTypeCase(
- join, map, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted);
- else
- return insertFromBlockImplTypeCase(
- join, map, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted);
- }
-
-
template
size_t insertFromBlockImpl(
HashJoin & join, HashJoin::Type type, Maps & maps, size_t rows, const ColumnRawPtrs & key_columns,
@@ -653,7 +638,7 @@ namespace
#define M(TYPE) \
case HashJoin::Type::TYPE: \
- return insertFromBlockImplType>::Type>(\
+ return insertFromBlockImplTypeCase>::Type>(\
join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted); \
break;
@@ -729,7 +714,7 @@ Block HashJoin::prepareRightBlock(const Block & block) const
return prepareRightBlock(block, savedBlockSample());
}
-bool HashJoin::addJoinedBlock(const Block & source_block_, bool check_limits)
+bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits)
{
if (!data)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Join data was released");
@@ -781,7 +766,7 @@ bool HashJoin::addJoinedBlock(const Block & source_block_, bool check_limits)
size_t total_bytes = 0;
{
if (storage_join_lock)
- throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "addJoinedBlock called when HashJoin locked to prevent updates");
+ throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "addBlockToJoin called when HashJoin locked to prevent updates");
data->blocks_allocated_size += block_to_save.allocatedBytes();
data->blocks.emplace_back(std::move(block_to_save));
@@ -1260,7 +1245,7 @@ void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unuse
/// Joins right table columns which indexes are present in right_indexes using specified map.
/// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS).
-template
+template
NO_INLINE IColumn::Filter joinRightColumns(
std::vector && key_getter_vector,
const std::vector & mapv,
@@ -1284,20 +1269,13 @@ NO_INLINE IColumn::Filter joinRightColumns(
for (size_t i = 0; i < rows; ++i)
{
bool right_row_found = false;
- bool null_element_found = false;
KnownRowsHolder known_rows;
for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx)
{
const auto & join_keys = added_columns.join_on_keys[onexpr_idx];
- if constexpr (has_null_map)
- {
- if (join_keys.null_map && (*join_keys.null_map)[i])
- {
- null_element_found = true;
- continue;
- }
- }
+ if (join_keys.null_map && (*join_keys.null_map)[i])
+ continue;
bool row_acceptable = !join_keys.isRowFiltered(i);
using FindResult = typename KeyGetter::FindResult;
@@ -1379,20 +1357,6 @@ NO_INLINE IColumn::Filter joinRightColumns(
}
}
- if constexpr (has_null_map)
- {
- if (!right_row_found && null_element_found)
- {
- addNotFoundRow(added_columns, current_offset);
-
- if constexpr (join_features.need_replication)
- {
- (*added_columns.offsets_to_replicate)[i] = current_offset;
- }
- continue;
- }
- }
-
if (!right_row_found)
{
if constexpr (join_features.is_anti_join && join_features.left)
@@ -1410,7 +1374,7 @@ NO_INLINE IColumn::Filter joinRightColumns(
return filter;
}
-template
+template
IColumn::Filter joinRightColumnsSwitchMultipleDisjuncts(
std::vector && key_getter_vector,
const std::vector & mapv,
@@ -1418,8 +1382,8 @@ IColumn::Filter joinRightColumnsSwitchMultipleDisjuncts(
JoinStuff::JoinUsedFlags & used_flags [[maybe_unused]])
{
return mapv.size() > 1
- ? joinRightColumns(std::forward>(key_getter_vector), mapv, added_columns, used_flags)
- : joinRightColumns(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
+ ? joinRightColumns(std::forward>(key_getter_vector), mapv, added_columns, used_flags)
+ : joinRightColumns(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
}
template
@@ -1429,21 +1393,13 @@ IColumn::Filter joinRightColumnsSwitchNullability(
AddedColumns & added_columns,
JoinStuff::JoinUsedFlags & used_flags)
{
- bool has_null_map = std::any_of(added_columns.join_on_keys.begin(), added_columns.join_on_keys.end(),
- [](const auto & k) { return k.null_map; });
if (added_columns.need_filter)
{
- if (has_null_map)
- return joinRightColumnsSwitchMultipleDisjuncts(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
- else
- return joinRightColumnsSwitchMultipleDisjuncts(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
+ return joinRightColumnsSwitchMultipleDisjuncts(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
}
else
{
- if (has_null_map)
- return joinRightColumnsSwitchMultipleDisjuncts(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
- else
- return joinRightColumnsSwitchMultipleDisjuncts(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
+ return joinRightColumnsSwitchMultipleDisjuncts(std::forward>(key_getter_vector), mapv, added_columns, used_flags);
}
}
@@ -1868,7 +1824,7 @@ struct AdderNonJoined
/// Based on:
/// - map offsetInternal saved in used_flags for single disjuncts
/// - flags in BlockWithFlags for multiple disjuncts
-template
+template
class NotJoinedHash final : public NotJoinedBlocks::RightColumnsFiller
{
public:
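
The has_null_map template parameter is gone: the hot loops now test the null-map pointer at runtime, which halves the number of instantiations while keeping the per-row cost to a pointer check. A standalone sketch of the simplified shape (countInsertableRows and NullMap are illustrative names, not ClickHouse types):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    using NullMap = std::vector<uint8_t>;

    size_t countInsertableRows(const std::vector<int> & keys, const NullMap * null_map)
    {
        size_t inserted = 0;
        for (size_t i = 0; i < keys.size(); ++i)
        {
            if (null_map && (*null_map)[i])  /// runtime check; short-circuits when no null map is given
                continue;                    /// NULL keys are not inserted into the hash table
            ++inserted;
        }
        return inserted;
    }

    int main()
    {
        std::vector<int> keys = {1, 2, 3, 4};
        NullMap null_map = {0, 1, 0, 1};
        std::cout << countInsertableRows(keys, nullptr) << "\n";    /// 4
        std::cout << countInsertableRows(keys, &null_map) << "\n";  /// 2
    }
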
diff --git a/src/Interpreters/HashJoin.h b/src/Interpreters/HashJoin.h
index 50eda4482bd..f30bbc3a46c 100644
--- a/src/Interpreters/HashJoin.h
+++ b/src/Interpreters/HashJoin.h
@@ -155,11 +155,11 @@ public:
/** Add block of data from right hand of JOIN to the map.
* Returns false, if some limit was exceeded and you should not insert more data.
*/
- bool addJoinedBlock(const Block & source_block_, bool check_limits) override;
+ bool addBlockToJoin(const Block & source_block_, bool check_limits) override;
void checkTypesOfKeys(const Block & block) const override;
- /** Join data from the map (that was previously built by calls to addJoinedBlock) to the block with data from "left" table.
+ /** Join data from the map (that was previously built by calls to addBlockToJoin) to the block with data from "left" table.
* Could be called from different threads in parallel.
*/
void joinBlock(Block & block, ExtraBlockPtr & not_processed) override;
@@ -406,7 +406,7 @@ private:
Poco::Logger * log;
/// Should be set via setLock to protect hash table from modification from StorageJoin
- /// If set HashJoin instance is not available for modification (addJoinedBlock)
+ /// If set HashJoin instance is not available for modification (addBlockToJoin)
TableLockHolder storage_join_lock = nullptr;
void dataMapInit(MapsVariant &);
diff --git a/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h
index 83067b0eab7..97b119bd795 100644
--- a/src/Interpreters/IJoin.h
+++ b/src/Interpreters/IJoin.h
@@ -52,7 +52,7 @@ public:
/// Add block of data from right hand of JOIN.
/// @returns false, if some limit was exceeded and you should not insert more data.
- virtual bool addJoinedBlock(const Block & block, bool check_limits = true) = 0; /// NOLINT
+ virtual bool addBlockToJoin(const Block & block, bool check_limits = true) = 0; /// NOLINT
/* Some initialization may be required before joinBlock() call.
 * It's better to do it in the constructor, but the exact structure of the left block is not known at that moment.
@@ -62,7 +62,7 @@ public:
virtual void checkTypesOfKeys(const Block & block) const = 0;
- /// Join the block with data from left hand of JOIN to the right hand data (that was previously built by calls to addJoinedBlock).
+ /// Join the block with data from left hand of JOIN to the right hand data (that was previously built by calls to addBlockToJoin).
/// Could be called from different threads in parallel.
virtual void joinBlock(Block & block, std::shared_ptr & not_processed) = 0;
@@ -79,7 +79,7 @@ public:
/// Returns true if no data to join with.
virtual bool alwaysReturnsEmptySet() const = 0;
- /// StorageJoin/Dictionary is already filled. No need to call addJoinedBlock.
+ /// StorageJoin/Dictionary is already filled. No need to call addBlockToJoin.
/// Different query plan is used for such joins.
virtual bool isFilled() const { return pipelineType() == JoinPipelineType::FilledRight; }
virtual JoinPipelineType pipelineType() const { return JoinPipelineType::FillRightFirst; }
diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp
index d0bb3dd389f..55d2449f739 100644
--- a/src/Interpreters/InterpreterCreateQuery.cpp
+++ b/src/Interpreters/InterpreterCreateQuery.cpp
@@ -881,46 +881,24 @@ void InterpreterCreateQuery::validateTableStructure(const ASTCreateQuery & creat
}
}
-String InterpreterCreateQuery::getTableEngineName(DefaultTableEngine default_table_engine)
+namespace
{
- switch (default_table_engine)
+ void checkTemporaryTableEngineName(const String & name)
{
- case DefaultTableEngine::Log:
- return "Log";
-
- case DefaultTableEngine::StripeLog:
- return "StripeLog";
-
- case DefaultTableEngine::MergeTree:
- return "MergeTree";
-
- case DefaultTableEngine::ReplacingMergeTree:
- return "ReplacingMergeTree";
-
- case DefaultTableEngine::ReplicatedMergeTree:
- return "ReplicatedMergeTree";
-
- case DefaultTableEngine::ReplicatedReplacingMergeTree:
- return "ReplicatedReplacingMergeTree";
-
- case DefaultTableEngine::Memory:
- return "Memory";
-
- default:
- throw Exception(ErrorCodes::LOGICAL_ERROR, "default_table_engine is set to unknown value");
+ if (name.starts_with("Replicated") || name == "KeeperMap")
+ throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with Replicated or KeeperMap table engines");
}
-}
-void InterpreterCreateQuery::setDefaultTableEngine(ASTStorage & storage, ContextPtr local_context)
-{
- if (local_context->getSettingsRef().default_table_engine.value == DefaultTableEngine::None)
- throw Exception(ErrorCodes::ENGINE_REQUIRED, "Table engine is not specified in CREATE query");
+ void setDefaultTableEngine(ASTStorage & storage, DefaultTableEngine engine)
+ {
+ if (engine == DefaultTableEngine::None)
+ throw Exception(ErrorCodes::ENGINE_REQUIRED, "Table engine is not specified in CREATE query");
- auto engine_ast = std::make_shared();
- auto default_table_engine = local_context->getSettingsRef().default_table_engine.value;
- engine_ast->name = getTableEngineName(default_table_engine);
- engine_ast->no_empty_args = true;
- storage.set(storage.engine, engine_ast);
+ auto engine_ast = std::make_shared();
+ engine_ast->name = SettingFieldDefaultTableEngine(engine).toString();
+ engine_ast->no_empty_args = true;
+ storage.set(storage.engine, engine_ast);
+ }
}
void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
@@ -936,32 +914,23 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
if (create.temporary)
{
- /// It's possible if some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not.
- /// It makes sense when default_table_engine setting is used, but not for temporary tables.
- /// For temporary tables we ignore this setting to allow CREATE TEMPORARY TABLE query without specifying ENGINE
+ /// Some part of the storage definition is specified, but ENGINE is not: just take the engine from the default_temporary_table_engine setting.
if (!create.cluster.empty())
throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with ON CLUSTER clause");
- if (create.storage)
+ if (!create.storage)
{
- if (create.storage->engine)
- {
- if (create.storage->engine->name.starts_with("Replicated") || create.storage->engine->name == "KeeperMap")
- throw Exception(ErrorCodes::INCORRECT_QUERY, "Temporary tables cannot be created with Replicated or KeeperMap table engines");
- }
- else
- throw Exception(ErrorCodes::INCORRECT_QUERY, "Invalid storage definition for temporary table");
- }
- else
- {
- auto engine_ast = std::make_shared();
- engine_ast->name = "Memory";
- engine_ast->no_empty_args = true;
auto storage_ast = std::make_shared();
- storage_ast->set(storage_ast->engine, engine_ast);
create.set(create.storage, storage_ast);
}
+
+ if (!create.storage->engine)
+ {
+ setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_temporary_table_engine.value);
+ }
+
+ checkTemporaryTableEngineName(create.storage->engine->name);
return;
}
@@ -969,7 +938,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
{
/// Some part of storage definition (such as PARTITION BY) is specified, but ENGINE is not: just set default one.
if (!create.storage->engine)
- setDefaultTableEngine(*create.storage, getContext());
+ setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value);
return;
}
@@ -1008,7 +977,7 @@ void InterpreterCreateQuery::setEngine(ASTCreateQuery & create) const
}
create.set(create.storage, std::make_shared());
- setDefaultTableEngine(*create.storage, getContext());
+ setDefaultTableEngine(*create.storage, getContext()->getSettingsRef().default_table_engine.value);
}
static void generateUUIDForTable(ASTCreateQuery & create)
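
Temporary tables without an explicit ENGINE now pick it up from the default_temporary_table_engine setting, and Replicated/KeeperMap engines are rejected for them. A hedged sketch of that resolution logic with an invented helper name (resolveTemporaryTableEngine is not a real ClickHouse function); it assumes C++20 for std::string::starts_with:

    #include <iostream>
    #include <optional>
    #include <stdexcept>
    #include <string>

    std::string resolveTemporaryTableEngine(std::optional<std::string> engine_in_query,
                                            const std::string & default_temporary_table_engine)
    {
        /// If the query did not specify ENGINE, fall back to the setting.
        std::string engine = engine_in_query.value_or(default_temporary_table_engine);

        /// Engines that rely on shared state are not allowed for temporary tables.
        if (engine.starts_with("Replicated") || engine == "KeeperMap")
            throw std::runtime_error("Temporary tables cannot be created with Replicated or KeeperMap table engines");

        return engine;
    }

    int main()
    {
        std::cout << resolveTemporaryTableEngine(std::nullopt, "Memory") << "\n";  /// Memory
        std::cout << resolveTemporaryTableEngine("Log", "Memory") << "\n";         /// Log
        try { resolveTemporaryTableEngine("ReplicatedMergeTree", "Memory"); }
        catch (const std::exception & e) { std::cout << e.what() << "\n"; }
    }
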
diff --git a/src/Interpreters/InterpreterCreateQuery.h b/src/Interpreters/InterpreterCreateQuery.h
index a5fa6576091..67339dea928 100644
--- a/src/Interpreters/InterpreterCreateQuery.h
+++ b/src/Interpreters/InterpreterCreateQuery.h
@@ -90,8 +90,6 @@ private:
/// Calculate list of columns, constraints, indices, etc... of table. Rewrite query in canonical way.
TableProperties getTablePropertiesAndNormalizeCreateQuery(ASTCreateQuery & create) const;
void validateTableStructure(const ASTCreateQuery & create, const TableProperties & properties) const;
- static String getTableEngineName(DefaultTableEngine default_table_engine);
- static void setDefaultTableEngine(ASTStorage & storage, ContextPtr local_context);
void setEngine(ASTCreateQuery & create) const;
AccessRightsElements getRequiredAccess() const;
diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp
index e1ff8676bc7..1c2eb66923e 100644
--- a/src/Interpreters/InterpreterSystemQuery.cpp
+++ b/src/Interpreters/InterpreterSystemQuery.cpp
@@ -370,15 +370,15 @@ BlockIO InterpreterSystemQuery::execute()
else
{
auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache;
- if (query.delete_key.empty())
+ if (query.key_to_drop.empty())
{
cache->removeAllReleasable();
}
else
{
- auto key = FileCacheKey::fromKeyString(query.delete_key);
- if (query.delete_offset.has_value())
- cache->removeFileSegment(key, query.delete_offset.value());
+ auto key = FileCacheKey::fromKeyString(query.key_to_drop);
+ if (query.offset_to_drop.has_value())
+ cache->removeFileSegment(key, query.offset_to_drop.value());
else
cache->removeKey(key);
}
diff --git a/src/Interpreters/JoinSwitcher.cpp b/src/Interpreters/JoinSwitcher.cpp
index 15702784d74..5ea347549c1 100644
--- a/src/Interpreters/JoinSwitcher.cpp
+++ b/src/Interpreters/JoinSwitcher.cpp
@@ -19,16 +19,16 @@ JoinSwitcher::JoinSwitcher(std::shared_ptr table_join_, const Block &
limits.max_bytes = table_join->defaultMaxBytes();
}
-bool JoinSwitcher::addJoinedBlock(const Block & block, bool)
+bool JoinSwitcher::addBlockToJoin(const Block & block, bool)
{
std::lock_guard lock(switch_mutex);
if (switched)
- return join->addJoinedBlock(block);
+ return join->addBlockToJoin(block);
/// HashJoin with external limits check
- join->addJoinedBlock(block, false);
+ join->addBlockToJoin(block, false);
size_t rows = join->getTotalRowCount();
size_t bytes = join->getTotalByteCount();
@@ -48,7 +48,7 @@ bool JoinSwitcher::switchJoin()
bool success = true;
for (const Block & saved_block : right_blocks)
- success = success && join->addJoinedBlock(saved_block);
+ success = success && join->addBlockToJoin(saved_block);
switched = true;
return success;
diff --git a/src/Interpreters/JoinSwitcher.h b/src/Interpreters/JoinSwitcher.h
index eec4787037d..fb5066b2d04 100644
--- a/src/Interpreters/JoinSwitcher.h
+++ b/src/Interpreters/JoinSwitcher.h
@@ -23,7 +23,7 @@ public:
/// Add block of data from right hand of JOIN into current join object.
/// If join-in-memory memory limit exceeded switches to join-on-disk and continue with it.
/// @returns false, if join-on-disk disk limit exceeded
- bool addJoinedBlock(const Block & block, bool check_limits) override;
+ bool addBlockToJoin(const Block & block, bool check_limits) override;
void checkTypesOfKeys(const Block & block) const override
{
diff --git a/src/Interpreters/MergeJoin.cpp b/src/Interpreters/MergeJoin.cpp
index d31510c2fb5..ceef1371f16 100644
--- a/src/Interpreters/MergeJoin.cpp
+++ b/src/Interpreters/MergeJoin.cpp
@@ -669,7 +669,7 @@ Block MergeJoin::modifyRightBlock(const Block & src_block) const
return block;
}
-bool MergeJoin::addJoinedBlock(const Block & src_block, bool)
+bool MergeJoin::addBlockToJoin(const Block & src_block, bool)
{
Block block = modifyRightBlock(src_block);
diff --git a/src/Interpreters/MergeJoin.h b/src/Interpreters/MergeJoin.h
index 8b5d884a0e6..03a661c5b8a 100644
--- a/src/Interpreters/MergeJoin.h
+++ b/src/Interpreters/MergeJoin.h
@@ -23,7 +23,7 @@ public:
MergeJoin(std::shared_ptr table_join_, const Block & right_sample_block);
const TableJoin & getTableJoin() const override { return *table_join; }
- bool addJoinedBlock(const Block & block, bool check_limits) override;
+ bool addBlockToJoin(const Block & block, bool check_limits) override;
void checkTypesOfKeys(const Block & block) const override;
void joinBlock(Block &, ExtraBlockPtr & not_processed) override;
diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp
index 9c5e7bff61e..22244a7075c 100644
--- a/src/Parsers/ASTSystemQuery.cpp
+++ b/src/Parsers/ASTSystemQuery.cpp
@@ -212,11 +212,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &,
if (!filesystem_cache_name.empty())
{
settings.ostr << (settings.hilite ? hilite_none : "") << " " << filesystem_cache_name;
- if (!delete_key.empty())
+ if (!key_to_drop.empty())
{
- settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << delete_key;
- if (delete_offset.has_value())
- settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << delete_offset.value();
+ settings.ostr << (settings.hilite ? hilite_none : "") << " KEY " << key_to_drop;
+ if (offset_to_drop.has_value())
+ settings.ostr << (settings.hilite ? hilite_none : "") << " OFFSET " << offset_to_drop.value();
}
}
}
diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h
index ebc3e9cd430..6c81162f103 100644
--- a/src/Parsers/ASTSystemQuery.h
+++ b/src/Parsers/ASTSystemQuery.h
@@ -107,8 +107,8 @@ public:
UInt64 seconds{};
String filesystem_cache_name;
- std::string delete_key;
- std::optional delete_offset;
+ std::string key_to_drop;
+ std::optional offset_to_drop;
String backup_name;
diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp
index ef71e994d56..09c86876b48 100644
--- a/src/Parsers/ParserSystemQuery.cpp
+++ b/src/Parsers/ParserSystemQuery.cpp
@@ -409,9 +409,9 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
res->filesystem_cache_name = ast->as()->value.safeGet();
if (ParserKeyword{"KEY"}.ignore(pos, expected) && ParserIdentifier().parse(pos, ast, expected))
{
- res->delete_key = ast->as()->name();
+ res->key_to_drop = ast->as()->name();
if (ParserKeyword{"OFFSET"}.ignore(pos, expected) && ParserLiteral().parse(pos, ast, expected))
- res->delete_offset = ast->as()->value.safeGet();
+ res->offset_to_drop = ast->as()->value.safeGet();
}
}
if (!parseQueryWithOnCluster(res, pos, expected))
diff --git a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp
index 4ac972e2a79..eebbfc04304 100644
--- a/src/Processors/QueryPlan/AggregatingStep.cpp
+++ b/src/Processors/QueryPlan/AggregatingStep.cpp
@@ -319,6 +319,8 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
{
auto column_with_default = col.column->cloneEmpty();
col.type->insertDefaultInto(*column_with_default);
+ column_with_default->finalize();
+
auto column = ColumnConst::create(std::move(column_with_default), 0);
const auto * node = &dag->addColumn({ColumnPtr(std::move(column)), col.type, col.name});
node = &dag->materializeNode(*node);
diff --git a/src/Processors/Transforms/JoiningTransform.cpp b/src/Processors/Transforms/JoiningTransform.cpp
index bba8ec6fa16..49b90d04b81 100644
--- a/src/Processors/Transforms/JoiningTransform.cpp
+++ b/src/Processors/Transforms/JoiningTransform.cpp
@@ -305,7 +305,7 @@ void FillingRightJoinSideTransform::work()
if (for_totals)
join->setTotals(block);
else
- stop_reading = !join->addJoinedBlock(block);
+ stop_reading = !join->addBlockToJoin(block);
set_totals = for_totals;
}
diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp
index b73e2cca314..e1921f45eda 100644
--- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp
+++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp
@@ -455,22 +455,34 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::freeze(
MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart(
const std::string & to,
const std::string & dir_path,
- const DiskPtr & disk,
+ const DiskPtr & dst_disk,
Poco::Logger * log) const
{
String path_to_clone = fs::path(to) / dir_path / "";
+ auto src_disk = volume->getDisk();
- if (disk->exists(path_to_clone))
+ if (dst_disk->exists(path_to_clone))
{
- LOG_WARNING(log, "Path {} already exists. Will remove it and clone again.", fullPath(disk, path_to_clone));
- disk->removeRecursive(path_to_clone);
+ throw Exception(ErrorCodes::DIRECTORY_ALREADY_EXISTS,
+ "Cannot clone part {} from '{}' to '{}': path '{}' already exists",
+ dir_path, getRelativePath(), path_to_clone, fullPath(dst_disk, path_to_clone));
}
- disk->createDirectories(to);
- volume->getDisk()->copy(getRelativePath(), disk, to);
- volume->getDisk()->removeFileIfExists(fs::path(path_to_clone) / "delete-on-destroy.txt");
+ try
+ {
+ dst_disk->createDirectories(to);
+ src_disk->copyDirectoryContent(getRelativePath(), dst_disk, path_to_clone);
+ }
+ catch (...)
+ {
+ /// It's safe to remove it recursively (even with zero-copy replication)
+ /// because we've just done a full copy through copyDirectoryContent
+ LOG_WARNING(log, "Removing directory {} after failed attempt to move a data part", path_to_clone);
+ dst_disk->removeRecursive(path_to_clone);
+ throw;
+ }
- auto single_disk_volume = std::make_shared(disk->getName(), disk, 0);
+ auto single_disk_volume = std::make_shared(dst_disk->getName(), dst_disk, 0);
return create(single_disk_volume, to, dir_path, /*initialize=*/ true);
}
diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.h b/src/Storages/MergeTree/DataPartStorageOnDiskBase.h
index 5f7dcc3fd32..648bc908f59 100644
--- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.h
+++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.h
@@ -68,7 +68,7 @@ public:
MutableDataPartStoragePtr clonePart(
const std::string & to,
const std::string & dir_path,
- const DiskPtr & disk,
+ const DiskPtr & dst_disk,
Poco::Logger * log) const override;
void rename(
diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
index b9591864869..bd3f91abeeb 100644
--- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp
+++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp
@@ -502,8 +502,10 @@ void IMergeTreeDataPart::removeIfNeeded()
throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set",
getDataPartStorage().getPartDirectory(), name);
- const auto part_parent_directory = directoryPath(part_directory);
- bool is_moving_part = part_parent_directory.ends_with("moving/");
+ fs::path part_directory_path = getDataPartStorage().getRelativePath();
+ if (part_directory_path.filename().empty())
+ part_directory_path = part_directory_path.parent_path();
+ bool is_moving_part = part_directory_path.parent_path().filename() == "moving";
if (!startsWith(file_name, "tmp") && !endsWith(file_name, ".tmp_proj") && !is_moving_part)
{
LOG_ERROR(
diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h
index 8c379af193d..3ad47d3253b 100644
--- a/src/Storages/MergeTree/MergeTreeData.h
+++ b/src/Storages/MergeTree/MergeTreeData.h
@@ -1030,7 +1030,7 @@ public:
/// Fetch part only if some replica has it on shared storage like S3
/// Overridden in StorageReplicatedMergeTree
- virtual MutableDataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return nullptr; }
+ virtual MutableDataPartPtr tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return nullptr; }
/// Check shared data usage on other replicas for detached/freezed part
/// Remove local files and remote files if needed
diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp
index a8f34ba4cec..44987a4f082 100644
--- a/src/Storages/MergeTree/MergeTreePartsMover.cpp
+++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp
@@ -233,9 +233,15 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
disk->createDirectories(path_to_clone);
- cloned_part_storage = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name);
+ auto zero_copy_part = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name);
- if (!cloned_part_storage)
+ if (zero_copy_part)
+ {
+ /// FIXME: for some reason we cannot just use this part; we have to re-create it through MergeTreeDataPartBuilder
+ zero_copy_part->is_temp = false; /// Do not remove it in dtor
+ cloned_part_storage = zero_copy_part->getDataPartStoragePtr();
+ }
+ else
{
LOG_INFO(log, "Part {} was not fetched, we are the first who move it to another disk, so we will copy it", part->name);
cloned_part_storage = part->getDataPartStorage().clonePart(path_to_clone, part->getDataPartStorage().getPartDirectory(), disk, log);
diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp
index f4a071b8f27..1346d5937f7 100644
--- a/src/Storages/MergeTree/MutateTask.cpp
+++ b/src/Storages/MergeTree/MutateTask.cpp
@@ -67,7 +67,9 @@ static void splitAndModifyMutationCommands(
if (!isWidePart(part) || !isFullPartStorage(part->getDataPartStorage()))
{
- NameSet mutated_columns, dropped_columns;
+ NameSet mutated_columns;
+ NameSet dropped_columns;
+
for (const auto & command : commands)
{
if (command.type == MutationCommand::Type::MATERIALIZE_INDEX
@@ -258,6 +260,10 @@ getColumnsForNewDataPart(
storage_columns.emplace_back(column);
}
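+ /// Remember the column names of the new part; used below to skip serialization infos of columns that are no longer present.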
+ NameSet storage_columns_set;
+ for (const auto & [name, _] : storage_columns)
+ storage_columns_set.insert(name);
+
for (const auto & command : all_commands)
{
if (command.type == MutationCommand::UPDATE)
@@ -292,15 +298,19 @@ getColumnsForNewDataPart(
SerializationInfoByName new_serialization_infos;
for (const auto & [name, old_info] : serialization_infos)
{
- if (removed_columns.contains(name))
- continue;
-
auto it = renamed_columns_from_to.find(name);
auto new_name = it == renamed_columns_from_to.end() ? name : it->second;
+ /// A column can be removed only in this data part by the CLEAR COLUMN query.
+ if (!storage_columns_set.contains(new_name) || removed_columns.contains(new_name))
+ continue;
+
+ /// In a compact part we read all columns, and all of them are in @updated_header.
+ /// But in a wide part we must keep serialization infos for columns that are not touched by the mutation.
if (!updated_header.has(new_name))
{
- new_serialization_infos.emplace(new_name, old_info);
+ if (isWidePart(source_part))
+ new_serialization_infos.emplace(new_name, old_info);
continue;
}
diff --git a/src/Storages/NamedCollectionsHelpers.cpp b/src/Storages/NamedCollectionsHelpers.cpp
index 83128ab025a..f301cca92a1 100644
--- a/src/Storages/NamedCollectionsHelpers.cpp
+++ b/src/Storages/NamedCollectionsHelpers.cpp
@@ -1,4 +1,5 @@
#include "NamedCollectionsHelpers.h"
+#include
#include
#include
#include
@@ -15,19 +16,16 @@ namespace ErrorCodes
namespace
{
- NamedCollectionPtr tryGetNamedCollectionFromASTs(ASTs asts, bool throw_unknown_collection)
+ std::optional getCollectionName(ASTs asts)
{
if (asts.empty())
- return nullptr;
+ return std::nullopt;
const auto * identifier = asts[0]->as();
if (!identifier)
- return nullptr;
+ return std::nullopt;
- const auto & collection_name = identifier->name();
- if (throw_unknown_collection)
- return NamedCollectionFactory::instance().get(collection_name);
- return NamedCollectionFactory::instance().tryGet(collection_name);
+ return identifier->name();
}
std::optional>> getKeyValueFromAST(ASTPtr ast, bool fallback_to_ast_value, ContextPtr context)
@@ -74,7 +72,18 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
NamedCollectionUtils::loadIfNot();
- auto collection = tryGetNamedCollectionFromASTs(asts, throw_unknown_collection);
+ auto collection_name = getCollectionName(asts);
+ if (!collection_name.has_value())
+ return nullptr;
+
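+ /// Using a named collection requires the NAMED COLLECTION grant on that collection.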
+ context->checkAccess(AccessType::NAMED_COLLECTION, *collection_name);
+
+ NamedCollectionPtr collection;
+ if (throw_unknown_collection)
+ collection = NamedCollectionFactory::instance().get(*collection_name);
+ else
+ collection = NamedCollectionFactory::instance().tryGet(*collection_name);
+
if (!collection)
return nullptr;
@@ -106,12 +115,14 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
}
MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
- const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix)
+ const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context)
{
auto collection_name = config.getString(config_prefix + ".name", "");
if (collection_name.empty())
return nullptr;
+ context->checkAccess(AccessType::NAMED_COLLECTION, collection_name);
+
const auto & collection = NamedCollectionFactory::instance().get(collection_name);
auto collection_copy = collection->duplicate();
diff --git a/src/Storages/NamedCollectionsHelpers.h b/src/Storages/NamedCollectionsHelpers.h
index d0d6a526f9b..3d0ff5d8dab 100644
--- a/src/Storages/NamedCollectionsHelpers.h
+++ b/src/Storages/NamedCollectionsHelpers.h
@@ -22,7 +22,7 @@ MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(
ASTs asts, ContextPtr context, bool throw_unknown_collection = true, std::vector> * complex_args = nullptr);
/// Helper function to get named collection for dictionary source.
/// Dictionaries have collection name as name argument of dict configuration and other arguments are overrides.
-MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix);
+MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context);
HTTPHeaderEntries getHeadersFromNamedCollection(const NamedCollection & collection);
diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp
index a238e9ef26c..640706aae17 100644
--- a/src/Storages/StorageJoin.cpp
+++ b/src/Storages/StorageJoin.cpp
@@ -146,7 +146,7 @@ void StorageJoin::mutate(const MutationCommands & commands, ContextPtr context)
Block block;
while (executor.pull(block))
{
- new_data->addJoinedBlock(block, true);
+ new_data->addBlockToJoin(block, true);
if (persistent)
backup_stream.write(block);
}
@@ -257,7 +257,7 @@ void StorageJoin::insertBlock(const Block & block, ContextPtr context)
if (!holder)
throw Exception(ErrorCodes::DEADLOCK_AVOIDED, "StorageJoin: cannot insert data because current query tries to read from this storage");
- join->addJoinedBlock(block_to_insert, true);
+ join->addBlockToJoin(block_to_insert, true);
}
size_t StorageJoin::getSize(ContextPtr context) const
diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp
index dac9e6923a5..7861dc6917c 100644
--- a/src/Storages/StorageReplicatedMergeTree.cpp
+++ b/src/Storages/StorageReplicatedMergeTree.cpp
@@ -1987,7 +1987,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry, bool need_to_che
}
-MutableDataPartStoragePtr StorageReplicatedMergeTree::executeFetchShared(
+MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::executeFetchShared(
const String & source_replica,
const String & new_part_name,
const DiskPtr & disk,
@@ -4476,7 +4476,7 @@ bool StorageReplicatedMergeTree::fetchPart(
}
-MutableDataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart(
+MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::fetchExistsPart(
const String & part_name,
const StorageMetadataPtr & metadata_snapshot,
const String & source_replica_path,
@@ -4582,7 +4582,7 @@ MutableDataPartStoragePtr StorageReplicatedMergeTree::fetchExistsPart(
ProfileEvents::increment(ProfileEvents::ReplicatedPartFetches);
LOG_DEBUG(log, "Fetched part {} from {}:{}", part_name, zookeeper_name, source_replica_path);
- return part->getDataPartStoragePtr();
+ return part;
}
void StorageReplicatedMergeTree::startup()
@@ -8901,7 +8901,7 @@ std::pair StorageReplicatedMergeTree::unlockSharedDataByID(
}
-MutableDataPartStoragePtr StorageReplicatedMergeTree::tryToFetchIfShared(
+MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::tryToFetchIfShared(
const IMergeTreeDataPart & part,
const DiskPtr & disk,
const String & path)
diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h
index bdd3f0da5bf..f3c8d017333 100644
--- a/src/Storages/StorageReplicatedMergeTree.h
+++ b/src/Storages/StorageReplicatedMergeTree.h
@@ -244,7 +244,7 @@ public:
bool canExecuteFetch(const ReplicatedMergeTreeLogEntry & entry, String & disable_reason) const;
/// Fetch part only when it stored on shared storage like S3
- MutableDataPartStoragePtr executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path);
+ MutableDataPartPtr executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path);
/// Lock part in zookeeper for use shared data in several nodes
void lockSharedData(const IMergeTreeDataPart & part, bool replace_existing_lock, std::optional hardlinked_files) const override;
@@ -286,7 +286,7 @@ public:
MergeTreeDataFormatVersion data_format_version);
/// Fetch part only if some replica has it on shared storage like S3
- MutableDataPartStoragePtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override;
+ MutableDataPartPtr tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override;
/// Get best replica having this partition on a same type remote disk
String getSharedDataReplica(const IMergeTreeDataPart & part, DataSourceType data_source_type) const;
@@ -717,7 +717,7 @@ private:
* Used for replace local part on the same s3-shared part in hybrid storage.
* Returns false if part is already fetching right now.
*/
- MutableDataPartStoragePtr fetchExistsPart(
+ MutableDataPartPtr fetchExistsPart(
const String & part_name,
const StorageMetadataPtr & metadata_snapshot,
const String & replica_path,
diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py
index c680b5810fc..ea7d112c73e 100644
--- a/tests/ci/ci_config.py
+++ b/tests/ci/ci_config.py
@@ -173,6 +173,16 @@ CI_CONFIG = {
"with_coverage": False,
"comment": "SSE2-only build",
},
+ "binary_riscv64": {
+ "compiler": "clang-16-riscv64",
+ "build_type": "",
+ "sanitizer": "",
+ "package_type": "binary",
+ "static_binary_name": "riscv64",
+ "tidy": "disable",
+ "with_coverage": False,
+ "comment": "",
+ },
},
"builds_report_config": {
"ClickHouse build check": [
@@ -194,6 +204,7 @@ CI_CONFIG = {
"binary_freebsd",
"binary_darwin_aarch64",
"binary_ppc64le",
+ "binary_riscv64",
"binary_amd64_compat",
],
},
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index 21398790be3..cd6861c29a0 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -36,6 +36,7 @@ try:
from confluent_kafka.avro.cached_schema_registry_client import (
CachedSchemaRegistryClient,
)
+ from .hdfs_api import HDFSApi # imports requests_kerberos
except Exception as e:
logging.warning(f"Cannot import some modules, some tests may not work: {e}")
@@ -51,7 +52,6 @@ from helpers.client import QueryRuntimeException
import docker
from .client import Client
-from .hdfs_api import HDFSApi
from .config_cluster import *
@@ -3416,13 +3416,14 @@ class ClickHouseInstance:
database=database,
)
time.sleep(sleep_time)
+
+ if result is not None:
+ return result
except QueryRuntimeException as ex:
logging.debug("Retry {} got exception {}".format(i + 1, ex))
time.sleep(sleep_time)
- if result is not None:
- return result
- raise Exception("Query {sql} did not fail".format(sql))
+ raise Exception("Query {} did not fail".format(sql))
# The same as query_and_get_error but ignores successful query.
def query_and_get_answer_with_error(
diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py
index 2df560708e0..60b46926589 100644
--- a/tests/integration/helpers/network.py
+++ b/tests/integration/helpers/network.py
@@ -32,6 +32,9 @@ class PartitionManager:
{"destination": instance.ip_address, "source_port": 2181, "action": action}
)
+ def dump_rules(self):
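+ # Return the current iptables rules (for debugging failed partition scenarios).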
+ return _NetworkManager.get().dump_rules()
+
def restore_instance_zk_connections(self, instance, action="DROP"):
self._check_instance(instance)
@@ -157,6 +160,10 @@ class _NetworkManager:
cmd.extend(self._iptables_cmd_suffix(**kwargs))
self._exec_run(cmd, privileged=True)
+ def dump_rules(self):
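+ # List the rules of the DOCKER-USER iptables chain.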
+ cmd = ["iptables", "-L", "DOCKER-USER"]
+ return self._exec_run(cmd, privileged=True)
+
@staticmethod
def clean_all_user_iptables_rules():
for i in range(1000):
@@ -212,8 +219,8 @@ class _NetworkManager:
def __init__(
self,
- container_expire_timeout=50,
- container_exit_timeout=60,
+ container_expire_timeout=120,
+ container_exit_timeout=120,
docker_api_version=os.environ.get("DOCKER_API_VERSION"),
):
self.container_expire_timeout = container_expire_timeout
diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json
index e9089fcde73..d060218456a 100644
--- a/tests/integration/parallel_skip.json
+++ b/tests/integration/parallel_skip.json
@@ -66,5 +66,7 @@
"test_server_reload/test.py::test_remove_http_port",
"test_server_reload/test.py::test_remove_mysql_port",
"test_server_reload/test.py::test_remove_postgresql_port",
- "test_server_reload/test.py::test_remove_tcp_port"
+ "test_server_reload/test.py::test_remove_tcp_port",
+
+ "test_keeper_map/test.py::test_keeper_map_without_zk"
]
diff --git a/tests/integration/test_dictionaries_mysql/configs/users.xml b/tests/integration/test_dictionaries_mysql/configs/users.xml
index 4555a2ed494..70c7d3bc2c1 100644
--- a/tests/integration/test_dictionaries_mysql/configs/users.xml
+++ b/tests/integration/test_dictionaries_mysql/configs/users.xml
@@ -12,6 +12,7 @@
default
default
+ 1
diff --git a/tests/integration/test_dictionaries_mysql/test.py b/tests/integration/test_dictionaries_mysql/test.py
index a12139a0bea..ee0d957b8a9 100644
--- a/tests/integration/test_dictionaries_mysql/test.py
+++ b/tests/integration/test_dictionaries_mysql/test.py
@@ -8,9 +8,14 @@ import logging
DICTS = ["configs/dictionaries/mysql_dict1.xml", "configs/dictionaries/mysql_dict2.xml"]
CONFIG_FILES = ["configs/remote_servers.xml", "configs/named_collections.xml"]
+USER_CONFIGS = ["configs/users.xml"]
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance(
- "instance", main_configs=CONFIG_FILES, with_mysql=True, dictionaries=DICTS
+ "instance",
+ main_configs=CONFIG_FILES,
+ user_configs=USER_CONFIGS,
+ with_mysql=True,
+ dictionaries=DICTS,
)
create_table_mysql_template = """
diff --git a/tests/integration/test_dictionaries_postgresql/configs/users.xml b/tests/integration/test_dictionaries_postgresql/configs/users.xml
new file mode 100644
index 00000000000..beb08eb6ed4
--- /dev/null
+++ b/tests/integration/test_dictionaries_postgresql/configs/users.xml
@@ -0,0 +1,10 @@
+
+
+
+
+ default
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_keeper_map/test.py b/tests/integration/test_keeper_map/test.py
index c6ec7103056..d7b4230d872 100644
--- a/tests/integration/test_keeper_map/test.py
+++ b/tests/integration/test_keeper_map/test.py
@@ -1,7 +1,7 @@
import pytest
from helpers.cluster import ClickHouseCluster
-from helpers.network import PartitionManager
+from helpers.network import PartitionManager, _NetworkManager
test_recover_staled_replica_run = 1
@@ -38,41 +38,67 @@ def remove_children(client, path):
client.delete(child_path)
-def test_keeper_map_without_zk(started_cluster):
- def assert_keeper_exception_after_partition(query):
- with PartitionManager() as pm:
- pm.drop_instance_zk_connections(node)
- error = node.query_and_get_error(query)
- assert "Coordination::Exception" in error
+def print_iptables_rules():
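+ # Print the current iptables rules to the test log for debugging connection issues.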
+ print(f"iptables rules: {_NetworkManager.get().dump_rules()}")
+
+def assert_keeper_exception_after_partition(query):
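+ # With ZooKeeper connections dropped, the query is expected to fail with a Coordination::Exception.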
+ with PartitionManager() as pm:
+ pm.drop_instance_zk_connections(node)
+ try:
+ error = node.query_and_get_error_with_retry(query, sleep_time=1)
+ assert "Coordination::Exception" in error
+ except:
+ print_iptables_rules()
+ raise
+
+
+def run_query(query):
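+ # Run the query with retries and dump iptables rules if it still fails.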
+ try:
+ result = node.query_with_retry(query, sleep_time=1)
+ return result
+ except:
+ print_iptables_rules()
+ raise
+
+
+def test_keeper_map_without_zk(started_cluster):
assert_keeper_exception_after_partition(
- "CREATE TABLE test_keeper_map_without_zk (key UInt64, value UInt64) ENGINE = KeeperMap('/test_without_zk') PRIMARY KEY(key);"
+ "CREATE TABLE test_keeper_map_without_zk (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_without_zk') PRIMARY KEY(key);"
)
- node.query(
- "CREATE TABLE test_keeper_map_without_zk (key UInt64, value UInt64) ENGINE = KeeperMap('/test_without_zk') PRIMARY KEY(key);"
+ run_query(
+ "CREATE TABLE test_keeper_map_without_zk (key UInt64, value UInt64) ENGINE = KeeperMap('/test_keeper_map_without_zk') PRIMARY KEY(key);"
)
assert_keeper_exception_after_partition(
"INSERT INTO test_keeper_map_without_zk VALUES (1, 11)"
)
- node.query("INSERT INTO test_keeper_map_without_zk VALUES (1, 11)")
+ run_query("INSERT INTO test_keeper_map_without_zk VALUES (1, 11)")
assert_keeper_exception_after_partition("SELECT * FROM test_keeper_map_without_zk")
- node.query("SELECT * FROM test_keeper_map_without_zk")
+ assert run_query("SELECT * FROM test_keeper_map_without_zk") == "1\t11\n"
with PartitionManager() as pm:
pm.drop_instance_zk_connections(node)
node.restart_clickhouse(60)
- error = node.query_and_get_error("SELECT * FROM test_keeper_map_without_zk")
- assert "Failed to activate table because of connection issues" in error
+ try:
+ error = node.query_and_get_error_with_retry(
+ "SELECT * FROM test_keeper_map_without_zk", sleep_time=1
+ )
+ assert "Failed to activate table because of connection issues" in error
+ except:
+ print_iptables_rules()
+ raise
- node.query("SELECT * FROM test_keeper_map_without_zk")
+ run_query("SELECT * FROM test_keeper_map_without_zk")
client = get_genuine_zk()
- remove_children(client, "/test_keeper_map/test_without_zk")
+ remove_children(client, "/test_keeper_map/test_keeper_map_without_zk")
node.restart_clickhouse(60)
- error = node.query_and_get_error("SELECT * FROM test_keeper_map_without_zk")
+ error = node.query_and_get_error_with_retry(
+ "SELECT * FROM test_keeper_map_without_zk"
+ )
assert "Failed to activate table because of invalid metadata in ZooKeeper" in error
node.query("DETACH TABLE test_keeper_map_without_zk")
diff --git a/tests/integration/test_mask_sensitive_info/configs/users.xml b/tests/integration/test_mask_sensitive_info/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_mask_sensitive_info/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py
index 2131a76b5be..004491af4ac 100644
--- a/tests/integration/test_mask_sensitive_info/test.py
+++ b/tests/integration/test_mask_sensitive_info/test.py
@@ -9,6 +9,7 @@ node = cluster.add_instance(
main_configs=[
"configs/named_collections.xml",
],
+ user_configs=["configs/users.xml"],
with_zookeeper=True,
)
diff --git a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml
index 4480327c4b5..235b9a7b7a1 100644
--- a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml
+++ b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml
@@ -72,4 +72,6 @@
+
+ true
diff --git a/tests/integration/test_merge_tree_s3_failover/test.py b/tests/integration/test_merge_tree_s3_failover/test.py
index 05aeeff2ec1..90dda631924 100644
--- a/tests/integration/test_merge_tree_s3_failover/test.py
+++ b/tests/integration/test_merge_tree_s3_failover/test.py
@@ -183,7 +183,8 @@ def test_move_failover(cluster):
) ENGINE=MergeTree()
ORDER BY id
TTL dt + INTERVAL 4 SECOND TO VOLUME 'external'
- SETTINGS storage_policy='s3_cold'
+ SETTINGS storage_policy='s3_cold', temporary_directories_lifetime=1,
+ merge_tree_clear_old_temporary_directories_interval_seconds=1
"""
)
diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py
index b5606ee8bc2..5561d63840b 100644
--- a/tests/integration/test_multiple_disks/test.py
+++ b/tests/integration/test_multiple_disks/test.py
@@ -49,6 +49,18 @@ def start_cluster():
cluster.shutdown()
+def get_oldest_part(node, table_name):
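+ # Name of the oldest active part of the table, ordered by modification_time.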
+ return node.query(
+ f"SELECT name FROM system.parts WHERE table = '{table_name}' and active = 1 ORDER BY modification_time LIMIT 1"
+ ).strip()
+
+
+def get_disk_for_part(node, table_name, part):
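+ # Disk that currently stores the given active part, according to system.parts.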
+ return node.query(
+ f"SELECT disk_name FROM system.parts WHERE table == '{table_name}' and active = 1 and name = '{part}' ORDER BY modification_time"
+ ).strip()
+
+
def test_system_tables(start_cluster):
expected_disks_data = [
{
@@ -694,22 +706,21 @@ def test_jbod_overflow(start_cluster, name, engine):
def test_background_move(start_cluster, name, engine):
try:
node1.query_with_retry(
- """
+ f"""
CREATE TABLE IF NOT EXISTS {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
- SETTINGS storage_policy='moving_jbod_with_external'
- """.format(
- name=name, engine=engine
- )
+ SETTINGS storage_policy='moving_jbod_with_external', max_replicated_merges_in_queue=0
+ """
)
node1.query(f"SYSTEM STOP MERGES {name}")
+ first_part = None
for i in range(5):
data = [] # 5MB in total
- for i in range(5):
+ for _ in range(5):
data.append(get_random_string(1024 * 1024)) # 1MB row
# small jbod size is 40MB, so lets insert 5MB batch 5 times
node1.query_with_retry(
@@ -718,25 +729,26 @@ def test_background_move(start_cluster, name, engine):
)
)
- used_disks = get_used_disks_for_table(node1, name)
+ # moves are done in parallel, so we need to fetch the name of the first part before adding new parts
+ if i == 0:
+ first_part = get_oldest_part(node1, name)
+
+ assert first_part is not None
retry = 20
i = 0
- while not sum(1 for x in used_disks if x == "jbod1") <= 2 and i < retry:
+ # multiple moves can be assigned in parallel, so later parts may be moved before the oldest one;
+ # we need to wait explicitly until the oldest part is moved
+ while get_disk_for_part(node1, name, first_part) != "external" and i < retry:
time.sleep(0.5)
- used_disks = get_used_disks_for_table(node1, name)
i += 1
- assert sum(1 for x in used_disks if x == "jbod1") <= 2
-
# first (oldest) part was moved to external
- assert used_disks[0] == "external"
+ assert get_disk_for_part(node1, name, first_part) == "external"
node1.query("SYSTEM FLUSH LOGS")
path = node1.query(
- "SELECT path_on_disk FROM system.part_log WHERE table = '{}' AND event_type='MovePart' AND part_name = 'all_1_1_0'".format(
- name
- )
+ f"SELECT path_on_disk FROM system.part_log WHERE table = '{name}' AND event_type='MovePart' AND part_name = '{first_part}'"
)
# first (oldest) part was moved to external
@@ -762,36 +774,28 @@ def test_background_move(start_cluster, name, engine):
def test_start_stop_moves(start_cluster, name, engine):
try:
node1.query_with_retry(
- """
+ f"""
CREATE TABLE IF NOT EXISTS {name} (
s1 String
) ENGINE = {engine}
ORDER BY tuple()
- SETTINGS storage_policy='moving_jbod_with_external'
- """.format(
- name=name, engine=engine
- )
+ SETTINGS storage_policy='moving_jbod_with_external', max_replicated_merges_in_queue=0
+ """
)
- node1.query_with_retry("INSERT INTO {} VALUES ('HELLO')".format(name))
- node1.query_with_retry("INSERT INTO {} VALUES ('WORLD')".format(name))
+ node1.query_with_retry(f"INSERT INTO {name} VALUES ('HELLO')")
+ node1.query_with_retry(f"INSERT INTO {name} VALUES ('WORLD')")
used_disks = get_used_disks_for_table(node1, name)
assert all(d == "jbod1" for d in used_disks), "All writes shoud go to jbods"
- first_part = node1.query(
- "SELECT name FROM system.parts WHERE table = '{}' and active = 1 ORDER BY modification_time LIMIT 1".format(
- name
- )
- ).strip()
+ first_part = get_oldest_part(node1, name)
node1.query("SYSTEM STOP MOVES")
with pytest.raises(QueryRuntimeException):
node1.query(
- "ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(
- name, first_part
- )
+ f"ALTER TABLE {name} MOVE PART '{first_part}' TO VOLUME 'external'"
)
used_disks = get_used_disks_for_table(node1, name)
@@ -801,24 +805,18 @@ def test_start_stop_moves(start_cluster, name, engine):
node1.query("SYSTEM START MOVES")
- node1.query(
- "ALTER TABLE {} MOVE PART '{}' TO VOLUME 'external'".format(
- name, first_part
- )
- )
+ node1.query(f"ALTER TABLE {name} MOVE PART '{first_part}' TO VOLUME 'external'")
disk = node1.query(
- "SELECT disk_name FROM system.parts WHERE table = '{}' and name = '{}' and active = 1".format(
- name, first_part
- )
+ f"SELECT disk_name FROM system.parts WHERE table = '{name}' and name = '{first_part}' and active = 1"
).strip()
assert disk == "external"
- node1.query_with_retry("TRUNCATE TABLE {}".format(name))
+ node1.query_with_retry(f"TRUNCATE TABLE {name}")
- node1.query("SYSTEM STOP MOVES {}".format(name))
- node1.query("SYSTEM STOP MERGES {}".format(name))
+ node1.query(f"SYSTEM STOP MOVES {name}")
+ node1.query(f"SYSTEM STOP MERGES {name}")
for i in range(5):
data = [] # 5MB in total
@@ -831,6 +829,8 @@ def test_start_stop_moves(start_cluster, name, engine):
)
)
+ first_part = get_oldest_part(node1, name)
+
used_disks = get_used_disks_for_table(node1, name)
retry = 5
@@ -843,23 +843,20 @@ def test_start_stop_moves(start_cluster, name, engine):
# first (oldest) part doesn't move anywhere
assert used_disks[0] == "jbod1"
- node1.query("SYSTEM START MOVES {}".format(name))
+ node1.query(f"SYSTEM START MOVES {name}")
- # wait sometime until background backoff finishes
+ # multiple moves can be assigned in parallel, so later parts may be moved before the oldest one;
+ # we need to wait explicitly until the oldest part is moved
retry = 60
i = 0
- while not sum(1 for x in used_disks if x == "jbod1") <= 2 and i < retry:
+ while get_disk_for_part(node1, name, first_part) != "external" and i < retry:
time.sleep(1)
- used_disks = get_used_disks_for_table(node1, name)
i += 1
- node1.query("SYSTEM START MERGES {}".format(name))
-
- assert sum(1 for x in used_disks if x == "jbod1") <= 2
-
# first (oldest) part moved to external
- assert used_disks[0] == "external"
+ assert get_disk_for_part(node1, name, first_part) == "external"
+ node1.query(f"SYSTEM START MERGES {name}")
finally:
node1.query_with_retry(f"DROP TABLE IF EXISTS {name} SYNC")
diff --git a/tests/integration/test_mysql_database_engine/configs/user.xml b/tests/integration/test_mysql_database_engine/configs/user.xml
new file mode 100644
index 00000000000..775c63350b0
--- /dev/null
+++ b/tests/integration/test_mysql_database_engine/configs/user.xml
@@ -0,0 +1,10 @@
+
+
+
+
+ default
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_mysql_database_engine/configs/users.xml b/tests/integration/test_mysql_database_engine/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_mysql_database_engine/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py
index 52a7b319551..18dde5307fd 100644
--- a/tests/integration/test_mysql_database_engine/test.py
+++ b/tests/integration/test_mysql_database_engine/test.py
@@ -12,6 +12,7 @@ cluster = ClickHouseCluster(__file__)
clickhouse_node = cluster.add_instance(
"node1",
main_configs=["configs/remote_servers.xml", "configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
with_mysql=True,
stay_alive=True,
)
diff --git a/tests/integration/test_named_collections/configs/users.d/0a_users_no_default_access.xml b/tests/integration/test_named_collections/configs/users.d/0a_users_no_default_access.xml
new file mode 100644
index 00000000000..b8f38f04ca9
--- /dev/null
+++ b/tests/integration/test_named_collections/configs/users.d/0a_users_no_default_access.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ default
+
+
+
diff --git a/tests/integration/test_postgresql_database_engine/configs/users.xml b/tests/integration/test_postgresql_database_engine/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_postgresql_database_engine/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_postgresql_database_engine/test.py b/tests/integration/test_postgresql_database_engine/test.py
index d9f06f0295b..59a464f9020 100644
--- a/tests/integration/test_postgresql_database_engine/test.py
+++ b/tests/integration/test_postgresql_database_engine/test.py
@@ -8,7 +8,10 @@ from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
- "node1", main_configs=["configs/named_collections.xml"], with_postgres=True
+ "node1",
+ main_configs=["configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
+ with_postgres=True,
)
postgres_table_template = """
diff --git a/tests/integration/test_postgresql_replica_database_engine_2/configs/users.xml b/tests/integration/test_postgresql_replica_database_engine_2/configs/users.xml
index 26ea20e012f..e0c51962193 100644
--- a/tests/integration/test_postgresql_replica_database_engine_2/configs/users.xml
+++ b/tests/integration/test_postgresql_replica_database_engine_2/configs/users.xml
@@ -4,4 +4,11 @@
1
+
+
+
+ default
+ 1
+
+
diff --git a/tests/integration/test_redirect_url_storage/configs/users.xml b/tests/integration/test_redirect_url_storage/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_redirect_url_storage/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py
index b2178655444..17a9a03008e 100644
--- a/tests/integration/test_redirect_url_storage/test.py
+++ b/tests/integration/test_redirect_url_storage/test.py
@@ -9,6 +9,7 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
main_configs=["configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
with_zookeeper=False,
with_hdfs=True,
)
diff --git a/tests/integration/test_s3_cluster/configs/users.xml b/tests/integration/test_s3_cluster/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_s3_cluster/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py
index 41f19cdd12d..673ca318c92 100644
--- a/tests/integration/test_s3_cluster/test.py
+++ b/tests/integration/test_s3_cluster/test.py
@@ -68,6 +68,7 @@ def started_cluster():
cluster.add_instance(
"s0_0_0",
main_configs=["configs/cluster.xml", "configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
macros={"replica": "node1", "shard": "shard1"},
with_minio=True,
with_zookeeper=True,
@@ -75,12 +76,14 @@ def started_cluster():
cluster.add_instance(
"s0_0_1",
main_configs=["configs/cluster.xml", "configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
macros={"replica": "replica2", "shard": "shard1"},
with_zookeeper=True,
)
cluster.add_instance(
"s0_1_0",
main_configs=["configs/cluster.xml", "configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
macros={"replica": "replica1", "shard": "shard2"},
with_zookeeper=True,
)
diff --git a/tests/integration/test_s3_table_functions/configs/users.d/users.xml b/tests/integration/test_s3_table_functions/configs/users.d/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_s3_table_functions/configs/users.d/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_s3_table_functions/test.py b/tests/integration/test_s3_table_functions/test.py
index 516d6582990..a6def175136 100644
--- a/tests/integration/test_s3_table_functions/test.py
+++ b/tests/integration/test_s3_table_functions/test.py
@@ -11,6 +11,9 @@ node = cluster.add_instance(
main_configs=[
"configs/config.d/minio.xml",
],
+ user_configs=[
+ "configs/users.d/users.xml",
+ ],
with_minio=True,
)
@@ -44,7 +47,7 @@ def test_s3_table_functions(started_cluster):
"""
INSERT INTO FUNCTION s3
(
- nc_s3,
+ nc_s3,
filename = 'test_file.tsv.gz',
format = 'TSV',
structure = 'number UInt64',
@@ -60,7 +63,7 @@ def test_s3_table_functions(started_cluster):
"""
SELECT count(*) FROM s3
(
- nc_s3,
+ nc_s3,
filename = 'test_file.tsv.gz',
format = 'TSV',
structure = 'number UInt64',
@@ -85,7 +88,7 @@ def test_s3_table_functions_timeouts(started_cluster):
"""
INSERT INTO FUNCTION s3
(
- nc_s3,
+ nc_s3,
filename = 'test_file.tsv.gz',
format = 'TSV',
structure = 'number UInt64',
diff --git a/tests/integration/test_s3_zero_copy_ttl/configs/s3.xml b/tests/integration/test_s3_zero_copy_ttl/configs/s3.xml
index 5ffeb0c0d01..e179c848be1 100644
--- a/tests/integration/test_s3_zero_copy_ttl/configs/s3.xml
+++ b/tests/integration/test_s3_zero_copy_ttl/configs/s3.xml
@@ -33,4 +33,6 @@
true
+
+ true
diff --git a/tests/integration/test_s3_zero_copy_ttl/test.py b/tests/integration/test_s3_zero_copy_ttl/test.py
index 7dcf3734653..04bff4a44fb 100644
--- a/tests/integration/test_s3_zero_copy_ttl/test.py
+++ b/tests/integration/test_s3_zero_copy_ttl/test.py
@@ -35,7 +35,7 @@ def test_ttl_move_and_s3(started_cluster):
ORDER BY id
PARTITION BY id
TTL date TO DISK 's3_disk'
- SETTINGS storage_policy='s3_and_default'
+ SETTINGS storage_policy='s3_and_default', temporary_directories_lifetime=1
""".format(
i
)
diff --git a/tests/integration/test_storage_azure_blob_storage/configs/users.xml b/tests/integration/test_storage_azure_blob_storage/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_azure_blob_storage/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py
index 6089466ff5d..21f57a67495 100644
--- a/tests/integration/test_storage_azure_blob_storage/test.py
+++ b/tests/integration/test_storage_azure_blob_storage/test.py
@@ -25,7 +25,7 @@ def cluster():
cluster.add_instance(
"node",
main_configs=["configs/named_collections.xml"],
- user_configs=["configs/disable_profilers.xml"],
+ user_configs=["configs/disable_profilers.xml", "configs/users.xml"],
with_azurite=True,
)
cluster.start()
diff --git a/tests/integration/test_storage_delta/configs/users.d/users.xml b/tests/integration/test_storage_delta/configs/users.d/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_delta/configs/users.d/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py
index 9477b66dab8..0cd1208edfa 100644
--- a/tests/integration/test_storage_delta/test.py
+++ b/tests/integration/test_storage_delta/test.py
@@ -53,6 +53,7 @@ def started_cluster():
cluster.add_instance(
"node1",
main_configs=["configs/config.d/named_collections.xml"],
+ user_configs=["configs/users.d/users.xml"],
with_minio=True,
)
diff --git a/tests/integration/test_storage_dict/configs/users.xml b/tests/integration/test_storage_dict/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_dict/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_dict/test.py b/tests/integration/test_storage_dict/test.py
index 1ed974f267d..dd4ab5c8d2c 100644
--- a/tests/integration/test_storage_dict/test.py
+++ b/tests/integration/test_storage_dict/test.py
@@ -10,7 +10,10 @@ def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance(
- "node1", main_configs=["configs/conf.xml"], with_nginx=True
+ "node1",
+ main_configs=["configs/conf.xml"],
+ user_configs=["configs/users.xml"],
+ with_nginx=True,
)
cluster.start()
diff --git a/tests/integration/test_storage_hudi/configs/users.d/users.xml b/tests/integration/test_storage_hudi/configs/users.d/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_hudi/configs/users.d/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_hudi/test.py b/tests/integration/test_storage_hudi/test.py
index 2b77f4d6d61..6fe7a193129 100644
--- a/tests/integration/test_storage_hudi/test.py
+++ b/tests/integration/test_storage_hudi/test.py
@@ -51,6 +51,7 @@ def started_cluster():
cluster.add_instance(
"node1",
main_configs=["configs/config.d/named_collections.xml"],
+ user_configs=["configs/users.d/users.xml"],
with_minio=True,
)
diff --git a/tests/integration/test_storage_iceberg/configs/users.d/users.xml b/tests/integration/test_storage_iceberg/configs/users.d/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_iceberg/configs/users.d/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py
index b3b2f160740..c22b8cda9b5 100644
--- a/tests/integration/test_storage_iceberg/test.py
+++ b/tests/integration/test_storage_iceberg/test.py
@@ -53,6 +53,7 @@ def started_cluster():
cluster.add_instance(
"node1",
main_configs=["configs/config.d/named_collections.xml"],
+ user_configs=["configs/users.d/users.xml"],
with_minio=True,
)
diff --git a/tests/integration/test_storage_kafka/configs/users.xml b/tests/integration/test_storage_kafka/configs/users.xml
index 992464a0ac2..3168de649f8 100644
--- a/tests/integration/test_storage_kafka/configs/users.xml
+++ b/tests/integration/test_storage_kafka/configs/users.xml
@@ -6,4 +6,11 @@
0
+
+
+
+ default
+ 1
+
+
diff --git a/tests/integration/test_storage_meilisearch/configs/users.xml b/tests/integration/test_storage_meilisearch/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_meilisearch/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_meilisearch/test.py b/tests/integration/test_storage_meilisearch/test.py
index ddcd7154154..b6acee18981 100644
--- a/tests/integration/test_storage_meilisearch/test.py
+++ b/tests/integration/test_storage_meilisearch/test.py
@@ -16,7 +16,10 @@ def started_cluster(request):
try:
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
- "meili", main_configs=["configs/named_collection.xml"], with_meili=True
+ "meili",
+ main_configs=["configs/named_collection.xml"],
+ user_configs=["configs/users.xml"],
+ with_meili=True,
)
cluster.start()
yield cluster
diff --git a/tests/integration/test_storage_mongodb/configs/users.xml b/tests/integration/test_storage_mongodb/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_mongodb/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py
index 6ce71fb91fa..13f11852945 100644
--- a/tests/integration/test_storage_mongodb/test.py
+++ b/tests/integration/test_storage_mongodb/test.py
@@ -17,6 +17,7 @@ def started_cluster(request):
"configs_secure/config.d/ssl_conf.xml",
"configs/named_collections.xml",
],
+ user_configs=["configs/users.xml"],
with_mongo=True,
with_mongo_secure=request.param,
)
diff --git a/tests/integration/test_storage_mysql/configs/users.xml b/tests/integration/test_storage_mysql/configs/users.xml
index d030ccb0e72..a11985dd113 100644
--- a/tests/integration/test_storage_mysql/configs/users.xml
+++ b/tests/integration/test_storage_mysql/configs/users.xml
@@ -12,6 +12,7 @@
::/0
default
+ 1
diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py
index 49629575ec7..3e3132949e7 100644
--- a/tests/integration/test_storage_mysql/test.py
+++ b/tests/integration/test_storage_mysql/test.py
@@ -13,6 +13,7 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
main_configs=["configs/remote_servers.xml", "configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
with_mysql=True,
)
node2 = cluster.add_instance(
diff --git a/tests/integration/test_storage_postgresql/configs/users.xml b/tests/integration/test_storage_postgresql/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_postgresql/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_postgresql/test.py b/tests/integration/test_storage_postgresql/test.py
index 9f7c012e66f..686eb1ea751 100644
--- a/tests/integration/test_storage_postgresql/test.py
+++ b/tests/integration/test_storage_postgresql/test.py
@@ -7,12 +7,15 @@ from helpers.postgres_utility import get_postgres_conn
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
- "node1", main_configs=["configs/named_collections.xml"], with_postgres=True
+ "node1",
+ main_configs=["configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
+ with_postgres=True,
)
node2 = cluster.add_instance(
"node2",
main_configs=["configs/named_collections.xml"],
- user_configs=["configs/settings.xml"],
+ user_configs=["configs/settings.xml", "configs/users.xml"],
with_postgres_cluster=True,
)
diff --git a/tests/integration/test_storage_rabbitmq/configs/users.xml b/tests/integration/test_storage_rabbitmq/configs/users.xml
index 2cef0a6de3c..e42fefa905b 100644
--- a/tests/integration/test_storage_rabbitmq/configs/users.xml
+++ b/tests/integration/test_storage_rabbitmq/configs/users.xml
@@ -4,4 +4,11 @@
1
+
+
+
+ default
+ 1
+
+
diff --git a/tests/integration/test_storage_s3/configs/access.xml b/tests/integration/test_storage_s3/configs/access.xml
new file mode 100644
index 00000000000..8bded9104f6
--- /dev/null
+++ b/tests/integration/test_storage_s3/configs/access.xml
@@ -0,0 +1,19 @@
+
+
+
+
+ default
+ default
+
+ GRANT admin_role
+
+
+
+
+
+
+ GRANT USE NAMED COLLECTION ON * WITH GRANT OPTION
+
+
+
+
diff --git a/tests/integration/test_storage_s3/configs/users.xml b/tests/integration/test_storage_s3/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_s3/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py
index 6c251d2f84e..45437fefa79 100644
--- a/tests/integration/test_storage_s3/test.py
+++ b/tests/integration/test_storage_s3/test.py
@@ -55,6 +55,17 @@ def started_cluster():
"configs/named_collections.xml",
"configs/schema_cache.xml",
],
+ user_configs=["configs/access.xml", "configs/users.xml"],
+ )
+ cluster.add_instance(
+ "dummy_without_named_collections",
+ with_minio=True,
+ main_configs=[
+ "configs/defaultS3.xml",
+ "configs/named_collections.xml",
+ "configs/schema_cache.xml",
+ ],
+ user_configs=["configs/access.xml"],
)
cluster.add_instance(
"s3_max_redirects",
@@ -918,25 +929,61 @@ def test_truncate_table(started_cluster):
def test_predefined_connection_configuration(started_cluster):
bucket = started_cluster.minio_bucket
- instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
+ instance = started_cluster.instances[
+ "dummy_without_named_collections"
+ ] # type: ClickHouseInstance
name = "test_table"
- instance.query("drop table if exists {}".format(name))
- instance.query(
- "CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name)
+ instance.query("CREATE USER user")
+ instance.query("GRANT CREATE ON *.* TO user")
+ instance.query("GRANT SOURCES ON *.* TO user")
+ instance.query("GRANT SELECT ON *.* TO user")
+
+ instance.query(f"drop table if exists {name}", user="user")
+ error = instance.query_and_get_error(
+ f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')"
+ )
+ assert (
+ "To execute this query it's necessary to have grant NAMED COLLECTION ON s3_conf1"
+ in error
+ )
+ error = instance.query_and_get_error(
+ f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')",
+ user="user",
+ )
+ assert (
+ "To execute this query it's necessary to have grant NAMED COLLECTION ON s3_conf1"
+ in error
)
- instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
- result = instance.query("SELECT * FROM {}".format(name))
+ instance.query("GRANT NAMED COLLECTION ON s3_conf1 TO user", user="admin")
+ instance.query(
+ f"CREATE TABLE {name} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')",
+ user="user",
+ )
+
+ instance.query(f"INSERT INTO {name} SELECT number FROM numbers(10)")
+ result = instance.query(f"SELECT * FROM {name}")
assert result == instance.query("SELECT number FROM numbers(10)")
result = instance.query(
- "SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')"
+ "SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')", user="user"
)
assert result == instance.query("SELECT number FROM numbers(10)")
- result = instance.query_and_get_error("SELECT * FROM s3(no_collection)")
- assert "There is no named collection `no_collection`" in result
+ error = instance.query_and_get_error("SELECT * FROM s3(no_collection)")
+ assert (
+ "To execute this query it's necessary to have grant NAMED COLLECTION ON no_collection"
+ in error
+ )
+ error = instance.query_and_get_error("SELECT * FROM s3(no_collection)", user="user")
+ assert (
+ "To execute this query it's necessary to have grant NAMED COLLECTION ON no_collection"
+ in error
+ )
+ instance = started_cluster.instances["dummy"] # has named collection access
+ error = instance.query_and_get_error("SELECT * FROM s3(no_collection)")
+ assert "There is no named collection `no_collection`" in error
result = ""
diff --git a/tests/integration/test_storage_s3/test_invalid_env_credentials.py b/tests/integration/test_storage_s3/test_invalid_env_credentials.py
index 0ee679014b1..d91cb7d68f9 100644
--- a/tests/integration/test_storage_s3/test_invalid_env_credentials.py
+++ b/tests/integration/test_storage_s3/test_invalid_env_credentials.py
@@ -92,6 +92,7 @@ def started_cluster():
"configs/use_environment_credentials.xml",
"configs/named_collections.xml",
],
+ user_configs=["configs/users.xml"],
)
logging.info("Starting cluster...")
diff --git a/tests/integration/test_storage_url/configs/users.xml b/tests/integration/test_storage_url/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_storage_url/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_storage_url/test.py b/tests/integration/test_storage_url/test.py
index f360ec105ec..7f359078967 100644
--- a/tests/integration/test_storage_url/test.py
+++ b/tests/integration/test_storage_url/test.py
@@ -6,6 +6,7 @@ cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
main_configs=["configs/conf.xml", "configs/named_collections.xml"],
+ user_configs=["configs/users.xml"],
with_nginx=True,
)
diff --git a/tests/integration/test_table_function_mongodb/configs/users.xml b/tests/integration/test_table_function_mongodb/configs/users.xml
new file mode 100644
index 00000000000..4b6ba057ecb
--- /dev/null
+++ b/tests/integration/test_table_function_mongodb/configs/users.xml
@@ -0,0 +1,9 @@
+
+
+
+
+ default
+ 1
+
+
+
diff --git a/tests/integration/test_table_function_mongodb/test.py b/tests/integration/test_table_function_mongodb/test.py
index e0ad71b0079..3b6ace9d11b 100644
--- a/tests/integration/test_table_function_mongodb/test.py
+++ b/tests/integration/test_table_function_mongodb/test.py
@@ -16,6 +16,7 @@ def started_cluster(request):
main_configs=[
"configs_secure/config.d/ssl_conf.xml",
],
+ user_configs=["configs/users.xml"],
with_mongo_secure=request.param,
)
cluster.start()
diff --git a/tests/queries/0_stateless/00417_kill_query.reference b/tests/queries/0_stateless/00417_kill_query.reference
index 7e89d9674db..1a3b47964c0 100644
--- a/tests/queries/0_stateless/00417_kill_query.reference
+++ b/tests/queries/0_stateless/00417_kill_query.reference
@@ -1,2 +1,2 @@
-SELECT sleep(1) FROM system.numbers LIMIT 4
-SELECT sleep(1) FROM system.numbers LIMIT 5
+SELECT sleep(1) FROM system.numbers LIMIT 30
+SELECT sleep(1) FROM system.numbers LIMIT 31
diff --git a/tests/queries/0_stateless/00417_kill_query.sh b/tests/queries/0_stateless/00417_kill_query.sh
index dc690caca39..cd5b788a147 100755
--- a/tests/queries/0_stateless/00417_kill_query.sh
+++ b/tests/queries/0_stateless/00417_kill_query.sh
@@ -9,13 +9,14 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
QUERY_FIELND_NUM=4
-$CLICKHOUSE_CLIENT --max_block_size=1 -q "SELECT sleep(1) FROM system.numbers LIMIT 4" &>/dev/null &
+$CLICKHOUSE_CLIENT --max_block_size=1 -q "SELECT sleep(1) FROM system.numbers LIMIT 30" &>/dev/null &
sleep 1
$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE current_database = '${CLICKHOUSE_DATABASE}' and query LIKE 'SELECT sleep(%' AND (elapsed >= 0.) SYNC" | cut -f $QUERY_FIELND_NUM
-$CLICKHOUSE_CLIENT --max_block_size=1 -q "SELECT sleep(1) FROM system.numbers LIMIT 5" &>/dev/null &
+# LIMIT 31 makes this query different from the previous one
+$CLICKHOUSE_CLIENT --max_block_size=1 -q "SELECT sleep(1) FROM system.numbers LIMIT 31" &>/dev/null &
sleep 1
-$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE current_database = '${CLICKHOUSE_DATABASE}' and query = 'SELECT sleep(1) FROM system.numbers LIMIT 5' ASYNC" | cut -f $QUERY_FIELND_NUM
+$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE current_database = '${CLICKHOUSE_DATABASE}' and query = 'SELECT sleep(1) FROM system.numbers LIMIT 31' ASYNC" | cut -f $QUERY_FIELND_NUM
$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE 0 ASYNC"
$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE 0 FORMAT TabSeparated"
diff --git a/tests/queries/0_stateless/00941_system_columns_race_condition.sh b/tests/queries/0_stateless/00941_system_columns_race_condition.sh
index 69dfb30cd2c..4f2cd6ee91b 100755
--- a/tests/queries/0_stateless/00941_system_columns_race_condition.sh
+++ b/tests/queries/0_stateless/00941_system_columns_race_condition.sh
@@ -14,35 +14,43 @@ $CLICKHOUSE_CLIENT -q "CREATE TABLE alter_table (a UInt8, b Int16, c Float32, d
function thread1()
{
- # NOTE: database = $CLICKHOUSE_DATABASE is unwanted
- while true; do $CLICKHOUSE_CLIENT --query "SELECT name FROM system.columns UNION ALL SELECT name FROM system.columns FORMAT Null"; done
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
+ # NOTE: database = $CLICKHOUSE_DATABASE is unwanted
+ $CLICKHOUSE_CLIENT --query "SELECT name FROM system.columns UNION ALL SELECT name FROM system.columns FORMAT Null";
+ done
}
function thread2()
{
- while true; do $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;"; done
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
+ $CLICKHOUSE_CLIENT -n --query "ALTER TABLE alter_table ADD COLUMN h String; ALTER TABLE alter_table MODIFY COLUMN h UInt64; ALTER TABLE alter_table DROP COLUMN h;";
+ done
}
# https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout
export -f thread1;
export -f thread2;
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread1 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
-timeout 15 bash -c thread2 2> /dev/null &
+TIMEOUT=15
+
+thread1 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread1 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
+thread2 $TIMEOUT 2> /dev/null &
wait
diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference
index 9e6249bfcb3..b1ce5ab71d5 100644
--- a/tests/queries/0_stateless/01271_show_privileges.reference
+++ b/tests/queries/0_stateless/01271_show_privileges.reference
@@ -39,7 +39,7 @@ ALTER MOVE PARTITION ['ALTER MOVE PART','MOVE PARTITION','MOVE PART'] TABLE ALTE
ALTER FETCH PARTITION ['ALTER FETCH PART','FETCH PARTITION'] TABLE ALTER TABLE
ALTER FREEZE PARTITION ['FREEZE PARTITION','UNFREEZE'] TABLE ALTER TABLE
ALTER DATABASE SETTINGS ['ALTER DATABASE SETTING','ALTER MODIFY DATABASE SETTING','MODIFY DATABASE SETTING'] DATABASE ALTER DATABASE
-ALTER NAMED COLLECTION [] NAMED_COLLECTION NAMED COLLECTION CONTROL
+ALTER NAMED COLLECTION [] NAMED_COLLECTION NAMED COLLECTION ADMIN
ALTER TABLE [] \N ALTER
ALTER DATABASE [] \N ALTER
ALTER VIEW REFRESH ['ALTER LIVE VIEW REFRESH','REFRESH VIEW'] VIEW ALTER VIEW
@@ -53,14 +53,14 @@ CREATE DICTIONARY [] DICTIONARY CREATE
CREATE TEMPORARY TABLE [] GLOBAL CREATE ARBITRARY TEMPORARY TABLE
CREATE ARBITRARY TEMPORARY TABLE [] GLOBAL CREATE
CREATE FUNCTION [] GLOBAL CREATE
-CREATE NAMED COLLECTION [] NAMED_COLLECTION NAMED COLLECTION CONTROL
+CREATE NAMED COLLECTION [] NAMED_COLLECTION NAMED COLLECTION ADMIN
CREATE [] \N ALL
DROP DATABASE [] DATABASE DROP
DROP TABLE [] TABLE DROP
DROP VIEW [] VIEW DROP
DROP DICTIONARY [] DICTIONARY DROP
DROP FUNCTION [] GLOBAL DROP
-DROP NAMED COLLECTION [] NAMED_COLLECTION NAMED COLLECTION CONTROL
+DROP NAMED COLLECTION [] NAMED_COLLECTION NAMED COLLECTION ADMIN
DROP [] \N ALL
UNDROP TABLE [] TABLE ALL
TRUNCATE ['TRUNCATE TABLE'] TABLE ALL
@@ -92,9 +92,10 @@ SHOW QUOTAS ['SHOW CREATE QUOTA'] GLOBAL SHOW ACCESS
SHOW SETTINGS PROFILES ['SHOW PROFILES','SHOW CREATE SETTINGS PROFILE','SHOW CREATE PROFILE'] GLOBAL SHOW ACCESS
SHOW ACCESS [] \N ACCESS MANAGEMENT
ACCESS MANAGEMENT [] \N ALL
-SHOW NAMED COLLECTIONS ['SHOW NAMED COLLECTIONS'] NAMED_COLLECTION NAMED COLLECTION CONTROL
-SHOW NAMED COLLECTIONS SECRETS ['SHOW NAMED COLLECTIONS SECRETS'] NAMED_COLLECTION NAMED COLLECTION CONTROL
-NAMED COLLECTION CONTROL [] NAMED_COLLECTION ALL
+SHOW NAMED COLLECTIONS ['SHOW NAMED COLLECTIONS'] NAMED_COLLECTION NAMED COLLECTION ADMIN
+SHOW NAMED COLLECTIONS SECRETS ['SHOW NAMED COLLECTIONS SECRETS'] NAMED_COLLECTION NAMED COLLECTION ADMIN
+NAMED COLLECTION ['NAMED COLLECTION USAGE','USE NAMED COLLECTION'] NAMED_COLLECTION NAMED COLLECTION ADMIN
+NAMED COLLECTION ADMIN ['NAMED COLLECTION CONTROL'] NAMED_COLLECTION ALL
SYSTEM SHUTDOWN ['SYSTEM KILL','SHUTDOWN'] GLOBAL SYSTEM
SYSTEM DROP DNS CACHE ['SYSTEM DROP DNS','DROP DNS CACHE','DROP DNS'] GLOBAL SYSTEM DROP CACHE
SYSTEM DROP MARK CACHE ['SYSTEM DROP MARK','DROP MARK CACHE','DROP MARKS'] GLOBAL SYSTEM DROP CACHE
diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh
index aee69e64b1b..57409d782ae 100755
--- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh
+++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh
@@ -12,22 +12,27 @@ $CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --query "CREATE DATABA
function thread1()
{
- while true; do
- $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x;
- DROP TABLE test_01320.r;" 2>&1 | grep -F "Code:" | grep -v "UNKNOWN_DATABASE"
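+ # Same pattern as other race tests: bound the loop by wall-clock time instead of an external timeout.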
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
+ $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_01320.r (x UInt64) ENGINE = ReplicatedMergeTree('/test/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/table', 'r') ORDER BY x; DROP TABLE test_01320.r;"
done
}
function thread2()
{
- while true; do $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA test_01320.r" 2>/dev/null; done
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
+ $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA test_01320.r" 2>/dev/null;
+ done
}
export -f thread1
export -f thread2
-timeout 10 bash -c thread1 &
-timeout 10 bash -c thread2 &
+TIMEOUT=10
+
+thread1 $TIMEOUT &
+thread2 $TIMEOUT &
wait
diff --git a/tests/queries/0_stateless/01632_tinylog_read_write.sh b/tests/queries/0_stateless/01632_tinylog_read_write.sh
index 69f985a9d0d..10625ec5d27 100755
--- a/tests/queries/0_stateless/01632_tinylog_read_write.sh
+++ b/tests/queries/0_stateless/01632_tinylog_read_write.sh
@@ -11,14 +11,16 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --multiquery --query "DROP TABLE IF EXISTS test; CREATE TABLE IF NOT EXISTS test (x UInt64, s Array(Nullable(String))) ENGINE = TinyLog;"
function thread_select {
- while true; do
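+ # Each reader/writer stops on its own after the time limit passed as the first argument.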
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
$CLICKHOUSE_CLIENT --local_filesystem_read_method pread --query "SELECT * FROM test FORMAT Null"
sleep 0.0$RANDOM
done
}
function thread_insert {
- while true; do
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
$CLICKHOUSE_CLIENT --query "INSERT INTO test VALUES (1, ['Hello'])"
sleep 0.0$RANDOM
done
@@ -30,15 +32,17 @@ export -f thread_insert
# Do randomized queries and expect nothing extraordinary happens.
-timeout 10 bash -c 'thread_select' &
-timeout 10 bash -c 'thread_select' &
-timeout 10 bash -c 'thread_select' &
-timeout 10 bash -c 'thread_select' &
+TIMEOUT=10
-timeout 10 bash -c 'thread_insert' &
-timeout 10 bash -c 'thread_insert' &
-timeout 10 bash -c 'thread_insert' &
-timeout 10 bash -c 'thread_insert' &
+thread_select $TIMEOUT &
+thread_select $TIMEOUT &
+thread_select $TIMEOUT &
+thread_select $TIMEOUT &
+
+thread_insert $TIMEOUT &
+thread_insert $TIMEOUT &
+thread_insert $TIMEOUT &
+thread_insert $TIMEOUT &
wait
echo "Done"
diff --git a/tests/queries/0_stateless/02050_client_profile_events.sh b/tests/queries/0_stateless/02050_client_profile_events.sh
index dce0c80525a..05e48de771d 100755
--- a/tests/queries/0_stateless/02050_client_profile_events.sh
+++ b/tests/queries/0_stateless/02050_client_profile_events.sh
@@ -25,7 +25,7 @@ profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events -
test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)"
echo 'print each 100 ms'
-profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events --profile-events-delay-ms=100 -q 'select sleep(1) from numbers(2) format Null' |& grep -c 'SelectedRows')"
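+# Many short sleeps instead of two long ones: total runtime stays ~2s, but the query spans several 100 ms profile-events intervals.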
+profile_events="$($CLICKHOUSE_CLIENT --max_block_size 1 --print-profile-events --profile-events-delay-ms=100 -q 'select sleep(0.2) from numbers(10) format Null' |& grep -c 'SelectedRows')"
test "$profile_events" -gt 1 && echo OK || echo "FAIL ($profile_events)"
echo 'check that ProfileEvents is new for each query'
diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference
index 3834b05601f..69b4e458940 100644
--- a/tests/queries/0_stateless/02117_show_create_table_system.reference
+++ b/tests/queries/0_stateless/02117_show_create_table_system.reference
@@ -297,7 +297,7 @@ CREATE TABLE system.grants
(
`user_name` Nullable(String),
`role_name` Nullable(String),
- `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH 
DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'AZURE' = 161, 'SOURCES' = 162, 'CLUSTER' = 163, 'ALL' = 164, 'NONE' = 165),
+ `access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION' = 96, 'NAMED COLLECTION ADMIN' = 97, 'SYSTEM SHUTDOWN' = 98, 'SYSTEM DROP DNS CACHE' = 99, 'SYSTEM DROP MARK CACHE' = 100, 'SYSTEM DROP UNCOMPRESSED CACHE' = 101, 'SYSTEM DROP MMAP CACHE' = 102, 'SYSTEM DROP QUERY CACHE' = 103, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 104, 'SYSTEM DROP FILESYSTEM CACHE' = 105, 'SYSTEM DROP SCHEMA CACHE' = 106, 'SYSTEM DROP S3 CLIENT CACHE' = 107, 'SYSTEM DROP CACHE' = 108, 'SYSTEM RELOAD CONFIG' = 109, 'SYSTEM RELOAD USERS' = 110, 'SYSTEM RELOAD SYMBOLS' = 111, 'SYSTEM RELOAD DICTIONARY' = 112, 'SYSTEM RELOAD MODEL' = 113, 'SYSTEM RELOAD FUNCTION' = 114, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 115, 'SYSTEM RELOAD' = 116, 'SYSTEM RESTART DISK' = 117, 'SYSTEM MERGES' = 118, 'SYSTEM TTL MERGES' = 119, 'SYSTEM FETCHES' = 120, 'SYSTEM MOVES' = 121, 'SYSTEM DISTRIBUTED SENDS' = 122, 'SYSTEM REPLICATED SENDS' = 123, 'SYSTEM SENDS' = 124, 'SYSTEM REPLICATION QUEUES' = 125, 'SYSTEM DROP REPLICA' = 126, 'SYSTEM SYNC REPLICA' = 127, 'SYSTEM RESTART REPLICA' = 128, 'SYSTEM RESTORE REPLICA' = 129, 'SYSTEM WAIT LOADING PARTS' = 130, 'SYSTEM SYNC DATABASE REPLICA' = 131, 'SYSTEM SYNC TRANSACTION LOG' = 132, 'SYSTEM SYNC FILE CACHE' = 
133, 'SYSTEM FLUSH DISTRIBUTED' = 134, 'SYSTEM FLUSH LOGS' = 135, 'SYSTEM FLUSH' = 136, 'SYSTEM THREAD FUZZER' = 137, 'SYSTEM UNFREEZE' = 138, 'SYSTEM FAILPOINT' = 139, 'SYSTEM' = 140, 'dictGet' = 141, 'displaySecretsInShowAndSelect' = 142, 'addressToLine' = 143, 'addressToLineWithInlines' = 144, 'addressToSymbol' = 145, 'demangle' = 146, 'INTROSPECTION' = 147, 'FILE' = 148, 'URL' = 149, 'REMOTE' = 150, 'MONGO' = 151, 'REDIS' = 152, 'MEILISEARCH' = 153, 'MYSQL' = 154, 'POSTGRES' = 155, 'SQLITE' = 156, 'ODBC' = 157, 'JDBC' = 158, 'HDFS' = 159, 'S3' = 160, 'HIVE' = 161, 'AZURE' = 162, 'SOURCES' = 163, 'CLUSTER' = 164, 'ALL' = 165, 'NONE' = 166),
`database` Nullable(String),
`table` Nullable(String),
`column` Nullable(String),
@@ -582,10 +582,10 @@ ENGINE = SystemPartsColumns
COMMENT 'SYSTEM TABLE is built on the fly.'
CREATE TABLE system.privileges
(
- `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM FLUSH 
DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'AZURE' = 161, 'SOURCES' = 162, 'CLUSTER' = 163, 'ALL' = 164, 'NONE' = 165),
+ `privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION' = 96, 'NAMED COLLECTION ADMIN' = 97, 'SYSTEM SHUTDOWN' = 98, 'SYSTEM DROP DNS CACHE' = 99, 'SYSTEM DROP MARK CACHE' = 100, 'SYSTEM DROP UNCOMPRESSED CACHE' = 101, 'SYSTEM DROP MMAP CACHE' = 102, 'SYSTEM DROP QUERY CACHE' = 103, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 104, 'SYSTEM DROP FILESYSTEM CACHE' = 105, 'SYSTEM DROP SCHEMA CACHE' = 106, 'SYSTEM DROP S3 CLIENT CACHE' = 107, 'SYSTEM DROP CACHE' = 108, 'SYSTEM RELOAD CONFIG' = 109, 'SYSTEM RELOAD USERS' = 110, 'SYSTEM RELOAD SYMBOLS' = 111, 'SYSTEM RELOAD DICTIONARY' = 112, 'SYSTEM RELOAD MODEL' = 113, 'SYSTEM RELOAD FUNCTION' = 114, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 115, 'SYSTEM RELOAD' = 116, 'SYSTEM RESTART DISK' = 117, 'SYSTEM MERGES' = 118, 'SYSTEM TTL MERGES' = 119, 'SYSTEM FETCHES' = 120, 'SYSTEM MOVES' = 121, 'SYSTEM DISTRIBUTED SENDS' = 122, 'SYSTEM REPLICATED SENDS' = 123, 'SYSTEM SENDS' = 124, 'SYSTEM REPLICATION QUEUES' = 125, 'SYSTEM DROP REPLICA' = 126, 'SYSTEM SYNC REPLICA' = 127, 'SYSTEM RESTART REPLICA' = 128, 'SYSTEM RESTORE REPLICA' = 129, 'SYSTEM WAIT LOADING PARTS' = 130, 'SYSTEM SYNC DATABASE REPLICA' = 131, 'SYSTEM SYNC TRANSACTION LOG' = 132, 'SYSTEM SYNC FILE CACHE' = 133, 
'SYSTEM FLUSH DISTRIBUTED' = 134, 'SYSTEM FLUSH LOGS' = 135, 'SYSTEM FLUSH' = 136, 'SYSTEM THREAD FUZZER' = 137, 'SYSTEM UNFREEZE' = 138, 'SYSTEM FAILPOINT' = 139, 'SYSTEM' = 140, 'dictGet' = 141, 'displaySecretsInShowAndSelect' = 142, 'addressToLine' = 143, 'addressToLineWithInlines' = 144, 'addressToSymbol' = 145, 'demangle' = 146, 'INTROSPECTION' = 147, 'FILE' = 148, 'URL' = 149, 'REMOTE' = 150, 'MONGO' = 151, 'REDIS' = 152, 'MEILISEARCH' = 153, 'MYSQL' = 154, 'POSTGRES' = 155, 'SQLITE' = 156, 'ODBC' = 157, 'JDBC' = 158, 'HDFS' = 159, 'S3' = 160, 'HIVE' = 161, 'AZURE' = 162, 'SOURCES' = 163, 'CLUSTER' = 164, 'ALL' = 165, 'NONE' = 166),
`aliases` Array(String),
`level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5, 'NAMED_COLLECTION' = 6)),
- `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION CONTROL' = 96, 'SYSTEM SHUTDOWN' = 97, 'SYSTEM DROP DNS CACHE' = 98, 'SYSTEM DROP MARK CACHE' = 99, 'SYSTEM DROP UNCOMPRESSED CACHE' = 100, 'SYSTEM DROP MMAP CACHE' = 101, 'SYSTEM DROP QUERY CACHE' = 102, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 103, 'SYSTEM DROP FILESYSTEM CACHE' = 104, 'SYSTEM DROP SCHEMA CACHE' = 105, 'SYSTEM DROP S3 CLIENT CACHE' = 106, 'SYSTEM DROP CACHE' = 107, 'SYSTEM RELOAD CONFIG' = 108, 'SYSTEM RELOAD USERS' = 109, 'SYSTEM RELOAD SYMBOLS' = 110, 'SYSTEM RELOAD DICTIONARY' = 111, 'SYSTEM RELOAD MODEL' = 112, 'SYSTEM RELOAD FUNCTION' = 113, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 114, 'SYSTEM RELOAD' = 115, 'SYSTEM RESTART DISK' = 116, 'SYSTEM MERGES' = 117, 'SYSTEM TTL MERGES' = 118, 'SYSTEM FETCHES' = 119, 'SYSTEM MOVES' = 120, 'SYSTEM DISTRIBUTED SENDS' = 121, 'SYSTEM REPLICATED SENDS' = 122, 'SYSTEM SENDS' = 123, 'SYSTEM REPLICATION QUEUES' = 124, 'SYSTEM DROP REPLICA' = 125, 'SYSTEM SYNC REPLICA' = 126, 'SYSTEM RESTART REPLICA' = 127, 'SYSTEM RESTORE REPLICA' = 128, 'SYSTEM WAIT LOADING PARTS' = 129, 'SYSTEM SYNC DATABASE REPLICA' = 130, 'SYSTEM SYNC TRANSACTION LOG' = 131, 'SYSTEM SYNC FILE CACHE' = 132, 'SYSTEM 
FLUSH DISTRIBUTED' = 133, 'SYSTEM FLUSH LOGS' = 134, 'SYSTEM FLUSH' = 135, 'SYSTEM THREAD FUZZER' = 136, 'SYSTEM UNFREEZE' = 137, 'SYSTEM FAILPOINT' = 138, 'SYSTEM' = 139, 'dictGet' = 140, 'displaySecretsInShowAndSelect' = 141, 'addressToLine' = 142, 'addressToLineWithInlines' = 143, 'addressToSymbol' = 144, 'demangle' = 145, 'INTROSPECTION' = 146, 'FILE' = 147, 'URL' = 148, 'REMOTE' = 149, 'MONGO' = 150, 'REDIS' = 151, 'MEILISEARCH' = 152, 'MYSQL' = 153, 'POSTGRES' = 154, 'SQLITE' = 155, 'ODBC' = 156, 'JDBC' = 157, 'HDFS' = 158, 'S3' = 159, 'HIVE' = 160, 'AZURE' = 161, 'SOURCES' = 162, 'CLUSTER' = 163, 'ALL' = 164, 'NONE' = 165))
+ `parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SHOW FILESYSTEM CACHES' = 5, 'SELECT' = 6, 'INSERT' = 7, 'ALTER UPDATE' = 8, 'ALTER DELETE' = 9, 'ALTER ADD COLUMN' = 10, 'ALTER MODIFY COLUMN' = 11, 'ALTER DROP COLUMN' = 12, 'ALTER COMMENT COLUMN' = 13, 'ALTER CLEAR COLUMN' = 14, 'ALTER RENAME COLUMN' = 15, 'ALTER MATERIALIZE COLUMN' = 16, 'ALTER COLUMN' = 17, 'ALTER MODIFY COMMENT' = 18, 'ALTER ORDER BY' = 19, 'ALTER SAMPLE BY' = 20, 'ALTER ADD INDEX' = 21, 'ALTER DROP INDEX' = 22, 'ALTER MATERIALIZE INDEX' = 23, 'ALTER CLEAR INDEX' = 24, 'ALTER INDEX' = 25, 'ALTER ADD PROJECTION' = 26, 'ALTER DROP PROJECTION' = 27, 'ALTER MATERIALIZE PROJECTION' = 28, 'ALTER CLEAR PROJECTION' = 29, 'ALTER PROJECTION' = 30, 'ALTER ADD CONSTRAINT' = 31, 'ALTER DROP CONSTRAINT' = 32, 'ALTER CONSTRAINT' = 33, 'ALTER TTL' = 34, 'ALTER MATERIALIZE TTL' = 35, 'ALTER SETTINGS' = 36, 'ALTER MOVE PARTITION' = 37, 'ALTER FETCH PARTITION' = 38, 'ALTER FREEZE PARTITION' = 39, 'ALTER DATABASE SETTINGS' = 40, 'ALTER NAMED COLLECTION' = 41, 'ALTER TABLE' = 42, 'ALTER DATABASE' = 43, 'ALTER VIEW REFRESH' = 44, 'ALTER VIEW MODIFY QUERY' = 45, 'ALTER VIEW' = 46, 'ALTER' = 47, 'CREATE DATABASE' = 48, 'CREATE TABLE' = 49, 'CREATE VIEW' = 50, 'CREATE DICTIONARY' = 51, 'CREATE TEMPORARY TABLE' = 52, 'CREATE ARBITRARY TEMPORARY TABLE' = 53, 'CREATE FUNCTION' = 54, 'CREATE NAMED COLLECTION' = 55, 'CREATE' = 56, 'DROP DATABASE' = 57, 'DROP TABLE' = 58, 'DROP VIEW' = 59, 'DROP DICTIONARY' = 60, 'DROP FUNCTION' = 61, 'DROP NAMED COLLECTION' = 62, 'DROP' = 63, 'UNDROP TABLE' = 64, 'TRUNCATE' = 65, 'OPTIMIZE' = 66, 'BACKUP' = 67, 'KILL QUERY' = 68, 'KILL TRANSACTION' = 69, 'MOVE PARTITION BETWEEN SHARDS' = 70, 'CREATE USER' = 71, 'ALTER USER' = 72, 'DROP USER' = 73, 'CREATE ROLE' = 74, 'ALTER ROLE' = 75, 'DROP ROLE' = 76, 'ROLE ADMIN' = 77, 'CREATE ROW POLICY' = 78, 'ALTER ROW POLICY' = 79, 'DROP ROW POLICY' = 80, 'CREATE QUOTA' = 81, 'ALTER QUOTA' = 82, 'DROP QUOTA' = 83, 'CREATE SETTINGS PROFILE' = 84, 'ALTER SETTINGS PROFILE' = 85, 'DROP SETTINGS PROFILE' = 86, 'SHOW USERS' = 87, 'SHOW ROLES' = 88, 'SHOW ROW POLICIES' = 89, 'SHOW QUOTAS' = 90, 'SHOW SETTINGS PROFILES' = 91, 'SHOW ACCESS' = 92, 'ACCESS MANAGEMENT' = 93, 'SHOW NAMED COLLECTIONS' = 94, 'SHOW NAMED COLLECTIONS SECRETS' = 95, 'NAMED COLLECTION' = 96, 'NAMED COLLECTION ADMIN' = 97, 'SYSTEM SHUTDOWN' = 98, 'SYSTEM DROP DNS CACHE' = 99, 'SYSTEM DROP MARK CACHE' = 100, 'SYSTEM DROP UNCOMPRESSED CACHE' = 101, 'SYSTEM DROP MMAP CACHE' = 102, 'SYSTEM DROP QUERY CACHE' = 103, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 104, 'SYSTEM DROP FILESYSTEM CACHE' = 105, 'SYSTEM DROP SCHEMA CACHE' = 106, 'SYSTEM DROP S3 CLIENT CACHE' = 107, 'SYSTEM DROP CACHE' = 108, 'SYSTEM RELOAD CONFIG' = 109, 'SYSTEM RELOAD USERS' = 110, 'SYSTEM RELOAD SYMBOLS' = 111, 'SYSTEM RELOAD DICTIONARY' = 112, 'SYSTEM RELOAD MODEL' = 113, 'SYSTEM RELOAD FUNCTION' = 114, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 115, 'SYSTEM RELOAD' = 116, 'SYSTEM RESTART DISK' = 117, 'SYSTEM MERGES' = 118, 'SYSTEM TTL MERGES' = 119, 'SYSTEM FETCHES' = 120, 'SYSTEM MOVES' = 121, 'SYSTEM DISTRIBUTED SENDS' = 122, 'SYSTEM REPLICATED SENDS' = 123, 'SYSTEM SENDS' = 124, 'SYSTEM REPLICATION QUEUES' = 125, 'SYSTEM DROP REPLICA' = 126, 'SYSTEM SYNC REPLICA' = 127, 'SYSTEM RESTART REPLICA' = 128, 'SYSTEM RESTORE REPLICA' = 129, 'SYSTEM WAIT LOADING PARTS' = 130, 'SYSTEM SYNC DATABASE REPLICA' = 131, 'SYSTEM SYNC TRANSACTION LOG' = 132, 'SYSTEM SYNC FILE 
CACHE' = 133, 'SYSTEM FLUSH DISTRIBUTED' = 134, 'SYSTEM FLUSH LOGS' = 135, 'SYSTEM FLUSH' = 136, 'SYSTEM THREAD FUZZER' = 137, 'SYSTEM UNFREEZE' = 138, 'SYSTEM FAILPOINT' = 139, 'SYSTEM' = 140, 'dictGet' = 141, 'displaySecretsInShowAndSelect' = 142, 'addressToLine' = 143, 'addressToLineWithInlines' = 144, 'addressToSymbol' = 145, 'demangle' = 146, 'INTROSPECTION' = 147, 'FILE' = 148, 'URL' = 149, 'REMOTE' = 150, 'MONGO' = 151, 'REDIS' = 152, 'MEILISEARCH' = 153, 'MYSQL' = 154, 'POSTGRES' = 155, 'SQLITE' = 156, 'ODBC' = 157, 'JDBC' = 158, 'HDFS' = 159, 'S3' = 160, 'HIVE' = 161, 'AZURE' = 162, 'SOURCES' = 163, 'CLUSTER' = 164, 'ALL' = 165, 'NONE' = 166))
)
ENGINE = SystemPrivileges
COMMENT 'SYSTEM TABLE is built on the fly.'
diff --git a/tests/queries/0_stateless/02184_default_table_engine.reference b/tests/queries/0_stateless/02184_default_table_engine.reference
index 870dff90efa..495b9627acb 100644
--- a/tests/queries/0_stateless/02184_default_table_engine.reference
+++ b/tests/queries/0_stateless/02184_default_table_engine.reference
@@ -27,3 +27,4 @@ CREATE TABLE default.val2\n(\n `n` Int32\n) AS values(\'n int\', 1, 2)
CREATE TABLE default.log\n(\n `n` Int32\n)\nENGINE = Log
CREATE TABLE default.kek\n(\n `n` Int32\n)\nENGINE = Memory
CREATE TABLE default.lol\n(\n `n` Int32\n)\nENGINE = MergeTree\nORDER BY n\nSETTINGS min_bytes_for_wide_part = 123, index_granularity = 8192
+CREATE TEMPORARY TABLE tmp_log\n(\n `n` Int32\n)\nENGINE = Log
diff --git a/tests/queries/0_stateless/02184_default_table_engine.sql b/tests/queries/0_stateless/02184_default_table_engine.sql
index 109875d53a5..a984ec1b6c9 100644
--- a/tests/queries/0_stateless/02184_default_table_engine.sql
+++ b/tests/queries/0_stateless/02184_default_table_engine.sql
@@ -83,8 +83,8 @@ CREATE TEMPORARY TABLE tmp (n int);
SHOW CREATE TEMPORARY TABLE tmp;
CREATE TEMPORARY TABLE tmp1 (n int) ENGINE=Memory;
CREATE TEMPORARY TABLE tmp2 (n int) ENGINE=Log;
-CREATE TEMPORARY TABLE tmp2 (n int) ORDER BY n; -- {serverError 80}
-CREATE TEMPORARY TABLE tmp2 (n int, PRIMARY KEY (n)); -- {serverError 80}
+CREATE TEMPORARY TABLE tmp2 (n int) ORDER BY n; -- {serverError 36}
+CREATE TEMPORARY TABLE tmp2 (n int, PRIMARY KEY (n)); -- {serverError 36}
CREATE TABLE log (n int);
SHOW CREATE log;
@@ -128,3 +128,7 @@ SHOW CREATE TABLE kek;
SHOW CREATE TABLE lol;
DROP TABLE kek;
DROP TABLE lol;
+
+SET default_temporary_table_engine = 'Log';
+CREATE TEMPORARY TABLE tmp_log (n int);
+SHOW CREATE TEMPORARY TABLE tmp_log;
diff --git a/tests/queries/0_stateless/02210_processors_profile_log.reference b/tests/queries/0_stateless/02210_processors_profile_log.reference
index 181022d2421..41543d0706a 100644
--- a/tests/queries/0_stateless/02210_processors_profile_log.reference
+++ b/tests/queries/0_stateless/02210_processors_profile_log.reference
@@ -18,13 +18,13 @@ SELECT
multiIf(
-- ExpressionTransform executes sleep(),
-- so IProcessor::work() will spend 1 sec.
- name = 'ExpressionTransform', elapsed_us>1e6,
+ name = 'ExpressionTransform', elapsed_us>=1e6,
-- SourceFromSingleChunk, that feed data to ExpressionTransform,
-- will feed first block and then wait in PortFull.
- name = 'SourceFromSingleChunk', output_wait_elapsed_us>1e6,
+ name = 'SourceFromSingleChunk', output_wait_elapsed_us>=1e6,
-- NullSource/LazyOutputFormatLazyOutputFormat are the outputs
-- so they cannot starts to execute before sleep(1) will be executed.
- input_wait_elapsed_us>1e6)
+ input_wait_elapsed_us>=1e6)
elapsed,
input_rows,
input_bytes,
diff --git a/tests/queries/0_stateless/02210_processors_profile_log.sql b/tests/queries/0_stateless/02210_processors_profile_log.sql
index 44e563ef57b..a15ed26fd67 100644
--- a/tests/queries/0_stateless/02210_processors_profile_log.sql
+++ b/tests/queries/0_stateless/02210_processors_profile_log.sql
@@ -15,13 +15,13 @@ SELECT
multiIf(
-- ExpressionTransform executes sleep(),
-- so IProcessor::work() will spend 1 sec.
- name = 'ExpressionTransform', elapsed_us>1e6,
+ name = 'ExpressionTransform', elapsed_us>=1e6,
-- SourceFromSingleChunk, that feed data to ExpressionTransform,
-- will feed first block and then wait in PortFull.
- name = 'SourceFromSingleChunk', output_wait_elapsed_us>1e6,
+ name = 'SourceFromSingleChunk', output_wait_elapsed_us>=1e6,
-- NullSource/LazyOutputFormatLazyOutputFormat are the outputs
-- so they cannot starts to execute before sleep(1) will be executed.
- input_wait_elapsed_us>1e6)
+ input_wait_elapsed_us>=1e6)
elapsed,
input_rows,
input_bytes,
diff --git a/tests/queries/0_stateless/02470_mutation_sync_race.sh b/tests/queries/0_stateless/02470_mutation_sync_race.sh
index 6c259e46cb1..37e99663ab5 100755
--- a/tests/queries/0_stateless/02470_mutation_sync_race.sh
+++ b/tests/queries/0_stateless/02470_mutation_sync_race.sh
@@ -12,7 +12,11 @@ $CLICKHOUSE_CLIENT -q "insert into src values (0)"
function thread()
{
+ local TIMELIMIT=$((SECONDS+$1))
for i in $(seq 1000); do
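+ # Bail out of the fixed 1000-iteration loop once the time budget is spent.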
+ if [ $SECONDS -ge "$TIMELIMIT" ]; then
+ return
+ fi
$CLICKHOUSE_CLIENT -q "alter table src detach partition tuple()"
$CLICKHOUSE_CLIENT -q "alter table src attach partition tuple()"
$CLICKHOUSE_CLIENT -q "alter table src update A = ${i} where 1 settings mutations_sync=2"
@@ -20,8 +24,6 @@ function thread()
done
}
-export -f thread;
-
TIMEOUT=30
-timeout $TIMEOUT bash -c thread || true
+thread $TIMEOUT || true
\ No newline at end of file
diff --git a/tests/queries/0_stateless/02481_async_insert_race_long.sh b/tests/queries/0_stateless/02481_async_insert_race_long.sh
index cec9278c127..c4b026c6aba 100755
--- a/tests/queries/0_stateless/02481_async_insert_race_long.sh
+++ b/tests/queries/0_stateless/02481_async_insert_race_long.sh
@@ -11,21 +11,24 @@ export MY_CLICKHOUSE_CLIENT="$CLICKHOUSE_CLIENT --async_insert_busy_timeout_ms 1
function insert1()
{
- while true; do
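+ # All workers stop on their own after $1 seconds, so no external timeout wrapper is needed.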
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 0 -q 'INSERT INTO async_inserts_race FORMAT CSV 1,"a"'
done
}
function insert2()
{
- while true; do
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 0 -q 'INSERT INTO async_inserts_race FORMAT JSONEachRow {"id": 5, "s": "e"} {"id": 6, "s": "f"}'
done
}
function insert3()
{
- while true; do
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
${MY_CLICKHOUSE_CLIENT} --wait_for_async_insert 1 -q "INSERT INTO async_inserts_race VALUES (7, 'g') (8, 'h')" &
sleep 0.05
done
@@ -33,29 +36,29 @@ function insert3()
function select1()
{
- while true; do
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
${MY_CLICKHOUSE_CLIENT} -q "SELECT * FROM async_inserts_race FORMAT Null"
done
-
}
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS async_inserts_race"
${CLICKHOUSE_CLIENT} -q "CREATE TABLE async_inserts_race (id UInt32, s String) ENGINE = MergeTree ORDER BY id"
-TIMEOUT=10
-
export -f insert1
export -f insert2
export -f insert3
export -f select1
+TIMEOUT=10
+
for _ in {1..3}; do
- timeout $TIMEOUT bash -c insert1 &
- timeout $TIMEOUT bash -c insert2 &
- timeout $TIMEOUT bash -c insert3 &
+ insert1 $TIMEOUT &
+ insert2 $TIMEOUT &
+ insert3 $TIMEOUT &
done
-timeout $TIMEOUT bash -c select1 &
+select1 $TIMEOUT &
wait
echo "OK"
diff --git a/tests/queries/0_stateless/02771_semi_join_use_nulls.reference b/tests/queries/0_stateless/02771_semi_join_use_nulls.reference
index 8d4b1a3a75e..91c0d964968 100644
--- a/tests/queries/0_stateless/02771_semi_join_use_nulls.reference
+++ b/tests/queries/0_stateless/02771_semi_join_use_nulls.reference
@@ -11,7 +11,9 @@
0 0
0 0
0 1
+\N 0
0 1
+\N 0
0 0
0 0
0 \N
@@ -25,7 +27,9 @@
0 0
0 0
0 2
+\N 1
0 2
+\N 1
0 \N
0 0
0 \N
@@ -39,7 +43,9 @@
0 \N
0 \N
0 1
+\N \N
0 1
+\N \N
0 0
0 0
0 0
@@ -53,7 +59,9 @@
0 0
0 0
\N 2
+\N 1
\N 2
+\N 1
0 0
0 0
0 0
@@ -67,7 +75,9 @@
0 0
0 0
0 1
+\N 0
0 1
+\N 0
0 0
0 0
0 0
@@ -81,7 +91,9 @@
0 0
0 0
1 2
+\N 1
1 2
+\N 1
0 \N
0 0
0 0
@@ -95,7 +107,9 @@
0 \N
0 \N
0 1
+\N \N
0 1
+\N \N
0 0
0 0
0 0
@@ -109,4 +123,6 @@
0 0
0 0
1 2
+\N 1
1 2
+\N 1
diff --git a/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2 b/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2
index 37b2e63761b..248461a98bb 100644
--- a/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2
+++ b/tests/queries/0_stateless/02771_semi_join_use_nulls.sql.j2
@@ -44,6 +44,12 @@ SELECT id > 1, d.idd FROM (SELECT {{ maybe_materialize }}(toLowCardinality(0)) A
ON a.id = d.idd
;
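+-- Check join keys of different types where the left-side key is NULL.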
+SELECT *
+FROM (SELECT {{ maybe_materialize }}(NULL :: Nullable(UInt64)) AS id) AS a
+{{ strictness }} {{ kind }} JOIN (SELECT {{ maybe_materialize }}(1 :: UInt32) AS id) AS d
+ON a.id = d.id
+;
+
{% endfor -%}
{% endfor -%}
{% endfor -%}
diff --git a/tests/queries/0_stateless/02789_object_type_invalid_num_of_rows.reference b/tests/queries/0_stateless/02789_object_type_invalid_num_of_rows.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02789_object_type_invalid_num_of_rows.sql b/tests/queries/0_stateless/02789_object_type_invalid_num_of_rows.sql
new file mode 100644
index 00000000000..d0fc6905593
--- /dev/null
+++ b/tests/queries/0_stateless/02789_object_type_invalid_num_of_rows.sql
@@ -0,0 +1,2 @@
+set allow_experimental_object_type=1;
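+-- Regression test: Object('json') inside GROUPING SETS must be rejected with NOT_IMPLEMENTED instead of causing an internal "invalid number of rows" error.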
+SELECT '0.02' GROUP BY GROUPING SETS (('6553.6'), (CAST('{"x" : 1}', 'Object(\'json\')'))) FORMAT Null; -- { serverError NOT_IMPLEMENTED }
diff --git a/tests/queries/0_stateless/02790_keyed_hash_bug.reference b/tests/queries/0_stateless/02790_keyed_hash_bug.reference
new file mode 100644
index 00000000000..a321a9052d0
--- /dev/null
+++ b/tests/queries/0_stateless/02790_keyed_hash_bug.reference
@@ -0,0 +1 @@
+16324913028386710556
diff --git a/tests/queries/0_stateless/02790_keyed_hash_bug.sql b/tests/queries/0_stateless/02790_keyed_hash_bug.sql
new file mode 100644
index 00000000000..409e284d0d5
--- /dev/null
+++ b/tests/queries/0_stateless/02790_keyed_hash_bug.sql
@@ -0,0 +1,2 @@
+--- Previously caused "MemorySanitizer: use-of-uninitialized-value" because we tried to read the hash key from an empty tuple column during interpretation
+SELECT sipHash64Keyed((1111111111111111111, toUInt64(222222222222223))) group by toUInt64(222222222222223);
diff --git a/utils/keeper-data-dumper/main.cpp b/utils/keeper-data-dumper/main.cpp
index 5a6fd15d72c..51a09b676dc 100644
--- a/utils/keeper-data-dumper/main.cpp
+++ b/utils/keeper-data-dumper/main.cpp
@@ -64,8 +64,8 @@ int main(int argc, char *argv[])
SnapshotsQueue snapshots_queue{1};
CoordinationSettingsPtr settings = std::make_shared<CoordinationSettings>();
KeeperContextPtr keeper_context = std::make_shared<KeeperContext>(true);
- keeper_context->setLogDisk(std::make_shared<DiskLocal>("LogDisk", argv[2], 0));
- keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("LogDisk", argv[1], 0));
+ keeper_context->setLogDisk(std::make_shared<DiskLocal>("LogDisk", argv[2]));
+ keeper_context->setSnapshotDisk(std::make_shared<DiskLocal>("LogDisk", argv[1]));
auto state_machine = std::make_shared<KeeperStateMachine>(queue, snapshots_queue, settings, keeper_context, nullptr);
state_machine->init();