Merge branch 'master' into mvcc_prototype

Alexander Tokmakov 2022-02-17 13:49:37 +03:00
commit dae044f86b
171 changed files with 3521 additions and 1726 deletions

.github/workflows/debug.yml (new file, 11 lines)

@ -0,0 +1,11 @@
# CI that runs for each commit; prints the environment variables and the content of GITHUB_EVENT_PATH
name: Debug
'on':
[push, pull_request, release]
jobs:
DebugInfo:
runs-on: ubuntu-latest
steps:
- uses: hmarr/debug-action@1201a20fc9d278ddddd5f0f46922d06513892491


@ -1065,6 +1065,41 @@ jobs:
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_s3_storage
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, s3 storage, actions)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
runs-on: [self-hosted, func-tester-aarch64]
@ -2844,6 +2879,7 @@ jobs:
- FunctionalStatefulTestDebug
- FunctionalStatefulTestRelease
- FunctionalStatefulTestReleaseDatabaseOrdinary
- FunctionalStatelessTestReleaseS3
- FunctionalStatefulTestAarch64
- FunctionalStatefulTestAsan
- FunctionalStatefulTestTsan


@ -1215,6 +1215,41 @@ jobs:
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestReleaseS3:
needs: [BuilderDebRelease]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/stateless_s3_storage
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=Stateless tests (release, s3 storage, actions)
REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
KILL_TIMEOUT=10800
EOF
- name: Download json reports
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- name: Clear repository
run: |
sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
- name: Check out repository code
uses: actions/checkout@v2
- name: Functional test
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
- name: Cleanup
if: always()
run: |
docker kill "$(docker ps -q)" ||:
docker rm -f "$(docker ps -a -q)" ||:
sudo rm -fr "$TEMP_PATH"
FunctionalStatelessTestAarch64:
needs: [BuilderDebAarch64]
runs-on: [self-hosted, func-tester-aarch64]
@ -3037,6 +3072,7 @@ jobs:
- FunctionalStatefulTestTsan
- FunctionalStatefulTestMsan
- FunctionalStatefulTestUBsan
- FunctionalStatelessTestReleaseS3
- StressTestDebug
- StressTestAsan
- StressTestTsan


@ -23,8 +23,8 @@ jobs:
uses: actions/checkout@v2
- name: Download packages and push to Artifactory
run: |
rm -rf "$TEMP_PATH" && mkdir -p "$REPO_COPY"
cp -r "$GITHUB_WORKSPACE" "$REPO_COPY"
rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY"
python3 ./tests/ci/push_to_artifactory.py --release "${{ github.ref }}" \
--commit '${{ github.sha }}' --all


@ -12,6 +12,18 @@
#include <tuple>
#include <limits>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/math/special_functions/fpclassify.hpp>
/// Use same extended double for all platforms
#if (LDBL_MANT_DIG == 64)
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;
#endif
namespace wide
{
@ -265,12 +277,23 @@ struct integer<Bits, Signed>::_impl
constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept
{
constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
static_assert(std::is_same_v<T, double> || std::is_same_v<T, FromDoubleIntermediateType>);
/// Implementation-specific behaviour on overflow (if we don't check here, a stack overflow will be triggered in bigint_cast).
if (!std::isfinite(t))
if constexpr (std::is_same_v<T, double>)
{
self = 0;
return;
if (!std::isfinite(t))
{
self = 0;
return;
}
}
else
{
if (!boost::math::isfinite(t))
{
self = 0;
return;
}
}
const T alpha = t / static_cast<T>(max_int);
@ -278,13 +301,13 @@ struct integer<Bits, Signed>::_impl
if (alpha <= static_cast<T>(max_int))
self = static_cast<uint64_t>(alpha);
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
set_multiplier<double>(self, alpha);
set_multiplier<double>(self, static_cast<double>(alpha));
self *= max_int;
self += static_cast<uint64_t>(t - floor(alpha) * static_cast<T>(max_int)); // += b_i
}
constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
CONSTEXPR_FROM_DOUBLE static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
{
constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
constexpr int64_t min_int = std::numeric_limits<int64_t>::lowest();
@ -294,24 +317,17 @@ struct integer<Bits, Signed>::_impl
/// the result may not fit in 64 bits.
/// The example of such a number is 9.22337e+18.
/// As to_Integral does a static_cast to int64_t, it may result in UB.
/// The necessary check here is that long double has enough significant (mantissa) bits to store the
/// The necessary check here is that FromDoubleIntermediateType has enough significant (mantissa) bits to store the
/// int64_t max value precisely.
// TODO Be compatible with Apple aarch64
#if not (defined(__APPLE__) && defined(__aarch64__))
static_assert(LDBL_MANT_DIG >= 64,
"On your system long double has less than 64 precision bits, "
"which may result in UB when initializing double from int64_t");
#endif
if (rhs > static_cast<long double>(min_int) && rhs < static_cast<long double>(max_int))
if (rhs > static_cast<FromDoubleIntermediateType>(min_int) && rhs < static_cast<FromDoubleIntermediateType>(max_int))
{
self = static_cast<int64_t>(rhs);
return;
}
const long double rhs_long_double = (static_cast<long double>(rhs) < 0)
? -static_cast<long double>(rhs)
const FromDoubleIntermediateType rhs_long_double = (static_cast<FromDoubleIntermediateType>(rhs) < 0)
? -static_cast<FromDoubleIntermediateType>(rhs)
: rhs;
set_multiplier(self, rhs_long_double);
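
The change above routes double-to-wide-integer conversion through a platform-dependent `FromDoubleIntermediateType` and splits the value against `max(uint64_t)`. The following is a minimal standalone sketch of that decomposition, not the library code: the names `Intermediate` and `fromDouble` are made up, it relies on GCC/Clang `unsigned __int128`, and the recursive step for quotients above 2^64 is omitted.

```cpp
/// Standalone sketch, not ClickHouse code: convert a large positive double to
/// a 128-bit integer by splitting it against max(uint64_t), using an extended
/// intermediate type when the platform provides one (LDBL_MANT_DIG == 64).
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <cstdio>

#if (LDBL_MANT_DIG == 64)
using Intermediate = long double;   /// x86 extended double: 64 mantissa bits
#else
using Intermediate = double;        /// fallback for this sketch only
#endif

static unsigned __int128 fromDouble(double rhs)
{
    if (!std::isfinite(rhs) || rhs <= 0)
        return 0;

    const Intermediate max_int = static_cast<Intermediate>(UINT64_MAX);
    const Intermediate t = static_cast<Intermediate>(rhs);

    if (t < max_int)                            /// fits into a single 64-bit "digit"
        return static_cast<uint64_t>(t);

    const Intermediate alpha = t / max_int;     /// assumed to fit into uint64_t here
    unsigned __int128 result = static_cast<uint64_t>(alpha);
    result *= static_cast<unsigned __int128>(UINT64_MAX);
    result += static_cast<uint64_t>(t - std::floor(alpha) * max_int);
    return result;
}

int main()
{
    const unsigned __int128 value = fromDouble(3.5e19);   /// larger than UINT64_MAX
    std::printf("hi=%llu lo=%llu\n",
                static_cast<unsigned long long>(value >> 64),
                static_cast<unsigned long long>(value & UINT64_MAX));
}
```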


@ -89,6 +89,10 @@ function run_tests()
# everything in parallel except DatabaseReplicated. See below.
fi
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--s3-storage')
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
ADDITIONAL_OPTIONS+=('--jobs')


@ -69,14 +69,14 @@ You can also download and install packages manually from [here](https://repo.cli
It is recommended to use official pre-compiled `tgz` archives for all Linux distributions, where installation of `deb` or `rpm` packages is not possible.
The required version can be downloaded with `curl` or `wget` from repository https://repo.clickhouse.com/tgz/.
After that, the downloaded archives should be unpacked and installed with the installation scripts. Example for the latest version:
After that, the downloaded archives should be unpacked and installed with the installation scripts. Example for the latest stable version:
``` bash
export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
curl -O https://repo.clickhouse.com/tgz/clickhouse-common-static-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/clickhouse-server-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/clickhouse-client-$LATEST_VERSION.tgz
export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep stable | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-server-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-client-$LATEST_VERSION.tgz
tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh


@ -14,7 +14,7 @@ toc_title: OpenTelemetry Support
ClickHouse accepts trace context HTTP headers, as described by the [W3C recommendation](https://www.w3.org/TR/trace-context/). It also accepts trace context over a native protocol that is used for communication between ClickHouse servers or between the client and server. For manual testing, trace context headers conforming to the Trace Context recommendation can be supplied to `clickhouse-client` using `--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.
If no parent trace context is supplied, ClickHouse can start a new trace, with probability controlled by the [opentelemetry_start_trace_probability](../operations/settings/settings.md#opentelemetry-start-trace-probability) setting.
If no parent trace context is supplied or the provided trace context does not comply with the W3C standard above, ClickHouse can start a new trace, with probability controlled by the [opentelemetry_start_trace_probability](../operations/settings/settings.md#opentelemetry-start-trace-probability) setting.
## Propagating the Trace Context
@ -46,8 +46,8 @@ ENGINE = URL('http://127.0.0.1:9411/api/v2/spans', 'JSONEachRow')
SETTINGS output_format_json_named_tuples_as_objects = 1,
output_format_json_array_of_rows = 1 AS
SELECT
lower(hex(reinterpretAsFixedString(trace_id))) AS traceId,
lower(hex(parent_span_id)) AS parentId,
lower(hex(trace_id)) AS traceId,
case when parent_span_id = 0 then '' else lower(hex(parent_span_id)) end AS parentId,
lower(hex(span_id)) AS id,
operation_name AS name,
start_time_us AS timestamp,


@ -1807,7 +1807,7 @@ ignoring check result for the source table, and will insert rows lost because of
The setting allows a user to provide their own deduplication semantics in MergeTree/ReplicatedMergeTree.
For example, by providing a unique value for the setting in each INSERT statement,
a user can avoid the same inserted data being deduplicated
a user can avoid the same inserted data being deduplicated.
Possible values:
@ -1815,7 +1815,35 @@ Possible values:
Default value: empty string (disabled)
`insert_deduplication_token` is used for deduplication _only_ when not empty
`insert_deduplication_token` is used for deduplication _only_ when not empty.
Example:
```sql
CREATE TABLE test_table
( A Int64 )
ENGINE = MergeTree
ORDER BY A
SETTINGS non_replicated_deduplication_window = 100;
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (1);
-- the next insert won't be deduplicated because insert_deduplication_token is different
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test1' (1);
-- the next insert will be deduplicated because insert_deduplication_token
-- is the same as one of the previous
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (2);
SELECT * FROM test_table
┌─A─┐
│ 1 │
└───┘
┌─A─┐
│ 1 │
└───┘
```
## max_network_bytes {#settings-max-network-bytes}


@ -27,7 +27,7 @@ The null hypothesis is that means of populations are equal. Normal distribution
**Returned values**
[Tuple](../../../sql-reference/data-types/tuple.md) with two two or four elements (if the optional `confidence_level` is specified)
[Tuple](../../../sql-reference/data-types/tuple.md) with two or four elements (if the optional `confidence_level` is specified)
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).


@ -16,8 +16,8 @@ By default, tables are created only on the current server. Distributed DDL queri
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr1] [compression_codec] [TTL expr1],
name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [compression_codec] [TTL expr2],
...
) ENGINE = engine
```
@ -112,6 +112,13 @@ Materialized expression. Such a column can't be specified for INSERT, because
For an INSERT without a list of columns, these columns are not considered.
In addition, this column is not substituted when using an asterisk in a SELECT query. This is to preserve the invariant that the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
### EPHEMERAL {#ephemeral}
`EPHEMERAL expr`
Ephemeral column. Such a column isn't stored in the table and cannot be SELECTed, but it can be referenced in the default expressions of the CREATE statement.
An INSERT without a list of columns skips such a column, so the SELECT/INSERT invariant is preserved: the dump obtained using `SELECT *` can be inserted back into the table using INSERT without specifying the list of columns.
### ALIAS {#alias}
`ALIAS expr`


@ -1736,6 +1736,48 @@ ClickHouse генерирует исключение:
Т.е. если `INSERT` в основную таблицу д.б. пропущен (сдедуплицирован), то автоматически не будет вставки и в материализованные представления. Это имплементировано для того, чтобы работали материализованные представления, которые сильно группируют данные основных `INSERT`, до такой степени что блоки вставляемые в материализованные представления получаются одинаковыми для разных `INSERT` в основную таблицу.
Одновременно это «ломает» идемпотентность вставки в материализованные представления. Т.е. если `INSERT` был успешен в основную таблицу и неуспешен в таблицу материализованного представления (напр. из-за сетевого сбоя при коммуникации с Zookeeper), клиент получит ошибку и попытается повторить `INSERT`. Но вставки в материализованные представления произведено не будет, потому что дедупликация сработает на основной таблице. Настройка `deduplicate_blocks_in_dependent_materialized_views` позволяет это изменить. Т.е. при повторном `INSERT` будет произведена дедупликация на таблице материализованного представления, и повторный инсерт вставит данные в таблицу материализованного представления, которые не удалось вставить из-за сбоя первого `INSERT`.
## insert_deduplication_token {#insert_deduplication_token}
Этот параметр позволяет пользователю указать собственную семантику дедупликации в MergeTree/ReplicatedMergeTree.
Например, предоставляя уникальное значение параметра в каждом операторе INSERT,
пользователь может избежать дедупликации одних и тех же вставленных данных.
Возможные значения:
- Любая строка
Значение по умолчанию: пустая строка (выключено).
`insert_deduplication_token` используется для дедупликации _только_ когда значение не пустое.
Example:
```sql
CREATE TABLE test_table
( A Int64 )
ENGINE = MergeTree
ORDER BY A
SETTINGS non_replicated_deduplication_window = 100;
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (1);
-- следующая вставка не будет дедуплицирована, потому что insert_deduplication_token отличается
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test1' (1);
-- следующая вставка будет дедуплицирована, потому что insert_deduplication_token
-- тот же самый, что и один из предыдущих
INSERT INTO test_table Values SETTINGS insert_deduplication_token = 'test' (2);
SELECT * FROM test_table
┌─A─┐
│ 1 │
└───┘
┌─A─┐
│ 1 │
└───┘
```
## count_distinct_implementation {#settings-count_distinct_implementation}
Задаёт, какая из функций `uniq*` используется при выполнении конструкции [COUNT(DISTINCT …)](../../sql-reference/aggregate-functions/reference/count.md#agg_function-count).


@ -14,8 +14,8 @@ toc_title: "Таблица"
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1],
name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2],
name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr1] [compression_codec] [TTL expr1],
name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|EPHEMERAL|ALIAS expr2] [compression_codec] [TTL expr2],
...
) ENGINE = engine
```
@ -108,6 +108,13 @@ SELECT x, toTypeName(x) FROM t1;
При INSERT без указания списка столбцов, такие столбцы не рассматриваются.
Также этот столбец не подставляется при использовании звёздочки в запросе SELECT. Это необходимо, чтобы сохранить инвариант, что дамп, полученный путём `SELECT *`, можно вставить обратно в таблицу INSERT-ом без указания списка столбцов.
### EPHEMERAL {#ephemeral}
`EPHEMERAL expr`
Эфемерное выражение. Такой столбец не хранится в таблице и не может быть получен в запросе SELECT, но на него можно ссылаться в выражениях по умолчанию запроса CREATE.
INSERT без списка столбцов игнорирует этот столбец, таким образом сохраняется инвариант - т.е. дамп, полученный путём `SELECT *`, можно вставить обратно в таблицу INSERT-ом без указания списка столбцов.
### ALIAS {#alias}
`ALIAS expr`

File diff suppressed because it is too large.


@ -1,31 +1,125 @@
# 分布 {#distributed}
---
toc_priority: 33
toc_title: 分布式引擎
---
# 分布式引擎 {#distributed}
**分布式引擎本身不存储数据**, 但可以在多个服务器上进行分布式查询。
读是自动并行的。读取时,远程服务器表的索引(如果有的话)会被使用。
分布式引擎参数:服务器配置文件中的集群名,远程数据库名,远程表名,数据分片键(可选)。
示例:
Distributed(logs, default, hits[, sharding_key])
## 创建数据表 {#distributed-creating-a-table}
将会从位于«logs»集群中 default.hits 表所有服务器上读取数据。
远程服务器不仅用于读取数据,还会对尽可能数据做部分处理。
例如,对于使用 GROUP BY 的查询,数据首先在远程服务器聚合,之后返回聚合函数的中间状态给查询请求的服务器。再在请求的服务器上进一步汇总数据。
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
...
) ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]])
[SETTINGS name=value, ...]
```
数据库名参数除了用数据库名之外也可用返回字符串的常量表达式。例如currentDatabase()。
## 已有数据表 {#distributed-from-a-table}
`Distributed` 表指向当前服务器上的一个表时,你可以采用以下语句:
logs 服务器配置文件中的集群名称。
集群示例配置如下:
``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] AS [db2.]name2 ENGINE = Distributed(cluster, database, table[, sharding_key[, policy_name]]) [SETTINGS name=value, ...]
```
**分布式引擎参数**
- `cluster` - 服务器配置中的集群名
- `database` - 远程数据库名
- `table` - 远程数据表名
- `sharding_key` - (可选) 分片key
- `policy_name` - (可选) 规则名,它会被用作存储临时文件以便异步发送数据
**详见**
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) 设置
- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes) 查看示例
**分布式设置**
- `fsync_after_insert` - 对异步插入到分布式的文件数据执行`fsync`。确保操作系统将所有插入的数据刷新到启动节点**磁盘上的一个文件**中。
- `fsync_directories` - 对目录执行`fsync`。保证操作系统在分布式表上进行异步插入相关操作(插入后,发送数据到分片等)后刷新目录元数据.
- `bytes_to_throw_insert` - 如果超过这个数量的压缩字节将等待异步INSERT将抛出一个异常。0 - 不抛出。默认值0.
- `bytes_to_delay_insert` - 如果超过这个数量的压缩字节将等待异步INSERT查询将被延迟。0 - 不要延迟。默认值0.
- `max_delay_to_insert` - 最大延迟多少秒插入数据到分布式表如果有很多挂起字节异步发送。默认值60。
- `monitor_batch_inserts` - 等同于 [distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts)
- `monitor_split_batch_on_failure` - 等同于[distributed_directory_monitor_split_batch_on_failure](../../../operations/settings/settings.md#distributed_directory_monitor_split_batch_on_failure)
- `monitor_sleep_time_ms` - 等同于 [distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms)
- `monitor_max_sleep_time_ms` - 等同于 [distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms)
!!! note "备注"
**稳定性设置** (`fsync_...`):
- 只影响异步插入(例如:`insert_distributed_sync=false`), 当数据首先存储在启动节点磁盘上然后再异步发送到shard。
— 可能会显著降低`insert`的性能
- 影响将存储在分布式表文件夹中的数据写入 **接受您插入的节点** 。如果你需要保证写入数据到底层的MergeTree表中请参阅 `system.merge_tree_settings` 中的持久性设置(`...fsync...`)
**插入限制设置** (`..._insert`) 请见:
- [insert_distributed_sync](../../../operations/settings/settings.md#insert_distributed_sync) 设置
- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) 设置
- `bytes_to_throw_insert``bytes_to_delay_insert` 之前处理,所以你不应该设置它的值小于 `bytes_to_delay_insert`
**示例**
``` sql
CREATE TABLE hits_all AS hits
ENGINE = Distributed(logs, default, hits[, sharding_key[, policy_name]])
SETTINGS
fsync_after_insert=0,
fsync_directories=0;
```
数据将从`logs`集群中的所有服务器中,从位于集群中的每个服务器上的`default.hits`表读取。
数据不仅在远程服务器上读取,而且在远程服务器上进行部分处理(在可能的范围内)。
例如,对于带有 `GROUP BY`的查询,数据将在远程服务器上聚合,聚合函数的中间状态将被发送到请求者服务器。然后将进一步聚合数据。
您可以使用一个返回字符串的常量表达式来代替数据库名称。例如: `currentDatabase()`
## 集群 {#distributed-clusters}
集群是通过[服务器配置文件](../../../operations/configuration-files.md)来配置的
``` xml
<remote_servers>
<logs>
<!-- 分布式查询的服务器间集群密码
默认值:无密码(将不执行身份验证)
如果设置了,那么分布式查询将在分片上验证,所以至少:
- 这样的集群应该存在于shard上
- 这样的集群应该有相同的密码。
而且(这是更重要的)initial_user将作为查询的当前用户使用。
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- 可选的。写数据时分片权重。 默认: 1. -->
<weight>1</weight>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- 可选的。是否只将数据写入其中一个副本。默认值:false(将数据写入所有副本)。 -->
<internal_replication>false</internal_replication>
<replica>
<!-- 可选的。负载均衡副本的优先级请参见load_balancing 设置)。默认值:1(值越小优先级越高)。 -->
<priority>1</priority>
<host>example01-01-1</host>
<port>9000</port>
</replica>
@ -58,6 +152,7 @@ logs 服务器配置文件中的集群名称。
集群名称不能包含点号。
每个服务器需要指定 `host``port`,和可选的 `user``password``secure``compression` 的参数:
- `host` 远程服务器地址。可以域名、IPv4或IPv6。如果指定域名则服务在启动时发起一个 DNS 请求,并且请求结果会在服务器运行期间一直被记录。如果 DNS 请求失败,则服务不会启动。如果你修改了 DNS 记录,则需要重启服务。
- `port` 消息传递的 TCP 端口「tcp_port」配置通常设为 9000。不要跟 http_port 混淆。
- `user` 用于连接远程服务器的用户名。默认值default。该用户必须有权限访问该远程服务器。访问权限配置在 users.xml 文件中。更多信息,请查看«访问权限»部分。
@ -78,9 +173,10 @@ logs 服务器配置文件中的集群名称。
通过分布式引擎可以像使用本地服务器一样使用集群。但是,集群不是自动扩展的:你必须编写集群配置到服务器配置文件中(最好,给所有集群的服务器写上完整配置)。
不支持用分布式表查询别的分布式表(除非该表只有一个分片)。或者说,要用分布表查查询«最终»的数据表。
分布式引擎需要将集群信息写入配置文件。配置文件中的集群信息会即时更新,无需重启服务器。如果你每次是要向不确定的一组分片和副本发送查询,则不适合创建分布式表 - 而应该使用«远程»表函数。 请参阅«表函数»部分。
## 写入数据
向集群写数据的方法有两种:
一,自已指定要将哪些数据写入哪些服务器,并直接在每个分片上执行写入。换句话说,在分布式表上«查询»,在数据表上 INSERT。
@ -111,10 +207,30 @@ SELECT 查询会被发送到所有分片,并且无论数据在分片中如何
- 使用需要特定键连接数据( IN 或 JOIN )的查询。如果数据是用该键进行分片,则应使用本地 IN 或 JOIN 而不是 GLOBAL IN 或 GLOBAL JOIN这样效率更高。
- 使用大量服务器(上百或更多),但有大量小查询(个别客户的查询 - 网站,广告商或合作伙伴)。为了使小查询不影响整个集群,让单个客户的数据处于单个分片上是有意义的。或者,正如我们在 Yandex.Metrica 中所做的那样,你可以配置两级分片:将整个集群划分为«层»,一个层可以包含多个分片。单个客户的数据位于单个层上,根据需要将分片添加到层中,层中的数据随机分布。然后给每层创建分布式表,再创建一个全局的分布式表用于全局的查询。
数据是异步写入的。对于分布式表的 INSERT数据块只写本地文件系统。之后会尽快地在后台发送到远程服务器。你可以通过查看表目录中的文件列表(等待发送的数据)来检查数据是否成功发送:/var/lib/clickhouse/data/database/table/
数据是异步写入的。对于分布式表的 INSERT数据块只写本地文件系统。之后会尽快地在后台发送到远程服务器。发送数据的周期性是由[distributed_directory_monitor_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_sleep_time_ms)和[distributed_directory_monitor_max_sleep_time_ms](../../../operations/settings/settings.md#distributed_directory_monitor_max_sleep_time_ms)设置。分布式引擎会分别发送每个插入数据的文件,但是你可以使用[distributed_directory_monitor_batch_inserts](../../../operations/settings/settings.md#distributed_directory_monitor_batch_inserts)设置启用批量发送文件。该设置通过更好地利用本地服务器和网络资源来提高集群性能。你应该检查表目录`/var/lib/clickhouse/data/database/table/`中的文件列表(等待发送的数据)来检查数据是否发送成功。执行后台任务的线程数可以通过[background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size)设置
如果在 INSERT 到分布式表时服务器节点丢失或重启设备故障则插入的数据可能会丢失。如果在表目录中检测到损坏的数据分片则会将其转移到«broken»子目录并不再使用。
启用 max_parallel_replicas 选项后会在分表的所有副本上并行查询处理。更多信息请参阅«设置max_parallel_replicas»部分。
## 读取数据 {#distributed-reading-data}
当查询一个`Distributed`表时,`SELECT`查询被发送到所有的分片,不管数据是如何分布在分片上的(它们可以完全随机分布)。当您添加一个新分片时,您不必将旧数据传输到它。相反,您可以使用更重的权重向其写入新数据——数据的分布会稍微不均匀,但查询将正确有效地工作。
当启用`max_parallel_replicas`选项时,查询处理将在单个分片中的所有副本之间并行化。更多信息,请参见[max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas)。
要了解更多关于分布式`in`和`global in`查询是如何处理的,请参考[这里](../../../sql-reference/operators/in.md#select-distributed-subqueries)文档。
## 虚拟列 {#virtual-columns}
- `_shard_num` — 表`system.clusters` 中的 `shard_num` 值 . 数据类型: [UInt32](../../../sql-reference/data-types/int-uint.md).
!!! note "备注"
因为 [remote](../../../sql-reference/table-functions/remote.md) 和 [cluster](../../../sql-reference/table-functions/cluster.md) 表方法内部创建了分布式表, `_shard_num` 对他们都有效.
**详见**
- [虚拟列](../../../engines/table-engines/index.md#table_engines-virtual_columns) 描述
- [background_distributed_schedule_pool_size](../../../operations/settings/settings.md#background_distributed_schedule_pool_size) 设置
- [shardNum()](../../../sql-reference/functions/other-functions.md#shard-num) 和 [shardCount()](../../../sql-reference/functions/other-functions.md#shard-count) 方法
[原始文章](https://clickhouse.com/docs/en/operations/table_engines/distributed/) <!--hide-->


@ -66,14 +66,14 @@ sudo yum install clickhouse-server clickhouse-client
所需的版本可以通过`curl`或`wget`从存储库`https://repo.clickhouse.com/tgz/`下载。
下载后解压缩下载资源文件并使用安装脚本进行安装。以下是一个最新版本的安装示例:
下载后解压缩下载资源文件并使用安装脚本进行安装。以下是一个最新稳定版本的安装示例:
``` bash
export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
curl -O https://repo.clickhouse.com/tgz/clickhouse-common-static-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/clickhouse-server-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/clickhouse-client-$LATEST_VERSION.tgz
export LATEST_VERSION=`curl https://api.github.com/repos/ClickHouse/ClickHouse/tags 2>/dev/null | grep stable | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -n 1`
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-common-static-dbg-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-server-$LATEST_VERSION.tgz
curl -O https://repo.clickhouse.com/tgz/stable/clickhouse-client-$LATEST_VERSION.tgz
tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh


@ -1439,7 +1439,7 @@ f9725a22f9191e064120d718e26862a9 -
如果您通过[Client](../interfaces/cli.md) 在 [交互模式](https://clickhouse.com/docs/zh/interfaces/cli/#cli_usage)下输入或输出数据,格式架构中指定的文件名可以使用绝对路径或客户端当前目录的相对路径。
如果在[批处理模式](https://clickhouse.com/docs/zh/interfaces/cli/#cli_usage)下使用客户端,则由于安全原因,架构的路径必须使用相对路径。
如果您通过 HTTP接口](../interfaces/http.md)输入或输出数据,格式架构中指定的文件名应该位于服务器设置的[format_schema_path](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-format_schema_path)指定的目录中。
如果您通过 [HTTP接口](../interfaces/http.md)输入或输出数据,格式架构中指定的文件名应该位于服务器设置的[format_schema_path](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-format_schema_path)指定的目录中。
## 跳过错误 {#skippingerrors}


@ -1 +0,0 @@
../../../../en/sql-reference/statements/alter/constraint.md


@ -0,0 +1,22 @@
---
toc_priority: 43
toc_title: 约束
---
# 操作约束 {#manipulations-with-constraints}
约束可以使用以下语法添加或删除:
``` sql
ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
```
查看[constraints](../../../sql-reference/statements/create/table.md#constraints)。
查询将从表中添加或删除关于约束的元数据,因此它们将被立即处理。
!!! warning "警告"
如果已有数据被添加,约束检查**将不会被执行**。
复制表上的所有更改都会被广播到ZooKeeper并应用到其他副本上。


@ -1 +0,0 @@
../../../../en/sql-reference/statements/alter/view.md


@ -0,0 +1,44 @@
---
toc_priority: 50
toc_title: VIEW
---
# ALTER TABLE … MODIFY QUERY 语句 {#alter-modify-query}
当使用`ALTER TABLE … MODIFY QUERY`语句创建一个[物化视图](../create/view.md#materialized)时,可以修改`SELECT`查询。当物化视图在没有 `TO [db.]name` 的情况下创建时使用它。必须启用 `allow_experimental_alter_materialized_view_structure`设置。
如果一个物化视图使用`TO [db.]name`,你必须先 [DETACH](../detach.md) 视图。用[ALTER TABLE](index.md)修改目标表,然后 [ATTACH](../attach.md)之前分离的(`DETACH`)视图。
**示例**
```sql
CREATE TABLE src_table (`a` UInt32) ENGINE = MergeTree ORDER BY a;
CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table;
INSERT INTO src_table (a) VALUES (1), (2);
SELECT * FROM mv;
```
```text
┌─a─┐
│ 1 │
│ 2 │
└───┘
```
```sql
ALTER TABLE mv MODIFY QUERY SELECT a * 2 as a FROM src_table;
INSERT INTO src_table (a) VALUES (3), (4);
SELECT * FROM mv;
```
```text
┌─a─┐
│ 6 │
│ 8 │
└───┘
┌─a─┐
│ 1 │
│ 2 │
└───┘
```
## ALTER LIVE VIEW 语句 {#alter-live-view}
`ALTER LIVE VIEW ... REFRESH` 语句刷新一个 [实时视图](../create/view.md#live-view). 参见 [强制实时视图刷新](../create/view.md#live-view-alter-refresh).


@ -481,29 +481,25 @@ catch (...)
void Client::connect()
{
UInt16 default_port = ConnectionParameters::getPortFromConfig(config());
connection_parameters = ConnectionParameters(config(), hosts_ports[0].host,
hosts_ports[0].port.value_or(default_port));
String server_name;
UInt64 server_version_major = 0;
UInt64 server_version_minor = 0;
UInt64 server_version_patch = 0;
for (size_t attempted_address_index = 0; attempted_address_index < hosts_ports.size(); ++attempted_address_index)
for (size_t attempted_address_index = 0; attempted_address_index < hosts_and_ports.size(); ++attempted_address_index)
{
connection_parameters.host = hosts_ports[attempted_address_index].host;
connection_parameters.port = hosts_ports[attempted_address_index].port.value_or(default_port);
if (is_interactive)
std::cout << "Connecting to "
<< (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at "
: "")
<< connection_parameters.host << ":" << connection_parameters.port
<< (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl;
try
{
connection_parameters
= ConnectionParameters(config(), hosts_and_ports[attempted_address_index].host, hosts_and_ports[attempted_address_index].port);
if (is_interactive)
std::cout << "Connecting to "
<< (!connection_parameters.default_database.empty() ? "database " + connection_parameters.default_database + " at "
: "")
<< connection_parameters.host << ":" << connection_parameters.port
<< (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl;
connection = Connection::createConnection(connection_parameters, global_context);
if (max_client_network_bandwidth)
@ -535,7 +531,7 @@ void Client::connect()
}
else
{
if (attempted_address_index == hosts_ports.size() - 1)
if (attempted_address_index == hosts_and_ports.size() - 1)
throw;
if (is_interactive)
@ -994,11 +990,6 @@ void Client::addOptions(OptionsDescription & options_description)
/// Main commandline options related to client functionality and all parameters from Settings.
options_description.main_description->add_options()
("config,c", po::value<std::string>(), "config-file path (another shorthand)")
("host,h", po::value<std::vector<HostPort>>()->multitoken()->default_value({{"localhost"}}, "localhost"),
"list of server hosts with optionally assigned port to connect. List elements are separated by a space."
"Every list element looks like '<host>[:<port>]'. If port isn't assigned, connection is made by port from '--port' param"
"Example of usage: '-h host1:1 host2 host3:3'")
("port", po::value<int>()->default_value(9000), "server port, which is default port for every host from '--host' param")
("secure,s", "Use TLS connection")
("user,u", po::value<std::string>()->default_value("default"), "user")
/** If "--password [value]" is used but the value is omitted, the bad argument exception will be thrown.
@ -1044,12 +1035,24 @@ void Client::addOptions(OptionsDescription & options_description)
(
"types", po::value<std::string>(), "types"
);
/// Commandline options related to hosts and ports.
options_description.hosts_and_ports_description.emplace(createOptionsDescription("Hosts and ports options", terminal_width));
options_description.hosts_and_ports_description->add_options()
("host,h", po::value<String>()->default_value("localhost"),
"Server hostname. Multiple hosts can be passed via multiple arguments"
"Example of usage: '--host host1 --host host2 --port port2 --host host3 ...'"
"Each '--port port' will be attached to the last seen host that doesn't have a port yet,"
"if there is no such host, the port will be attached to the next first host or to default host.")
("port", po::value<UInt16>()->default_value(DBMS_DEFAULT_PORT), "server ports")
;
}
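
As context for the `Client::connect` hunk above: connection parameters are now rebuilt for every entry of `hosts_and_ports`, and the exception is rethrown only when the last entry also fails. A standalone sketch of that retry pattern follows; `Endpoint` and `connectAny` are hypothetical names and the failure is simulated.

```cpp
/// Standalone sketch of the connect loop: try each endpoint in order, fall
/// through to the next on failure, rethrow only when the last one fails too.
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct Endpoint { std::string host; uint16_t port; };

static void connectAny(const std::vector<Endpoint> & endpoints)
{
    for (size_t i = 0; i < endpoints.size(); ++i)
    {
        try
        {
            std::cout << "Connecting to " << endpoints[i].host << ':' << endpoints[i].port << "." << std::endl;
            if (endpoints[i].host != "good-host")      /// stand-in for a failed connection attempt
                throw std::runtime_error("connection refused");
            std::cout << "Connected." << std::endl;
            return;
        }
        catch (const std::exception & e)
        {
            if (i == endpoints.size() - 1)
                throw;                                 /// last endpoint: give up
            std::cout << e.what() << ", trying the next host" << std::endl;
        }
    }
}

int main()
{
    connectAny({{"bad-host", 9000}, {"good-host", 9000}});
}
```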
void Client::processOptions(const OptionsDescription & options_description,
const CommandLineOptions & options,
const std::vector<Arguments> & external_tables_arguments)
const std::vector<Arguments> & external_tables_arguments,
const std::vector<Arguments> & hosts_and_ports_arguments)
{
namespace po = boost::program_options;
@ -1081,6 +1084,25 @@ void Client::processOptions(const OptionsDescription & options_description,
exit(exit_code);
}
}
if (hosts_and_ports_arguments.empty())
{
hosts_and_ports.emplace_back(HostAndPort{"localhost", DBMS_DEFAULT_PORT});
}
else
{
for (const auto & hosts_and_ports_argument : hosts_and_ports_arguments)
{
/// Parse commandline options related to hosts and ports.
po::parsed_options parsed_hosts_and_ports
= po::command_line_parser(hosts_and_ports_argument).options(options_description.hosts_and_ports_description.value()).run();
po::variables_map host_and_port_options;
po::store(parsed_hosts_and_ports, host_and_port_options);
hosts_and_ports.emplace_back(
HostAndPort{host_and_port_options["host"].as<std::string>(), host_and_port_options["port"].as<UInt16>()});
}
}
send_external_tables = true;
shared_context = Context::createShared();
@ -1105,12 +1127,8 @@ void Client::processOptions(const OptionsDescription & options_description,
if (options.count("config"))
config().setString("config-file", options["config"].as<std::string>());
if (options.count("host"))
hosts_ports = options["host"].as<std::vector<HostPort>>();
if (options.count("interleave-queries-file"))
interleave_queries_files = options["interleave-queries-file"].as<std::vector<std::string>>();
if (options.count("port") && !options["port"].defaulted())
config().setInt("port", options["port"].as<int>());
if (options.count("secure"))
config().setBool("secure", true);
if (options.count("user") && !options["user"].defaulted())


@ -25,8 +25,11 @@ protected:
void printHelpMessage(const OptionsDescription & options_description) override;
void addOptions(OptionsDescription & options_description) override;
void processOptions(const OptionsDescription & options_description, const CommandLineOptions & options,
const std::vector<Arguments> & external_tables_arguments) override;
void processOptions(
const OptionsDescription & options_description,
const CommandLineOptions & options,
const std::vector<Arguments> & external_tables_arguments,
const std::vector<Arguments> & hosts_and_ports_arguments) override;
void processConfig() override;
private:


@ -775,7 +775,7 @@ void LocalServer::applyCmdOptions(ContextMutablePtr context)
}
void LocalServer::processOptions(const OptionsDescription &, const CommandLineOptions & options, const std::vector<Arguments> &)
void LocalServer::processOptions(const OptionsDescription &, const CommandLineOptions & options, const std::vector<Arguments> &, const std::vector<Arguments> &)
{
if (options.count("table"))
config().setString("table-name", options["table"].as<std::string>());


@ -41,7 +41,7 @@ protected:
void addOptions(OptionsDescription & options_description) override;
void processOptions(const OptionsDescription & options_description, const CommandLineOptions & options,
const std::vector<Arguments> &) override;
const std::vector<Arguments> &, const std::vector<Arguments> &) override;
void processConfig() override;
private:


@ -1721,7 +1721,12 @@ void ClientBase::showClientVersion()
}
void ClientBase::readArguments(int argc, char ** argv, Arguments & common_arguments, std::vector<Arguments> & external_tables_arguments)
void ClientBase::readArguments(
int argc,
char ** argv,
Arguments & common_arguments,
std::vector<Arguments> & external_tables_arguments,
std::vector<Arguments> & hosts_and_ports_arguments)
{
/** We allow different groups of arguments:
* - common arguments;
@ -1732,6 +1737,10 @@ void ClientBase::readArguments(int argc, char ** argv, Arguments & common_argume
*/
bool in_external_group = false;
std::string prev_host_arg;
std::string prev_port_arg;
for (int arg_num = 1; arg_num < argc; ++arg_num)
{
const char * arg = argv[arg_num];
@ -1792,10 +1801,74 @@ void ClientBase::readArguments(int argc, char ** argv, Arguments & common_argume
query_parameters.emplace(String(param_continuation), String(arg));
}
}
else if (startsWith(arg, "--host") || startsWith(arg, "-h"))
{
std::string host_arg;
/// --host host
if (arg == "--host"sv || arg == "-h"sv)
{
++arg_num;
if (arg_num >= argc)
throw Exception("Host argument requires value", ErrorCodes::BAD_ARGUMENTS);
arg = argv[arg_num];
host_arg = "--host=";
host_arg.append(arg);
}
else
host_arg = arg;
/// --port port1 --host host1
if (!prev_port_arg.empty())
{
hosts_and_ports_arguments.push_back({host_arg, prev_port_arg});
prev_port_arg.clear();
}
else
{
/// --host host1 --host host2
if (!prev_host_arg.empty())
hosts_and_ports_arguments.push_back({prev_host_arg});
prev_host_arg = host_arg;
}
}
else if (startsWith(arg, "--port"))
{
std::string port_arg = arg;
/// --port port
if (arg == "--port"sv)
{
port_arg.push_back('=');
++arg_num;
if (arg_num >= argc)
throw Exception("Port argument requires value", ErrorCodes::BAD_ARGUMENTS);
arg = argv[arg_num];
port_arg.append(arg);
}
/// --host host1 --port port1
if (!prev_host_arg.empty())
{
hosts_and_ports_arguments.push_back({port_arg, prev_host_arg});
prev_host_arg.clear();
}
else
{
/// --port port1 --port port2
if (!prev_port_arg.empty())
hosts_and_ports_arguments.push_back({prev_port_arg});
prev_port_arg = port_arg;
}
}
else
common_arguments.emplace_back(arg);
}
}
if (!prev_host_arg.empty())
hosts_and_ports_arguments.push_back({prev_host_arg});
if (!prev_port_arg.empty())
hosts_and_ports_arguments.push_back({prev_port_arg});
}
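
The `readArguments` additions above implement the pairing rule described in the new `--host`/`--port` help text: each `--port` attaches to the most recent `--host` that has no port yet, and leftovers form their own groups. Below is a standalone sketch of just that grouping logic, with made-up names and without boost::program_options.

```cpp
/// Standalone sketch (not the ClickHouse parser) of the --host/--port pairing
/// rule: every "--port" is attached to the most recent "--host" without a
/// port; a trailing unpaired host or port forms its own group.
#include <iostream>
#include <string>
#include <vector>

using Arguments = std::vector<std::string>;

static std::vector<Arguments> pairHostsAndPorts(const std::vector<std::string> & args)
{
    std::vector<Arguments> groups;
    std::string prev_host;
    std::string prev_port;

    for (size_t i = 0; i < args.size(); ++i)
    {
        if (args[i] == "--host" && i + 1 < args.size())
        {
            std::string host_arg = "--host=" + args[++i];
            if (!prev_port.empty())             /// --port p --host h  =>  one group
            {
                groups.push_back({host_arg, prev_port});
                prev_port.clear();
            }
            else
            {
                if (!prev_host.empty())         /// --host h1 --host h2  =>  h1 stays alone
                    groups.push_back({prev_host});
                prev_host = host_arg;
            }
        }
        else if (args[i] == "--port" && i + 1 < args.size())
        {
            std::string port_arg = "--port=" + args[++i];
            if (!prev_host.empty())             /// --host h --port p  =>  one group
            {
                groups.push_back({port_arg, prev_host});
                prev_host.clear();
            }
            else
            {
                if (!prev_port.empty())
                    groups.push_back({prev_port});
                prev_port = port_arg;
            }
        }
    }
    if (!prev_host.empty()) groups.push_back({prev_host});
    if (!prev_port.empty()) groups.push_back({prev_port});
    return groups;
}

int main()
{
    auto groups = pairHostsAndPorts({"--host", "h1", "--host", "h2", "--port", "9001", "--host", "h3"});
    for (const auto & group : groups)           /// expected: [--host=h1], [--port=9001 --host=h2], [--host=h3]
    {
        for (const auto & arg : group) std::cout << arg << ' ';
        std::cout << '\n';
    }
}
```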
void ClientBase::parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments)
@ -1838,8 +1911,9 @@ void ClientBase::init(int argc, char ** argv)
Arguments common_arguments{""}; /// 0th argument is ignored.
std::vector<Arguments> external_tables_arguments;
std::vector<Arguments> hosts_and_ports_arguments;
readArguments(argc, argv, common_arguments, external_tables_arguments);
readArguments(argc, argv, common_arguments, external_tables_arguments, hosts_and_ports_arguments);
po::variables_map options;
OptionsDescription options_description;
@ -1929,7 +2003,7 @@ void ClientBase::init(int argc, char ** argv)
/// Output of help message.
if (options.count("help")
|| (options.count("host") && options["host"].as<std::vector<HostPort>>()[0].host == "elp")) /// If user writes -help instead of --help.
|| (options.count("host") && options["host"].as<std::string>() == "elp")) /// If user writes -help instead of --help.
{
printHelpMessage(options_description);
exit(0);
@ -1992,7 +2066,7 @@ void ClientBase::init(int argc, char ** argv)
profile_events.print = options.count("print-profile-events");
profile_events.delay_ms = options["profile-events-delay-ms"].as<UInt64>();
processOptions(options_description, options, external_tables_arguments);
processOptions(options_description, options, external_tables_arguments, hosts_and_ports_arguments);
argsToConfig(common_arguments, config(), 100);
clearPasswordFromCommandLine(argc, argv);


@ -92,13 +92,15 @@ protected:
{
std::optional<ProgramOptionsDescription> main_description;
std::optional<ProgramOptionsDescription> external_description;
std::optional<ProgramOptionsDescription> hosts_and_ports_description;
};
virtual void printHelpMessage(const OptionsDescription & options_description) = 0;
virtual void addOptions(OptionsDescription & options_description) = 0;
virtual void processOptions(const OptionsDescription & options_description,
const CommandLineOptions & options,
const std::vector<Arguments> & external_tables_arguments) = 0;
const std::vector<Arguments> & external_tables_arguments,
const std::vector<Arguments> & hosts_and_ports_arguments) = 0;
virtual void processConfig() = 0;
protected:
@ -134,7 +136,12 @@ private:
void resetOutput();
void outputQueryInfo(bool echo_query_);
void readArguments(int argc, char ** argv, Arguments & common_arguments, std::vector<Arguments> & external_tables_arguments);
void readArguments(
int argc,
char ** argv,
Arguments & common_arguments,
std::vector<Arguments> & external_tables_arguments,
std::vector<Arguments> & hosts_and_ports_arguments);
void parseAndCheckOptions(OptionsDescription & options_description, po::variables_map & options, Arguments & arguments);
void updateSuggest(const ASTCreateQuery & ast_create);
@ -245,24 +252,13 @@ protected:
QueryProcessingStage::Enum query_processing_stage;
struct HostPort
struct HostAndPort
{
String host;
std::optional<UInt16> port{};
friend std::istream & operator>>(std::istream & in, HostPort & hostPort)
{
String host_with_port;
in >> host_with_port;
DB::DNSResolver & resolver = DB::DNSResolver::instance();
std::pair<Poco::Net::IPAddress, std::optional<UInt16>>
host_and_port = resolver.resolveHostOrAddress(host_with_port);
hostPort.host = host_and_port.first.toString();
hostPort.port = host_and_port.second;
return in;
}
UInt16 port;
};
std::vector<HostPort> hosts_ports{};
std::vector<HostAndPort> hosts_and_ports{};
};
}


@ -133,7 +133,7 @@ void MultiplexedConnections::sendQuery(
modified_settings.group_by_two_level_threshold_bytes = 0;
}
if (settings.allow_experimental_parallel_reading_from_replicas)
if (settings.max_parallel_replicas > 1 && settings.allow_experimental_parallel_reading_from_replicas)
{
client_info.collaborate_with_initiator = true;
client_info.count_participating_replicas = replica_info.all_replicas_count;
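
A small sketch of the tightened condition above, under hypothetical names: replicas only collaborate with the initiator when the experimental setting is enabled and more than one parallel replica is actually allowed.

```cpp
/// Standalone sketch: the collaboration flags are only set when the
/// experimental setting is on AND more than one replica may participate.
#include <cassert>
#include <cstdint>

struct SettingsSketch
{
    uint64_t max_parallel_replicas = 1;
    bool allow_experimental_parallel_reading_from_replicas = false;
};

static bool collaborateWithInitiator(const SettingsSketch & settings)
{
    return settings.max_parallel_replicas > 1
        && settings.allow_experimental_parallel_reading_from_replicas;
}

int main()
{
    assert(!collaborateWithInitiator({1, true}));   /// single replica: nothing to coordinate
    assert(!collaborateWithInitiator({8, false}));  /// feature disabled
    assert(collaborateWithInitiator({8, true}));
}
```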


@ -91,4 +91,14 @@ FilterDescription::FilterDescription(const IColumn & column_)
ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);
}
SparseFilterDescription::SparseFilterDescription(const IColumn & column)
{
const auto * column_sparse = typeid_cast<const ColumnSparse *>(&column);
if (!column_sparse || !typeid_cast<const ColumnUInt8 *>(&column_sparse->getValuesColumn()))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER,
"Illegal type {} of column for sparse filter. Must be Sparse(UInt8)", column.getName());
filter_indices = &column_sparse->getOffsetsColumn();
}
}


@ -1,6 +1,7 @@
#pragma once
#include <Columns/IColumn.h>
#include <Columns/ColumnsCommon.h>
namespace DB
@ -15,20 +16,37 @@ struct ConstantFilterDescription
bool always_false = false;
bool always_true = false;
ConstantFilterDescription() {}
ConstantFilterDescription() = default;
explicit ConstantFilterDescription(const IColumn & column);
};
struct IFilterDescription
{
virtual ColumnPtr filter(const IColumn & column, ssize_t result_size_hint) const = 0;
virtual size_t countBytesInFilter() const = 0;
virtual ~IFilterDescription() = default;
};
/// Obtain a filter from non constant Column, that may have type: UInt8, Nullable(UInt8).
struct FilterDescription
struct FilterDescription final : public IFilterDescription
{
const IColumn::Filter * data = nullptr; /// Pointer to filter when it is not always true or always false.
ColumnPtr data_holder; /// If new column was generated, it will be owned by holder.
explicit FilterDescription(const IColumn & column);
ColumnPtr filter(const IColumn & column, ssize_t result_size_hint) const override { return column.filter(*data, result_size_hint); }
size_t countBytesInFilter() const override { return DB::countBytesInFilter(*data); }
};
struct SparseFilterDescription final : public IFilterDescription
{
const IColumn * filter_indices = nullptr;
explicit SparseFilterDescription(const IColumn & column);
ColumnPtr filter(const IColumn & column, ssize_t) const override { return column.index(*filter_indices, 0); }
size_t countBytesInFilter() const override { return filter_indices->size(); }
};
struct ColumnWithTypeAndName;
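
To make the new interface split concrete, here is a standalone illustration (not ClickHouse's real column classes): both filter kinds expose the same `filter`/`countBytesInFilter` interface, but the dense one scans a byte mask while the sparse one gathers stored row indices.

```cpp
/// Standalone sketch: dense vs. sparse filter behind one interface.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using Column = std::vector<int64_t>;

struct IFilterSketch
{
    virtual Column filter(const Column & column) const = 0;
    virtual size_t countBytesInFilter() const = 0;
    virtual ~IFilterSketch() = default;
};

struct DenseFilterSketch final : IFilterSketch
{
    std::vector<uint8_t> mask;                  /// one byte per row, as in IColumn::Filter

    Column filter(const Column & column) const override
    {
        Column result;
        for (size_t i = 0; i < column.size(); ++i)
            if (mask[i])
                result.push_back(column[i]);
        return result;
    }

    size_t countBytesInFilter() const override
    {
        size_t count = 0;
        for (uint8_t b : mask)
            count += (b != 0);
        return count;
    }
};

struct SparseFilterSketch final : IFilterSketch
{
    std::vector<size_t> indices;                /// offsets of rows that pass the filter

    Column filter(const Column & column) const override
    {
        Column result;
        for (size_t i : indices)
            result.push_back(column[i]);
        return result;
    }

    size_t countBytesInFilter() const override { return indices.size(); }
};

int main()
{
    Column col{10, 20, 30, 40};

    DenseFilterSketch dense;
    dense.mask = {1, 0, 1, 0};

    SparseFilterSketch sparse;
    sparse.indices = {0, 2};

    const IFilterSketch * filters[] = {&dense, &sparse};
    for (const IFilterSketch * f : filters)
        std::cout << f->filter(col).size() << " rows pass, "
                  << f->countBytesInFilter() << " set in filter" << std::endl;
}
```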


@ -202,45 +202,6 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, U
return Poco::Net::SocketAddress(impl->cache_host(host).front(), port);
}
std::pair<Poco::Net::IPAddress, std::optional<UInt16>> DNSResolver::resolveHostOrAddress(const std::string & host_and_port)
{
Poco::Net::IPAddress ip;
size_t number_of_colons = std::count(host_and_port.begin(), host_and_port.end(), ':');
if (number_of_colons > 1)
{
/// IPv6 host
if (host_and_port.starts_with('['))
{
size_t close_bracket_pos = host_and_port.find(']');
assert(close_bracket_pos != std::string::npos);
ip = resolveHost(host_and_port.substr(0, close_bracket_pos));
if (close_bracket_pos == host_and_port.size() - 1)
return {ip, std::nullopt};
if (host_and_port[close_bracket_pos + 1] != ':')
throw Exception("Missing delimiter between host and port", ErrorCodes::BAD_ARGUMENTS);
unsigned int port;
if (!Poco::NumberParser::tryParseUnsigned(host_and_port.substr(close_bracket_pos + 2), port))
throw Exception("Port must be numeric", ErrorCodes::BAD_ARGUMENTS);
if (port > 0xFFFF)
throw Exception("Port must be less 0xFFFF", ErrorCodes::BAD_ARGUMENTS);
return {ip, port};
}
return {resolveHost(host_and_port), std::nullopt};
}
else if (number_of_colons == 1)
{
/// IPv4 host with port
Poco::Net::SocketAddress socket = resolveAddress(host_and_port);
return {socket.host(), socket.port()};
}
/// IPv4 host
return {resolveHost(host_and_port), std::nullopt};
}
String DNSResolver::reverseResolve(const Poco::Net::IPAddress & address)
{
if (impl->disable_cache)


@ -34,10 +34,6 @@ public:
Poco::Net::SocketAddress resolveAddress(const std::string & host, UInt16 port);
/// Accepts host names like 'example.com'/'example.com:port' or '127.0.0.1'/'127.0.0.1:port' or '::1'/'[::1]:port'
/// and resolves its IP and port, if port is set
std::pair<Poco::Net::IPAddress, std::optional<UInt16>> resolveHostOrAddress(const std::string & host_and_port);
/// Accepts host IP and resolves its host name
String reverseResolve(const Poco::Net::IPAddress & address);


@ -6,8 +6,10 @@
using namespace std::chrono_literals;
constexpr std::chrono::microseconds ZERO_MICROSEC = 0us;
OvercommitTracker::OvercommitTracker()
: max_wait_time(0us)
: max_wait_time(ZERO_MICROSEC)
, picked_tracker(nullptr)
, cancelation_state(QueryCancelationState::NONE)
{}
@ -22,6 +24,9 @@ bool OvercommitTracker::needToStopQuery(MemoryTracker * tracker)
{
std::unique_lock<std::mutex> lk(overcommit_m);
if (max_wait_time == ZERO_MICROSEC)
return true;
pickQueryToExclude();
assert(cancelation_state == QueryCancelationState::RUNNING);
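
A tiny sketch of the guard added above, with a hypothetical struct name: a zero `max_wait_time` means overcommit waiting is disabled, so the caller is told to stop the query immediately instead of entering the pick-and-wait logic.

```cpp
/// Tiny standalone sketch: a zero wait budget means "do not wait at all".
#include <cassert>
#include <chrono>

struct OvercommitTrackerSketch
{
    std::chrono::microseconds max_wait_time{0};

    bool needToStopQuery() const
    {
        if (max_wait_time == std::chrono::microseconds::zero())
            return true;     /// overcommit waiting disabled: stop the query immediately
        return false;        /// the real tracker would pick a victim query and wait here
    }
};

int main()
{
    assert(OvercommitTrackerSketch{}.needToStopQuery());
    assert(!OvercommitTrackerSketch{std::chrono::microseconds(500)}.needToStopQuery());
}
```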


@ -30,6 +30,7 @@ void CachedCompressedReadBuffer::initInput()
void CachedCompressedReadBuffer::prefetch()
{
initInput();
file_in->prefetch();
}


@ -61,14 +61,14 @@ public:
void setReadUntilPosition(size_t position) override
{
if (file_in)
file_in->setReadUntilPosition(position);
initInput();
file_in->setReadUntilPosition(position);
}
void setReadUntilEnd() override
{
if (file_in)
file_in->setReadUntilEnd();
initInput();
file_in->setReadUntilEnd();
}
};
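
Both hunks above apply the same lazy-initialization pattern: every entry point that touches `file_in` first ensures it exists, instead of silently skipping the call when it has not been created yet. A standalone sketch under made-up names:

```cpp
/// Standalone sketch of the lazy-initialization pattern (made-up names).
#include <cstddef>
#include <cstdio>
#include <memory>

struct FileStub
{
    void prefetch() { std::puts("prefetch"); }
    void setReadUntilPosition(std::size_t position) { std::printf("read until %zu\n", position); }
};

struct CachedReaderSketch
{
    std::unique_ptr<FileStub> file_in;

    void initInput()
    {
        if (!file_in)
            file_in = std::make_unique<FileStub>();
    }

    void prefetch()
    {
        initInput();               /// previously file_in could still be null here
        file_in->prefetch();
    }

    void setReadUntilPosition(std::size_t position)
    {
        initInput();               /// previously guarded by `if (file_in)` and silently skipped
        file_in->setReadUntilPosition(position);
    }
};

int main()
{
    CachedReaderSketch reader;
    reader.setReadUntilPosition(4096);   /// works even before anything was read
    reader.prefetch();
}
```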


@ -54,11 +54,6 @@ ChangelogFileDescription getChangelogFileDescription(const std::string & path_st
return result;
}
LogEntryPtr makeClone(const LogEntryPtr & entry)
{
return cs_new<nuraft::log_entry>(entry->get_term(), nuraft::buffer::clone(entry->get_buf()), entry->get_val_type());
}
Checksum computeRecordChecksum(const ChangelogRecord & record)
{
SipHash hash;
@ -519,7 +514,7 @@ void Changelog::appendEntry(uint64_t index, const LogEntryPtr & log_entry)
rotate(index);
current_writer->appendRecord(buildRecord(index, log_entry));
logs[index] = makeClone(log_entry);
logs[index] = log_entry;
max_log_id = index;
}


@ -69,7 +69,7 @@ public:
static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false);
/// will throw when the table we want to attach already exists (in active / detached / detached permanently form)
void checkMetadataFilenameAvailability(const String & to_table_name) const;
void checkMetadataFilenameAvailability(const String & to_table_name) const override;
void checkMetadataFilenameAvailabilityUnlocked(const String & to_table_name, std::unique_lock<std::mutex> &) const;
void modifySettingsMetadata(const SettingsChanges & settings_changes, ContextPtr query_context);


@ -158,9 +158,14 @@ public:
virtual void startupTables(ThreadPool & /*thread_pool*/, bool /*force_restore*/, bool /*force_attach*/) {}
/// Check the existence of the table.
/// Check the existence of the table in memory (attached).
virtual bool isTableExist(const String & name, ContextPtr context) const = 0;
/// Check the existence of the table in any state (in active / detached / detached permanently state).
/// Throws exception when table exists.
virtual void checkMetadataFilenameAvailability(const String & /*table_name*/) const {}
/// Get the table for work. Return nullptr if there is no table.
virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0;


@ -66,7 +66,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskAzureBlobStorage::readFile(
std::optional<size_t>) const
{
auto settings = current_settings.get();
auto metadata = readMeta(path);
auto metadata = readMetadata(path);
LOG_TEST(log, "Read from file by path: {}", backQuote(metadata_disk->getPath() + path));
@ -94,7 +94,6 @@ std::unique_ptr<WriteBufferFromFileBase> DiskAzureBlobStorage::writeFile(
size_t buf_size,
WriteMode mode)
{
auto metadata = readOrCreateMetaForWriting(path, mode);
auto blob_path = path + "_" + getRandomASCIIString(8); /// NOTE: path contains the tmp_* prefix in the blob name
LOG_TRACE(log, "{} to file by path: {}. AzureBlob Storage path: {}",
@ -106,7 +105,12 @@ std::unique_ptr<WriteBufferFromFileBase> DiskAzureBlobStorage::writeFile(
current_settings.get()->max_single_part_upload_size,
buf_size);
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromAzureBlobStorage>>(std::move(buffer), std::move(metadata), blob_path);
auto create_metadata_callback = [this, path, mode, blob_path] (size_t count)
{
readOrCreateUpdateAndStoreMetadata(path, mode, false, [blob_path, count] (Metadata & metadata) { metadata.addObject(blob_path, count); return true; });
};
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromAzureBlobStorage>>(std::move(buffer), std::move(create_metadata_callback), path);
}
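
The rewritten `writeFile` above no longer pre-creates metadata; it hands the write buffer a callback that records the object only once the upload has finished and the byte count is known. A standalone sketch of that callback shape, with hypothetical class and path names:

```cpp
/// Standalone sketch of the callback-based metadata update (hypothetical names).
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

class WriteIndirectBufferSketch
{
public:
    using CreateMetadataCallback = std::function<void(size_t count)>;

    WriteIndirectBufferSketch(std::string blob_path_, CreateMetadataCallback callback_)
        : blob_path(std::move(blob_path_)), create_metadata_callback(std::move(callback_)) {}

    void write(const std::string & data) { bytes_written += data.size(); }

    void finalize()
    {
        /// The upload of blob_path would be flushed here; only then is the metadata
        /// recorded, so a failed upload never leaves a dangling metadata entry behind.
        std::cout << "uploaded blob " << blob_path << std::endl;
        create_metadata_callback(bytes_written);
    }

private:
    std::string blob_path;
    CreateMetadataCallback create_metadata_callback;
    size_t bytes_written = 0;
};

int main()
{
    const std::string path = "store/example/data.bin";   /// made-up path
    WriteIndirectBufferSketch buffer("r4nd0m42", [path](size_t count)
    {
        /// Stand-in for readOrCreateUpdateAndStoreMetadata(path, mode, ...).
        std::cout << "metadata for " << path << ": +" << count << " bytes" << std::endl;
    });
    buffer.write("hello");
    buffer.write("world");
    buffer.finalize();
}
```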


@ -23,7 +23,7 @@ public:
{
}
virtual ~WritingToCacheWriteBuffer() override
~WritingToCacheWriteBuffer() override
{
try
{
@ -274,6 +274,7 @@ void DiskCacheWrapper::removeDirectory(const String & path)
{
if (cache_disk->exists(path))
cache_disk->removeDirectory(path);
DiskDecorator::removeDirectory(path);
}
@ -298,6 +299,18 @@ void DiskCacheWrapper::removeSharedRecursive(const String & path, bool keep_s3)
DiskDecorator::removeSharedRecursive(path, keep_s3);
}
void DiskCacheWrapper::removeSharedFiles(const RemoveBatchRequest & files, bool keep_s3)
{
for (const auto & file : files)
{
if (cache_disk->exists(file.path))
cache_disk->removeSharedFile(file.path, keep_s3);
}
DiskDecorator::removeSharedFiles(files, keep_s3);
}
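
A sketch of the batch removal added above, using made-up types: the caching decorator first drops whatever it has cached for each path and then forwards the whole batch to the wrapped disk, mirroring `removeSharedFiles`.

```cpp
/// Standalone sketch of a cache-then-delegate batch removal.
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct DiskSketch
{
    std::set<std::string> files;

    virtual void removeFiles(const std::vector<std::string> & paths)
    {
        for (const auto & path : paths)
            files.erase(path);
    }
    virtual ~DiskSketch() = default;
};

struct DiskCacheWrapperSketch final : DiskSketch
{
    DiskSketch & cache_disk;
    DiskSketch & nested_disk;

    DiskCacheWrapperSketch(DiskSketch & cache_, DiskSketch & nested_)
        : cache_disk(cache_), nested_disk(nested_) {}

    void removeFiles(const std::vector<std::string> & paths) override
    {
        for (const auto & path : paths)
            if (cache_disk.files.count(path))      /// only touch what is actually cached
                cache_disk.removeFiles({path});
        nested_disk.removeFiles(paths);            /// always delegate the full batch
    }
};

int main()
{
    DiskSketch cache, nested;
    cache.files = {"a.bin"};
    nested.files = {"a.bin", "b.bin"};

    DiskCacheWrapperSketch wrapper(cache, nested);
    wrapper.removeFiles({"a.bin", "b.bin"});

    std::cout << cache.files.size() << ' ' << nested.files.size() << std::endl;   /// 0 0
}
```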
void DiskCacheWrapper::createHardLink(const String & src_path, const String & dst_path)
{
/// Don't create hardlinks for cache files to shadow directory as it just waste cache disk space.


@ -48,6 +48,7 @@ public:
void removeRecursive(const String & path) override;
void removeSharedFile(const String & path, bool keep_s3) override;
void removeSharedRecursive(const String & path, bool keep_s3) override;
void removeSharedFiles(const RemoveBatchRequest & files, bool keep_s3) override;
void createHardLink(const String & src_path, const String & dst_path) override;
ReservationPtr reserve(UInt64 bytes) override;


@ -72,17 +72,9 @@ public:
void startup() override;
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map) override;
std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const override { return delegate->readMetaFile(path, settings, size); }
DiskPtr getMetadataDiskIfExistsOrSelf() override { return delegate->getMetadataDiskIfExistsOrSelf(); }
std::unique_ptr<WriteBufferFromFileBase> writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode) override { return delegate->writeMetaFile(path, buf_size, mode); }
void removeMetaFileIfExists(const String & path) override { delegate->removeMetaFileIfExists(path); }
std::unordered_map<String, String> getSerializedMetadata(const std::vector<String> & file_paths) const override { return delegate->getSerializedMetadata(file_paths); }
UInt32 getRefCount(const String & path) const override { return delegate->getRefCount(path); }


@ -539,14 +539,14 @@ catch (...)
struct DiskWriteCheckData
{
constexpr static size_t PAGE_SIZE = 4096;
char data[PAGE_SIZE]{};
constexpr static size_t PAGE_SIZE_IN_BYTES = 4096;
char data[PAGE_SIZE_IN_BYTES]{};
DiskWriteCheckData()
{
static const char * magic_string = "ClickHouse disk local write check";
static size_t magic_string_len = strlen(magic_string);
memcpy(data, magic_string, magic_string_len);
memcpy(data + PAGE_SIZE - magic_string_len, magic_string, magic_string_len);
memcpy(data + PAGE_SIZE_IN_BYTES - magic_string_len, magic_string, magic_string_len);
}
};
@ -557,7 +557,7 @@ try
String tmp_template = fs::path(disk_path) / "";
{
auto buf = WriteBufferFromTemporaryFile::create(tmp_template);
buf->write(data.data, data.PAGE_SIZE);
buf->write(data.data, data.PAGE_SIZE_IN_BYTES);
buf->sync();
}
return true;


@ -1,4 +1,7 @@
#include <Disks/HDFS/DiskHDFS.h>
#if USE_HDFS
#include <Disks/DiskLocal.h>
#include <Disks/RemoteDisksCommon.h>
@ -73,7 +76,7 @@ DiskHDFS::DiskHDFS(
std::unique_ptr<ReadBufferFromFileBase> DiskHDFS::readFile(const String & path, const ReadSettings & read_settings, std::optional<size_t>, std::optional<size_t>) const
{
auto metadata = readMeta(path);
auto metadata = readMetadata(path);
LOG_TEST(log,
"Read from file by path: {}. Existing HDFS objects: {}",
@ -87,8 +90,6 @@ std::unique_ptr<ReadBufferFromFileBase> DiskHDFS::readFile(const String & path,
std::unique_ptr<WriteBufferFromFileBase> DiskHDFS::writeFile(const String & path, size_t buf_size, WriteMode mode)
{
auto metadata = readOrCreateMetaForWriting(path, mode);
/// Path to store new HDFS object.
auto file_name = getRandomName();
auto hdfs_path = remote_fs_root_path + file_name;
@ -100,10 +101,13 @@ std::unique_ptr<WriteBufferFromFileBase> DiskHDFS::writeFile(const String & path
auto hdfs_buffer = std::make_unique<WriteBufferFromHDFS>(hdfs_path,
config, settings->replication, buf_size,
mode == WriteMode::Rewrite ? O_WRONLY : O_WRONLY | O_APPEND);
auto create_metadata_callback = [this, path, mode, file_name] (size_t count)
{
readOrCreateUpdateAndStoreMetadata(path, mode, false, [file_name, count] (Metadata & metadata) { metadata.addObject(file_name, count); return true; });
};
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromHDFS>>(std::move(hdfs_buffer),
std::move(metadata),
file_name);
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromHDFS>>(
std::move(hdfs_buffer), std::move(create_metadata_callback), path);
}
@ -179,3 +183,4 @@ void registerDiskHDFS(DiskFactory & factory)
}
}
#endif

View File

@ -1,5 +1,9 @@
#pragma once
#include <Common/config.h>
#if USE_HDFS
#include <Disks/IDiskRemote.h>
#include <Storages/HDFS/HDFSCommon.h>
#include <Core/UUID.h>
@ -79,3 +83,4 @@ private:
};
}
#endif

View File

@ -86,28 +86,4 @@ SyncGuardPtr IDisk::getDirectorySyncGuard(const String & /* path */) const
return nullptr;
}
std::unique_ptr<ReadBufferFromFileBase> IDisk::readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const
{
LOG_TRACE(&Poco::Logger::get("IDisk"), "Read local metafile: {}", path);
return readFile(path, settings, size);
}
std::unique_ptr<WriteBufferFromFileBase> IDisk::writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode)
{
LOG_TRACE(&Poco::Logger::get("IDisk"), "Write local metafile: {}", path);
return writeFile(path, buf_size, mode);
}
void IDisk::removeMetaFileIfExists(const String & path)
{
LOG_TRACE(&Poco::Logger::get("IDisk"), "Remove local metafile: {}", path);
removeFileIfExists(path);
}
}

View File

@ -277,28 +277,34 @@ public:
/// Applies new settings for disk in runtime.
virtual void applyNewSettings(const Poco::Util::AbstractConfiguration &, ContextPtr, const String &, const DisksMap &) {}
/// Open the local file for read and return ReadBufferFromFileBase object.
/// Overridden in IDiskRemote.
/// Used for work with custom metadata.
virtual std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const;
/// Quite a leaky abstraction. Some disks can use an additional disk to store
/// parts of their metadata. In the general case there is only the disk itself, and
/// we return a pointer to it.
///
/// Actually this is a part of the IDiskRemote implementation, but the hierarchy of
/// disks (with decorators) is so complex that we cannot even
/// dynamic_cast a pointer to IDisk to a pointer to IDiskRemote.
virtual std::shared_ptr<IDisk> getMetadataDiskIfExistsOrSelf() { return std::static_pointer_cast<IDisk>(shared_from_this()); }
/// Open the local file for write and return WriteBufferFromFileBase object.
/// Overridden in IDiskRemote.
/// Used for work with custom metadata.
virtual std::unique_ptr<WriteBufferFromFileBase> writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode);
virtual void removeMetaFileIfExists(const String & path);
/// A very similar case to getMetadataDiskIfExistsOrSelf(). If the disk has "metadata",
/// it will return a mapping for each required path: path -> metadata as string.
/// Only for IDiskRemote.
virtual std::unordered_map<String, String> getSerializedMetadata(const std::vector<String> & /* paths */) const { return {}; }
/// Return reference count for remote FS.
/// Overridden in IDiskRemote.
/// You can ask: why is the default zero and what does it mean? For some unknown reason
/// the decision was made to treat 0 as "no references exist, only the file itself is left".
/// With a normal file system we would get 1 in this case:
/// $ stat clickhouse
/// File: clickhouse
/// Size: 3014014920 Blocks: 5886760 IO Block: 4096 regular file
/// Device: 10301h/66305d Inode: 3109907 Links: 1
/// Why is the default always zero? Because a normal filesystem
/// manages hardlinks by itself, so you can always remove a hardlink and all
/// other alive hardlinks will not be removed.
virtual UInt32 getRefCount(const String &) const { return 0; }
protected:
friend class DiskDecorator;
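A minimal sketch (not part of this patch) of how a caller might act on the ref-count convention documented above. The helper name removeOneReference and its arguments are assumptions; only the IDisk methods declared in this header are used.
#include <Disks/IDisk.h>
namespace DB
{
/// Hypothetical helper: drop one hardlink of a part file and remove the remote
/// payload only when no other metadata files still reference it
/// (getRefCount() == 0 means "only this file itself is left").
void removeOneReference(const DiskPtr & disk, const String & path)
{
    const bool keep_remote_data = disk->getRefCount(path) > 0;
    disk->removeSharedFile(path, /* delete_metadata_only = */ keep_remote_data);
}
}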

View File

@ -24,23 +24,64 @@ namespace ErrorCodes
extern const int UNKNOWN_FORMAT;
extern const int FILE_ALREADY_EXISTS;
extern const int PATH_ACCESS_DENIED;
extern const int CANNOT_DELETE_DIRECTORY;
extern const int FILE_DOESNT_EXIST;
extern const int BAD_FILE_TYPE;
}
/// Load metadata by path or create empty if `create` flag is set.
IDiskRemote::Metadata::Metadata(
const String & remote_fs_root_path_,
DiskPtr metadata_disk_,
const String & metadata_file_path_,
bool create)
: RemoteMetadata(remote_fs_root_path_, metadata_file_path_)
, metadata_disk(metadata_disk_)
, total_size(0), ref_count(0)
IDiskRemote::Metadata IDiskRemote::Metadata::readMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_)
{
if (create)
return;
Metadata result(remote_fs_root_path_, metadata_disk_, metadata_file_path_);
result.load();
return result;
}
IDiskRemote::Metadata IDiskRemote::Metadata::createAndStoreMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync)
{
Metadata result(remote_fs_root_path_, metadata_disk_, metadata_file_path_);
result.save(sync);
return result;
}
IDiskRemote::Metadata IDiskRemote::Metadata::readUpdateAndStoreMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync, IDiskRemote::MetadataUpdater updater)
{
Metadata result(remote_fs_root_path_, metadata_disk_, metadata_file_path_);
result.load();
if (updater(result))
result.save(sync);
return result;
}
IDiskRemote::Metadata IDiskRemote::Metadata::createUpdateAndStoreMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync, IDiskRemote::MetadataUpdater updater)
{
Metadata result(remote_fs_root_path_, metadata_disk_, metadata_file_path_);
updater(result);
result.save(sync);
return result;
}
IDiskRemote::Metadata IDiskRemote::Metadata::createAndStoreMetadataIfNotExists(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync, bool overwrite)
{
if (overwrite || !metadata_disk_->exists(metadata_file_path_))
{
return createAndStoreMetadata(remote_fs_root_path_, metadata_disk_, metadata_file_path_, sync);
}
else
{
auto result = readMetadata(remote_fs_root_path_, metadata_disk_, metadata_file_path_);
if (result.read_only)
throw Exception("File is read-only: " + metadata_file_path_, ErrorCodes::PATH_ACCESS_DENIED);
return result;
}
}
void IDiskRemote::Metadata::load()
{
try
{
const ReadSettings read_settings;
@ -102,103 +143,158 @@ IDiskRemote::Metadata::Metadata(
}
}
/// Load metadata by path or create empty if `create` flag is set.
IDiskRemote::Metadata::Metadata(
const String & remote_fs_root_path_,
DiskPtr metadata_disk_,
const String & metadata_file_path_)
: RemoteMetadata(remote_fs_root_path_, metadata_file_path_)
, metadata_disk(metadata_disk_)
, total_size(0), ref_count(0)
{
}
void IDiskRemote::Metadata::addObject(const String & path, size_t size)
{
total_size += size;
remote_fs_objects.emplace_back(path, size);
}
void IDiskRemote::Metadata::saveToBuffer(WriteBuffer & buf, bool sync)
{
writeIntText(VERSION_RELATIVE_PATHS, buf);
writeChar('\n', buf);
writeIntText(remote_fs_objects.size(), buf);
writeChar('\t', buf);
writeIntText(total_size, buf);
writeChar('\n', buf);
for (const auto & [remote_fs_object_path, remote_fs_object_size] : remote_fs_objects)
{
writeIntText(remote_fs_object_size, buf);
writeChar('\t', buf);
writeEscapedString(remote_fs_object_path, buf);
writeChar('\n', buf);
}
writeIntText(ref_count, buf);
writeChar('\n', buf);
writeBoolText(read_only, buf);
writeChar('\n', buf);
buf.finalize();
if (sync)
buf.sync();
}
/// Fsync metadata file if 'sync' flag is set.
void IDiskRemote::Metadata::save(bool sync)
{
auto buf = metadata_disk->writeFile(metadata_file_path, 1024);
saveToBuffer(*buf, sync);
}
writeIntText(VERSION_RELATIVE_PATHS, *buf);
writeChar('\n', *buf);
std::string IDiskRemote::Metadata::serializeToString()
{
WriteBufferFromOwnString write_buf;
saveToBuffer(write_buf, false);
return write_buf.str();
}
writeIntText(remote_fs_objects.size(), *buf);
writeChar('\t', *buf);
writeIntText(total_size, *buf);
writeChar('\n', *buf);
IDiskRemote::Metadata IDiskRemote::readMetadataUnlocked(const String & path, std::shared_lock<std::shared_mutex> &) const
{
return Metadata::readMetadata(remote_fs_root_path, metadata_disk, path);
}
for (const auto & [remote_fs_object_path, remote_fs_object_size] : remote_fs_objects)
IDiskRemote::Metadata IDiskRemote::readMetadata(const String & path) const
{
std::shared_lock lock(metadata_mutex);
return readMetadataUnlocked(path, lock);
}
IDiskRemote::Metadata IDiskRemote::readUpdateAndStoreMetadata(const String & path, bool sync, IDiskRemote::MetadataUpdater updater)
{
std::unique_lock lock(metadata_mutex);
return Metadata::readUpdateAndStoreMetadata(remote_fs_root_path, metadata_disk, path, sync, updater);
}
IDiskRemote::Metadata IDiskRemote::readOrCreateUpdateAndStoreMetadata(const String & path, WriteMode mode, bool sync, IDiskRemote::MetadataUpdater updater)
{
if (mode == WriteMode::Rewrite || !metadata_disk->exists(path))
{
writeIntText(remote_fs_object_size, *buf);
writeChar('\t', *buf);
writeEscapedString(remote_fs_object_path, *buf);
writeChar('\n', *buf);
std::unique_lock lock(metadata_mutex);
return Metadata::createUpdateAndStoreMetadata(remote_fs_root_path, metadata_disk, path, sync, updater);
}
else
{
return Metadata::readUpdateAndStoreMetadata(remote_fs_root_path, metadata_disk, path, sync, updater);
}
}
IDiskRemote::Metadata IDiskRemote::createAndStoreMetadata(const String & path, bool sync)
{
return Metadata::createAndStoreMetadata(remote_fs_root_path, metadata_disk, path, sync);
}
IDiskRemote::Metadata IDiskRemote::createUpdateAndStoreMetadata(const String & path, bool sync, IDiskRemote::MetadataUpdater updater)
{
return Metadata::createUpdateAndStoreMetadata(remote_fs_root_path, metadata_disk, path, sync, updater);
}
std::unordered_map<String, String> IDiskRemote::getSerializedMetadata(const std::vector<std::string> & file_paths) const
{
std::unordered_map<String, String> metadatas;
std::shared_lock lock(metadata_mutex);
for (const auto & path : file_paths)
{
IDiskRemote::Metadata metadata = readMetadataUnlocked(path, lock);
metadata.ref_count = 0;
metadatas[path] = metadata.serializeToString();
}
writeIntText(ref_count, *buf);
writeChar('\n', *buf);
writeBoolText(read_only, *buf);
writeChar('\n', *buf);
buf->finalize();
if (sync)
buf->sync();
return metadatas;
}
IDiskRemote::Metadata IDiskRemote::readOrCreateMetaForWriting(const String & path, WriteMode mode)
{
bool exist = exists(path);
if (exist)
{
auto metadata = readMeta(path);
if (metadata.read_only)
throw Exception("File is read-only: " + path, ErrorCodes::PATH_ACCESS_DENIED);
if (mode == WriteMode::Rewrite)
removeFile(path); /// Remove for re-write.
else
return metadata;
}
auto metadata = createMeta(path);
/// Save empty metadata to disk to be able to get the file size while the buffer is not yet finalized.
metadata.save();
return metadata;
}
IDiskRemote::Metadata IDiskRemote::readMeta(const String & path) const
{
return Metadata(remote_fs_root_path, metadata_disk, path);
}
IDiskRemote::Metadata IDiskRemote::createMeta(const String & path) const
{
return Metadata(remote_fs_root_path, metadata_disk, path, true);
}
void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper)
void IDiskRemote::removeMetadata(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper)
{
LOG_TRACE(log, "Remove file by path: {}", backQuote(metadata_disk->getPath() + path));
if (!metadata_disk->exists(path))
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Metadata path '{}' doesn't exist", path);
if (!metadata_disk->isFile(path))
throw Exception(ErrorCodes::CANNOT_DELETE_DIRECTORY, "Path '{}' is a directory", path);
throw Exception(ErrorCodes::BAD_FILE_TYPE, "Path '{}' is not a regular file", path);
try
{
auto metadata = readMeta(path);
auto metadata_updater = [fs_paths_keeper, this] (Metadata & metadata)
{
if (metadata.ref_count == 0)
{
for (const auto & [remote_fs_object_path, _] : metadata.remote_fs_objects)
fs_paths_keeper->addPath(remote_fs_root_path + remote_fs_object_path);
return false;
}
else /// Otherwise decrement the number of references, save the metadata and delete the hardlink.
{
--metadata.ref_count;
}
return true;
};
readUpdateAndStoreMetadata(path, false, metadata_updater);
metadata_disk->removeFile(path);
/// If there are no references, delete the content from the remote FS.
if (metadata.ref_count == 0)
{
metadata_disk->removeFile(path);
for (const auto & [remote_fs_object_path, _] : metadata.remote_fs_objects)
fs_paths_keeper->addPath(remote_fs_root_path + remote_fs_object_path);
}
else /// Otherwise decrement the number of references, save the metadata and delete the file.
{
--metadata.ref_count;
metadata.save();
metadata_disk->removeFile(path);
}
}
catch (const Exception & e)
{
@ -216,18 +312,19 @@ void IDiskRemote::removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths
}
void IDiskRemote::removeMetaRecursive(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper)
void IDiskRemote::removeMetadataRecursive(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper)
{
checkStackSize(); /// This is needed to prevent stack overflow in case of cyclic symlinks.
if (metadata_disk->isFile(path))
{
removeMeta(path, fs_paths_keeper);
removeMetadata(path, fs_paths_keeper);
}
else
{
for (auto it{iterateDirectory(path)}; it->isValid(); it->next())
removeMetaRecursive(it->path(), fs_paths_keeper);
for (auto it = iterateDirectory(path); it->isValid(); it->next())
removeMetadataRecursive(it->path(), fs_paths_keeper);
metadata_disk->removeDirectory(path);
}
}
@ -305,16 +402,13 @@ bool IDiskRemote::isFile(const String & path) const
void IDiskRemote::createFile(const String & path)
{
/// Create empty metadata file.
auto metadata = createMeta(path);
metadata.save();
createAndStoreMetadata(path, false);
}
size_t IDiskRemote::getFileSize(const String & path) const
{
auto metadata = readMeta(path);
return metadata.total_size;
return readMetadata(path).total_size;
}
@ -341,45 +435,45 @@ void IDiskRemote::replaceFile(const String & from_path, const String & to_path)
}
void IDiskRemote::removeSharedFile(const String & path, bool keep_in_remote_fs)
void IDiskRemote::removeSharedFile(const String & path, bool delete_metadata_only)
{
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
removeMeta(path, fs_paths_keeper);
if (!keep_in_remote_fs)
removeMetadata(path, fs_paths_keeper);
if (!delete_metadata_only)
removeFromRemoteFS(fs_paths_keeper);
}
void IDiskRemote::removeSharedFileIfExists(const String & path, bool keep_in_remote_fs)
void IDiskRemote::removeSharedFileIfExists(const String & path, bool delete_metadata_only)
{
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
if (metadata_disk->exists(path))
{
removeMeta(path, fs_paths_keeper);
if (!keep_in_remote_fs)
removeMetadata(path, fs_paths_keeper);
if (!delete_metadata_only)
removeFromRemoteFS(fs_paths_keeper);
}
}
void IDiskRemote::removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs)
void IDiskRemote::removeSharedFiles(const RemoveBatchRequest & files, bool delete_metadata_only)
{
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
for (const auto & file : files)
{
bool skip = file.if_exists && !metadata_disk->exists(file.path);
if (!skip)
removeMeta(file.path, fs_paths_keeper);
removeMetadata(file.path, fs_paths_keeper);
}
if (!keep_in_remote_fs)
if (!delete_metadata_only)
removeFromRemoteFS(fs_paths_keeper);
}
void IDiskRemote::removeSharedRecursive(const String & path, bool keep_in_remote_fs)
void IDiskRemote::removeSharedRecursive(const String & path, bool delete_metadata_only)
{
RemoteFSPathKeeperPtr fs_paths_keeper = createFSPathKeeper();
removeMetaRecursive(path, fs_paths_keeper);
if (!keep_in_remote_fs)
removeMetadataRecursive(path, fs_paths_keeper);
if (!delete_metadata_only)
removeFromRemoteFS(fs_paths_keeper);
}
@ -388,9 +482,7 @@ void IDiskRemote::setReadOnly(const String & path)
{
/// We should store the read-only flag inside the metadata file (instead of using an FS flag),
/// because we modify the metadata file when creating hardlinks from it.
auto metadata = readMeta(path);
metadata.read_only = true;
metadata.save();
readUpdateAndStoreMetadata(path, false, [] (Metadata & metadata) { metadata.read_only = true; return true; });
}
@ -414,7 +506,7 @@ void IDiskRemote::createDirectories(const String & path)
void IDiskRemote::clearDirectory(const String & path)
{
for (auto it{iterateDirectory(path)}; it->isValid(); it->next())
for (auto it = iterateDirectory(path); it->isValid(); it->next())
if (isFile(it->path()))
removeFile(it->path());
}
@ -453,10 +545,7 @@ Poco::Timestamp IDiskRemote::getLastModified(const String & path)
void IDiskRemote::createHardLink(const String & src_path, const String & dst_path)
{
/// Increment number of references.
auto src = readMeta(src_path);
++src.ref_count;
src.save();
readUpdateAndStoreMetadata(src_path, false, [] (Metadata & metadata) { metadata.ref_count++; return true; });
/// Create FS hardlink to metadata file.
metadata_disk->createHardLink(src_path, dst_path);
@ -498,7 +587,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes)
String IDiskRemote::getUniqueId(const String & path) const
{
LOG_TRACE(log, "Remote path: {}, Path: {}", remote_fs_root_path, path);
Metadata metadata(remote_fs_root_path, metadata_disk, path);
auto metadata = readMetadata(path);
String id;
if (!metadata.remote_fs_objects.empty())
id = metadata.remote_fs_root_path + metadata.remote_fs_objects[0].first;
@ -514,34 +603,9 @@ AsynchronousReaderPtr IDiskRemote::getThreadPoolReader()
return reader;
}
std::unique_ptr<ReadBufferFromFileBase> IDiskRemote::readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const
{
LOG_TRACE(log, "Read metafile: {}", path);
return metadata_disk->readFile(path, settings, size);
}
std::unique_ptr<WriteBufferFromFileBase> IDiskRemote::writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode)
{
LOG_TRACE(log, "Write metafile: {}", path);
return metadata_disk->writeFile(path, buf_size, mode);
}
void IDiskRemote::removeMetaFileIfExists(const String & path)
{
LOG_TRACE(log, "Remove metafile: {}", path);
return metadata_disk->removeFileIfExists(path);
}
UInt32 IDiskRemote::getRefCount(const String & path) const
{
auto meta = readMeta(path);
return meta.ref_count;
return readMetadata(path).ref_count;
}
ThreadPool & IDiskRemote::getThreadPoolWriter()

View File

@ -6,6 +6,8 @@
#include <Disks/DiskFactory.h>
#include <Disks/Executor.h>
#include <utility>
#include <mutex>
#include <shared_mutex>
#include <Common/MultiVersion.h>
#include <Common/ThreadPool.h>
#include <filesystem>
@ -57,16 +59,23 @@ public:
size_t thread_pool_size);
struct Metadata;
using MetadataUpdater = std::function<bool(Metadata & metadata)>;
const String & getName() const final override { return name; }
const String & getPath() const final override { return metadata_disk->getPath(); }
Metadata readMeta(const String & path) const;
/// Methods for working with metadata. For some operations (like hardlink
/// creation) metadata can be updated concurrently from multiple threads
/// (the file is actually rewritten on disk). So an additional RW lock is required
/// for metadata reads and writes, but not for creating new metadata.
Metadata readMetadata(const String & path) const;
Metadata readMetadataUnlocked(const String & path, std::shared_lock<std::shared_mutex> &) const;
Metadata readUpdateAndStoreMetadata(const String & path, bool sync, MetadataUpdater updater);
Metadata readOrCreateUpdateAndStoreMetadata(const String & path, WriteMode mode, bool sync, MetadataUpdater updater);
Metadata createMeta(const String & path) const;
Metadata readOrCreateMetaForWriting(const String & path, WriteMode mode);
Metadata createAndStoreMetadata(const String & path, bool sync);
Metadata createUpdateAndStoreMetadata(const String & path, bool sync, MetadataUpdater updater);
UInt64 getTotalSpace() const override { return std::numeric_limits<UInt64>::max(); }
@ -94,13 +103,13 @@ public:
void removeRecursive(const String & path) override { removeSharedRecursive(path, false); }
void removeSharedFile(const String & path, bool keep_in_remote_fs) override;
void removeSharedFile(const String & path, bool delete_metadata_only) override;
void removeSharedFileIfExists(const String & path, bool keep_in_remote_fs) override;
void removeSharedFileIfExists(const String & path, bool delete_metadata_only) override;
void removeSharedFiles(const RemoveBatchRequest & files, bool keep_in_remote_fs) override;
void removeSharedFiles(const RemoveBatchRequest & files, bool delete_metadata_only) override;
void removeSharedRecursive(const String & path, bool keep_in_remote_fs) override;
void removeSharedRecursive(const String & path, bool delete_metadata_only) override;
void listFiles(const String & path, std::vector<String> & file_names) override;
@ -139,21 +148,14 @@ public:
static AsynchronousReaderPtr getThreadPoolReader();
static ThreadPool & getThreadPoolWriter();
virtual std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const override;
virtual std::unique_ptr<WriteBufferFromFileBase> writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode) override;
virtual void removeMetaFileIfExists(
const String & path) override;
DiskPtr getMetadataDiskIfExistsOrSelf() override { return metadata_disk; }
UInt32 getRefCount(const String & path) const override;
/// Return metadata for each file path. Also, before serialization, reset
/// ref_count of each metadata to zero. This function is used only for remote
/// fetches/sends in replicated engines; that is why ref_count is reset to zero.
std::unordered_map<String, String> getSerializedMetadata(const std::vector<String> & file_paths) const override;
protected:
Poco::Logger * log;
const String name;
@ -162,15 +164,16 @@ protected:
DiskPtr metadata_disk;
private:
void removeMeta(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper);
void removeMetadata(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper);
void removeMetaRecursive(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper);
void removeMetadataRecursive(const String & path, RemoteFSPathKeeperPtr fs_paths_keeper);
bool tryReserve(UInt64 bytes);
UInt64 reserved_bytes = 0;
UInt64 reservation_count = 0;
std::mutex reservation_mutex;
mutable std::shared_mutex metadata_mutex;
};
using RemoteDiskPtr = std::shared_ptr<IDiskRemote>;
@ -200,6 +203,7 @@ struct RemoteMetadata
struct IDiskRemote::Metadata : RemoteMetadata
{
using Updater = std::function<bool(IDiskRemote::Metadata & metadata)>;
/// Metadata file version.
static constexpr UInt32 VERSION_ABSOLUTE_PATHS = 1;
static constexpr UInt32 VERSION_RELATIVE_PATHS = 2;
@ -211,22 +215,36 @@ struct IDiskRemote::Metadata : RemoteMetadata
size_t total_size = 0;
/// Number of references (hardlinks) to this metadata file.
///
/// FIXME: Why are we tracking it explicitly, without
/// info from the filesystem?
UInt32 ref_count = 0;
/// Flag indicates that file is read only.
bool read_only = false;
/// Load metadata by path or create empty if `create` flag is set.
Metadata(const String & remote_fs_root_path_,
DiskPtr metadata_disk_,
const String & metadata_file_path_,
bool create = false);
Metadata(
const String & remote_fs_root_path_,
DiskPtr metadata_disk_,
const String & metadata_file_path_);
void addObject(const String & path, size_t size);
static Metadata readMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_);
static Metadata readUpdateAndStoreMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync, Updater updater);
static Metadata createAndStoreMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync);
static Metadata createUpdateAndStoreMetadata(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync, Updater updater);
static Metadata createAndStoreMetadataIfNotExists(const String & remote_fs_root_path_, DiskPtr metadata_disk_, const String & metadata_file_path_, bool sync, bool overwrite);
/// Serialize metadata to a string (same format as saveToBuffer).
std::string serializeToString();
private:
/// Fsync metadata file if 'sync' flag is set.
void save(bool sync = false);
void saveToBuffer(WriteBuffer & buffer, bool sync);
void load();
};
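A minimal sketch (an assumption, not taken from the patch) of the updater pattern these methods expose: the updater runs while the disk's metadata lock is held, and its boolean result decides whether the modified metadata is written back. Here disk is assumed to be an IDiskRemote reference and path an existing metadata file.
/// Example updater: add one more reference, but refuse to touch read-only metadata.
auto add_reference = [] (IDiskRemote::Metadata & metadata)
{
    if (metadata.read_only)
        return false;       /// nothing is stored, the file on disk stays untouched
    ++metadata.ref_count;   /// e.g. a new hardlink now points at this metadata file
    return true;            /// store the updated metadata
};
disk.readUpdateAndStoreMetadata(path, /* sync = */ false, add_reference);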
class DiskRemoteReservation final : public IReservation

View File

@ -12,15 +12,14 @@ namespace DB
template <typename T>
WriteIndirectBufferFromRemoteFS<T>::WriteIndirectBufferFromRemoteFS(
std::unique_ptr<T> impl_,
IDiskRemote::Metadata metadata_,
const String & remote_fs_path_)
CreateMetadataCallback && create_callback_,
const String & metadata_file_path_)
: WriteBufferFromFileDecorator(std::move(impl_))
, metadata(std::move(metadata_))
, remote_fs_path(remote_fs_path_)
, create_metadata_callback(std::move(create_callback_))
, metadata_file_path(metadata_file_path_)
{
}
template <typename T>
WriteIndirectBufferFromRemoteFS<T>::~WriteIndirectBufferFromRemoteFS()
{
@ -34,25 +33,13 @@ WriteIndirectBufferFromRemoteFS<T>::~WriteIndirectBufferFromRemoteFS()
}
}
template <typename T>
void WriteIndirectBufferFromRemoteFS<T>::finalizeImpl()
{
WriteBufferFromFileDecorator::finalizeImpl();
metadata.addObject(remote_fs_path, count());
metadata.save();
create_metadata_callback(count());
}
template <typename T>
void WriteIndirectBufferFromRemoteFS<T>::sync()
{
if (finalized)
metadata.save(true);
}
#if USE_AWS_S3
template
class WriteIndirectBufferFromRemoteFS<WriteBufferFromS3>;

View File

@ -9,6 +9,8 @@
namespace DB
{
using CreateMetadataCallback = std::function<void(size_t bytes_count)>;
/// Stores data in S3/HDFS and adds the object path and object size to metadata file on local FS.
template <typename T>
class WriteIndirectBufferFromRemoteFS final : public WriteBufferFromFileDecorator
@ -16,21 +18,18 @@ class WriteIndirectBufferFromRemoteFS final : public WriteBufferFromFileDecorato
public:
WriteIndirectBufferFromRemoteFS(
std::unique_ptr<T> impl_,
IDiskRemote::Metadata metadata_,
const String & remote_fs_path_);
CreateMetadataCallback && create_callback_,
const String & metadata_file_path_);
virtual ~WriteIndirectBufferFromRemoteFS() override;
~WriteIndirectBufferFromRemoteFS() override;
void sync() override;
String getFileName() const override { return metadata.metadata_file_path; }
String getFileName() const override { return metadata_file_path; }
private:
void finalizeImpl() override;
IDiskRemote::Metadata metadata;
String remote_fs_path;
CreateMetadataCallback create_metadata_callback;
String metadata_file_path;
};
}
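A brief sketch of how the new constructor is meant to be wired; it mirrors what DiskS3::writeFile and DiskHDFS::writeFile do later in this patch. The callback fires once from finalizeImpl() with the number of bytes written and records the new remote object in the local metadata file. Here disk, impl, blob_name, path and mode are assumed to be in scope.
CreateMetadataCallback create_metadata_callback = [&disk, path, mode, blob_name] (size_t count)
{
    disk.readOrCreateUpdateAndStoreMetadata(path, mode, /* sync = */ false,
        [blob_name, count] (IDiskRemote::Metadata & metadata) { metadata.addObject(blob_name, count); return true; });
};
auto buffer = std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromS3>>(
    std::move(impl), std::move(create_metadata_callback), path);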

View File

@ -218,7 +218,7 @@ void DiskS3::moveFile(const String & from_path, const String & to_path, bool sen
std::unique_ptr<ReadBufferFromFileBase> DiskS3::readFile(const String & path, const ReadSettings & read_settings, std::optional<size_t>, std::optional<size_t>) const
{
auto settings = current_settings.get();
auto metadata = readMeta(path);
auto metadata = readMetadata(path);
LOG_TEST(log, "Read from file by path: {}. Existing S3 objects: {}",
backQuote(metadata_disk->getPath() + path), metadata.remote_fs_objects.size());
@ -245,10 +245,9 @@ std::unique_ptr<ReadBufferFromFileBase> DiskS3::readFile(const String & path, co
std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path, size_t buf_size, WriteMode mode)
{
auto settings = current_settings.get();
auto metadata = readOrCreateMetaForWriting(path, mode);
/// Path to store new S3 object.
auto s3_path = getRandomASCIIString();
auto blob_name = getRandomASCIIString();
std::optional<ObjectMetadata> object_metadata;
if (settings->send_metadata)
@ -257,40 +256,45 @@ std::unique_ptr<WriteBufferFromFileBase> DiskS3::writeFile(const String & path,
object_metadata = {
{"path", path}
};
s3_path = "r" + revisionToString(revision) + "-file-" + s3_path;
blob_name = "r" + revisionToString(revision) + "-file-" + blob_name;
}
LOG_TRACE(log, "{} to file by path: {}. S3 path: {}",
mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + s3_path);
mode == WriteMode::Rewrite ? "Write" : "Append", backQuote(metadata_disk->getPath() + path), remote_fs_root_path + blob_name);
ScheduleFunc schedule = [pool = &getThreadPoolWriter()](auto callback)
{
pool->scheduleOrThrow([callback = std::move(callback), thread_group = CurrentThread::getGroup()]()
{
if (thread_group)
CurrentThread::attachTo(thread_group);
/// FIXME: the thread pool leads to obscure segfaults
/// ScheduleFunc schedule = [pool = &getThreadPoolWriter(), thread_group = CurrentThread::getGroup()](auto callback)
/// {
/// pool->scheduleOrThrow([callback = std::move(callback), thread_group]()
/// {
/// if (thread_group)
/// CurrentThread::attachTo(thread_group);
SCOPE_EXIT_SAFE(
if (thread_group)
CurrentThread::detachQueryIfNotDetached();
);
callback();
});
};
/// SCOPE_EXIT_SAFE(
/// if (thread_group)
/// CurrentThread::detachQueryIfNotDetached();
/// );
/// callback();
/// });
/// };
auto s3_buffer = std::make_unique<WriteBufferFromS3>(
settings->client,
bucket,
metadata.remote_fs_root_path + s3_path,
remote_fs_root_path + blob_name,
settings->s3_min_upload_part_size,
settings->s3_upload_part_size_multiply_factor,
settings->s3_upload_part_size_multiply_parts_count_threshold,
settings->s3_max_single_part_upload_size,
std::move(object_metadata),
buf_size,
std::move(schedule));
buf_size /*, std::move(schedule) */);
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromS3>>(std::move(s3_buffer), std::move(metadata), s3_path);
auto create_metadata_callback = [this, path, blob_name, mode] (size_t count)
{
readOrCreateUpdateAndStoreMetadata(path, mode, false, [blob_name, count] (Metadata & metadata) { metadata.addObject(blob_name, count); return true; });
};
return std::make_unique<WriteIndirectBufferFromRemoteFS<WriteBufferFromS3>>(std::move(s3_buffer), std::move(create_metadata_callback), path);
}
void DiskS3::createHardLink(const String & src_path, const String & dst_path)
@ -312,13 +316,7 @@ void DiskS3::createHardLink(const String & src_path, const String & dst_path, bo
createFileOperationObject("hardlink", revision, object_metadata);
}
/// Increment number of references.
auto src = readMeta(src_path);
++src.ref_count;
src.save();
/// Create FS hardlink to metadata file.
metadata_disk->createHardLink(src_path, dst_path);
IDiskRemote::createHardLink(src_path, dst_path);
}
void DiskS3::shutdown()
@ -438,7 +436,7 @@ void DiskS3::migrateFileToRestorableSchema(const String & path)
{
LOG_TRACE(log, "Migrate file {} to restorable schema", metadata_disk->getPath() + path);
auto meta = readMeta(path);
auto meta = readMetadata(path);
for (const auto & [key, _] : meta.remote_fs_objects)
{
@ -894,15 +892,19 @@ void DiskS3::processRestoreFiles(const String & source_bucket, const String & so
const auto & path = path_entry->second;
createDirectories(directoryPath(path));
auto metadata = createMeta(path);
auto relative_key = shrinkKey(source_path, key);
/// Copy object if we restore to different bucket / path.
if (bucket != source_bucket || remote_fs_root_path != source_path)
copyObject(source_bucket, key, bucket, remote_fs_root_path + relative_key, head_result);
metadata.addObject(relative_key, head_result.GetContentLength());
metadata.save();
auto updater = [relative_key, head_result] (Metadata & metadata)
{
metadata.addObject(relative_key, head_result.GetContentLength());
return true;
};
createUpdateAndStoreMetadata(path, false, updater);
LOG_TRACE(log, "Restored file {}", path);
}

View File

@ -176,6 +176,10 @@ void registerDiskS3(DiskFactory & factory)
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr {
S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
if (uri.key.empty())
throw Exception("Empty S3 path specified in disk configuration", ErrorCodes::BAD_ARGUMENTS);
if (uri.key.back() != '/')
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
@ -200,7 +204,16 @@ void registerDiskS3(DiskFactory & factory)
s3disk->startup();
if (config.getBool(config_prefix + ".cache_enabled", true))
#ifdef NDEBUG
bool use_cache = true;
#else
/// The current S3 cache implementation leads to allocations in the destructor of
/// the read buffer.
bool use_cache = false;
#endif
if (config.getBool(config_prefix + ".cache_enabled", use_cache))
{
String cache_path = config.getString(config_prefix + ".cache_path", context->getPath() + "disks/" + name + "/cache/");
s3disk = wrapWithCache(s3disk, "s3-cache", cache_path, metadata_path);

View File

@ -159,7 +159,6 @@ struct IntegerRoundingComputation
switch (scale_mode)
{
case ScaleMode::Zero:
return x;
case ScaleMode::Positive:
return x;
case ScaleMode::Negative:
@ -171,10 +170,15 @@ struct IntegerRoundingComputation
static ALWAYS_INLINE void compute(const T * __restrict in, size_t scale, T * __restrict out)
{
if (sizeof(T) <= sizeof(scale) && scale > size_t(std::numeric_limits<T>::max()))
*out = 0;
else
*out = compute(*in, scale);
if constexpr (sizeof(T) <= sizeof(scale) && scale_mode == ScaleMode::Negative)
{
if (scale > size_t(std::numeric_limits<T>::max()))
{
*out = 0;
return;
}
}
*out = compute(*in, scale);
}
};
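A small standalone illustration (an assumption, not from the patch) of why the guard above only matters for ScaleMode::Negative: with an Int8 argument and a divisor of 1000, the divisor does not fit into the type and exceeds any representable |x|, so every value rounds to 0. The example uses a simplified truncating round.
#include <cstdint>
#include <cstdio>
#include <limits>
int8_t truncateToMultipleOf(int8_t x, size_t divisor)
{
    /// Same short-circuit as the guard above: a divisor larger than the type's
    /// maximum cannot be represented in Int8, so the result is always 0.
    if (divisor > size_t(std::numeric_limits<int8_t>::max()))
        return 0;
    auto d = static_cast<int8_t>(divisor);
    return static_cast<int8_t>(static_cast<int8_t>(x / d) * d);
}
int main()
{
    std::printf("%d\n", truncateToMultipleOf(123, 10));    /// 120
    std::printf("%d\n", truncateToMultipleOf(123, 1000));  /// 0 (guard path)
    return 0;
}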

View File

@ -79,6 +79,10 @@ void WriteBufferFromS3::nextImpl()
if (!offset())
return;
/// Buffer in a bad state after exception
if (temporary_buffer->tellp() == -1)
allocateBuffer();
temporary_buffer->write(working_buffer.begin(), offset());
ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset());
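A small standalone illustration (assumed; the real temporary_buffer is an AWS string stream, but any std::stringstream behaves the same way) of the bad-state check added above: after a write fails, the stream reports tellp() == -1 until it is replaced, which is what allocateBuffer() does.
#include <cassert>
#include <sstream>
int main()
{
    std::stringstream buf;
    buf.setstate(std::ios::badbit);  /// simulate an upload attempt that threw mid-write
    assert(buf.tellp() == -1);       /// the position is unusable, so the buffer must be re-created
    buf = std::stringstream();       /// analogous to allocateBuffer()
    assert(buf.tellp() == 0);
    return 0;
}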
@ -91,6 +95,7 @@ void WriteBufferFromS3::nextImpl()
if (!multipart_upload_id.empty() && last_part_size > upload_part_size)
{
writePart();
allocateBuffer();
@ -168,7 +173,10 @@ void WriteBufferFromS3::writePart()
LOG_DEBUG(log, "Writing part. Bucket: {}, Key: {}, Upload_id: {}, Size: {}", bucket, key, multipart_upload_id, size);
if (size < 0)
throw Exception("Failed to write part. Buffer in invalid state.", ErrorCodes::S3_ERROR);
{
LOG_WARNING(log, "Skipping part upload. Buffer is in bad state, it means that we have tried to upload something, but got an exception.");
return;
}
if (size == 0)
{
@ -292,7 +300,10 @@ void WriteBufferFromS3::makeSinglepartUpload()
LOG_DEBUG(log, "Making single part upload. Bucket: {}, Key: {}, Size: {}, WithPool: {}", bucket, key, size, with_pool);
if (size < 0)
throw Exception("Failed to make single part upload. Buffer in invalid state", ErrorCodes::S3_ERROR);
{
LOG_WARNING(log, "Skipping single part upload. Buffer is in bad state, it mean that we have tried to upload something, but got an exception.");
return;
}
if (size == 0)
{

View File

@ -1106,6 +1106,20 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
throw Exception(storage_already_exists_error_code,
"{} {}.{} already exists", storage_name, backQuoteIfNeed(create.getDatabase()), backQuoteIfNeed(create.getTable()));
}
else if (!create.attach)
{
/// Check whether the table may exist in a detached or permanently detached state.
try
{
database->checkMetadataFilenameAvailability(create.getTable());
}
catch (const Exception &)
{
if (create.if_not_exists)
return false;
throw;
}
}
data_path = database->getTableDataPath(create);

View File

@ -109,22 +109,31 @@ Block InterpreterInsertQuery::getSampleBlock(
const StoragePtr & table,
const StorageMetadataPtr & metadata_snapshot) const
{
Block table_sample = metadata_snapshot->getSampleBlock();
Block table_sample_non_materialized = metadata_snapshot->getSampleBlockNonMaterialized();
Block table_sample_physical = metadata_snapshot->getSampleBlock();
Block table_sample_insertable = metadata_snapshot->getSampleBlockInsertable();
Block res;
for (const auto & current_name : names)
{
/// The table does not have a column with that name
if (!table_sample.has(current_name))
throw Exception("No such column " + current_name + " in table " + table->getStorageID().getNameForLogs(),
ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
if (!allow_materialized && !table_sample_non_materialized.has(current_name))
throw Exception("Cannot insert column " + current_name + ", because it is MATERIALIZED column.", ErrorCodes::ILLEGAL_COLUMN);
if (res.has(current_name))
throw Exception("Column " + current_name + " specified more than once", ErrorCodes::DUPLICATE_COLUMN);
res.insert(ColumnWithTypeAndName(table_sample.getByName(current_name).type, current_name));
/// Column is not ordinary or ephemeral
if (!table_sample_insertable.has(current_name))
{
/// Column is materialized
if (table_sample_physical.has(current_name))
{
if (!allow_materialized)
throw Exception("Cannot insert column " + current_name + ", because it is MATERIALIZED column.",
ErrorCodes::ILLEGAL_COLUMN);
res.insert(ColumnWithTypeAndName(table_sample_physical.getByName(current_name).type, current_name));
}
else /// The table does not have a column with that name
throw Exception("No such column " + current_name + " in table " + table->getStorageID().getNameForLogs(),
ErrorCodes::NO_SUCH_COLUMN_IN_TABLE);
}
else
res.insert(ColumnWithTypeAndName(table_sample_insertable.getByName(current_name).type, current_name));
}
return res;
}

View File

@ -83,10 +83,10 @@ ActionsDAGPtr addMissingDefaults(
/// Computes explicitly specified values by default and materialized columns.
if (auto dag = evaluateMissingDefaults(actions->getResultColumns(), required_columns, columns, context, true, null_as_default))
actions = ActionsDAG::merge(std::move(*actions), std::move(*dag));
else
/// Removes unused columns and reorders result.
/// The same is done in evaluateMissingDefaults if not empty dag is returned.
actions->removeUnusedActions(required_columns.getNames());
/// Removes unused columns and reorders result.
actions->removeUnusedActions(required_columns.getNames(), false);
actions->addMaterializingOutputActions();
return actions;
}

View File

@ -134,7 +134,6 @@ ActionsDAGPtr createExpressions(
const Block & header,
ASTPtr expr_list,
bool save_unneeded_columns,
const NamesAndTypesList & required_columns,
ContextPtr context)
{
if (!expr_list)
@ -146,12 +145,6 @@ ActionsDAGPtr createExpressions(
auto actions = expression_analyzer.getActionsDAG(true, !save_unneeded_columns);
dag = ActionsDAG::merge(std::move(*dag), std::move(*actions));
if (save_unneeded_columns)
{
dag->removeUnusedActions(required_columns.getNames());
dag->addMaterializingOutputActions();
}
return dag;
}
@ -163,7 +156,7 @@ void performRequiredConversions(Block & block, const NamesAndTypesList & require
if (conversion_expr_list->children.empty())
return;
if (auto dag = createExpressions(block, conversion_expr_list, true, required_columns, context))
if (auto dag = createExpressions(block, conversion_expr_list, true, context))
{
auto expression = std::make_shared<ExpressionActions>(std::move(dag), ExpressionActionsSettings::fromContext(context));
expression->execute(block);
@ -182,7 +175,7 @@ ActionsDAGPtr evaluateMissingDefaults(
return nullptr;
ASTPtr expr_list = defaultRequiredExpressions(header, required_columns, columns, null_as_default);
return createExpressions(header, expr_list, save_unneeded_columns, required_columns, context);
return createExpressions(header, expr_list, save_unneeded_columns, context);
}
}

View File

@ -198,8 +198,6 @@ ASTPtr ASTCreateQuery::clone() const
res->set(res->storage, storage->clone());
if (select)
res->set(res->select, select->clone());
if (tables)
res->set(res->tables, tables->clone());
if (table_overrides)
res->set(res->table_overrides, table_overrides->clone());
@ -434,12 +432,6 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat
settings.ostr << (comment ? ")" : "");
}
if (tables)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << " WITH " << (settings.hilite ? hilite_none : "");
tables->formatImpl(settings, state, frame);
}
if (comment)
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << settings.nl_or_ws << "COMMENT " << (settings.hilite ? hilite_none : "");

View File

@ -73,7 +73,6 @@ public:
bool replace_view{false}; /// CREATE OR REPLACE VIEW
ASTColumns * columns_list = nullptr;
ASTExpressionList * tables = nullptr;
StorageID to_table_id = StorageID::createEmpty(); /// For CREATE MATERIALIZED VIEW mv TO table.
UUID to_inner_uuid = UUIDHelpers::Nil; /// For materialized view with inner table

View File

@ -124,6 +124,7 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
ParserKeyword s_null{"NULL"};
ParserKeyword s_not{"NOT"};
ParserKeyword s_materialized{"MATERIALIZED"};
ParserKeyword s_ephemeral{"EPHEMERAL"};
ParserKeyword s_alias{"ALIAS"};
ParserKeyword s_comment{"COMMENT"};
ParserKeyword s_codec{"CODEC"};
@ -171,6 +172,7 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
if (!s_default.checkWithoutMoving(pos, expected)
&& !s_materialized.checkWithoutMoving(pos, expected)
&& !s_ephemeral.checkWithoutMoving(pos, expected)
&& !s_alias.checkWithoutMoving(pos, expected)
&& (require_type
|| (!s_comment.checkWithoutMoving(pos, expected)
@ -183,7 +185,8 @@ bool IParserColumnDeclaration<NameParser>::parseImpl(Pos & pos, ASTPtr & node, E
}
Pos pos_before_specifier = pos;
if (s_default.ignore(pos, expected) || s_materialized.ignore(pos, expected) || s_alias.ignore(pos, expected))
if (s_default.ignore(pos, expected) || s_materialized.ignore(pos, expected) ||
s_ephemeral.ignore(pos, expected) || s_alias.ignore(pos, expected))
{
default_specifier = Poco::toUpper(std::string{pos_before_specifier->begin, pos_before_specifier->end});

View File

@ -26,20 +26,20 @@ namespace
const std::unordered_set<std::string_view> keywords
{
"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT",
"MATERIALIZED", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH", "DROP",
"RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY", "PROJECT",
"PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW", "INTO",
"OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN", "ELSE",
"END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT", "VALUES",
"SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL", "INNER",
"LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP", "BY",
"WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR", "ASC",
"IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER", "ROLE",
"PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", "REPLACE",
"IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED",
"INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "DICTIONARY", "OFFSET",
"TRIM", "LTRIM", "RTRIM", "BOTH", "LEADING", "TRAILING"
"CREATE", "DATABASE", "IF", "NOT", "EXISTS", "TEMPORARY", "TABLE", "ON", "CLUSTER", "DEFAULT",
"MATERIALIZED", "EPHEMERAL", "ALIAS", "ENGINE", "AS", "VIEW", "POPULATE", "SETTINGS", "ATTACH", "DETACH",
"DROP", "RENAME", "TO", "ALTER", "ADD", "MODIFY", "CLEAR", "COLUMN", "AFTER", "COPY",
"PROJECT", "PRIMARY", "KEY", "CHECK", "PARTITION", "PART", "FREEZE", "FETCH", "FROM", "SHOW",
"INTO", "OUTFILE", "FORMAT", "TABLES", "DATABASES", "LIKE", "PROCESSLIST", "CASE", "WHEN", "THEN",
"ELSE", "END", "DESCRIBE", "DESC", "USE", "SET", "OPTIMIZE", "FINAL", "DEDUPLICATE", "INSERT",
"VALUES", "SELECT", "DISTINCT", "SAMPLE", "ARRAY", "JOIN", "GLOBAL", "LOCAL", "ANY", "ALL",
"INNER", "LEFT", "RIGHT", "FULL", "OUTER", "CROSS", "USING", "PREWHERE", "WHERE", "GROUP",
"BY", "WITH", "TOTALS", "HAVING", "ORDER", "COLLATE", "LIMIT", "UNION", "AND", "OR",
"ASC", "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER",
"ROLE", "PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT",
"REPLACE", "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED",
"INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "DICTIONARY", "OFFSET", "TRIM",
"LTRIM", "RTRIM", "BOTH", "LEADING", "TRAILING"
};
const std::unordered_set<std::string_view> keep_words

View File

@ -138,8 +138,6 @@ void FilterTransform::transform(Chunk & chunk)
return;
}
FilterDescription filter_and_holder(*filter_column);
/** Let's find out how many rows will be in result.
* To do this, we filter out the first non-constant column
* or calculate number of set bytes in the filter.
@ -154,14 +152,20 @@ void FilterTransform::transform(Chunk & chunk)
}
}
std::unique_ptr<IFilterDescription> filter_description;
if (filter_column->isSparse())
filter_description = std::make_unique<SparseFilterDescription>(*filter_column);
else
filter_description = std::make_unique<FilterDescription>(*filter_column);
size_t num_filtered_rows = 0;
if (first_non_constant_column != num_columns)
{
columns[first_non_constant_column] = columns[first_non_constant_column]->filter(*filter_and_holder.data, -1);
columns[first_non_constant_column] = filter_description->filter(*columns[first_non_constant_column], -1);
num_filtered_rows = columns[first_non_constant_column]->size();
}
else
num_filtered_rows = countBytesInFilter(*filter_and_holder.data);
num_filtered_rows = filter_description->countBytesInFilter();
/// If the current block is completely filtered out, let's move on to the next one.
if (num_filtered_rows == 0)
@ -207,7 +211,7 @@ void FilterTransform::transform(Chunk & chunk)
if (isColumnConst(*current_column))
current_column = current_column->cut(0, num_filtered_rows);
else
current_column = current_column->filter(*filter_and_holder.data, num_filtered_rows);
current_column = filter_description->filter(*current_column, num_filtered_rows);
}
chunk.setColumns(std::move(columns), num_filtered_rows);

View File

@ -1,5 +1,6 @@
#include <Processors/Transforms/MergingAggregatedTransform.h>
#include <Processors/Transforms/AggregatingTransform.h>
#include <Processors/Transforms/AggregatingInOrderTransform.h>
namespace DB
{
@ -34,21 +35,30 @@ void MergingAggregatedTransform::consume(Chunk chunk)
if (!info)
throw Exception("Chunk info was not set for chunk in MergingAggregatedTransform.", ErrorCodes::LOGICAL_ERROR);
const auto * agg_info = typeid_cast<const AggregatedChunkInfo *>(info.get());
if (!agg_info)
if (const auto * agg_info = typeid_cast<const AggregatedChunkInfo *>(info.get()))
{
/** If the remote servers used a two-level aggregation method,
* then blocks will contain information about the number of the bucket.
* Then the calculations can be parallelized by buckets.
* We decompose the blocks to the bucket numbers indicated in them.
*/
auto block = getInputPort().getHeader().cloneWithColumns(chunk.getColumns());
block.info.is_overflows = agg_info->is_overflows;
block.info.bucket_num = agg_info->bucket_num;
bucket_to_blocks[agg_info->bucket_num].emplace_back(std::move(block));
}
else if (const auto * in_order_info = typeid_cast<const ChunkInfoWithAllocatedBytes *>(info.get()))
{
auto block = getInputPort().getHeader().cloneWithColumns(chunk.getColumns());
block.info.is_overflows = false;
block.info.bucket_num = -1;
bucket_to_blocks[block.info.bucket_num].emplace_back(std::move(block));
}
else
throw Exception("Chunk should have AggregatedChunkInfo in MergingAggregatedTransform.", ErrorCodes::LOGICAL_ERROR);
/** If the remote servers used a two-level aggregation method,
* then blocks will contain information about the number of the bucket.
* Then the calculations can be parallelized by buckets.
* We decompose the blocks to the bucket numbers indicated in them.
*/
auto block = getInputPort().getHeader().cloneWithColumns(chunk.getColumns());
block.info.is_overflows = agg_info->is_overflows;
block.info.bucket_num = agg_info->bucket_num;
bucket_to_blocks[agg_info->bucket_num].emplace_back(std::move(block));
}
Chunk MergingAggregatedTransform::generate()

View File

@ -9,6 +9,7 @@ struct AliasNames
static constexpr const char * DEFAULT = "DEFAULT";
static constexpr const char * MATERIALIZED = "MATERIALIZED";
static constexpr const char * ALIAS = "ALIAS";
static constexpr const char * EPHEMERAL = "EPHEMERAL";
};
}
@ -27,7 +28,8 @@ ColumnDefaultKind columnDefaultKindFromString(const std::string & str)
static const std::unordered_map<std::string, ColumnDefaultKind> map{
{ AliasNames::DEFAULT, ColumnDefaultKind::Default },
{ AliasNames::MATERIALIZED, ColumnDefaultKind::Materialized },
{ AliasNames::ALIAS, ColumnDefaultKind::Alias }
{ AliasNames::ALIAS, ColumnDefaultKind::Alias },
{ AliasNames::EPHEMERAL, ColumnDefaultKind::Ephemeral }
};
const auto it = map.find(str);
@ -43,7 +45,8 @@ std::string toString(const ColumnDefaultKind kind)
static const std::unordered_map<ColumnDefaultKind, std::string> map{
{ ColumnDefaultKind::Default, AliasNames::DEFAULT },
{ ColumnDefaultKind::Materialized, AliasNames::MATERIALIZED },
{ ColumnDefaultKind::Alias, AliasNames::ALIAS }
{ ColumnDefaultKind::Alias, AliasNames::ALIAS },
{ ColumnDefaultKind::Ephemeral, AliasNames::EPHEMERAL }
};
const auto it = map.find(kind);

View File

@ -13,7 +13,8 @@ enum class ColumnDefaultKind
{
Default,
Materialized,
Alias
Alias,
Ephemeral
};

View File

@ -340,6 +340,15 @@ NamesAndTypesList ColumnsDescription::getOrdinary() const
return ret;
}
NamesAndTypesList ColumnsDescription::getInsertable() const
{
NamesAndTypesList ret;
for (const auto & col : columns)
if (col.default_desc.kind == ColumnDefaultKind::Default || col.default_desc.kind == ColumnDefaultKind::Ephemeral)
ret.emplace_back(col.name, col.type);
return ret;
}
NamesAndTypesList ColumnsDescription::getMaterialized() const
{
NamesAndTypesList ret;
@ -358,6 +367,15 @@ NamesAndTypesList ColumnsDescription::getAliases() const
return ret;
}
NamesAndTypesList ColumnsDescription::getEphemeral() const
{
NamesAndTypesList ret;
for (const auto & col : columns)
if (col.default_desc.kind == ColumnDefaultKind::Ephemeral)
ret.emplace_back(col.name, col.type);
return ret;
}
NamesAndTypesList ColumnsDescription::getAll() const
{
NamesAndTypesList ret;
@ -402,6 +420,8 @@ static ColumnsDescription::GetFlags defaultKindToGetFlag(ColumnDefaultKind kind)
return ColumnsDescription::Materialized;
case ColumnDefaultKind::Alias:
return ColumnsDescription::Aliases;
case ColumnDefaultKind::Ephemeral:
return ColumnsDescription::Ephemeral;
}
__builtin_unreachable();
}
@ -441,7 +461,7 @@ NamesAndTypesList ColumnsDescription::getAllPhysical() const
{
NamesAndTypesList ret;
for (const auto & col : columns)
if (col.default_desc.kind != ColumnDefaultKind::Alias)
if (col.default_desc.kind != ColumnDefaultKind::Alias && col.default_desc.kind != ColumnDefaultKind::Ephemeral)
ret.emplace_back(col.name, col.type);
return ret;
}
@ -450,7 +470,7 @@ Names ColumnsDescription::getNamesOfPhysical() const
{
Names ret;
for (const auto & col : columns)
if (col.default_desc.kind != ColumnDefaultKind::Alias)
if (col.default_desc.kind != ColumnDefaultKind::Alias && col.default_desc.kind != ColumnDefaultKind::Ephemeral)
ret.emplace_back(col.name);
return ret;
}
@ -481,7 +501,8 @@ NameAndTypePair ColumnsDescription::getColumnOrSubcolumn(GetFlags flags, const S
std::optional<NameAndTypePair> ColumnsDescription::tryGetPhysical(const String & column_name) const
{
auto it = columns.get<1>().find(column_name);
if (it == columns.get<1>().end() || it->default_desc.kind == ColumnDefaultKind::Alias)
if (it == columns.get<1>().end() ||
it->default_desc.kind == ColumnDefaultKind::Alias || it->default_desc.kind == ColumnDefaultKind::Ephemeral)
return {};
return NameAndTypePair(it->name, it->type);
@ -500,7 +521,8 @@ NameAndTypePair ColumnsDescription::getPhysical(const String & column_name) cons
bool ColumnsDescription::hasPhysical(const String & column_name) const
{
auto it = columns.get<1>().find(column_name);
return it != columns.get<1>().end() && it->default_desc.kind != ColumnDefaultKind::Alias;
return it != columns.get<1>().end() &&
it->default_desc.kind != ColumnDefaultKind::Alias && it->default_desc.kind != ColumnDefaultKind::Ephemeral;
}
bool ColumnsDescription::hasColumnOrSubcolumn(GetFlags flags, const String & column_name) const
@ -652,17 +674,16 @@ ColumnsDescription ColumnsDescription::parse(const String & str)
void ColumnsDescription::addSubcolumns(const String & name_in_storage, const DataTypePtr & type_in_storage)
{
for (const auto & subcolumn_name : type_in_storage->getSubcolumnNames())
IDataType::forEachSubcolumn([&](const auto &, const auto & subname, const auto & subdata)
{
auto subcolumn = NameAndTypePair(name_in_storage, subcolumn_name,
type_in_storage, type_in_storage->getSubcolumnType(subcolumn_name));
auto subcolumn = NameAndTypePair(name_in_storage, subname, type_in_storage, subdata.type);
if (has(subcolumn.name))
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
"Cannot add subcolumn {}: column with this name already exists", subcolumn.name);
subcolumns.get<0>().insert(std::move(subcolumn));
}
}, {type_in_storage->getDefaultSerialization(), type_in_storage, nullptr, nullptr});
}
void ColumnsDescription::removeSubcolumns(const String & name_in_storage)

View File

@ -84,18 +84,21 @@ public:
Ordinary = 1,
Materialized = 2,
Aliases = 4,
Ephemeral = 8,
AllPhysical = Ordinary | Materialized,
All = AllPhysical | Aliases,
All = AllPhysical | Aliases | Ephemeral,
};
NamesAndTypesList getByNames(GetFlags flags, const Names & names, bool with_subcolumns) const;
NamesAndTypesList getOrdinary() const;
NamesAndTypesList getMaterialized() const;
NamesAndTypesList getInsertable() const; /// ordinary + ephemeral
NamesAndTypesList getAliases() const;
NamesAndTypesList getEphemeral() const;
NamesAndTypesList getAllPhysical() const; /// ordinary + materialized.
NamesAndTypesList getAll() const; /// ordinary + materialized + aliases
NamesAndTypesList getAll() const; /// ordinary + materialized + aliases + ephemeral
NamesAndTypesList getAllWithSubcolumns() const;
NamesAndTypesList getAllPhysicalWithSubcolumns() const;

View File

@ -25,7 +25,7 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl
HDFSBuilderWrapper builder;
HDFSFSPtr fs;
explicit WriteBufferFromHDFSImpl(
WriteBufferFromHDFSImpl(
const std::string & hdfs_uri_,
const Poco::Util::AbstractConfiguration & config_,
int replication_,

View File

@ -314,6 +314,10 @@ MergeTreeData::DataPart::Checksums Service::sendPartFromDisk(
void Service::sendPartFromDiskRemoteMeta(const MergeTreeData::DataPartPtr & part, WriteBuffer & out)
{
auto disk = part->volume->getDisk();
if (!disk->supportZeroCopyReplication())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Disk '{}' doesn't support zero-copy replication", disk->getName());
/// We'll take a list of files from the list of checksums.
MergeTreeData::DataPart::Checksums checksums = part->checksums;
/// Add files that are not in the checksum list.
@ -321,11 +325,13 @@ void Service::sendPartFromDiskRemoteMeta(const MergeTreeData::DataPartPtr & part
for (const auto & file_name : file_names_without_checksums)
checksums.files[file_name] = {};
auto disk = part->volume->getDisk();
if (!disk->supportZeroCopyReplication())
throw Exception(ErrorCodes::LOGICAL_ERROR, "disk {} doesn't support zero-copy replication", disk->getName());
std::vector<std::string> paths;
paths.reserve(checksums.files.size());
for (const auto & it : checksums.files)
paths.push_back(fs::path(part->getFullRelativePath()) / it.first);
part->storage.lockSharedData(*part);
/// Serialized metadata with zero ref counts.
auto metadatas = disk->getSerializedMetadata(paths);
String part_id = part->getUniqueId();
writeStringBinary(part_id, out);
@ -333,29 +339,32 @@ void Service::sendPartFromDiskRemoteMeta(const MergeTreeData::DataPartPtr & part
writeBinary(checksums.files.size(), out);
for (const auto & it : checksums.files)
{
String file_name = it.first;
String metadata_file = fs::path(disk->getPath()) / part->getFullRelativePath() / file_name;
fs::path metadata(metadata_file);
const String & file_name = it.first;
String file_path_prefix = fs::path(part->getFullRelativePath()) / file_name;
/// Just some additional checks
String metadata_file_path = fs::path(disk->getPath()) / file_path_prefix;
fs::path metadata(metadata_file_path);
if (!fs::exists(metadata))
throw Exception(ErrorCodes::CORRUPTED_DATA, "Remote metadata '{}' is not exists", file_name);
if (!fs::is_regular_file(metadata))
throw Exception(ErrorCodes::CORRUPTED_DATA, "Remote metadata '{}' is not a file", file_name);
UInt64 file_size = fs::file_size(metadata);
/// Actually send the metadata
auto metadata_str = metadatas[file_path_prefix];
UInt64 file_size = metadata_str.size();
ReadBufferFromString buf(metadata_str);
writeStringBinary(it.first, out);
writeBinary(file_size, out);
auto file_in = createReadBufferFromFileBase(metadata_file, /* settings= */ {});
HashingWriteBuffer hashing_out(out);
copyDataWithThrottler(*file_in, hashing_out, blocker.getCounter(), data.getSendsThrottler());
copyDataWithThrottler(buf, hashing_out, blocker.getCounter(), data.getSendsThrottler());
if (blocker.isCancelled())
throw Exception("Transferring part to replica was cancelled", ErrorCodes::ABORTED);
if (hashing_out.count() != file_size)
throw Exception(ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}", metadata_file);
throw Exception(ErrorCodes::BAD_SIZE_OF_FILE_IN_DATA_PART, "Unexpected size of file {}", metadata_file_path);
writePODBinary(hashing_out.getHash(), out);
}
@ -770,9 +779,12 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta(
{
throw Exception(ErrorCodes::ZERO_COPY_REPLICATION_ERROR, "Part {} unique id {} doesn't exist on {}.", part_name, part_id, disk->getName());
}
LOG_DEBUG(log, "Downloading Part {} unique id {} metadata onto disk {}.",
part_name, part_id, disk->getName());
data.lockSharedDataTemporary(part_name, part_id, disk);
static const String TMP_PREFIX = "tmp-fetch_";
String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_;
@ -837,7 +849,10 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDiskRemoteMeta(
new_data_part->modification_time = time(nullptr);
new_data_part->loadColumnsChecksumsIndexes(true, false);
new_data_part->storage.lockSharedData(*new_data_part);
data.lockSharedData(*new_data_part, /* replace_existing_lock = */ true);
LOG_DEBUG(log, "Download of part {} unique id {} metadata onto disk {} finished.",
part_name, part_id, disk->getName());
return new_data_part;
}

View File
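
For orientation, here is a rough standalone sketch of the framing used by sendPartFromDiskRemoteMeta above: part id, file count, then per file its name, size and serialized metadata guarded by a hash. Plain std::ostream and std::hash are stand-ins for ClickHouse's WriteBuffer/HashingWriteBuffer helpers, and the names below are illustrative only:

#include <cstdint>
#include <functional>
#include <map>
#include <ostream>
#include <string>

static void writeStringWithSize(std::ostream & out, const std::string & s)
{
    uint64_t size = s.size();
    out.write(reinterpret_cast<const char *>(&size), sizeof(size)); // length prefix
    out.write(s.data(), static_cast<std::streamsize>(size));
}

// 'metadatas' maps a relative file path to its serialized metadata string,
// in the spirit of getSerializedMetadata in the diff above.
void sendRemoteMetaSketch(std::ostream & out, const std::string & part_id,
                          const std::map<std::string, std::string> & metadatas)
{
    writeStringWithSize(out, part_id);

    uint64_t count = metadatas.size();
    out.write(reinterpret_cast<const char *>(&count), sizeof(count));

    for (const auto & [file_name, metadata_str] : metadatas)
    {
        writeStringWithSize(out, file_name);

        uint64_t file_size = metadata_str.size();
        out.write(reinterpret_cast<const char *>(&file_size), sizeof(file_size));
        out.write(metadata_str.data(), static_cast<std::streamsize>(file_size));

        // Stand-in for HashingWriteBuffer: the real code streams the payload through
        // a hashing buffer and appends the hash so the receiver can verify integrity.
        uint64_t hash = std::hash<std::string>{}(metadata_str);
        out.write(reinterpret_cast<const char *>(&hash), sizeof(hash));
    }
}
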

@ -63,7 +63,7 @@ private:
class Fetcher final : private boost::noncopyable
{
public:
explicit Fetcher(MergeTreeData & data_) : data(data_), log(&Poco::Logger::get("Fetcher")) {}
explicit Fetcher(StorageReplicatedMergeTree & data_) : data(data_), log(&Poco::Logger::get("Fetcher")) {}
/// Downloads a part to tmp_directory. If to_detached - downloads to the `detached` directory.
MergeTreeData::MutableDataPartPtr fetchPart(
@ -129,7 +129,7 @@ private:
PooledReadWriteBufferFromHTTP & in,
ThrottlerPtr throttler);
MergeTreeData & data;
StorageReplicatedMergeTree & data;
Poco::Logger * log;
};

View File

@ -1300,16 +1300,7 @@ std::optional<bool> IMergeTreeDataPart::keepSharedDataInDecoupledStorage() const
if (force_keep_shared_data)
return true;
/// TODO Unlocking in try-catch and ignoring exception look ugly
try
{
return !storage.unlockSharedData(*this);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__, "There is a problem with deleting part " + name + " from filesystem");
}
return {};
return !storage.unlockSharedData(*this);
}
void IMergeTreeDataPart::remove() const
@ -1756,18 +1747,10 @@ String IMergeTreeDataPart::getUniqueId() const
if (!disk->supportZeroCopyReplication())
throw Exception(fmt::format("Disk {} doesn't support zero-copy replication", disk->getName()), ErrorCodes::LOGICAL_ERROR);
String id = disk->getUniqueId(fs::path(getFullRelativePath()) / "checksums.txt");
return id;
return disk->getUniqueId(fs::path(getFullRelativePath()) / FILE_FOR_REFERENCES_CHECK);
}
UInt32 IMergeTreeDataPart::getNumberOfRefereneces() const
{
return volume->getDisk()->getRefCount(fs::path(getFullRelativePath()) / "checksums.txt");
}
String IMergeTreeDataPart::getZeroLevelPartBlockID(const std::string_view token) const
String IMergeTreeDataPart::getZeroLevelPartBlockID(std::string_view token) const
{
if (info.level != 0)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Trying to get block id for non zero level part {}", name);

View File

@ -414,6 +414,18 @@ public:
static inline constexpr auto TXN_VERSION_METADATA_FILE_NAME = "txn_version.txt";
/// One of the part's files which is used to check how many references (I'd like
/// to say hardlinks, but that would confuse things even more) we have for the part
/// for zero-copy replication. Sadly it's very complex.
///
/// NOTE: it's not a random "metadata" file of the part like 'columns.txt'. If
/// two related parts (for example all_1_1_0 and all_1_1_0_100) have an equal
/// checksums.txt, it means that one part was obtained by a FREEZE operation or
/// by a mutation that did not change the source part. In this case we really
/// don't need to remove data from the remote FS and only need to decrement
/// the reference counter locally.
static inline constexpr auto FILE_FOR_REFERENCES_CHECK = "checksums.txt";
/// Checks that all TTLs (table min/max, column ttls, so on) for part
/// calculated. Part without calculated TTL may exist if TTL was added after
/// part creation (using alter query with materialize_ttl setting).

View File
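
A minimal sketch of the decision the comment above describes, against a tiny in-memory stand-in for the disk (not the real IDisk API): keep the shared remote data while the reference-check file is still referenced, and treat an absent file as an unlocked temporary part. All names here are illustrative assumptions.

#include <map>
#include <string>

// In-memory stand-in for the disk interface; names are illustrative only.
struct DiskStub
{
    std::map<std::string, unsigned> ref_counts; // path -> number of references

    bool exists(const std::string & path) const { return ref_counts.count(path) != 0; }
    unsigned getRefCount(const std::string & path) const { return ref_counts.at(path); }
};

// Returns true when the remote data may be removed, false when it must be kept.
bool mayRemoveSharedData(const DiskStub & disk, const std::string & part_path)
{
    const std::string ref_file = part_path + "/checksums.txt"; // FILE_FOR_REFERENCES_CHECK

    // Temporary part with an absent reference file: nothing was locked in shared mode.
    if (!disk.exists(ref_file))
        return true;

    // Another local part (a frozen backup or a no-op mutation) still references the
    // same remote blobs, so only the local reference counter should be decremented.
    return disk.getRefCount(ref_file) == 0;
}
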

@ -192,6 +192,7 @@ void IMergeTreeReader::evaluateMissingDefaults(Block additional_columns, Columns
additional_columns, columns, metadata_snapshot->getColumns(), storage.getContext());
if (dag)
{
dag->addMaterializingOutputActions();
auto actions = std::make_shared<
ExpressionActions>(std::move(dag),
ExpressionActionsSettings::fromSettings(storage.getContext()->getSettingsRef()));

View File

@ -79,10 +79,7 @@ NameSet IMergedBlockOutputStream::removeEmptyColumnsFromPart(
for (const String & removed_file : remove_files)
{
if (checksums.files.count(removed_file))
{
data_part->volume->getDisk()->removeFile(data_part->getFullRelativePath() + removed_file);
checksums.files.erase(removed_file);
}
}
/// Remove columns from columns array

View File

@ -46,14 +46,10 @@ std::pair<bool, ReplicatedMergeMutateTaskBase::PartLogWriter> MergeFromLogEntryT
/// In some use cases merging can be more expensive than fetching
/// and it may be better to spread merge tasks across the replicas
/// instead of doing exactly the same merge cluster-wise
std::optional<String> replica_to_execute_merge;
bool replica_to_execute_merge_picked = false;
if (storage.merge_strategy_picker.shouldMergeOnSingleReplica(entry))
{
replica_to_execute_merge = storage.merge_strategy_picker.pickReplicaToExecuteMerge(entry);
replica_to_execute_merge_picked = true;
std::optional<String> replica_to_execute_merge = storage.merge_strategy_picker.pickReplicaToExecuteMerge(entry);
if (replica_to_execute_merge)
{
LOG_DEBUG(log,
@ -158,22 +154,24 @@ std::pair<bool, ReplicatedMergeMutateTaskBase::PartLogWriter> MergeFromLogEntryT
future_merged_part->updatePath(storage, reserved_space.get());
future_merged_part->merge_type = entry.merge_type;
if (storage_settings_ptr->allow_remote_fs_zero_copy_replication)
{
if (auto disk = reserved_space->getDisk(); disk->getType() == DB::DiskType::S3)
{
if (storage.merge_strategy_picker.shouldMergeOnSingleReplicaShared(entry))
String dummy;
if (!storage.findReplicaHavingCoveringPart(entry.new_part_name, true, dummy).empty())
{
if (!replica_to_execute_merge_picked)
replica_to_execute_merge = storage.merge_strategy_picker.pickReplicaToExecuteMerge(entry);
LOG_DEBUG(log, "Merge of part {} finished by some other replica, will fetch merged part", entry.new_part_name);
return {false, {}};
}
if (replica_to_execute_merge)
{
LOG_DEBUG(log,
"Prefer fetching part {} from replica {} due s3_execute_merges_on_single_replica_time_threshold",
entry.new_part_name, replica_to_execute_merge.value());
return {false, {}};
}
zero_copy_lock = storage.tryCreateZeroCopyExclusiveLock(entry.new_part_name, disk);
if (!zero_copy_lock)
{
LOG_DEBUG(log, "Merge of part {} started by some other replica, will wait it and fetch merged part", entry.new_part_name);
return {false, {}};
}
}
}
@ -272,6 +270,9 @@ bool MergeFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrite
throw;
}
if (zero_copy_lock)
zero_copy_lock->lock->unlock();
/** Removing old parts from ZK and from the disk is delayed - see ReplicatedMergeTreeCleanupThread, clearOldParts.
*/

View File
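
The new zero-copy branch above boils down to a small decision: fetch if another replica already produced (or is producing) the merged part, otherwise take the exclusive lock and merge locally. A hedged standalone sketch of that flow; the callbacks stand in for findReplicaHavingCoveringPart and tryCreateZeroCopyExclusiveLock and are assumptions, not the real signatures.

#include <functional>
#include <optional>
#include <string>

enum class MergeAction { ExecuteLocally, FetchFromOtherReplica };

struct Decision
{
    MergeAction action;
    std::optional<std::string> reason;
};

Decision decideMergeOnZeroCopy(
    const std::string & new_part_name,
    const std::function<bool(const std::string &)> & other_replica_has_covering_part, // assumed callback
    const std::function<bool(const std::string &)> & try_take_exclusive_lock)          // assumed callback
{
    // Somebody already finished this merge: just fetch the result.
    if (other_replica_has_covering_part(new_part_name))
        return {MergeAction::FetchFromOtherReplica, "merge finished by another replica"};

    // Somebody is merging right now: wait and fetch instead of duplicating the work
    // (and duplicating the data on shared storage).
    if (!try_take_exclusive_lock(new_part_name))
        return {MergeAction::FetchFromOtherReplica, "merge started by another replica"};

    // We hold the zero-copy exclusive lock; it is released after the merge is committed.
    return {MergeAction::ExecuteLocally, std::nullopt};
}
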

@ -8,6 +8,7 @@
#include <Storages/MergeTree/ReplicatedMergeTreeQueue.h>
#include <Storages/MergeTree/ReplicatedMergeTreeLogEntry.h>
#include <Storages/MergeTree/ReplicatedMergeMutateTaskBase.h>
#include <Storages/MergeTree/ZeroCopyLock.h>
namespace DB
@ -37,6 +38,7 @@ private:
MergeTreeData::DataPartsVector parts;
MergeTreeData::TransactionUniquePtr transaction_ptr{nullptr};
std::optional<ZeroCopyLock> zero_copy_lock;
StopwatchUniquePtr stopwatch_ptr{nullptr};
MergeTreeData::MutableDataPartPtr part;

View File

@ -265,6 +265,14 @@ MergeTreeData::MergeTreeData(
/// Creating directories, if not exist.
for (const auto & disk : getDisks())
{
/// TODO: implement it; the main issue is in DataPartsExchange (it is not able to send directory metadata)
if (supportsReplication() && settings->allow_remote_fs_zero_copy_replication
&& disk->supportZeroCopyReplication() && metadata_.hasProjections())
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Projections are not supported when zero-copy replication is enabled for table. "
"Currently disk '{}' supports zero copy replication", disk->getName());
}
if (disk->isBroken())
continue;
@ -2148,11 +2156,26 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context
"ALTER ADD INDEX is not supported for tables with the old syntax",
ErrorCodes::BAD_ARGUMENTS);
}
if (command.type == AlterCommand::ADD_PROJECTION && !is_custom_partitioned)
if (command.type == AlterCommand::ADD_PROJECTION)
{
throw Exception(
"ALTER ADD PROJECTION is not supported for tables with the old syntax",
ErrorCodes::BAD_ARGUMENTS);
if (!is_custom_partitioned)
throw Exception(
"ALTER ADD PROJECTION is not supported for tables with the old syntax",
ErrorCodes::BAD_ARGUMENTS);
/// TODO: implement it; the main issue is in DataPartsExchange (it is not able to send directory metadata)
if (supportsReplication() && getSettings()->allow_remote_fs_zero_copy_replication)
{
auto storage_policy = getStoragePolicy();
auto disks = storage_policy->getDisks();
for (const auto & disk : disks)
{
if (disk->supportZeroCopyReplication())
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "ALTER ADD PROJECTION is not supported when zero-copy replication is enabled for table. "
"Currently disk '{}' supports zero copy replication", disk->getName());
}
}
}
if (command.type == AlterCommand::RENAME_COLUMN)
{
@ -5969,7 +5992,7 @@ bool MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagge
/// replica will actually move the part from disk to some
/// zero-copy storage; other replicas will just fetch
/// metainformation.
if (auto lock = tryCreateZeroCopyExclusiveLock(moving_part.part, disk); lock)
if (auto lock = tryCreateZeroCopyExclusiveLock(moving_part.part->name, disk); lock)
{
cloned_part = parts_mover.clonePart(moving_part);
parts_mover.swapClonedPart(cloned_part);

View File

@ -899,7 +899,7 @@ public:
/// Lock part in zookeeper for shared data in several nodes
/// Overridden in StorageReplicatedMergeTree
virtual void lockSharedData(const IMergeTreeDataPart &) const {}
virtual void lockSharedData(const IMergeTreeDataPart &, bool = false) const {}
/// Unlock shared data part in zookeeper
/// Overridden in StorageReplicatedMergeTree
@ -1225,7 +1225,7 @@ private:
/// Create zero-copy exclusive lock for part and disk. Useful for coordination of
/// distributed operations which can lead to data duplication. Implemented only in ReplicatedMergeTree.
virtual std::optional<ZeroCopyLock> tryCreateZeroCopyExclusiveLock(const DataPartPtr &, const DiskPtr &) { return std::nullopt; }
virtual std::optional<ZeroCopyLock> tryCreateZeroCopyExclusiveLock(const String &, const DiskPtr &) { return std::nullopt; }
};
/// RAII struct to record big parts that are submerging or emerging.

View File

@ -307,7 +307,6 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMerge(
SelectPartsDecision MergeTreeDataMergerMutator::selectAllPartsToMergeWithinPartition(
FutureMergedMutatedPartPtr future_part,
UInt64 & available_disk_space,
const AllowedMergingPredicate & can_merge,
const String & partition_id,
bool final,
@ -360,6 +359,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectAllPartsToMergeWithinParti
++it;
}
auto available_disk_space = data.getStoragePolicy()->getMaxUnreservedFreeSpace();
/// Enough disk space to cover the new merge with a margin.
auto required_disk_space = sum_bytes * DISK_USAGE_COEFFICIENT_TO_SELECT;
if (available_disk_space <= required_disk_space)
@ -387,7 +387,6 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectAllPartsToMergeWithinParti
LOG_DEBUG(log, "Selected {} parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name);
future_part->assign(std::move(parts));
available_disk_space -= required_disk_space;
return SelectPartsDecision::SELECTED;
}

View File

@ -85,7 +85,6 @@ public:
*/
SelectPartsDecision selectAllPartsToMergeWithinPartition(
FutureMergedMutatedPartPtr future_part,
UInt64 & available_disk_space,
const AllowedMergingPredicate & can_merge,
const String & partition_id,
bool final,

View File

@ -109,6 +109,8 @@ void MergeTreeWriteAheadLog::rotate(const std::unique_lock<std::mutex> &)
+ toString(min_block_number) + "_"
+ toString(max_block_number) + WAL_FILE_EXTENSION;
/// Finalize stream before file rename
out->finalize();
disk->replaceFile(path, storage.getRelativeDataPath() + new_name);
init();
}

View File

@ -64,11 +64,15 @@ struct MergedBlockOutputStream::Finalizer::Impl
{
IMergeTreeDataPartWriter & writer;
MergeTreeData::MutableDataPartPtr part;
NameSet files_to_remove_after_finish;
std::vector<std::unique_ptr<WriteBufferFromFileBase>> written_files;
bool sync;
Impl(IMergeTreeDataPartWriter & writer_, MergeTreeData::MutableDataPartPtr part_, bool sync_)
: writer(writer_), part(std::move(part_)), sync(sync_) {}
Impl(IMergeTreeDataPartWriter & writer_, MergeTreeData::MutableDataPartPtr part_, const NameSet & files_to_remove_after_finish_, bool sync_)
: writer(writer_)
, part(std::move(part_))
, files_to_remove_after_finish(files_to_remove_after_finish_)
, sync(sync_) {}
void finish();
};
@ -84,6 +88,10 @@ void MergedBlockOutputStream::Finalizer::Impl::finish()
{
writer.finish(sync);
auto disk = part->volume->getDisk();
for (const auto & file_name: files_to_remove_after_finish)
disk->removeFile(part->getFullRelativePath() + file_name);
for (auto & file : written_files)
{
file->finalize();
@ -142,19 +150,20 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync(
projection_part->checksums.getTotalSizeOnDisk(),
projection_part->checksums.getTotalChecksumUInt128());
NameSet files_to_remove_after_sync;
if (reset_columns)
{
auto part_columns = total_columns_list ? *total_columns_list : columns_list;
auto serialization_infos = new_part->getSerializationInfos();
serialization_infos.replaceData(new_serialization_infos);
removeEmptyColumnsFromPart(new_part, part_columns, serialization_infos, checksums);
files_to_remove_after_sync = removeEmptyColumnsFromPart(new_part, part_columns, serialization_infos, checksums);
new_part->setColumns(part_columns);
new_part->setSerializationInfos(serialization_infos);
}
auto finalizer = std::make_unique<Finalizer::Impl>(*writer, new_part, sync);
auto finalizer = std::make_unique<Finalizer::Impl>(*writer, new_part, files_to_remove_after_sync, sync);
if (new_part->isStoredOnDisk())
finalizer->written_files = finalizePartOnDisk(new_part, checksums);

View File

@ -74,9 +74,18 @@ MergedColumnOnlyOutputStream::fillChecksums(
serialization_infos.replaceData(new_serialization_infos);
auto removed_files = removeEmptyColumnsFromPart(new_part, columns, serialization_infos, checksums);
auto disk = new_part->volume->getDisk();
for (const String & removed_file : removed_files)
{
auto file_path = new_part->getFullRelativePath() + removed_file;
/// Can be called multiple times; no need to remove the file twice
if (disk->exists(file_path))
disk->removeFile(file_path);
if (all_checksums.files.count(removed_file))
all_checksums.files.erase(removed_file);
}
new_part->setColumns(columns);
new_part->setSerializationInfos(serialization_infos);

View File

@ -52,6 +52,23 @@ std::pair<bool, ReplicatedMergeMutateTaskBase::PartLogWriter> MutateFromLogEntry
}
}
/// In some use cases merging can be more expensive than fetching
/// and it may be better to spread merge tasks across the replicas
/// instead of doing exactly the same merge cluster-wise
if (storage.merge_strategy_picker.shouldMergeOnSingleReplica(entry))
{
std::optional<String> replica_to_execute_merge = storage.merge_strategy_picker.pickReplicaToExecuteMerge(entry);
if (replica_to_execute_merge)
{
LOG_DEBUG(log,
"Prefer fetching part {} from replica {} due to execute_merges_on_single_replica_time_threshold",
entry.new_part_name, replica_to_execute_merge.value());
return {false, {}};
}
}
new_part_info = MergeTreePartInfo::fromPartName(entry.new_part_name, storage.format_version);
commands = MutationCommands::create(storage.queue.getMutationCommands(source_part, new_part_info.mutation));
@ -73,6 +90,28 @@ std::pair<bool, ReplicatedMergeMutateTaskBase::PartLogWriter> MutateFromLogEntry
future_mutated_part->updatePath(storage, reserved_space.get());
future_mutated_part->type = source_part->getType();
if (storage_settings_ptr->allow_remote_fs_zero_copy_replication)
{
if (auto disk = reserved_space->getDisk(); disk->getType() == DB::DiskType::S3)
{
String dummy;
if (!storage.findReplicaHavingCoveringPart(entry.new_part_name, true, dummy).empty())
{
LOG_DEBUG(log, "Mutation of part {} finished by some other replica, will download merged part", entry.new_part_name);
return {false, {}};
}
zero_copy_lock = storage.tryCreateZeroCopyExclusiveLock(entry.new_part_name, disk);
if (!zero_copy_lock)
{
LOG_DEBUG(log, "Mutation of part {} started by some other replica, will wait it and fetch merged part", entry.new_part_name);
return {false, {}};
}
}
}
const Settings & settings = storage.getContext()->getSettingsRef();
merge_mutate_entry = storage.getContext()->getMergeList().insert(
storage.getStorageID(),
@ -140,6 +179,12 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit
throw;
}
if (zero_copy_lock)
{
LOG_DEBUG(log, "Removing zero-copy lock");
zero_copy_lock->lock->unlock();
}
/** With `ZSESSIONEXPIRED` or `ZOPERATIONTIMEOUT`, we can inadvertently roll back local changes to the parts.
* This is not a problem, because in this case the entry will remain in the queue, and we will try again.
*/

View File

@ -7,6 +7,7 @@
#include <Storages/MergeTree/ReplicatedMergeMutateTaskBase.h>
#include <Storages/MergeTree/ReplicatedMergeTreeQueue.h>
#include <Storages/MergeTree/ReplicatedMergeTreeLogEntry.h>
#include <Storages/MergeTree/ZeroCopyLock.h>
namespace DB
{
@ -41,6 +42,7 @@ private:
MutationCommandsConstPtr commands;
MergeTreeData::TransactionUniquePtr transaction_ptr{nullptr};
std::optional<ZeroCopyLock> zero_copy_lock;
StopwatchUniquePtr stopwatch_ptr{nullptr};
MergeTreeData::MutableDataPartPtr new_part{nullptr};

View File

@ -31,6 +31,7 @@ bool ReplicatedMergeMutateTaskBase::executeStep()
{
std::exception_ptr saved_exception;
bool retryable_error = false;
try
{
/// We don't have any backoff for failed entries
@ -46,16 +47,19 @@ bool ReplicatedMergeMutateTaskBase::executeStep()
{
/// If no one has the right part, probably not all replicas are working; we will not write to the log at Error level.
LOG_INFO(log, fmt::runtime(e.displayText()));
retryable_error = true;
}
else if (e.code() == ErrorCodes::ABORTED)
{
/// Interrupted merge or downloading a part is not an error.
LOG_INFO(log, fmt::runtime(e.message()));
retryable_error = true;
}
else if (e.code() == ErrorCodes::PART_IS_TEMPORARILY_LOCKED)
{
/// Part cannot be added temporarily
LOG_INFO(log, fmt::runtime(e.displayText()));
retryable_error = true;
storage.cleanup_thread.wakeup();
}
else
@ -80,7 +84,7 @@ bool ReplicatedMergeMutateTaskBase::executeStep()
}
if (saved_exception)
if (!retryable_error && saved_exception)
{
std::lock_guard lock(storage.queue.state_mutex);

View File

@ -57,17 +57,6 @@ bool ReplicatedMergeTreeMergeStrategyPicker::shouldMergeOnSingleReplica(const Re
}
bool ReplicatedMergeTreeMergeStrategyPicker::shouldMergeOnSingleReplicaShared(const ReplicatedMergeTreeLogEntryData & entry) const
{
time_t threshold = remote_fs_execute_merges_on_single_replica_time_threshold;
return (
threshold > 0 /// feature turned on
&& entry.type == ReplicatedMergeTreeLogEntry::MERGE_PARTS /// it is a merge log entry
&& entry.create_time + threshold > time(nullptr) /// not too much time waited
);
}
/// that will return the same replica name for ReplicatedMergeTreeLogEntry on all the replicas (if the replica set is the same).
/// that way each replica knows who is responsible for doing a certain merge.

View File

@ -52,10 +52,6 @@ public:
/// and we may need to do a fetch (or postpone) instead of merge
bool shouldMergeOnSingleReplica(const ReplicatedMergeTreeLogEntryData & entry) const;
/// return true if remote_fs_execute_merges_on_single_replica_time_threshold feature is active
/// and we may need to do a fetch (or postpone) instead of merge
bool shouldMergeOnSingleReplicaShared(const ReplicatedMergeTreeLogEntryData & entry) const;
/// Returns the name of the replica that should do the merge
/// (it may be a replica other than the current one).
std::optional<String> pickReplicaToExecuteMerge(const ReplicatedMergeTreeLogEntryData & entry);

View File

@ -1205,31 +1205,32 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
return false;
}
bool should_execute_on_single_replica = merge_strategy_picker.shouldMergeOnSingleReplica(entry);
if (!should_execute_on_single_replica)
const auto data_settings = data.getSettings();
if (data_settings->allow_remote_fs_zero_copy_replication)
{
/// Separate check. If we use only s3, check remote_fs_execute_merges_on_single_replica_time_threshold as well.
auto disks = storage.getDisks();
bool only_s3_storage = true;
for (const auto & disk : disks)
if (disk->getType() != DB::DiskType::S3)
only_s3_storage = false;
if (!disks.empty() && only_s3_storage)
should_execute_on_single_replica = merge_strategy_picker.shouldMergeOnSingleReplicaShared(entry);
if (!disks.empty() && only_s3_storage && storage.checkZeroCopyLockExists(entry.new_part_name, disks[0]))
{
out_postpone_reason = "Not executing merge/mutation for the part " + entry.new_part_name
+ ", waiting other replica to execute it and will fetch after.";
return false;
}
}
if (should_execute_on_single_replica)
if (merge_strategy_picker.shouldMergeOnSingleReplica(entry))
{
auto replica_to_execute_merge = merge_strategy_picker.pickReplicaToExecuteMerge(entry);
if (replica_to_execute_merge && !merge_strategy_picker.isMergeFinishedByReplica(replica_to_execute_merge.value(), entry))
{
out_postpone_reason = fmt::format(
"Not executing merge for the part {}, waiting for {} to execute merge.",
entry.new_part_name, replica_to_execute_merge.value());
LOG_DEBUG(log, fmt::runtime(out_postpone_reason));
String reason = "Not executing merge for the part " + entry.new_part_name
+ ", waiting for " + replica_to_execute_merge.value() + " to execute merge.";
out_postpone_reason = reason;
return false;
}
}
@ -1242,7 +1243,6 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry(
* Setting max_bytes_to_merge_at_max_space_in_pool still working for regular merges,
* because the leader replica does not assign merges of greater size (except OPTIMIZE PARTITION and OPTIMIZE FINAL).
*/
const auto data_settings = data.getSettings();
bool ignore_max_size = false;
if (entry.type == LogEntry::MERGE_PARTS)
{
@ -1674,6 +1674,7 @@ bool ReplicatedMergeTreeQueue::tryFinalizeMutations(zkutil::ZooKeeperPtr zookeep
{
LOG_TRACE(log, "Marking mutation {} done because it is <= mutation_pointer ({})", znode, mutation_pointer);
mutation.is_done = true;
mutation.latest_fail_reason.clear();
alter_sequence.finishDataAlter(mutation.entry->alter_version, lock);
if (mutation.parts_to_do.size() != 0)
{
@ -1718,6 +1719,7 @@ bool ReplicatedMergeTreeQueue::tryFinalizeMutations(zkutil::ZooKeeperPtr zookeep
{
LOG_TRACE(log, "Mutation {} is done", entry->znode_name);
it->second.is_done = true;
it->second.latest_fail_reason.clear();
if (entry->isAlterMutation())
{
LOG_TRACE(log, "Finishing data alter with version {} for entry {}", entry->alter_version, entry->znode_name);

View File

@ -42,15 +42,31 @@ static void localBackupImpl(const DiskPtr & disk, const String & source_path, co
}
}
namespace
{
class CleanupOnFail
{
public:
explicit CleanupOnFail(std::function<void()> && cleaner_) : cleaner(cleaner_), is_success(false) {}
explicit CleanupOnFail(std::function<void()> && cleaner_)
: cleaner(cleaner_)
{}
~CleanupOnFail()
{
if (!is_success)
cleaner();
{
/// We are trying to handle a race condition here: if we were not
/// able to back up the directory, try to remove the garbage, but it's OK if
/// it doesn't exist.
try
{
cleaner();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
}
void success()
@ -60,8 +76,9 @@ public:
private:
std::function<void()> cleaner;
bool is_success;
bool is_success{false};
};
}
void localBackup(const DiskPtr & disk, const String & source_path, const String & destination_path, std::optional<size_t> max_level)
{
@ -73,11 +90,11 @@ void localBackup(const DiskPtr & disk, const String & source_path, const String
size_t try_no = 0;
const size_t max_tries = 10;
CleanupOnFail cleanup([&](){disk->removeRecursive(destination_path);});
CleanupOnFail cleanup([disk, destination_path]() { disk->removeRecursive(destination_path); });
/** Files in the directory can be permanently added and deleted.
* If some file is deleted during an attempt to make a backup, then try again,
* because it's important to take into account any new files that might appear.
* because it's important to take into account any new files that might appear.
*/
while (true)
{

View File
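
The CleanupOnFail change above is a common RAII pattern: run a cleanup callback unless success() was called, and never let an exception escape the destructor. A generic standalone sketch of the same idea (names illustrative; ClickHouse logs the swallowed exception via tryLogCurrentException rather than std::cerr):

#include <functional>
#include <iostream>

class ScopeCleanupOnFail
{
public:
    explicit ScopeCleanupOnFail(std::function<void()> cleaner_) : cleaner(std::move(cleaner_)) {}

    ~ScopeCleanupOnFail()
    {
        if (is_success)
            return;
        try
        {
            cleaner(); // best effort: the target may already be gone (race with another cleaner)
        }
        catch (...)
        {
            // Destructors must not throw; just report and move on.
            std::cerr << "cleanup failed" << std::endl;
        }
    }

    void success() { is_success = true; }

private:
    std::function<void()> cleaner;
    bool is_success{false};
};

// Usage: create the guard before the risky copy, call success() once the backup
// attempt is known to be complete.
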

@ -300,6 +300,16 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies(const NameSet
}
Block StorageInMemoryMetadata::getSampleBlockInsertable() const
{
Block res;
for (const auto & column : getColumns().getInsertable())
res.insert({column.type->createColumn(), column.type, column.name});
return res;
}
Block StorageInMemoryMetadata::getSampleBlockNonMaterialized() const
{
Block res;

View File

@ -151,6 +151,9 @@ struct StorageInMemoryMetadata
/// Block with ordinary + materialized columns.
Block getSampleBlock() const;
/// Block with ordinary + ephemeral.
Block getSampleBlockInsertable() const;
/// Block with ordinary columns.
Block getSampleBlockNonMaterialized() const;

View File

@ -816,9 +816,8 @@ std::shared_ptr<MergeMutateSelectedEntry> StorageMergeTree::selectPartsToMerge(
{
while (true)
{
UInt64 disk_space = getStoragePolicy()->getMaxUnreservedFreeSpace();
select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
future_part, disk_space, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);
future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);
auto timeout_ms = getSettings()->lock_acquire_timeout_for_background_operations.totalMilliseconds();
auto timeout = std::chrono::milliseconds(timeout_ms);

View File

@ -1290,6 +1290,7 @@ void StorageReplicatedMergeTree::checkPartChecksumsAndAddCommitOps(const zkutil:
{
String columns_str;
String checksums_str;
if (zookeeper->tryGet(fs::path(current_part_path) / "columns", columns_str) &&
zookeeper->tryGet(fs::path(current_part_path) / "checksums", checksums_str))
{
@ -3787,24 +3788,41 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Stora
if (source_part)
{
MinimalisticDataPartChecksums source_part_checksums;
source_part_checksums.computeTotalChecksums(source_part->checksums);
auto source_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksums(
source_part->getColumns(), source_part->checksums);
MinimalisticDataPartChecksums desired_checksums;
String part_path = fs::path(source_replica_path) / "parts" / part_name;
String part_znode = zookeeper->get(part_path);
std::optional<ReplicatedMergeTreePartHeader> desired_part_header;
if (!part_znode.empty())
desired_checksums = ReplicatedMergeTreePartHeader::fromString(part_znode).getChecksums();
{
desired_part_header = ReplicatedMergeTreePartHeader::fromString(part_znode);
}
else
{
String desired_checksums_str = zookeeper->get(fs::path(part_path) / "checksums");
desired_checksums = MinimalisticDataPartChecksums::deserializeFrom(desired_checksums_str);
String columns_str;
String checksums_str;
if (zookeeper->tryGet(fs::path(part_path) / "columns", columns_str) &&
zookeeper->tryGet(fs::path(part_path) / "checksums", checksums_str))
{
desired_part_header = ReplicatedMergeTreePartHeader::fromColumnsAndChecksumsZNodes(columns_str, checksums_str);
}
else
{
LOG_INFO(log, "Not checking checksums of part {} with replica {} because part was removed from ZooKeeper", part_name, source_replica_path);
}
}
if (source_part_checksums == desired_checksums)
/// Check both the checksums and the columns hash. For example, we can have an empty part
/// with the same checksums but different columns, and if we attach it an exception will
/// be thrown.
if (desired_part_header
&& source_part_header.getColumnsHash() == desired_part_header->getColumnsHash()
&& source_part_header.getChecksums() == desired_part_header->getChecksums())
{
LOG_TRACE(log, "Found local part {} with the same checksums as {}", source_part->name, part_name);
LOG_TRACE(log, "Found local part {} with the same checksums and columns hash as {}", source_part->name, part_name);
part_to_clone = source_part;
}
}
@ -4351,7 +4369,6 @@ bool StorageReplicatedMergeTree::optimize(
};
auto zookeeper = getZooKeeperAndAssertNotReadonly();
UInt64 disk_space = getStoragePolicy()->getMaxUnreservedFreeSpace();
const auto storage_settings_ptr = getSettings();
auto metadata_snapshot = getInMemoryMetadataPtr();
std::vector<ReplicatedMergeTreeLogEntryData> merge_entries;
@ -4384,7 +4401,7 @@ bool StorageReplicatedMergeTree::optimize(
else
{
select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
future_merged_part, disk_space, can_merge, partition_id, final, metadata_snapshot, nullptr,
future_merged_part, can_merge, partition_id, final, metadata_snapshot, nullptr,
&disable_reason, query_context->getSettingsRef().optimize_skip_merged_partitions);
}
@ -7155,10 +7172,35 @@ void StorageReplicatedMergeTree::createTableSharedID()
}
void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part) const
void StorageReplicatedMergeTree::lockSharedDataTemporary(const String & part_name, const String & part_id, const DiskPtr & disk) const
{
if (!part.volume)
if (!disk || !disk->supportZeroCopyReplication())
return;
zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper();
if (!zookeeper)
return;
String id = part_id;
boost::replace_all(id, "/", "_");
Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), disk->getType(), getTableSharedID(),
part_name, zookeeper_path);
for (const auto & zc_zookeeper_path : zc_zookeeper_paths)
{
String zookeeper_node = fs::path(zc_zookeeper_path) / id / replica_name;
LOG_TRACE(log, "Set zookeeper temporary ephemeral lock {}", zookeeper_node);
createZeroCopyLockNode(zookeeper, zookeeper_node, zkutil::CreateMode::Ephemeral, false);
}
}
void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part, bool replace_existing_lock) const
{
if (!part.volume || !part.isStoredOnDisk())
return;
DiskPtr disk = part.volume->getDisk();
if (!disk || !disk->supportZeroCopyReplication())
return;
@ -7176,8 +7218,9 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part)
{
String zookeeper_node = fs::path(zc_zookeeper_path) / id / replica_name;
LOG_TRACE(log, "Set zookeeper lock {}", zookeeper_node);
createZeroCopyLockNode(zookeeper, zookeeper_node);
LOG_TRACE(log, "Set zookeeper persistent lock {}", zookeeper_node);
createZeroCopyLockNode(zookeeper, zookeeper_node, zkutil::CreateMode::Persistent, replace_existing_lock);
}
}
@ -7190,21 +7233,28 @@ bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & par
bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part, const String & name) const
{
if (!part.volume)
if (!part.volume || !part.isStoredOnDisk())
return true;
DiskPtr disk = part.volume->getDisk();
if (!disk || !disk->supportZeroCopyReplication())
return true;
zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper();
if (!zookeeper)
/// If the part is temporary, the refcount file may be absent
auto ref_count_path = fs::path(part.getFullRelativePath()) / IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK;
if (disk->exists(ref_count_path))
{
auto ref_count = disk->getRefCount(ref_count_path);
if (ref_count > 0) /// Keep part shard info for frozen backups
return false;
}
else
{
/// Temporary part with some absent file cannot be locked in shared mode
return true;
}
auto ref_count = part.getNumberOfRefereneces();
if (ref_count > 0) /// Keep part shard info for frozen backups
return false;
return unlockSharedDataByID(part.getUniqueId(), getTableSharedID(), name, replica_name, disk, zookeeper, *getSettings(), log,
return unlockSharedDataByID(part.getUniqueId(), getTableSharedID(), name, replica_name, disk, getZooKeeper(), *getSettings(), log,
zookeeper_path);
}
@ -7217,7 +7267,7 @@ bool StorageReplicatedMergeTree::unlockSharedDataByID(String part_id, const Stri
Strings zc_zookeeper_paths = getZeroCopyPartPath(settings, disk->getType(), table_uuid, part_name, zookeeper_path_old);
bool res = true;
bool part_has_no_more_locks = true;
for (const auto & zc_zookeeper_path : zc_zookeeper_paths)
{
@ -7237,7 +7287,7 @@ bool StorageReplicatedMergeTree::unlockSharedDataByID(String part_id, const Stri
if (!children.empty())
{
LOG_TRACE(logger, "Found zookeper locks for {}", zookeeper_part_uniq_node);
res = false;
part_has_no_more_locks = false;
continue;
}
@ -7266,7 +7316,7 @@ bool StorageReplicatedMergeTree::unlockSharedDataByID(String part_id, const Stri
}
}
return res;
return part_has_no_more_locks;
}
@ -7388,8 +7438,31 @@ Strings StorageReplicatedMergeTree::getZeroCopyPartPath(const MergeTreeSettings
return res;
}
bool StorageReplicatedMergeTree::checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk)
{
auto path = getZeroCopyPartPath(part_name, disk);
if (path)
{
/// FIXME
auto lock_path = fs::path(*path) / "part_exclusive_lock";
if (getZooKeeper()->exists(lock_path))
{
return true;
}
}
std::optional<ZeroCopyLock> StorageReplicatedMergeTree::tryCreateZeroCopyExclusiveLock(const DataPartPtr & part, const DiskPtr & disk)
return false;
}
std::optional<String> StorageReplicatedMergeTree::getZeroCopyPartPath(const String & part_name, const DiskPtr & disk)
{
if (!disk || !disk->supportZeroCopyReplication())
return std::nullopt;
return getZeroCopyPartPath(*getSettings(), disk->getType(), getTableSharedID(), part_name, zookeeper_path)[0];
}
std::optional<ZeroCopyLock> StorageReplicatedMergeTree::tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk)
{
if (!disk || !disk->supportZeroCopyReplication())
return std::nullopt;
@ -7398,8 +7471,7 @@ std::optional<ZeroCopyLock> StorageReplicatedMergeTree::tryCreateZeroCopyExclusi
if (!zookeeper)
return std::nullopt;
String zc_zookeeper_path = getZeroCopyPartPath(*getSettings(), disk->getType(), getTableSharedID(),
part->name, zookeeper_path)[0];
String zc_zookeeper_path = *getZeroCopyPartPath(part_name, disk);
/// Just recursively create ancestors for lock
zookeeper->createAncestors(zc_zookeeper_path);
@ -7634,7 +7706,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP
}
void StorageReplicatedMergeTree::createZeroCopyLockNode(const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_node)
void StorageReplicatedMergeTree::createZeroCopyLockNode(const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_node, int32_t mode, bool replace_existing_lock)
{
/// In rare cases another replica can remove the path between createAncestors and createIfNotExists,
/// so we make up to 5 attempts.
@ -7644,8 +7716,22 @@ void StorageReplicatedMergeTree::createZeroCopyLockNode(const zkutil::ZooKeeperP
try
{
zookeeper->createAncestors(zookeeper_node);
zookeeper->createIfNotExists(zookeeper_node, "lock");
break;
if (replace_existing_lock && zookeeper->exists(zookeeper_node))
{
Coordination::Requests ops;
ops.emplace_back(zkutil::makeRemoveRequest(zookeeper_node, -1));
ops.emplace_back(zkutil::makeCreateRequest(zookeeper_node, "", mode));
Coordination::Responses responses;
auto error = zookeeper->tryMulti(ops, responses);
if (error == Coordination::Error::ZOK)
break;
}
else
{
auto error = zookeeper->tryCreate(zookeeper_node, "", mode);
if (error == Coordination::Error::ZOK || error == Coordination::Error::ZNODEEXISTS)
break;
}
}
catch (const zkutil::KeeperException & e)
{
@ -7674,10 +7760,12 @@ public:
table_shared_id = storage.getTableSharedID();
}
void save(DiskPtr disk, const String & path) const
void save(DiskPtr data_disk, const String & path) const
{
auto metadata_disk = data_disk->getMetadataDiskIfExistsOrSelf();
auto file_path = getFileName(path);
auto buffer = disk->writeMetaFile(file_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
auto buffer = metadata_disk->writeFile(file_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
writeIntText(version, *buffer);
buffer->write("\n", 1);
writeBoolText(is_replicated, *buffer);
@ -7692,12 +7780,14 @@ public:
buffer->write("\n", 1);
}
bool load(DiskPtr disk, const String & path)
bool load(DiskPtr data_disk, const String & path)
{
auto metadata_disk = data_disk->getMetadataDiskIfExistsOrSelf();
auto file_path = getFileName(path);
if (!disk->exists(file_path))
if (!metadata_disk->exists(file_path))
return false;
auto buffer = disk->readMetaFile(file_path, ReadSettings(), {});
auto buffer = metadata_disk->readFile(file_path, ReadSettings(), {});
readIntText(version, *buffer);
if (version != 1)
{
@ -7718,9 +7808,10 @@ public:
return true;
}
static void clean(DiskPtr disk, const String & path)
static void clean(DiskPtr data_disk, const String & path)
{
disk->removeMetaFileIfExists(getFileName(path));
auto metadata_disk = data_disk->getMetadataDiskIfExistsOrSelf();
metadata_disk->removeFileIfExists(getFileName(path));
}
private:
@ -7774,22 +7865,18 @@ bool StorageReplicatedMergeTree::removeSharedDetachedPart(DiskPtr disk, const St
zkutil::ZooKeeperPtr zookeeper = getZooKeeper();
if (zookeeper)
fs::path checksums = fs::path(path) / IMergeTreeDataPart::FILE_FOR_REFERENCES_CHECK;
if (disk->exists(checksums))
{
fs::path checksums = fs::path(path) / "checksums.txt";
if (disk->exists(checksums))
if (disk->getRefCount(checksums) == 0)
{
auto ref_count = disk->getRefCount(checksums);
if (ref_count == 0)
{
String id = disk->getUniqueId(checksums);
keep_shared = !StorageReplicatedMergeTree::unlockSharedDataByID(id, table_uuid, part_name,
detached_replica_name, disk, zookeeper, getContext()->getReplicatedMergeTreeSettings(), log,
detached_zookeeper_path);
}
else
keep_shared = true;
String id = disk->getUniqueId(checksums);
keep_shared = !StorageReplicatedMergeTree::unlockSharedDataByID(id, table_uuid, part_name,
detached_replica_name, disk, zookeeper, getContext()->getReplicatedMergeTreeSettings(), log,
detached_zookeeper_path);
}
else
keep_shared = true;
}
disk->removeSharedRecursive(path, keep_shared);

View File
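
The createZeroCopyLockNode change above retries up to five times and either atomically replaces an existing lock node (remove + create in one multi-request) or creates it and tolerates "node exists". A simplified standalone sketch against an in-memory keeper stand-in; the real code uses zkutil::ZooKeeper with makeRemoveRequest/makeCreateRequest and tryMulti, and every name below is only illustrative.

#include <map>
#include <string>

enum class KeeperError { OK, NodeExists, TransientFailure };

struct KeeperStub
{
    std::map<std::string, std::string> nodes;

    bool exists(const std::string & path) const { return nodes.count(path) != 0; }

    KeeperError tryCreate(const std::string & path, const std::string & value)
    {
        if (nodes.count(path))
            return KeeperError::NodeExists;
        nodes[path] = value;
        return KeeperError::OK;
    }

    // Stand-in for an atomic multi-request: remove the old node and create the new one.
    KeeperError replace(const std::string & path, const std::string & value)
    {
        nodes.erase(path);
        nodes[path] = value;
        return KeeperError::OK;
    }
};

bool createLockNodeWithRetries(KeeperStub & keeper, const std::string & node, bool replace_existing_lock)
{
    for (int attempt = 0; attempt < 5; ++attempt) // another replica may race with us
    {
        if (replace_existing_lock && keeper.exists(node))
        {
            if (keeper.replace(node, "") == KeeperError::OK)
                return true;
        }
        else
        {
            auto error = keeper.tryCreate(node, "");
            if (error == KeeperError::OK || error == KeeperError::NodeExists)
                return true;
        }
    }
    return false;
}
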

@ -231,7 +231,9 @@ public:
bool executeFetchShared(const String & source_replica, const String & new_part_name, const DiskPtr & disk, const String & path);
/// Lock part in zookeeper to use shared data on several nodes
void lockSharedData(const IMergeTreeDataPart & part) const override;
void lockSharedData(const IMergeTreeDataPart & part, bool replace_existing_lock) const override;
void lockSharedDataTemporary(const String & part_name, const String & part_id, const DiskPtr & disk) const;
/// Unlock shared data part in zookeeper
/// Return true if data unlocked
@ -758,7 +760,7 @@ private:
static Strings getZeroCopyPartPath(const MergeTreeSettings & settings, DiskType disk_type, const String & table_uuid,
const String & part_name, const String & zookeeper_path_old);
static void createZeroCopyLockNode(const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_node);
static void createZeroCopyLockNode(const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_node, int32_t mode = zkutil::CreateMode::Persistent, bool replace_existing_lock = false);
bool removeDetachedPart(DiskPtr disk, const String & path, const String & part_name, bool is_freezed) override;
@ -771,9 +773,14 @@ private:
// Create table id if needed
void createTableSharedID();
bool checkZeroCopyLockExists(const String & part_name, const DiskPtr & disk);
std::optional<String> getZeroCopyPartPath(const String & part_name, const DiskPtr & disk);
/// Create an ephemeral lock in zookeeper for a part and a disk which supports zero-copy replication.
/// If somebody is already holding the lock, return std::nullopt.
std::optional<ZeroCopyLock> tryCreateZeroCopyExclusiveLock(const DataPartPtr & part, const DiskPtr & disk) override;
std::optional<ZeroCopyLock> tryCreateZeroCopyExclusiveLock(const String & part_name, const DiskPtr & disk) override;
protected:
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.

View File

@ -1,12 +1,16 @@
// autogenerated by ./StorageSystemContributors.sh
// autogenerated by tests/ci/version_helper.py
const char * auto_contributors[] {
"0xflotus",
"13DaGGeR",
"20018712",
"243f6a88 85a308d3",
"243f6a8885a308d313198a2e037",
"3ldar-nasyrov",
"7",
"821008736@qq.com",
"ANDREI STAROVEROV",
"Aaron Katz",
"Adri Fernandez",
"Ahmed Dardery",
"Aimiyoo",
"Akazz",
@ -19,15 +23,15 @@ const char * auto_contributors[] {
"Aleksandrov Vladimir",
"Aleksei Levushkin",
"Aleksei Semiglazov",
"Aleksey Akulovich",
"Aleksey",
"Aleksey Akulovich",
"Alex",
"Alex Bocharov",
"Alex Cao",
"Alex Karo",
"Alex Krash",
"Alex Ryndin",
"Alex Zatelepin",
"Alex",
"Alexander Avdonkin",
"Alexander Bezpiatov",
"Alexander Burmak",
@ -55,20 +59,21 @@ const char * auto_contributors[] {
"Alexandr Kondratev",
"Alexandr Krasheninnikov",
"Alexandr Orlov",
"Alexandra Latysheva",
"Alexandra",
"Alexandra Latysheva",
"Alexandre Snarskii",
"Alexei Averchenko",
"Alexey",
"Alexey Arno",
"Alexey Boykov",
"Alexey Dushechkin",
"Alexey Elymanov",
"Alexey Gusev",
"Alexey Ilyukhov",
"Alexey Milovidov",
"Alexey Tronov",
"Alexey Vasiliev",
"Alexey Zatelepin",
"Alexey",
"Alexsey Shestakov",
"Ali Demirci",
"Aliaksandr Pliutau",
@ -84,14 +89,17 @@ const char * auto_contributors[] {
"Anastasiya Tsarkova",
"Anatoly Pugachev",
"Andr0901",
"Andre Marianiello",
"Andreas Hunkeler",
"AndreevDm",
"Andrei Bodrov",
"Andrei Ch",
"Andrei Chulkov",
"Andrei Nekrashevich",
"Andrew",
"Andrew Grigorev",
"Andrew Onyshchuk",
"Andrey",
"Andrey Chulkov",
"Andrey Dudin",
"Andrey Kadochnikov",
@ -103,12 +111,13 @@ const char * auto_contributors[] {
"Andrey Torsunov",
"Andrey Urusov",
"Andrey Z",
"Andrey",
"Andrii Buriachevskyi",
"Andy Liang",
"Andy Yang",
"Anmol Arora",
"Anna Shakhova",
"Anna",
"Anna Shakhova",
"Anselmo D. Adams",
"Anthony N. Simon",
"Anton Ivashkin",
"Anton Kobzev",
@ -121,6 +130,7 @@ const char * auto_contributors[] {
"Anton Tikhonov",
"Anton Yuzhaninov",
"Anton Zhabolenko",
"Antonio Andelic",
"Ariel Robaldo",
"Arsen Hakobyan",
"Arslan G",
@ -136,9 +146,9 @@ const char * auto_contributors[] {
"Arthur Petukhovsky",
"Arthur Tokarchuk",
"Arthur Wong",
"Artur",
"Artur Beglaryan",
"Artur Filatenkov",
"Artur",
"AsiaKorushkina",
"Atri Sharma",
"Avogar",
@ -149,6 +159,7 @@ const char * auto_contributors[] {
"BanyRule",
"Baudouin Giard",
"BayoNet",
"Ben",
"Benjamin Naecker",
"Bertrand Junqua",
"Bharat Nallan",
@ -156,12 +167,13 @@ const char * auto_contributors[] {
"Bill",
"BiteTheDDDDt",
"BlahGeek",
"Bogdan Voronin",
"Bogdan",
"Bogdan Voronin",
"BohuTANG",
"Bolinov",
"BoloniniD",
"Boris Granveaud",
"Boris Kuschel",
"Bowen Masco",
"Braulio Valdivielso",
"Brett Hoerner",
@ -173,6 +185,8 @@ const char * auto_contributors[] {
"Chen Yufei",
"Chienlung Cheung",
"Christian",
"Christoph Wurm",
"Chun-Sheng, Li",
"Ciprian Hacman",
"Clement Rodriguez",
"ClickHouse Admin",
@ -181,12 +195,15 @@ const char * auto_contributors[] {
"Colum",
"Constantin S. Pan",
"Constantine Peresypkin",
"CoolT2",
"CurtizJ",
"DF5HSE",
"DIAOZHAFENG",
"Daniel Bershatsky",
"Daniel Dao",
"Daniel Qin",
"Danila Kutenin",
"Dao",
"Dao Minh Thuc",
"Daria Mozhaeva",
"Dario",
@ -198,13 +215,16 @@ const char * auto_contributors[] {
"Denis Zhuravlev",
"Denny Crane",
"Derek Perkins",
"DimaAmega",
"Ding Xiang Fei",
"Dmitriev Mikhail",
"Dmitrii Kovalkov",
"Dmitrii Mokhnatkin",
"Dmitrii Raev",
"Dmitriy",
"Dmitriy Dorofeev",
"Dmitriy Lushnikov",
"Dmitriy",
"Dmitry",
"Dmitry Belyavtsev",
"Dmitry Bilunov",
"Dmitry Galuza",
@ -217,7 +237,6 @@ const char * auto_contributors[] {
"Dmitry Rubashkin",
"Dmitry S..ky / skype: dvska-at-skype",
"Dmitry Ukolov",
"Dmitry",
"Doge",
"Dongdong Yang",
"DoomzD",
@ -232,8 +251,8 @@ const char * auto_contributors[] {
"Elizaveta Mironyuk",
"Elykov Alexandr",
"Emmanuel Donin de Rosière",
"Eric Daniel",
"Eric",
"Eric Daniel",
"Erixonich",
"Ernest Poletaev",
"Eugene Klimov",
@ -243,9 +262,9 @@ const char * auto_contributors[] {
"Evgeniia Sudarikova",
"Evgeniy Gatov",
"Evgeniy Udodov",
"Evgeny",
"Evgeny Konkov",
"Evgeny Markov",
"Evgeny",
"Ewout",
"FArthur-cmd",
"Fabian Stäber",
@ -254,10 +273,12 @@ const char * auto_contributors[] {
"Fan()",
"FawnD2",
"Federico Ceratto",
"Federico Rodriguez",
"FeehanG",
"FgoDt",
"Filatenkov Artur",
"Filipe Caixeta",
"Filippov Denis",
"Flowyi",
"Francisco Barón",
"Frank Chen",
@ -269,8 +290,10 @@ const char * auto_contributors[] {
"Gagan Arneja",
"Gao Qiang",
"Gary Dotzler",
"George G",
"Gaurav Kumar",
"Geoff Genz",
"George",
"George G",
"George3d6",
"Georgy Ginzburg",
"Gervasio Varela",
@ -278,28 +301,35 @@ const char * auto_contributors[] {
"Gleb Novikov",
"Gleb-Tretyakov",
"Gregory",
"Grigory",
"Grigory Buteyko",
"Grigory Pervakov",
"Grigory",
"Guillaume Tassery",
"Guo Wei (William)",
"Haavard Kvaalen",
"Habibullah Oladepo",
"Hamoon",
"Harry-Lee",
"HarryLeeIBM",
"Hasitha Kanchana",
"Hasnat",
"Heena Bansal",
"HeenaBansal2009",
"Hiroaki Nakamura",
"HuFuwang",
"Hui Wang",
"ILya Limarenko",
"Igor",
"Igor Hatarist",
"Igor Mineev",
"Igor Nikonov",
"Igor Strykhar",
"Igor",
"Igr Mineev",
"Igr",
"Igr Mineev",
"Ikko Ashimine",
"Ildar Musin",
"Ildus Kurbangaliev",
"Ilya",
"Ilya Breev",
"Ilya Golshtein",
"Ilya Khomutov",
@ -310,10 +340,11 @@ const char * auto_contributors[] {
"Ilya Shipitsin",
"Ilya Skrypitsa",
"Ilya Yatsishin",
"Ilya",
"IlyaTsoi",
"ImgBotApp",
"Islam Israfilov (Islam93)",
"Islam Israfilov",
"Islam Israfilov (Islam93)",
"Ivan",
"Ivan A. Torgashov",
"Ivan Babrou",
"Ivan Blinkov",
@ -325,27 +356,29 @@ const char * auto_contributors[] {
"Ivan Remen",
"Ivan Starkov",
"Ivan Zhukov",
"Ivan",
"Jack Song",
"JackyWoo",
"Jacob Hayes",
"Jake Liu",
"Jakub Kuklis",
"JaosnHsieh",
"Jason Keirstead",
"Jason",
"Jason Keirstead",
"Javi Santana",
"Javi santana bot",
"JaySon-Huang",
"Jean Baptiste Favre",
"Jeffrey Dang",
"Jiading Guo",
"Jiang Tao",
"Jochen Schalanda",
"John",
"John Hummel",
"John Skopis",
"John",
"Jonatas Freitas",
"João Figueiredo",
"Julian Zhou",
"Justin Hilliard",
"Kang Liu",
"Karl Pietrzak",
"Keiji Yoshida",
@ -375,7 +408,9 @@ const char * auto_contributors[] {
"Ky Li",
"LB",
"Latysheva Alexandra",
"Lemore",
"Leonardo Cecchi",
"Leonid Krylov",
"Leopold Schabel",
"Lev Borodin",
"Lewinma",
@ -391,9 +426,9 @@ const char * auto_contributors[] {
"M0r64n",
"MagiaGroz",
"Maks Skorokhod",
"Maksim",
"Maksim Fedotov",
"Maksim Kita",
"Maksim",
"Malte",
"Marat IDRISOV",
"Marek Vavrusa",
@ -412,10 +447,11 @@ const char * auto_contributors[] {
"Masha",
"Matthew Peveler",
"Matwey V. Kornilov",
"Max",
"Max Akhmedov",
"Max Bruce",
"Max Vetrov",
"Max",
"MaxTheHuman",
"MaxWk",
"Maxim Akhmedov",
"Maxim Babenko",
@ -430,6 +466,7 @@ const char * auto_contributors[] {
"Maxim Ulanovskiy",
"MaximAL",
"Mc.Spring",
"Meena-Renganathan",
"MeiK",
"Memo",
"Metehan Çetinkaya",
@ -439,18 +476,21 @@ const char * auto_contributors[] {
"Michael Monashev",
"Michael Razuvaev",
"Michael Smitasin",
"Michail Safronov",
"Michal Lisowski",
"MicrochipQ",
"Miguel Fernández",
"Mihail Fandyushin",
"Mikahil Nacharov",
"Mike",
"Mike F",
"Mike Kot",
"Mike",
"Mikhail",
"Mikhail Andreev",
"Mikhail Cheshkov",
"Mikhail Fandyushin",
"Mikhail Filimonov",
"Mikhail Fursov",
"Mikhail Gaidamaka",
"Mikhail Korotov",
"Mikhail Malafeev",
@ -458,18 +498,19 @@ const char * auto_contributors[] {
"Mikhail Salosin",
"Mikhail Surin",
"Mikhail f. Shiryaev",
"Mikhail",
"MikuSugar",
"Milad Arabi",
"Misko Lee",
"Mohamad Fadhil",
"Mohammad Hossein Sekhavat",
"Mojtaba Yaghoobzadeh",
"Mostafa Dahab",
"MovElb",
"Mr.General",
"Murat Kabilov",
"MyroTk",
"Mátyás Jani",
"N. Kolotov",
"NIKITA MIKHAILOV",
"Narek Galstyan",
"Natasha Murashkina",
@ -477,15 +518,17 @@ const char * auto_contributors[] {
"Neeke Gao",
"Neng Liu",
"NengLiu",
"Nickita Taranov",
"Nickita",
"Nickita Taranov",
"Nickolay Yastrebov",
"Nico Mandery",
"Nico Piderman",
"Nicolae Vartolomei",
"Niek",
"Nik",
"Nikhil Nadig",
"Nikhil Raman",
"Nikita",
"Nikita Lapkov",
"Nikita Mikhailov",
"Nikita Mikhalev",
@ -495,13 +538,13 @@ const char * auto_contributors[] {
"Nikita Vasilev",
"Nikolai Kochetov",
"Nikolai Sorokin",
"Nikolay",
"Nikolay Degterinsky",
"Nikolay Kirsh",
"Nikolay Semyachkin",
"Nikolay Shcheglov",
"Nikolay Vasiliev",
"Nikolay Volosatov",
"Nikolay",
"Niu Zhaojie",
"Odin Hultgren Van Der Horst",
"Okada Haruki",
@ -517,11 +560,13 @@ const char * auto_contributors[] {
"OnePiece",
"Onehr7",
"Orivej Desh",
"Orkhan Zeynalli",
"Oskar Wojciski",
"OuO",
"PHO",
"Paramtamtam",
"Patrick Zippenfenig",
"Pavel",
"Pavel Cheremushkin",
"Pavel Kartaviy",
"Pavel Kartavyy",
@ -531,7 +576,6 @@ const char * auto_contributors[] {
"Pavel Medvedev",
"Pavel Patrin",
"Pavel Yakunin",
"Pavel",
"Pavlo Bashynskiy",
"Pawel Rog",
"Peignon Melvyn",
@ -545,6 +589,7 @@ const char * auto_contributors[] {
"Pysaoke",
"Quid37",
"Rafael David Tinoco",
"Rajkumar",
"Ramazan Polat",
"Ravengg",
"Raúl Marín",
@ -556,8 +601,10 @@ const char * auto_contributors[] {
"Ri",
"Rich Raposa",
"Robert Hodges",
"RogerYK",
"Rohit Agarwal",
"Romain Neutron",
"Roman",
"Roman Bug",
"Roman Chyrva",
"Roman Lipovsky",
@ -566,13 +613,15 @@ const char * auto_contributors[] {
"Roman Peshkurov",
"Roman Tsisyk",
"Roman Zhukov",
"Roman",
"Ruslan Savchenko",
"Ruslan",
"Ruslan Savchenko",
"Russ Frank",
"Ruzal Ibragimov",
"Ryad ZENINE",
"S.M.A. Djawadi",
"Saad Ur Rahman",
"Sabyanin Maxim",
"Safronov Michail",
"SaltTan",
"Sami Kerola",
"Samuel Chou",
@ -583,6 +632,7 @@ const char * auto_contributors[] {
"Sergei Bocharov",
"Sergei Semin",
"Sergei Shtykov",
"Sergei Trifonov",
"Sergei Tsetlin (rekub)",
"Sergey Demurin",
"Sergey Elantsev",
@ -614,26 +664,28 @@ const char * auto_contributors[] {
"Stas Kelvich",
"Stas Pavlovichev",
"Stefan Thies",
"Stepan Herold",
"Stepan",
"Stepan Herold",
"Steve-金勇",
"Stig Bakken",
"Storozhuk Kostiantyn",
"Stupnikov Andrey",
"SuperBot",
"SuperDJY",
"Sébastien Launay",
"Suzy Wang",
"Sébastien",
"Sébastien Launay",
"TABLUM.IO",
"TAC",
"TCeason",
"Tagir Kuskarov",
"Tai White",
"Taleh Zaliyev",
"Tangaev",
"Tatiana Kirillova",
"Tatiana",
"Teja Srivastasa",
"Tatiana Kirillova",
"Teja",
"Teja Srivastasa",
"Tema Novikov",
"Tentoshka",
"The-Alchemist",
@ -655,10 +707,10 @@ const char * auto_contributors[] {
"UnamedRus",
"V",
"VDimir",
"Vadim",
"Vadim Plakhtinskiy",
"Vadim Skipin",
"Vadim Volodin",
"Vadim",
"VadimPE",
"Val",
"Valera Ryaboshapko",
@ -672,8 +724,8 @@ const char * auto_contributors[] {
"Veniamin Gvozdikov",
"Veselkov Konstantin",
"Viachaslau Boben",
"Victor Tarnavsky",
"Victor",
"Victor Tarnavsky",
"Viktor Taranenko",
"Vitalii S",
"Vitaliy Fedorchenko",
@ -681,13 +733,15 @@ const char * auto_contributors[] {
"Vitaliy Kozlovskiy",
"Vitaliy Lyudvichenko",
"Vitaliy Zakaznikov",
"Vitaly",
"Vitaly Artemyev",
"Vitaly Baranov",
"Vitaly Orlov",
"Vitaly Samigullin",
"Vitaly Stoyan",
"Vitaly",
"Vivien Maisonneuve",
"Vlad Arkhipov",
"Vladimir",
"Vladimir Bunchuk",
"Vladimir C",
"Vladimir Ch",
@ -699,7 +753,6 @@ const char * auto_contributors[] {
"Vladimir Kopysov",
"Vladimir Kozbin",
"Vladimir Smirnov",
"Vladimir",
"Vladislav Rassokhin",
"Vladislav Smirnov",
"Vojtech Splichal",
@ -707,6 +760,7 @@ const char * auto_contributors[] {
"Vsevolod Orlov",
"Vxider",
"Vyacheslav Alipov",
"W",
"Wang Fenjin",
"WangZengrui",
"Weiqing Xu",
@ -714,8 +768,10 @@ const char * auto_contributors[] {
"Winter Zhang",
"Xianda Ke",
"Xiang Zhou",
"Xin Wang",
"Y Lu",
"Yangkuan Liu",
"Yatian Xu",
"Yatsishin Ilya",
"Yağızcan Değirmenci",
"Yegor Andreenko",
@ -724,13 +780,14 @@ const char * auto_contributors[] {
"Yingfan Chen",
"Yiğit Konur",
"Yohann Jardin",
"Youenn Lebras",
"Yuntao Wu",
"Yuri Dyachenko",
"Yurii Vlasenko",
"Yuriy",
"Yuriy Baranov",
"Yuriy Chernyshov",
"Yuriy Korzhenevskiy",
"Yuriy",
"Yury Karpovich",
"Yury Stankevich",
"ZhiYong Wang",
@ -756,6 +813,7 @@ const char * auto_contributors[] {
"alex.lvxin",
"alexander kozhikhov",
"alexey-milovidov",
"alexeypavlenko",
"alfredlu",
"amesaru",
"amoschen",
@ -810,10 +868,12 @@ const char * auto_contributors[] {
"cms",
"cmsxbc",
"cn-ds",
"cnmade",
"comunodi",
"congbaoyangrou",
"coraxster",
"d.v.semenov",
"dalei2019",
"damozhaeva",
"dankondr",
"daoready",
@ -849,6 +909,7 @@ const char * auto_contributors[] {
"ezhaka",
"f1yegor",
"fancno",
"fanzhou",
"fastio",
"favstovol",
"feihengye",
@ -863,8 +924,8 @@ const char * auto_contributors[] {
"flow",
"flynn",
"foxxmary",
"frank chen",
"frank",
"frank chen",
"franklee",
"fredchenbj",
"freedomDR",
@ -877,8 +938,11 @@ const char * auto_contributors[] {
"giordyb",
"glockbender",
"glushkovds",
"grantovsky",
"gulige",
"guoleiyi",
"gyuton",
"hanqf-git",
"hao.he",
"hchen9",
"hcz",
@ -890,6 +954,7 @@ const char * auto_contributors[] {
"huangzhaowei",
"hustnn",
"huzhichengdd",
"ianton-ru",
"ice1x",
"idfer",
"igomac",
@ -907,8 +972,8 @@ const char * auto_contributors[] {
"jasine",
"jasperzhu",
"javartisan",
"javi santana",
"javi",
"javi santana",
"jennyma",
"jetgm",
"jianmei zhang",
@ -937,6 +1002,8 @@ const char * auto_contributors[] {
"levie",
"levushkin aleksej",
"levysh",
"lgbo",
"lgbo-ustc",
"lhuang0928",
"lhuang09287750",
"liang.huang",
@ -947,6 +1014,7 @@ const char * auto_contributors[] {
"listar",
"litao91",
"liu-bov",
"liuneng1994",
"liuyangkuan",
"liuyimin",
"liyang",
@ -983,11 +1051,15 @@ const char * auto_contributors[] {
"mikael",
"mikepop7",
"millb",
"minhthucdao",
"mlkui",
"mnkonkova",
"mo-avatar",
"morty",
"moscas",
"mreddy017",
"msaf1980",
"msirm",
"muzzlerator",
"mwish",
"myrrc",
@ -1007,6 +1079,7 @@ const char * auto_contributors[] {
"ocadaruma",
"ogorbacheva",
"olegkv",
"olevino",
"olgarev",
"orantius",
"p0ny",
@ -1014,6 +1087,7 @@ const char * auto_contributors[] {
"pawelsz-rb",
"pdv-ru",
"peshkurov",
"peter279k",
"philip.han",
"pingyu",
"potya",
@ -1040,8 +1114,10 @@ const char * auto_contributors[] {
"roverxu",
"ruct",
"ryzuo",
"s-kat",
"santaux",
"satanson",
"save-my-heart",
"sdk2",
"serebrserg",
"sev7e0",
@ -1068,8 +1144,10 @@ const char * auto_contributors[] {
"taiyang-li",
"tao jiang",
"tavplubix",
"tekeri",
"templarzq",
"terrylin",
"tesw yew isal",
"tianzhou",
"tiger.yan",
"tison",
@ -1080,6 +1158,8 @@ const char * auto_contributors[] {
"unegare",
"unknown",
"urgordeadbeef",
"usurai",
"vahid-sohrabloo",
"vdimir",
"velom",
"vesslanjin",
@ -1121,8 +1201,11 @@ const char * auto_contributors[] {
"zhangxiao018",
"zhangxiao871",
"zhen ni",
"zhifeng",
"zhongyuankai",
"zhoubintao",
"zhukai",
"zkun",
"zlx19950903",
"zvonand",
"zvrr",
@ -1150,6 +1233,7 @@ const char * auto_contributors[] {
"曲正鹏",
"木木夕120",
"未来星___费",
"李扬",
"极客青年",
"枢木",
"董海镔",
@ -1159,5 +1243,6 @@ const char * auto_contributors[] {
"靳阳",
"黄朝晖",
"黄璞",
"박동철",
"박현우",
nullptr};

View File

@ -1,5 +1,6 @@
#!/usr/bin/env bash
echo "THIS IS HEAVILY DEPRECATED, USE tests/ci/version_helper.py:update_contributors()"
set -x
# doesn't actually cd to the directory, but returns the absolute path

View File

@ -32,6 +32,7 @@ void TableFunctionS3::parseArguments(const ASTPtr & ast_function, ContextPtr con
" - url\n"
" - url, format\n" \
" - url, format, structure\n" \
" - url, access_key_id, secret_access_key\n" \
" - url, format, structure, compression_method\n" \
" - url, access_key_id, secret_access_key, format\n"
" - url, access_key_id, secret_access_key, format, structure\n" \
@ -75,7 +76,6 @@ void TableFunctionS3::parseArguments(const ASTPtr & ast_function, ContextPtr con
{
{1, {{}}},
{2, {{"format", 1}}},
{3, {{"format", 1}, {"structure", 2}}},
{5, {{"access_key_id", 1}, {"secret_access_key", 2}, {"format", 3}, {"structure", 4}}},
{6, {{"access_key_id", 1}, {"secret_access_key", 2}, {"format", 3}, {"structure", 4}, {"compression_method", 5}}}
};
@ -83,14 +83,26 @@ void TableFunctionS3::parseArguments(const ASTPtr & ast_function, ContextPtr con
std::map<String, size_t> args_to_idx;
/// For 4 arguments we support 2 possible variants:
/// s3(source, format, structure, compression_method) and s3(source, access_key_id, secret_access_key, format)
/// We can distinguish them by looking at the 4-th argument: check if it's a format name or not.
/// We can distinguish them by looking at the 2-nd argument: check if it's a format name or not.
if (args.size() == 4)
{
auto last_arg = args[3]->as<ASTLiteral &>().value.safeGet<String>();
if (FormatFactory::instance().getAllFormats().contains(last_arg))
args_to_idx = {{"access_key_id", 1}, {"access_key_id", 2}, {"format", 3}};
else
auto second_arg = args[1]->as<ASTLiteral &>().value.safeGet<String>();
if (FormatFactory::instance().getAllFormats().contains(second_arg))
args_to_idx = {{"format", 1}, {"structure", 2}, {"compression_method", 3}};
else
args_to_idx = {{"access_key_id", 1}, {"secret_access_key", 2}, {"format", 3}};
}
/// For 3 arguments we support 2 possible variants:
/// s3(source, format, structure) and s3(source, access_key_id, secret_access_key)
/// We can distinguish them by looking at the 2-nd argument: check if it's a format name or not.
else if (args.size() == 3)
{
auto second_arg = args[1]->as<ASTLiteral &>().value.safeGet<String>();
if (FormatFactory::instance().getAllFormats().contains(second_arg))
args_to_idx = {{"format", 1}, {"structure", 2}};
else
args_to_idx = {{"access_key_id", 1}, {"secret_access_key", 2}};
}
else
{
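
To make the disambiguation above concrete, here is a hedged standalone sketch: with three or four positional arguments, the variant is chosen by whether the second argument is a known format name. The helper below is illustrative only and stands in for FormatFactory and the args_to_idx map in the diff.

#include <map>
#include <set>
#include <string>
#include <vector>

std::map<std::string, size_t> mapS3Args(const std::vector<std::string> & args,
                                        const std::set<std::string> & known_formats)
{
    // args[0] is always the URL.
    if (args.size() == 4)
    {
        if (known_formats.count(args[1]))
            return {{"format", 1}, {"structure", 2}, {"compression_method", 3}};
        return {{"access_key_id", 1}, {"secret_access_key", 2}, {"format", 3}};
    }
    if (args.size() == 3)
    {
        if (known_formats.count(args[1]))
            return {{"format", 1}, {"structure", 2}};
        return {{"access_key_id", 1}, {"secret_access_key", 2}};
    }
    return {}; // other arities are handled by the fixed table in the diff
}

// For example, mapS3Args({"https://bucket/file.csv", "CSV", "s String"}, {"CSV"})
// picks the (format, structure) variant, while a non-format second argument is
// treated as a credential.
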

Some files were not shown because too many files have changed in this diff.