mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 07:31:57 +00:00

Merge branch 'master' into better_logs

commit a625dd41e8
.github/workflows/main.yml (vendored, 20 lines changed)
@@ -1356,7 +1356,7 @@ jobs:
 ##############################################################################################
   ASTFuzzerTestAsan:
     needs: [BuilderDebAsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1384,7 +1384,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestTsan:
     needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1412,7 +1412,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestUBSan:
     needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1440,7 +1440,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestMSan:
     needs: [BuilderDebMsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1468,7 +1468,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestDebug:
     needs: [BuilderDebDebug]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
    steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1614,7 +1614,7 @@ jobs:
 #############################################################################################
   UnitTestsAsan:
     needs: [BuilderDebAsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1642,7 +1642,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsReleaseClang:
     needs: [BuilderBinRelease]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1670,7 +1670,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsTsan:
     needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1698,7 +1698,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsMsan:
     needs: [BuilderDebMsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1726,7 +1726,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsUBsan:
     needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
.github/workflows/master.yml (vendored, 20 lines changed)
@@ -1356,7 +1356,7 @@ jobs:
 ##############################################################################################
   ASTFuzzerTestAsan:
     needs: [BuilderDebAsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1384,7 +1384,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestTsan:
     needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1412,7 +1412,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestUBSan:
     needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1440,7 +1440,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestMSan:
     needs: [BuilderDebMsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1468,7 +1468,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   ASTFuzzerTestDebug:
     needs: [BuilderDebDebug]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1499,7 +1499,7 @@ jobs:
 #############################################################################################
   UnitTestsAsan:
     needs: [BuilderDebAsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1527,7 +1527,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsReleaseClang:
     needs: [BuilderBinRelease]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1555,7 +1555,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsTsan:
     needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1583,7 +1583,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsMsan:
     needs: [BuilderDebMsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -1611,7 +1611,7 @@ jobs:
           sudo rm -fr $TEMP_PATH
   UnitTestsUBsan:
     needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, func-tester]
+    runs-on: [self-hosted, fuzzer-unit-tester]
     steps:
       - name: Download json reports
         uses: actions/download-artifact@v2
@@ -104,7 +104,7 @@ static void writeSignalIDtoSignalPipe(int sig)
     errno = saved_errno;
 }

-/** Signal handler for HUP / USR1 */
+/** Signal handler for HUP */
 static void closeLogsSignalHandler(int sig, siginfo_t *, void *)
 {
     DENY_ALLOCATIONS_IN_SCOPE;
@@ -161,7 +161,7 @@ __attribute__((__weak__)) void collectCrashLog(


 /** The thread that read info about signal or std::terminate from pipe.
-  * On HUP / USR1, close log files (for new files to be opened later).
+  * On HUP, close log files (for new files to be opened later).
   * On information about std::terminate, write it to log.
   * On other signals, write info to log.
   */
@@ -201,7 +201,7 @@ public:
                 LOG_INFO(log, "Stop SignalListener thread");
                 break;
             }
-            else if (sig == SIGHUP || sig == SIGUSR1)
+            else if (sig == SIGHUP)
             {
                 LOG_DEBUG(log, "Received signal to close logs.");
                 BaseDaemon::instance().closeLogs(BaseDaemon::instance().logger());
@@ -832,7 +832,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
     /// SIGTSTP is added for debugging purposes. To output a stack trace of any running thread at anytime.

     addSignalHandler({SIGABRT, SIGSEGV, SIGILL, SIGBUS, SIGSYS, SIGFPE, SIGPIPE, SIGTSTP, SIGTRAP}, signalHandler, &handled_signals);
-    addSignalHandler({SIGHUP, SIGUSR1}, closeLogsSignalHandler, &handled_signals);
+    addSignalHandler({SIGHUP}, closeLogsSignalHandler, &handled_signals);
     addSignalHandler({SIGINT, SIGQUIT, SIGTERM}, terminateRequestedSignalHandler, &handled_signals);

 #if defined(SANITIZER)
@@ -1006,7 +1006,7 @@ void BaseDaemon::setupWatchdog()

     /// Forward signals to the child process.
     addSignalHandler(
-        {SIGHUP, SIGUSR1, SIGINT, SIGQUIT, SIGTERM},
+        {SIGHUP, SIGINT, SIGQUIT, SIGTERM},
         [](int sig, siginfo_t *, void *)
         {
             /// Forward all signals except INT as it can be send by terminal to the process group when user press Ctrl+C,
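Net effect of the hunks above: only SIGHUP now triggers closing and reopening of the daemon's log files, and SIGUSR1 is no longer intercepted at all. As a minimal sketch of the registration pattern being changed (a hypothetical standalone example using raw `sigaction`; the real code goes through BaseDaemon's `addSignalHandler` and forwards the signal number through a pipe to a SignalListener thread):

```cpp
#include <atomic>
#include <csignal>

// Hypothetical flag a logging thread could poll; real ClickHouse instead
// writes the signal number into a pipe read by a SignalListener thread.
static std::atomic<bool> reopen_logs{false};

static void closeLogsSignalHandler(int /* sig */, siginfo_t *, void *)
{
    // Only set a flag: almost nothing else is async-signal-safe here.
    reopen_logs.store(true, std::memory_order_relaxed);
}

int main()
{
    struct sigaction sa{};
    sa.sa_sigaction = closeLogsSignalHandler;
    sa.sa_flags = SA_SIGINFO;
    sigemptyset(&sa.sa_mask);

    sigaction(SIGHUP, &sa, nullptr);      // after this commit: SIGHUP only
    // sigaction(SIGUSR1, &sa, nullptr);  // no longer registered

    // ... run the daemon; a worker thread reopens logs when the flag is set.
}
```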
@@ -70,7 +70,7 @@ ALTER DATABASE postgres_database MODIFY SETTING materialized_postgresql_max_bloc

 ## PostgreSQL schema {#schema}

-PostgreSQL [schema](https://www.postgresql.org/docs/9.1/ddl-schemas.html) can be used in two ways.
+PostgreSQL [schema](https://www.postgresql.org/docs/9.1/ddl-schemas.html) can be configured in 3 ways (starting from version 21.12).

 1. One schema for one `MaterializedPostgreSQL` database engine. Requires to use setting `materialized_postgresql_schema`.
 Tables are accessed via table name only:
@@ -4057,6 +4057,41 @@ Possible values:

 Default value: `0`.

+## alter_partition_verbose_result {#alter-partition-verbose-result}
+
+Enables or disables the display of information about the parts to which the manipulation operations with partitions and parts have been successfully applied.
+Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition).
+
+Possible values:
+
+- 0 — disable verbosity.
+- 1 — enable verbosity.
+
+Default value: `0`.
+
+**Example**
+
+```sql
+CREATE TABLE test(a Int64, d Date, s String) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY a;
+INSERT INTO test VALUES(1, '2021-01-01', '');
+INSERT INTO test VALUES(1, '2021-01-01', '');
+ALTER TABLE test DETACH PARTITION ID '202101';
+
+ALTER TABLE test ATTACH PARTITION ID '202101' SETTINGS alter_partition_verbose_result = 1;
+
+┌─command_type─────┬─partition_id─┬─part_name────┬─old_part_name─┐
+│ ATTACH PARTITION │ 202101       │ 202101_7_7_0 │ 202101_5_5_0  │
+│ ATTACH PARTITION │ 202101       │ 202101_8_8_0 │ 202101_6_6_0  │
+└──────────────────┴──────────────┴──────────────┴───────────────┘
+
+ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1;
+
+┌─command_type─┬─partition_id─┬─part_name────┬─backup_name─┬─backup_path───────────────────┬─part_backup_path────────────────────────────────────────────┐
+│ FREEZE ALL   │ 202101       │ 202101_7_7_0 │ 8           │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_7_7_0 │
+│ FREEZE ALL   │ 202101       │ 202101_8_8_0 │ 8           │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_8_8_0 │
+└──────────────┴──────────────┴──────────────┴─────────────┴───────────────────────────────┴──────────────────────────────────────────────────────────────┘
+```
+
 ## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}

 Determines how to map ClickHouse `Enum` data type and [CapnProto](../../interfaces/formats.md#capnproto) `Enum` data type from schema.
@@ -3808,6 +3808,40 @@ SELECT * FROM positional_arguments ORDER BY 2,3;

 Default value: `0`.

+## alter_partition_verbose_result {#alter-partition-verbose-result}
+
+Enables or disables the display of information about the parts to which the partition and part manipulation operations have been successfully applied. Applicable to [ATTACH PARTITION|PART](../../sql-reference/statements/alter/partition.md#alter_attach-partition) and to [FREEZE PARTITION](../../sql-reference/statements/alter/partition.md#alter_freeze-partition)
+
+Possible values:
+
+- 0 — display disabled.
+- 1 — display enabled.
+
+Default value: `0`.
+
+**Example**
+
+```sql
+CREATE TABLE test(a Int64, d Date, s String) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY a;
+INSERT INTO test VALUES(1, '2021-01-01', '');
+INSERT INTO test VALUES(1, '2021-01-01', '');
+ALTER TABLE test DETACH PARTITION ID '202101';
+
+ALTER TABLE test ATTACH PARTITION ID '202101' SETTINGS alter_partition_verbose_result = 1;
+
+┌─command_type─────┬─partition_id─┬─part_name────┬─old_part_name─┐
+│ ATTACH PARTITION │ 202101       │ 202101_7_7_0 │ 202101_5_5_0  │
+│ ATTACH PARTITION │ 202101       │ 202101_8_8_0 │ 202101_6_6_0  │
+└──────────────────┴──────────────┴──────────────┴───────────────┘
+
+ALTER TABLE test FREEZE SETTINGS alter_partition_verbose_result = 1;
+
+┌─command_type─┬─partition_id─┬─part_name────┬─backup_name─┬─backup_path───────────────────┬─part_backup_path────────────────────────────────────────────┐
+│ FREEZE ALL   │ 202101       │ 202101_7_7_0 │ 8           │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_7_7_0 │
+│ FREEZE ALL   │ 202101       │ 202101_8_8_0 │ 8           │ /var/lib/clickhouse/shadow/8/ │ /var/lib/clickhouse/shadow/8/data/default/test/202101_8_8_0 │
+└──────────────┴──────────────┴──────────────┴─────────────┴───────────────────────────────┴──────────────────────────────────────────────────────────────┘
+```
+
 ## format_capn_proto_enum_comparising_mode {#format-capn-proto-enum-comparising-mode}

 Determines how to map the ClickHouse `Enum` data type and the `Enum` data type of the [CapnProto](../../interfaces/formats.md#capnproto) format from the schema.
@@ -84,13 +84,13 @@ public:
     {
     }

-    void serialize(ConstAggregateDataPtr, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         char c = 0;
         buf.write(c);
     }

-    void deserialize(AggregateDataPtr, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr /* place */, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         char c = 0;
         buf.read(c);
@@ -119,4 +119,3 @@ void registerAggregateFunctionAggThrow(AggregateFunctionFactory & factory)
 }

-
 }
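From here on, the C++ hunks repeat one mechanical change across the aggregate-function sources: `serialize` and `deserialize` gain a `std::optional<size_t> version` parameter, plain functions ignore it (`/* version */`), and combinators both forward it to the wrapped function and delegate the new `isVersioned()` / `getDefaultVersion()` queries to it. A minimal self-contained sketch of that shape, with hypothetical stand-in types (the real interface is ClickHouse's `IAggregateFunction`):

```cpp
#include <cstddef>
#include <optional>

// Hypothetical stand-ins for ClickHouse's buffer and state types.
struct ReadBuffer;
struct WriteBuffer;
using AggregateDataPtr = char *;
using ConstAggregateDataPtr = const char *;

struct IAggregateFunctionSketch
{
    virtual ~IAggregateFunctionSketch() = default;

    // Functions with more than one on-disk state format override these two.
    virtual bool isVersioned() const { return false; }
    virtual size_t getDefaultVersion() const { return 0; }

    // `version` is std::nullopt when the caller wants the default format.
    virtual void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> version) const = 0;
    virtual void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> version) const = 0;
};

// Combinators wrap a nested function and forward everything: the pattern
// repeated in the -Array, -ForEach, -If, -Merge and -State hunks below.
struct CombinatorSketch : IAggregateFunctionSketch
{
    const IAggregateFunctionSketch * nested = nullptr;

    bool isVersioned() const override { return nested->isVersioned(); }
    size_t getDefaultVersion() const override { return nested->getDefaultVersion(); }

    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> version) const override
    {
        nested->serialize(place, buf, version);
    }

    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> version) const override
    {
        nested->deserialize(place, buf, version);
    }
};
```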
@@ -79,13 +79,13 @@ public:
         this->data(place).result.change(this->data(rhs).result, arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).result.write(buf, *serialization_res);
         this->data(place).value.write(buf, *serialization_val);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         this->data(place).result.read(buf, *serialization_res, arena);
         this->data(place).value.read(buf, *serialization_val, arena);
@@ -49,6 +49,16 @@ public:
         return nested_func->getReturnType();
     }

+    bool isVersioned() const override
+    {
+        return nested_func->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_func->getDefaultVersion();
+    }
+
     void create(AggregateDataPtr __restrict place) const override
     {
         nested_func->create(place);
@@ -111,14 +121,14 @@ public:
         nested_func->merge(place, rhs, arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
-        nested_func->serialize(place, buf);
+        nested_func->serialize(place, buf, version);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
-        nested_func->deserialize(place, buf, arena);
+        nested_func->deserialize(place, buf, version, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
@@ -115,7 +115,7 @@ public:
         this->data(place).denominator += this->data(rhs).denominator;
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeBinary(this->data(place).numerator, buf);
@@ -125,7 +125,7 @@ public:
         writeBinary(this->data(place).denominator, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readBinary(this->data(place).numerator, buf);
@@ -118,12 +118,12 @@ public:
         this->data(place).update(this->data(rhs).value);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeBinary(this->data(place).value, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readBinary(this->data(place).value, buf);
     }
@@ -142,12 +142,12 @@ public:
         data(place).merge(data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         data(place).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         data(place).deserialize(buf);
     }
@@ -61,12 +61,7 @@ public:
         return alignof(T);
     }

-    void add(
-        AggregateDataPtr place,
-        const IColumn ** columns,
-        size_t row_num,
-        Arena *
-    ) const override
+    void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena *) const override
     {
         auto y_col = static_cast<const ColumnUInt8 *>(columns[category_count]);
         bool y = y_col->getData()[row_num];
@@ -83,11 +78,7 @@ public:
         reinterpret_cast<T *>(place)[category_count * 2 + size_t(y)] += 1;
     }

-    void merge(
-        AggregateDataPtr place,
-        ConstAggregateDataPtr rhs,
-        Arena *
-    ) const override
+    void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
     {
         for (size_t i : collections::range(0, category_count + 1))
         {
@@ -96,19 +87,12 @@ public:
         }
     }

-    void serialize(
-        ConstAggregateDataPtr place,
-        WriteBuffer & buf
-    ) const override
+    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         buf.write(place, sizeOfData());
     }

-    void deserialize(
-        AggregateDataPtr place,
-        ReadBuffer & buf,
-        Arena *
-    ) const override
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         buf.read(place, sizeOfData());
     }
@@ -120,10 +104,7 @@ public:
         );
     }

-    void insertResultInto(
-        AggregateDataPtr place,
-        IColumn & to,
-        Arena *) const override
+    void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
     {
         auto & col = static_cast<ColumnArray &>(to);
         auto & data_col = static_cast<ColumnFloat64 &>(col.getData());
@@ -91,12 +91,12 @@ public:
         data(place).count += data(rhs).count;
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeVarUInt(data(place).count, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readVarUInt(data(place).count, buf);
     }
@@ -223,12 +223,12 @@ public:
         data(place).count += data(rhs).count;
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeVarUInt(data(place).count, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readVarUInt(data(place).count, buf);
     }
@@ -101,7 +101,7 @@ public:
         // Otherwise lhs either has data or is uninitialized, so we don't need to modify its values.
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeIntBinary(this->data(place).sum, buf);
         writeIntBinary(this->data(place).first, buf);
@@ -109,7 +109,7 @@ public:
         writePODBinary<bool>(this->data(place).seen, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readIntBinary(this->data(place).sum, buf);
         readIntBinary(this->data(place).first, buf);
@@ -142,7 +142,7 @@ public:
         }
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeIntBinary(this->data(place).sum, buf);
         writeIntBinary(this->data(place).first, buf);
@@ -152,7 +152,7 @@ public:
         writePODBinary<bool>(this->data(place).seen, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readIntBinary(this->data(place).sum, buf);
         readIntBinary(this->data(place).first, buf);
@@ -182,12 +182,12 @@ public:
         this->data(place).merge(this->data(rhs), arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         this->data(place).deserialize(buf, arena);
     }
@@ -125,12 +125,12 @@ public:
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(const_cast<AggregateDataPtr>(place)).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -62,13 +62,13 @@ public:
         this->data(place).merge(this->data(rhs), half_decay);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeBinary(this->data(place).value, buf);
         writeBinary(this->data(place).time, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         readBinary(this->data(place).value, buf);
         readBinary(this->data(place).time, buf);
@@ -129,6 +129,16 @@ public:
         return std::make_shared<DataTypeArray>(nested_func->getReturnType());
     }

+    bool isVersioned() const override
+    {
+        return nested_func->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_func->getDefaultVersion();
+    }
+
     void destroy(AggregateDataPtr __restrict place) const noexcept override
     {
         AggregateFunctionForEachData & state = data(place);
@@ -196,7 +206,7 @@ public:
         }
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         const AggregateFunctionForEachData & state = data(place);
         writeBinary(state.dynamic_array_size, buf);
@@ -209,7 +219,7 @@ public:
         }
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
         AggregateFunctionForEachData & state = data(place);

@@ -221,7 +231,7 @@ public:
         char * nested_state = state.array_of_aggregate_datas;
         for (size_t i = 0; i < new_size; ++i)
         {
-            nested_func->deserialize(nested_state, buf, arena);
+            nested_func->deserialize(nested_state, buf, version, arena);
             nested_state += nested_size_of_data;
         }
     }
@@ -237,7 +237,7 @@ public:
         // if constexpr (Trait::sampler == Sampler::DETERMINATOR)
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         const auto & value = this->data(place).value;
         size_t size = value.size();
@@ -256,7 +256,7 @@ public:
         // if constexpr (Trait::sampler == Sampler::DETERMINATOR)
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         size_t size = 0;
         readVarUInt(size, buf);
@@ -550,7 +550,7 @@ public:
         }
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         writeVarUInt(data(place).value.size(), buf);
@@ -570,7 +570,7 @@ public:
         // if constexpr (Trait::sampler == Sampler::DETERMINATOR)
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         UInt64 elems;
         readVarUInt(elems, buf);
@@ -145,7 +145,7 @@ public:
             arr_lhs[i] = arr_rhs[i];
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         const Array & arr = data(place).value;
         size_t size = arr.size();
@@ -165,7 +165,7 @@ public:
         }
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         size_t size = 0;
         readVarUInt(size, buf);
@@ -124,7 +124,7 @@ public:
         cur_elems.sum += rhs_elems.sum;
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         const auto & value = this->data(place).value;
         size_t size = value.size();
@@ -132,7 +132,7 @@ public:
         buf.write(reinterpret_cast<const char *>(value.data()), size * sizeof(value[0]));
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         size_t size = 0;
         readVarUInt(size, buf);
@@ -39,9 +39,9 @@ public:
         this->data(place).rbs.merge(this->data(rhs).rbs);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override { this->data(place).rbs.write(buf); }
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override { this->data(place).rbs.write(buf); }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override { this->data(place).rbs.read(buf); }
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override { this->data(place).rbs.read(buf); }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
@@ -105,9 +105,9 @@ public:
         }
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override { this->data(place).rbs.write(buf); }
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override { this->data(place).rbs.write(buf); }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override { this->data(place).rbs.read(buf); }
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override { this->data(place).rbs.read(buf); }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
     {
@@ -87,7 +87,7 @@ public:
         }
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         auto & set = this->data(place).value;
         size_t size = set.size();
@@ -96,7 +96,7 @@ public:
             writeIntBinary(elem, buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).value.read(buf);
     }
@@ -169,7 +169,7 @@ public:
         return true;
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         auto & set = this->data(place).value;
         writeVarUInt(set.size(), buf);
@@ -180,7 +180,7 @@ public:
         }
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         auto & set = this->data(place).value;
         size_t size;
@@ -346,12 +346,12 @@ public:
         this->data(place).merge(this->data(rhs), max_bins);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).write(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).read(buf, max_bins);
     }
@@ -56,6 +56,16 @@ public:
         return nested_func->getReturnType();
     }

+    bool isVersioned() const override
+    {
+        return nested_func->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_func->getDefaultVersion();
+    }
+
     void create(AggregateDataPtr __restrict place) const override
     {
         nested_func->create(place);
@@ -130,14 +140,14 @@ public:
         nested_func->mergeBatch(batch_size, places, place_offset, rhs, arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
-        nested_func->serialize(place, buf);
+        nested_func->serialize(place, buf, version);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
-        nested_func->deserialize(place, buf, arena);
+        nested_func->deserialize(place, buf, version, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
@@ -197,12 +197,12 @@ public:
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -357,9 +357,9 @@ public:

     void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override { this->data(place).merge(this->data(rhs)); }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override { this->data(place).write(buf); }
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override { this->data(place).write(buf); }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override { this->data(place).read(buf); }
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override { this->data(place).read(buf); }

     void predictValues(
         ConstAggregateDataPtr place,
|
||||
a.merge(b, arena);
|
||||
}
|
||||
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
|
||||
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
|
||||
{
|
||||
this->data(place).write(buf);
|
||||
}
|
||||
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
|
||||
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
|
||||
{
|
||||
this->data(place).read(buf, arena);
|
||||
}
|
||||
|
@@ -177,7 +177,7 @@ public:
         }
     }

-    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         auto & merged_maps = this->data(place).merged_maps;
         writeVarUInt(merged_maps.size(), buf);
@@ -189,7 +189,7 @@ public:
         }
     }

-    void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         auto & merged_maps = this->data(place).merged_maps;
         UInt64 size;
@@ -204,7 +204,7 @@ public:
             nested_place = arena->alignedAlloc(nested_func->sizeOfData(), nested_func->alignOfData());
             nested_func->create(nested_place);
             merged_maps.emplace(key, nested_place);
-            nested_func->deserialize(nested_place, buf, arena);
+            nested_func->deserialize(nested_place, buf, std::nullopt, arena);
         }
     }
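One detail worth noting in the map-style hunk above: when deserializing nested states it passes `std::nullopt` rather than forwarding a version, so the nested function is asked for its default (unversioned) format at this call site. A toy, self-contained illustration of how a versioned state might branch on that optional (hypothetical formats and names, not ClickHouse code):

```cpp
#include <cstdint>
#include <optional>
#include <ostream>

// Toy state with two wire formats: v0 writes a 32-bit count, v1 a 64-bit one.
struct ToyState
{
    uint64_t count = 0;

    void serialize(std::ostream & buf, std::optional<size_t> version) const
    {
        // std::nullopt means "use the default format" (v1 in this toy).
        const size_t v = version.value_or(1);
        if (v == 0)
        {
            const auto narrow = static_cast<uint32_t>(count);
            buf.write(reinterpret_cast<const char *>(&narrow), sizeof(narrow));
        }
        else
            buf.write(reinterpret_cast<const char *>(&count), sizeof(count));
    }
};
```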
@@ -110,7 +110,7 @@ public:
         cur_elems.value.insert(rhs_elems.value.begin(), rhs_elems.value.end(), arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         const auto & value = this->data(place).value;
         size_t size = value.size();
@@ -118,7 +118,7 @@ public:
         buf.write(reinterpret_cast<const char *>(value.data()), size * sizeof(value[0]));
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         size_t size = 0;
         readVarUInt(size, buf);
@@ -50,6 +50,16 @@ public:
         return nested_func->getReturnType();
     }

+    bool isVersioned() const override
+    {
+        return nested_func->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_func->getDefaultVersion();
+    }
+
     DataTypePtr getStateType() const override
     {
         return nested_func->getStateType();
@@ -90,14 +100,14 @@ public:
         nested_func->merge(place, rhs, arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
-        nested_func->serialize(place, buf);
+        nested_func->serialize(place, buf, version);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
-        nested_func->deserialize(place, buf, arena);
+        nested_func->deserialize(place, buf, version, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
@@ -1157,12 +1157,12 @@ public:
         this->data(place).changeIfBetter(this->data(rhs), arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).write(buf, *serialization);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         this->data(place).read(buf, *serialization, arena);
     }
@@ -62,11 +62,11 @@ public:
     {
     }

-    void serialize(ConstAggregateDataPtr, WriteBuffer &) const override
+    void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t>) const override
     {
     }

-    void deserialize(AggregateDataPtr, ReadBuffer &, Arena *) const override
+    void deserialize(AggregateDataPtr, ReadBuffer &, std::optional<size_t>, Arena *) const override
     {
     }
@@ -137,16 +137,16 @@ public:
         nested_function->merge(nestedPlace(place), nestedPlace(rhs), arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
         bool flag = getFlag(place);
         if constexpr (serialize_flag)
             writeBinary(flag, buf);
         if (flag)
-            nested_function->serialize(nestedPlace(place), buf);
+            nested_function->serialize(nestedPlace(place), buf, version);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
         bool flag = 1;
         if constexpr (serialize_flag)
@@ -154,7 +154,7 @@ public:
         if (flag)
         {
             setFlag(place);
-            nested_function->deserialize(nestedPlace(place), buf, arena);
+            nested_function->deserialize(nestedPlace(place), buf, version, arena);
         }
     }
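The nullable-wrapper hunk above (judging by `getFlag`/`serialize_flag`, this looks like ClickHouse's AggregateFunctionNull) shows the other recurring shape: a wrapper that owns some framing of its own, here a presence flag, and threads `version` through only for the nested payload. A self-contained toy of that framing pattern (hypothetical names, not the real class):

```cpp
#include <optional>
#include <ostream>

// Toy nullable wrapper: a presence flag, then the nested payload.
template <typename Nested>
struct NullableWrapperSketch
{
    Nested nested;
    bool has_value = false;

    void serialize(std::ostream & buf, std::optional<size_t> version) const
    {
        const char flag = has_value ? 1 : 0;
        buf.write(&flag, 1);                 // wrapper-owned framing, unversioned
        if (has_value)
            nested.serialize(buf, version);  // only the payload honors `version`
    }
};
```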
@@ -52,6 +52,16 @@ public:
         return nested_function->getName() + "OrDefault";
     }

+    bool isVersioned() const override
+    {
+        return nested_function->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_function->getDefaultVersion();
+    }
+
     bool isState() const override
     {
         return nested_function->isState();
@@ -209,21 +219,16 @@ public:
             (places[i] + place_offset)[size_of_data] |= rhs[i][size_of_data];
     }

-    void serialize(
-        ConstAggregateDataPtr place,
-        WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
-        nested_function->serialize(place, buf);
+        nested_function->serialize(place, buf, version);

         writeChar(place[size_of_data], buf);
     }

-    void deserialize(
-        AggregateDataPtr place,
-        ReadBuffer & buf,
-        Arena * arena) const override
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
-        nested_function->deserialize(place, buf, arena);
+        nested_function->deserialize(place, buf, version, arena);

         readChar(place[size_of_data], buf);
     }
@@ -136,13 +136,13 @@ public:
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         /// const_cast is required because some data structures apply finalizaton (like compactization) before serializing.
         this->data(const_cast<AggregateDataPtr>(place)).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -82,12 +82,12 @@ public:
         a.merge(b, arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).write(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         this->data(place).read(buf, arena);
     }
@@ -134,11 +134,7 @@ public:
             nested_function->destroy(place + i * size_of_data);
     }

-    void add(
-        AggregateDataPtr place,
-        const IColumn ** columns,
-        size_t row_num,
-        Arena * arena) const override
+    void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override
     {
         Key key;

@@ -155,30 +151,22 @@ public:
         nested_function->add(place + pos * size_of_data, columns, row_num, arena);
     }

-    void merge(
-        AggregateDataPtr place,
-        ConstAggregateDataPtr rhs,
-        Arena * arena) const override
+    void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
         for (size_t i = 0; i < total; ++i)
             nested_function->merge(place + i * size_of_data, rhs + i * size_of_data, arena);
     }

-    void serialize(
-        ConstAggregateDataPtr place,
-        WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
         for (size_t i = 0; i < total; ++i)
-            nested_function->serialize(place + i * size_of_data, buf);
+            nested_function->serialize(place + i * size_of_data, buf, version);
     }

-    void deserialize(
-        AggregateDataPtr place,
-        ReadBuffer & buf,
-        Arena * arena) const override
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
         for (size_t i = 0; i < total; ++i)
-            nested_function->deserialize(place + i * size_of_data, buf, arena);
+            nested_function->deserialize(place + i * size_of_data, buf, version, arena);
     }

     DataTypePtr getReturnType() const override
@@ -186,10 +174,7 @@ public:
         return std::make_shared<DataTypeArray>(nested_function->getReturnType());
     }

-    void insertResultInto(
-        AggregateDataPtr place,
-        IColumn & to,
-        Arena * arena) const override
+    void insertResultInto(AggregateDataPtr place, IColumn & to, Arena * arena) const override
     {
         auto & col = assert_cast<ColumnArray &>(to);
         auto & col_offsets = assert_cast<ColumnArray::ColumnOffsets &>(col.getOffsetsColumn());
@@ -115,12 +115,12 @@ public:
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -152,12 +152,12 @@ public:
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -283,7 +283,7 @@ public:
         data(place).sorted = true;
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         /// Temporarily do a const_cast to sort the values. It helps to reduce the computational burden on the initiator node.
         this->data(const_cast<AggregateDataPtr>(place)).sort();
@@ -316,7 +316,7 @@ public:
         }
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
     {
         readBinary(data(place).sorted, buf);
@@ -125,26 +125,17 @@ public:
         this->data(place).add(x, y);
     }

-    void merge(
-        AggregateDataPtr place,
-        ConstAggregateDataPtr rhs, Arena *
-    ) const override
+    void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
     {
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(
-        ConstAggregateDataPtr place,
-        WriteBuffer & buf
-    ) const override
+    void serialize(ConstAggregateDataPtr place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).serialize(buf);
     }

-    void deserialize(
-        AggregateDataPtr place,
-        ReadBuffer & buf, Arena *
-    ) const override
+    void deserialize(AggregateDataPtr place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -49,6 +49,16 @@ public:
         return storage_type;
     }

+    bool isVersioned() const override
+    {
+        return nested_func->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_func->getDefaultVersion();
+    }
+
     void create(AggregateDataPtr __restrict place) const override { nested_func->create(place); }

     void destroy(AggregateDataPtr __restrict place) const noexcept override { nested_func->destroy(place); }
@@ -64,13 +74,19 @@ public:
         nested_func->add(place, columns, row_num, arena);
     }

-    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override { nested_func->merge(place, rhs, arena); }
-
-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override { nested_func->serialize(place, buf); }
-
-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
     {
-        nested_func->deserialize(place, buf, arena);
+        nested_func->merge(place, rhs, arena);
+    }
+
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
+    {
+        nested_func->serialize(place, buf, version);
+    }
+
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
+    {
+        nested_func->deserialize(place, buf, version, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const override
@@ -303,12 +303,12 @@ public:
         this->data(place).merge(this->data(rhs));
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
     {
         this->data(place).serialize(buf);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
     {
         this->data(place).deserialize(buf);
     }
@@ -43,6 +43,16 @@ public:
         return nested_func->getStateType();
     }

+    bool isVersioned() const override
+    {
+        return nested_func->isVersioned();
+    }
+
+    size_t getDefaultVersion() const override
+    {
+        return nested_func->getDefaultVersion();
+    }
+
     void create(AggregateDataPtr __restrict place) const override
     {
         nested_func->create(place);
@@ -78,14 +88,14 @@ public:
         nested_func->merge(place, rhs, arena);
     }

-    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
+    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
     {
-        nested_func->serialize(place, buf);
+        nested_func->serialize(place, buf, version);
     }

-    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
+    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena * arena) const override
     {
-        nested_func->deserialize(place, buf, arena);
+        nested_func->deserialize(place, buf, version, arena);
     }

     void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
@ -136,12 +136,12 @@ public:
this->data(place).mergeWith(this->data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
}
@ -390,12 +390,12 @@ public:
this->data(place).mergeWith(this->data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
}
@ -148,12 +148,12 @@ public:
this->data(place).merge(this->data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).read(buf);
}
@ -424,12 +424,12 @@ public:
this->data(place).merge(this->data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).read(buf);
}
@ -17,6 +17,8 @@
#include <Common/assert_cast.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <map>
#include <base/logger_useful.h>
#include <Common/ClickHouseRevision.h>


namespace DB
@ -38,7 +40,7 @@ struct AggregateFunctionMapData
std::map<T, Array> merged_maps;
};

/** Aggregate function, that takes at least two arguments: keys and values, and as a result, builds a tuple of of at least 2 arrays -
/** Aggregate function, that takes at least two arguments: keys and values, and as a result, builds a tuple of at least 2 arrays -
* ordered keys and variable number of argument values aggregated by corresponding keys.
*
* sumMap function is the most useful when using SummingMergeTree to sum Nested columns, which name ends in "Map".
@ -64,10 +66,13 @@ class AggregateFunctionMapBase : public IAggregateFunctionDataHelper<
AggregateFunctionMapData<NearestFieldType<T>>, Derived>
{
private:
static constexpr auto STATE_VERSION_1_MIN_REVISION = 54452;

DataTypePtr keys_type;
SerializationPtr keys_serialization;
DataTypes values_types;
Serializations values_serializations;
Serializations promoted_values_serializations;

public:
using Base = IAggregateFunctionDataHelper<
@ -81,8 +86,35 @@ public:
, values_types(values_types_)
{
values_serializations.reserve(values_types.size());
promoted_values_serializations.reserve(values_types.size());
for (const auto & type : values_types)
{
values_serializations.emplace_back(type->getDefaultSerialization());
if (type->canBePromoted())
{
if (type->isNullable())
promoted_values_serializations.emplace_back(
makeNullable(removeNullable(type)->promoteNumericType())->getDefaultSerialization());
else
promoted_values_serializations.emplace_back(type->promoteNumericType()->getDefaultSerialization());
}
else
{
promoted_values_serializations.emplace_back(type->getDefaultSerialization());
}
}
}

bool isVersioned() const override { return true; }

size_t getDefaultVersion() const override { return 1; }

size_t getVersionFromRevision(size_t revision) const override
{
if (revision >= STATE_VERSION_1_MIN_REVISION)
return 1;
else
return 0;
}

DataTypePtr getReturnType() const override
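
Editor's note: sumMap's state becomes versioned here; version 1 writes values with promoted numeric types, and STATE_VERSION_1_MIN_REVISION pins version 1 to server revision 54452. A minimal sketch of how a caller is expected to pick the version before serializing, assuming `func` is any IAggregateFunction and `peer_revision` is the negotiated revision (both names are hypothetical):

    std::optional<size_t> version;
    if (func->isVersioned())
        version = func->getVersionFromRevision(peer_revision);   /// 0 for old peers, 1 for >= 54452
    func->serialize(place, out, version);                        /// nullopt would mean "default (latest)"
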
@ -250,26 +282,62 @@ public:
}
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version) const override
{
if (!version)
version = getDefaultVersion();

const auto & merged_maps = this->data(place).merged_maps;
size_t size = merged_maps.size();
writeVarUInt(size, buf);

std::function<void(size_t, const Array &)> serialize;
switch (*version)
{
case 0:
{
serialize = [&](size_t col_idx, const Array & values){ values_serializations[col_idx]->serializeBinary(values[col_idx], buf); };
break;
}
case 1:
{
serialize = [&](size_t col_idx, const Array & values){ promoted_values_serializations[col_idx]->serializeBinary(values[col_idx], buf); };
break;
}
}

for (const auto & elem : merged_maps)
{
keys_serialization->serializeBinary(elem.first, buf);
for (size_t col = 0; col < values_types.size(); ++col)
values_serializations[col]->serializeBinary(elem.second[col], buf);
serialize(col, elem.second);
}
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version, Arena *) const override
{
if (!version)
version = getDefaultVersion();

auto & merged_maps = this->data(place).merged_maps;
size_t size = 0;
readVarUInt(size, buf);

std::function<void(size_t, Array &)> deserialize;
switch (*version)
{
case 0:
{
deserialize = [&](size_t col_idx, Array & values){ values_serializations[col_idx]->deserializeBinary(values[col_idx], buf); };
break;
}
case 1:
{
deserialize = [&](size_t col_idx, Array & values){ promoted_values_serializations[col_idx]->deserializeBinary(values[col_idx], buf); };
break;
}
}

for (size_t i = 0; i < size; ++i)
{
Field key;
@ -277,8 +345,9 @@ public:

Array values;
values.resize(values_types.size());

for (size_t col = 0; col < values_types.size(); ++col)
values_serializations[col]->deserializeBinary(values[col], buf);
deserialize(col, values);

if constexpr (is_decimal<T>)
merged_maps[key.get<DecimalField<T>>()] = values;
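
Editor's note: both paths above pick a per-column serializer once (via the switch) and reuse it for every map entry. A sketch of the promotion rule that distinguishes version 1 from version 0, condensed from the constructor logic earlier in this file (the helper name is made up):

    /// Version 1 writes values with their promoted numeric type, e.g. UInt32 -> UInt64;
    /// version 0 keeps the declared type. Nullable wrapping is preserved.
    DataTypePtr promotedForStateV1(const DataTypePtr & type)
    {
        if (!type->canBePromoted())
            return type;
        if (type->isNullable())
            return makeNullable(removeNullable(type)->promoteNumericType());
        return type->promoteNumericType();
    }
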
@ -128,12 +128,12 @@ public:
this->data(place).merge(this->data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).read(buf);
}
@ -72,12 +72,12 @@ public:
set.merge(this->data(rhs).value);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).value.write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
auto & set = this->data(place).value;
set.resize(reserved);
@ -148,12 +148,12 @@ public:
return true;
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).value.write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena * arena) const override
{
auto & set = this->data(place).value;
set.clear();
@ -242,12 +242,12 @@ public:
this->data(place).set.merge(this->data(rhs).set);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).set.write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).set.read(buf);
}
@ -299,12 +299,12 @@ public:
this->data(place).set.merge(this->data(rhs).set);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).set.write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).set.read(buf);
}
@ -162,12 +162,12 @@ public:
this->data(place).set.merge(this->data(rhs).set);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).set.write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).set.read(buf);
}
@ -226,12 +226,12 @@ public:
this->data(place).set.merge(this->data(rhs).set);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).set.write(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).set.read(buf);
}
@ -209,12 +209,12 @@ public:
this->data(place).merge(this->data(rhs), threshold);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf, threshold);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).read(buf, threshold);
}
@ -273,12 +273,12 @@ public:
this->data(place).merge(this->data(rhs), threshold);
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).write(buf, threshold);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).read(buf, threshold);
}
@ -295,4 +295,3 @@ public:
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

@ -283,12 +283,12 @@ public:
this->data(place).merge(this->data(rhs));
}

void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
{
this->data(place).serialize(buf);
}

void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena *) const override
void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
{
this->data(place).deserialize(buf);
}
@ -88,6 +88,12 @@ public:
throw Exception("Prediction is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}

virtual bool isVersioned() const { return false; }

virtual size_t getVersionFromRevision(size_t /* revision */) const { return 0; }

virtual size_t getDefaultVersion() const { return 0; }

virtual ~IAggregateFunction() = default;

/** Data manipulating functions. */
@ -120,10 +126,10 @@ public:
virtual void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const = 0;

/// Serializes state (to transmit it over the network, for example).
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const = 0;
virtual void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> version = std::nullopt) const = 0;

/// Deserializes state. This function is called only for empty (just created) states.
virtual void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, Arena * arena) const = 0;
virtual void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> version = std::nullopt, Arena * arena = nullptr) const = 0;

/// Returns true if a function requires Arena to handle own states (see add(), merge(), deserialize()).
virtual bool allocatesMemoryInArena() const = 0;
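
Editor's note: for implementers the migration is mechanical; non-versioned functions keep their wire format and simply ignore the new argument. A minimal sketch of the override pattern repeated throughout this commit:

    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf, std::optional<size_t> /* version */) const override
    {
        this->data(place).serialize(buf);   /// format unchanged; the version is irrelevant here
    }

    void deserialize(AggregateDataPtr __restrict place, ReadBuffer & buf, std::optional<size_t> /* version */, Arena *) const override
    {
        this->data(place).deserialize(buf);
    }
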
@ -28,7 +28,7 @@ namespace ErrorCodes
}


static std::string getTypeString(const AggregateFunctionPtr & func)
static String getTypeString(const AggregateFunctionPtr & func)
{
WriteBufferFromOwnString stream;
stream << "AggregateFunction(" << func->getName();
@ -55,8 +55,8 @@ static std::string getTypeString(const AggregateFunctionPtr & func)
}


ColumnAggregateFunction::ColumnAggregateFunction(const AggregateFunctionPtr & func_)
: func(func_), type_string(getTypeString(func))
ColumnAggregateFunction::ColumnAggregateFunction(const AggregateFunctionPtr & func_, std::optional<size_t> version_)
: func(func_), type_string(getTypeString(func)), version(version_)
{
}

@ -343,7 +343,7 @@ INSTANTIATE_INDEX_IMPL(ColumnAggregateFunction)
void ColumnAggregateFunction::updateHashWithValue(size_t n, SipHash & hash) const
{
WriteBufferFromOwnString wbuf;
func->serialize(data[n], wbuf);
func->serialize(data[n], wbuf, version);
hash.update(wbuf.str().c_str(), wbuf.str().size());
}

@ -360,7 +360,7 @@ void ColumnAggregateFunction::updateWeakHash32(WeakHash32 & hash) const
for (size_t i = 0; i < s; ++i)
{
WriteBufferFromVector<std::vector<UInt8>> wbuf(v);
func->serialize(data[i], wbuf);
func->serialize(data[i], wbuf, version);
wbuf.finalize();
hash_data[i] = ::updateWeakHash32(v.data(), v.size(), hash_data[i]);
}
@ -412,7 +412,7 @@ Field ColumnAggregateFunction::operator[](size_t n) const
field.get<AggregateFunctionStateData &>().name = type_string;
{
WriteBufferFromString buffer(field.get<AggregateFunctionStateData &>().data);
func->serialize(data[n], buffer);
func->serialize(data[n], buffer, version);
}
return field;
}
@ -423,7 +423,7 @@ void ColumnAggregateFunction::get(size_t n, Field & res) const
res.get<AggregateFunctionStateData &>().name = type_string;
{
WriteBufferFromString buffer(res.get<AggregateFunctionStateData &>().data);
func->serialize(data[n], buffer);
func->serialize(data[n], buffer, version);
}
}

@ -504,7 +504,7 @@ void ColumnAggregateFunction::insert(const Field & x)
Arena & arena = createOrGetArena();
pushBackAndCreateState(data, arena, func.get());
ReadBufferFromString read_buffer(x.get<const AggregateFunctionStateData &>().data);
func->deserialize(data.back(), read_buffer, &arena);
func->deserialize(data.back(), read_buffer, version, &arena);
}

void ColumnAggregateFunction::insertDefault()
@ -517,7 +517,7 @@ void ColumnAggregateFunction::insertDefault()
StringRef ColumnAggregateFunction::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const
{
WriteBufferFromArena out(arena, begin);
func->serialize(data[n], out);
func->serialize(data[n], out, version);
return out.complete();
}

@ -539,7 +539,7 @@ const char * ColumnAggregateFunction::deserializeAndInsertFromArena(const char *
* Probably this will not work under UBSan.
*/
ReadBufferFromMemory read_buffer(src_arena, std::numeric_limits<char *>::max() - src_arena - 1);
func->deserialize(data.back(), read_buffer, &dst_arena);
func->deserialize(data.back(), read_buffer, version, &dst_arena);

return read_buffer.position();
}
@ -639,7 +639,7 @@ void ColumnAggregateFunction::getExtremes(Field & min, Field & max) const
try
{
WriteBufferFromString buffer(serialized.data);
func->serialize(place, buffer);
func->serialize(place, buffer, version);
}
catch (...)
{
@ -82,6 +82,8 @@ private:
/// Name of the type to distinguish different aggregation states.
String type_string;

std::optional<size_t> version;

ColumnAggregateFunction() = default;

/// Create a new column that has another column as a source.
@ -92,10 +94,9 @@ private:
/// but ownership of different elements cannot be mixed by different columns.
void ensureOwnership();

ColumnAggregateFunction(const AggregateFunctionPtr & func_);
ColumnAggregateFunction(const AggregateFunctionPtr & func_, std::optional<size_t> version_ = std::nullopt);

ColumnAggregateFunction(const AggregateFunctionPtr & func_,
const ConstArenas & arenas_);
ColumnAggregateFunction(const AggregateFunctionPtr & func_, const ConstArenas & arenas_);

ColumnAggregateFunction(const ColumnAggregateFunction & src_);
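
Editor's note: the column now remembers the state version it was created with, so every serialize/deserialize call made through it stays on one wire format. A sketch of the pass-through (assuming `func` and `version` come from a versioned DataTypeAggregateFunction):

    auto column = ColumnAggregateFunction::create(func, version);   /// as createColumn() now does
    /// later, every path such as updateHashWithValue() serializes with the same version:
    /// func->serialize(data[n], wbuf, version);
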
@ -25,6 +25,12 @@ namespace
{
#if defined(OS_LINUX)
thread_local size_t write_trace_iteration = 0;
/// Even after timer_delete() the signal can be delivered,
/// since it does not do anything with pending signals.
///
/// This flag exists to overcome that,
/// so that signals delivered after timer_delete() are ignored.
thread_local bool signal_handler_disarmed = true;
#endif

void writeTraceInfo(TraceType trace_type, int /* sig */, siginfo_t * info, void * context)
@ -117,10 +123,8 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const UInt64 thread_id, const
if (sigaddset(&sa.sa_mask, pause_signal))
throwFromErrno("Failed to add signal to mask for query profiler", ErrorCodes::CANNOT_MANIPULATE_SIGSET);

struct sigaction local_previous_handler;
if (sigaction(pause_signal, &sa, &local_previous_handler))
if (sigaction(pause_signal, &sa, nullptr))
throwFromErrno("Failed to setup signal handler for query profiler", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
previous_handler.emplace(local_previous_handler);

try
{
@ -160,6 +164,8 @@ QueryProfilerBase<ProfilerImpl>::QueryProfilerBase(const UInt64 thread_id, const
struct itimerspec timer_spec = {.it_interval = interval, .it_value = offset};
if (timer_settime(*timer_id, 0, &timer_spec, nullptr))
throwFromErrno("Failed to set thread timer period", ErrorCodes::CANNOT_SET_TIMER_PERIOD);

signal_handler_disarmed = false;
}
catch (...)
{
@ -179,11 +185,14 @@ template <typename ProfilerImpl>
void QueryProfilerBase<ProfilerImpl>::tryCleanup()
{
#if USE_UNWIND
if (timer_id.has_value() && timer_delete(*timer_id))
LOG_ERROR(log, "Failed to delete query profiler timer {}", errnoToString(ErrorCodes::CANNOT_DELETE_TIMER));
if (timer_id.has_value())
{
if (timer_delete(*timer_id))
LOG_ERROR(log, "Failed to delete query profiler timer {}", errnoToString(ErrorCodes::CANNOT_DELETE_TIMER));
timer_id.reset();
}

if (previous_handler.has_value() && sigaction(pause_signal, &*previous_handler, nullptr))
LOG_ERROR(log, "Failed to restore signal handler after query profiler {}", errnoToString(ErrorCodes::CANNOT_SET_SIGNAL_HANDLER));
signal_handler_disarmed = true;
#endif
}
@ -196,6 +205,9 @@ QueryProfilerReal::QueryProfilerReal(const UInt64 thread_id, const UInt32 period

void QueryProfilerReal::signalHandler(int sig, siginfo_t * info, void * context)
{
if (signal_handler_disarmed)
return;

DENY_ALLOCATIONS_IN_SCOPE;
writeTraceInfo(TraceType::Real, sig, info, context);
}
@ -206,6 +218,9 @@ QueryProfilerCPU::QueryProfilerCPU(const UInt64 thread_id, const UInt32 period)

void QueryProfilerCPU::signalHandler(int sig, siginfo_t * info, void * context)
{
if (signal_handler_disarmed)
return;

DENY_ALLOCATIONS_IN_SCOPE;
writeTraceInfo(TraceType::CPU, sig, info, context);
}
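
Editor's note: the disarm flag closes a POSIX race: timer_delete() does not clear signals that are already pending, so a sample can still arrive after cleanup. A generic sketch of the pattern, independent of ClickHouse:

    #include <signal.h>

    static thread_local bool disarmed = true;   /// armed only while the timer exists

    static void profiling_handler(int /* sig */, siginfo_t *, void *)
    {
        if (disarmed)
            return;                             /// late delivery after timer_delete(); ignore it
        /// ... collect the stack trace ...
    }
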
@ -46,9 +46,6 @@ private:

/// Pause signal to interrupt threads to get traces
int pause_signal;

/// Previous signal handler to restore after query profiler exits
std::optional<struct sigaction> previous_handler;
};

/// Query profiler with timer based on real clock
@ -19,6 +19,11 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_prefix + ".raft_configuration", keys);

/// Sometimes (especially in cloud envs) users can provide incorrect
/// configuration with duplicated raft ids or endpoints. We check them
/// at the config parsing stage and never commit them to the quorum.
std::unordered_map<std::string, int> check_duplicated_hostnames;

size_t total_servers = 0;
for (const auto & server_key : keys)
{
@ -37,6 +42,24 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC
result.servers_start_as_followers.insert(new_server_id);

auto endpoint = hostname + ":" + std::to_string(port);
if (check_duplicated_hostnames.count(endpoint))
{
throw Exception(ErrorCodes::RAFT_ERROR, "Raft config contain duplicate endpoints: "
"endpoint {} has been already added with id {}, but going to add it one more time with id {}",
endpoint, check_duplicated_hostnames[endpoint], new_server_id);
}
else
{
/// Fullscan to check duplicated ids
for (const auto & [id_endpoint, id] : check_duplicated_hostnames)
{
if (new_server_id == id)
throw Exception(ErrorCodes::RAFT_ERROR, "Raft config contain duplicate ids: id {} has been already added with endpoint {}, "
"but going to add it one more time with endpoint {}", id, id_endpoint, endpoint);
}
check_duplicated_hostnames.emplace(endpoint, new_server_id);
}

auto peer_config = nuraft::cs_new<nuraft::srv_config>(new_server_id, 0, endpoint, "", !can_become_leader, priority);
if (my_server_id == new_server_id)
{
@ -27,6 +27,7 @@
/// Minimum revision supporting OpenTelemetry
#define DBMS_MIN_REVISION_WITH_OPENTELEMETRY 54442

#define DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING 54452

#define DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION 1

@ -47,6 +48,6 @@
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing in common with VERSION_REVISION;
/// the latter is just a number for the server version (one number instead of a commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
#define DBMS_TCP_PROTOCOL_VERSION 54451
#define DBMS_TCP_PROTOCOL_VERSION 54452

#define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449
@ -32,10 +32,36 @@ namespace ErrorCodes
}


std::string DataTypeAggregateFunction::doGetName() const
String DataTypeAggregateFunction::doGetName() const
{
return getNameImpl(true);
}


String DataTypeAggregateFunction::getNameWithoutVersion() const
{
return getNameImpl(false);
}


size_t DataTypeAggregateFunction::getVersion() const
{
if (version)
return *version;
return function->getDefaultVersion();
}


String DataTypeAggregateFunction::getNameImpl(bool with_version) const
{
WriteBufferFromOwnString stream;
stream << "AggregateFunction(" << function->getName();
stream << "AggregateFunction(";

/// If aggregate function does not support versioning its version is 0 and is not printed.
auto data_type_version = getVersion();
if (with_version && data_type_version)
stream << data_type_version << ", ";
stream << function->getName();

if (!parameters.empty())
{
@ -56,9 +82,10 @@ std::string DataTypeAggregateFunction::doGetName() const
return stream.str();
}


MutableColumnPtr DataTypeAggregateFunction::createColumn() const
{
return ColumnAggregateFunction::create(function);
return ColumnAggregateFunction::create(function, version);
}

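
Editor's note: with this change the state version becomes part of the type name, printed before the function name. A sketch of what should round-trip through the factory (the exact argument types here are illustrative):

    auto type = DataTypeFactory::instance().get("AggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))");
    /// type->getName()          -> "AggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))"
    /// getNameWithoutVersion()  -> the same spelling without the leading "1, "
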
@ -76,7 +103,7 @@ Field DataTypeAggregateFunction::getDefault() const
try
{
WriteBufferFromString buffer_from_field(field.get<AggregateFunctionStateData &>().data);
function->serialize(place, buffer_from_field);
function->serialize(place, buffer_from_field, version);
}
catch (...)
{
@ -92,12 +119,13 @@ Field DataTypeAggregateFunction::getDefault() const

bool DataTypeAggregateFunction::equals(const IDataType & rhs) const
{
return typeid(rhs) == typeid(*this) && getName() == rhs.getName();
return typeid(rhs) == typeid(*this) && getNameWithoutVersion() == typeid_cast<const DataTypeAggregateFunction &>(rhs).getNameWithoutVersion();
}


SerializationPtr DataTypeAggregateFunction::doGetDefaultSerialization() const
{
return std::make_shared<SerializationAggregateFunction>(function, getName());
return std::make_shared<SerializationAggregateFunction>(function, getName(), getVersion());
}

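
Editor's note: equals() deliberately compares the version-less names, so types that differ only in state version remain the same logical type; only the default serialization, built with getVersion(), differs. A sketch of the consequence (type spellings are illustrative):

    auto v0 = DataTypeFactory::instance().get("AggregateFunction(sumMap, Array(UInt8), Array(UInt8))");
    auto v1 = DataTypeFactory::instance().get("AggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))");
    bool same_type = v0->equals(*v1);   /// true: the version is not part of type identity
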
@ -107,15 +135,31 @@ static DataTypePtr create(const ASTPtr & arguments)
AggregateFunctionPtr function;
DataTypes argument_types;
Array params_row;
std::optional<size_t> version;

if (!arguments || arguments->children.empty())
throw Exception("Data type AggregateFunction requires parameters: "
"name of aggregate function and list of data types for arguments", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);

if (const auto * parametric = arguments->children[0]->as<ASTFunction>())
ASTPtr data_type_ast = arguments->children[0];
size_t argument_types_start_idx = 1;

/* If the aggregate function definition has no version, its AST children are [ASTFunction, types...] in case
* it is parametric, or [ASTIdentifier, types...] otherwise. If the aggregate function has a version in the AST, the children are:
* [ASTLiteral, ASTFunction (or ASTIdentifier), types...].
*/
if (auto * version_ast = arguments->children[0]->as<ASTLiteral>())
{
version = version_ast->value.safeGet<UInt64>();
data_type_ast = arguments->children[1];
argument_types_start_idx = 2;
}

if (const auto * parametric = data_type_ast->as<ASTFunction>())
{
if (parametric->parameters)
throw Exception("Unexpected level of parameters to aggregate function", ErrorCodes::SYNTAX_ERROR);

function_name = parametric->name;

if (parametric->arguments)
@ -137,11 +181,11 @@ static DataTypePtr create(const ASTPtr & arguments)
}
}
}
else if (auto opt_name = tryGetIdentifierName(arguments->children[0]))
else if (auto opt_name = tryGetIdentifierName(data_type_ast))
{
function_name = *opt_name;
}
else if (arguments->children[0]->as<ASTLiteral>())
else if (data_type_ast->as<ASTLiteral>())
{
throw Exception("Aggregate function name for data type AggregateFunction must be passed as identifier (without quotes) or function",
ErrorCodes::BAD_ARGUMENTS);
@ -150,7 +194,7 @@ static DataTypePtr create(const ASTPtr & arguments)
throw Exception("Unexpected AST element passed as aggregate function name for data type AggregateFunction. Must be identifier or function.",
ErrorCodes::BAD_ARGUMENTS);

for (size_t i = 1; i < arguments->children.size(); ++i)
for (size_t i = argument_types_start_idx; i < arguments->children.size(); ++i)
argument_types.push_back(DataTypeFactory::instance().get(arguments->children[i]));

if (function_name.empty())
@ -158,13 +202,13 @@ static DataTypePtr create(const ASTPtr & arguments)

AggregateFunctionProperties properties;
function = AggregateFunctionFactory::instance().get(function_name, argument_types, params_row, properties);
return std::make_shared<DataTypeAggregateFunction>(function, argument_types, params_row);
return std::make_shared<DataTypeAggregateFunction>(function, argument_types, params_row, version);
}


void registerDataTypeAggregateFunction(DataTypeFactory & factory)
{
factory.registerDataType("AggregateFunction", create);
}


}
@ -10,6 +10,11 @@ namespace DB

/** Type - the state of the aggregate function.
* Type parameters are an aggregate function, the types of its arguments, and its parameters (for parametric aggregate functions).
*
* The data type can support versioning for the serialization of the aggregate function state.
* Version 0 also means no versioning. When a table with a versioned data type is attached, its version is parsed from the AST. If
* there is no version in the AST, then it is either an attach with no version in metadata (then the version is 0) or it
* is a new data type (then the version is the default - the latest).
*/
class DataTypeAggregateFunction final : public IDataType
{
@ -17,19 +22,28 @@ private:
AggregateFunctionPtr function;
DataTypes argument_types;
Array parameters;
mutable std::optional<size_t> version;

String getNameImpl(bool with_version) const;
size_t getVersion() const;

public:
static constexpr bool is_parametric = true;

DataTypeAggregateFunction(const AggregateFunctionPtr & function_, const DataTypes & argument_types_, const Array & parameters_)
: function(function_), argument_types(argument_types_), parameters(parameters_)
DataTypeAggregateFunction(const AggregateFunctionPtr & function_, const DataTypes & argument_types_,
const Array & parameters_, std::optional<size_t> version_ = std::nullopt)
: function(function_)
, argument_types(argument_types_)
, parameters(parameters_)
, version(version_)
{
}

std::string getFunctionName() const { return function->getName(); }
String getFunctionName() const { return function->getName(); }
AggregateFunctionPtr getFunction() const { return function; }

std::string doGetName() const override;
String doGetName() const override;
String getNameWithoutVersion() const;
const char * getFamilyName() const override { return "AggregateFunction"; }
TypeIndex getTypeId() const override { return TypeIndex::AggregateFunction; }

@ -52,8 +66,23 @@ public:
bool shouldAlignRightInPrettyFormats() const override { return false; }

SerializationPtr doGetDefaultSerialization() const override;

bool isVersioned() const { return function->isVersioned(); }

size_t getVersionFromRevision(size_t revision) const { return function->getVersionFromRevision(revision); }

/// Version is not empty only if it was parsed from AST or implicitly cast to 0 or version according
/// to server revision.
/// It is ok to have an empty version value here - then for serialization a default (latest)
/// version is used. This method is used to force some zero version to be used instead of
/// default, or to set version for serialization in distributed queries.
void setVersion(size_t version_, bool if_empty) const
{
if (version && if_empty)
return;

version = version_;
}
};


}
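
Editor's note: the two kinds of call sites in this commit illustrate the if_empty contract documented above; a sketch:

    /// ATTACH of old metadata: keep a version parsed from the AST, otherwise pin version 0.
    type.setVersion(0, /* if_empty */ true);

    /// Writing to a client that predates versioning: force version 0 unconditionally.
    type.setVersion(0, /* if_empty */ false);
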
@ -40,6 +40,7 @@ public:
size_t getSizeOfValueInMemory() const override;
bool onlyNull() const override;
bool canBeInsideLowCardinality() const override { return nested_data_type->canBeInsideLowCardinality(); }
bool canBePromoted() const override { return nested_data_type->canBePromoted(); }

const DataTypePtr & getNestedType() const { return nested_data_type; }
private:
@ -33,7 +33,7 @@ void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer

void SerializationAggregateFunction::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
{
function->serialize(assert_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr);
function->serialize(assert_cast<const ColumnAggregateFunction &>(column).getData()[row_num], ostr, version);
}

void SerializationAggregateFunction::deserializeBinary(IColumn & column, ReadBuffer & istr) const
@ -47,7 +47,7 @@ void SerializationAggregateFunction::deserializeBinary(IColumn & column, ReadBuf
function->create(place);
try
{
function->deserialize(place, istr, &arena);
function->deserialize(place, istr, version, &arena);
}
catch (...)
{
@ -70,7 +70,7 @@ void SerializationAggregateFunction::serializeBinaryBulk(const IColumn & column,
end = vec.end();

for (; it != end; ++it)
function->serialize(*it, ostr);
function->serialize(*it, ostr, version);
}

void SerializationAggregateFunction::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double /*avg_value_size_hint*/) const
@ -96,7 +96,7 @@ void SerializationAggregateFunction::deserializeBinaryBulk(IColumn & column, Rea

try
{
function->deserialize(place, istr, &arena);
function->deserialize(place, istr, version, &arena);
}
catch (...)
{
@ -108,14 +108,14 @@ void SerializationAggregateFunction::deserializeBinaryBulk(IColumn & column, Rea
}
}

static String serializeToString(const AggregateFunctionPtr & function, const IColumn & column, size_t row_num)
static String serializeToString(const AggregateFunctionPtr & function, const IColumn & column, size_t row_num, size_t version)
{
WriteBufferFromOwnString buffer;
function->serialize(assert_cast<const ColumnAggregateFunction &>(column).getData()[row_num], buffer);
function->serialize(assert_cast<const ColumnAggregateFunction &>(column).getData()[row_num], buffer, version);
return buffer.str();
}

static void deserializeFromString(const AggregateFunctionPtr & function, IColumn & column, const String & s)
static void deserializeFromString(const AggregateFunctionPtr & function, IColumn & column, const String & s, size_t version)
{
ColumnAggregateFunction & column_concrete = assert_cast<ColumnAggregateFunction &>(column);

@ -128,7 +128,7 @@ static void deserializeFromString(const AggregateFunctionPtr & function, IColumn
try
{
ReadBufferFromString istr(s);
function->deserialize(place, istr, &arena);
function->deserialize(place, istr, version, &arena);
}
catch (...)
{
@ -141,13 +141,13 @@ static void deserializeFromString(const AggregateFunctionPtr & function, IColumn

void SerializationAggregateFunction::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
writeString(serializeToString(function, column, row_num), ostr);
writeString(serializeToString(function, column, row_num, version), ostr);
}


void SerializationAggregateFunction::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
writeEscapedString(serializeToString(function, column, row_num), ostr);
writeEscapedString(serializeToString(function, column, row_num, version), ostr);
}


@ -155,13 +155,13 @@ void SerializationAggregateFunction::deserializeTextEscaped(IColumn & column, Re
{
String s;
readEscapedString(s, istr);
deserializeFromString(function, column, s);
deserializeFromString(function, column, s, version);
}


void SerializationAggregateFunction::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
writeQuotedString(serializeToString(function, column, row_num), ostr);
writeQuotedString(serializeToString(function, column, row_num, version), ostr);
}


@ -169,7 +169,7 @@ void SerializationAggregateFunction::deserializeTextQuoted(IColumn & column, Rea
{
String s;
readQuotedStringWithSQLStyle(s, istr);
deserializeFromString(function, column, s);
deserializeFromString(function, column, s, version);
}


@ -177,13 +177,13 @@ void SerializationAggregateFunction::deserializeWholeText(IColumn & column, Read
{
String s;
readStringUntilEOF(s, istr);
deserializeFromString(function, column, s);
deserializeFromString(function, column, s, version);
}


void SerializationAggregateFunction::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
writeJSONString(serializeToString(function, column, row_num), ostr, settings);
writeJSONString(serializeToString(function, column, row_num, version), ostr, settings);
}


@ -191,19 +191,19 @@ void SerializationAggregateFunction::deserializeTextJSON(IColumn & column, ReadB
{
String s;
readJSONString(s, istr);
deserializeFromString(function, column, s);
deserializeFromString(function, column, s, version);
}


void SerializationAggregateFunction::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
writeXMLStringForTextElement(serializeToString(function, column, row_num), ostr);
writeXMLStringForTextElement(serializeToString(function, column, row_num, version), ostr);
}


void SerializationAggregateFunction::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const
{
writeCSV(serializeToString(function, column, row_num), ostr);
writeCSV(serializeToString(function, column, row_num, version), ostr);
}


@ -211,7 +211,7 @@ void SerializationAggregateFunction::deserializeTextCSV(IColumn & column, ReadBu
{
String s;
readCSV(s, istr, settings.csv);
deserializeFromString(function, column, s);
deserializeFromString(function, column, s, version);
}

}
@ -13,12 +13,13 @@ class SerializationAggregateFunction final : public ISerialization
private:
AggregateFunctionPtr function;
String type_name;
size_t version;

public:
static constexpr bool is_parametric = true;

SerializationAggregateFunction(const AggregateFunctionPtr & function_, String type_name_)
: function(function_), type_name(std::move(type_name_)) {}
SerializationAggregateFunction(const AggregateFunctionPtr & function_, String type_name_, size_t version_)
: function(function_), type_name(std::move(type_name_)), version(version_) {}

/// NOTE These two functions for serializing single values are incompatible with the functions below.
void serializeBinary(const Field & field, WriteBuffer & ostr) const override;
@ -10,6 +10,7 @@

#include <Formats/NativeReader.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeAggregateFunction.h>


namespace DB
@ -62,7 +63,7 @@ void NativeReader::resetParser()
use_index = false;
}

void NativeReader::readData(const IDataType & type, ColumnPtr & column, ReadBuffer & istr, size_t rows, double avg_value_size_hint)
void NativeReader::readData(const IDataType & type, ColumnPtr & column, ReadBuffer & istr, size_t rows, double avg_value_size_hint, size_t revision)
{
ISerialization::DeserializeBinaryBulkSettings settings;
settings.getter = [&](ISerialization::SubstreamPath) -> ReadBuffer * { return &istr; };
@ -71,6 +72,14 @@ void NativeReader::readData(const IDataType & type, ColumnPtr & column, ReadBuff
settings.native_format = true;

ISerialization::DeserializeBinaryBulkStatePtr state;

const auto * aggregate_function_data_type = typeid_cast<const DataTypeAggregateFunction *>(&type);
if (aggregate_function_data_type && aggregate_function_data_type->isVersioned())
{
auto version = aggregate_function_data_type->getVersionFromRevision(revision);
aggregate_function_data_type->setVersion(version, /* if_empty */true);
}

auto serialization = type.getDefaultSerialization();

serialization->deserializeBinaryBulkStatePrefix(settings, state);
@ -156,7 +165,7 @@ Block NativeReader::read()

double avg_value_size_hint = avg_value_size_hints.empty() ? 0 : avg_value_size_hints[i];
if (rows) /// If no rows, nothing to read.
readData(*column.type, read_column, istr, rows, avg_value_size_hint);
readData(*column.type, read_column, istr, rows, avg_value_size_hint, server_revision);

column.column = std::move(read_column);
@ -31,7 +31,7 @@ public:
IndexForNativeFormat::Blocks::const_iterator index_block_it_,
IndexForNativeFormat::Blocks::const_iterator index_block_end_);

static void readData(const IDataType & type, ColumnPtr & column, ReadBuffer & istr, size_t rows, double avg_value_size_hint);
static void readData(const IDataType & type, ColumnPtr & column, ReadBuffer & istr, size_t rows, double avg_value_size_hint, size_t revision);

Block getHeader() const;
@ -11,6 +11,7 @@

#include <Common/typeid_cast.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeAggregateFunction.h>

namespace DB
{
@ -113,6 +114,21 @@ void NativeWriter::write(const Block & block)
/// Name
writeStringBinary(column.name, ostr);

bool include_version = client_revision >= DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING;
const auto * aggregate_function_data_type = typeid_cast<const DataTypeAggregateFunction *>(column.type.get());
if (aggregate_function_data_type && aggregate_function_data_type->isVersioned())
{
if (include_version)
{
auto version = aggregate_function_data_type->getVersionFromRevision(client_revision);
aggregate_function_data_type->setVersion(version, /* if_empty */true);
}
else
{
aggregate_function_data_type->setVersion(0, /* if_empty */false);
}
}

/// Type
String type_name = column.type->getName();
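
Editor's note: reader and writer map a peer's revision to a state version the same way, which is what keeps both the type name and the state bytes consistent across versions. A condensed sketch of the two sides (variable names follow the hunks above):

    /// Writer: pick the state version the peer can read.
    if (client_revision >= DBMS_MIN_REVISION_WITH_AGGREGATE_FUNCTIONS_VERSIONING)
        agg_type->setVersion(agg_type->getVersionFromRevision(client_revision), /* if_empty */ true);
    else
        agg_type->setVersion(0, /* if_empty */ false);   /// old peer: no version literal, version-0 bytes

    /// Reader (mirrors NativeReader::readData): map the sender's revision identically.
    agg_type->setVersion(agg_type->getVersionFromRevision(server_revision), /* if_empty */ true);
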
@ -1657,7 +1657,7 @@ namespace
{
aggregate_function->create(data);
ReadBufferFromMemory buf(str.data(), str.length());
aggregate_function->deserialize(data, buf, &arena);
aggregate_function->deserialize(data, buf, std::nullopt, &arena);
return data;
}
catch (...)
@ -110,6 +110,10 @@ public:
IColumn::MutablePtr result = return_type->createColumn();
result->reserve(null_map_data_size);

ColumnNullable * result_nullable = nullptr;
if (result->isNullable())
result_nullable = assert_cast<ColumnNullable *>(&*result);

size_t start_insert_index = 0;

/// A separate branch was created because casting and inserting a field from another column is slower
@ -125,7 +129,12 @@ public:
continue;

if (i != start_insert_index)
result->insertRangeFrom(nested_column, start_insert_index, i - start_insert_index);
{
if (result_nullable)
result_nullable->insertRangeFromNotNullable(nested_column, start_insert_index, i - start_insert_index);
else
result->insertRangeFrom(nested_column, start_insert_index, i - start_insert_index);
}

result->insertFrom(*default_column, i);
start_insert_index = i + 1;
@ -140,7 +149,12 @@ public:
continue;

if (i != start_insert_index)
result->insertRangeFrom(nested_column, start_insert_index, i - start_insert_index);
{
if (result_nullable)
result_nullable->insertRangeFromNotNullable(nested_column, start_insert_index, i - start_insert_index);
else
result->insertRangeFrom(nested_column, start_insert_index, i - start_insert_index);
}

result->insertDefault();
start_insert_index = i + 1;
@ -148,7 +162,12 @@ public:
}

if (null_map_data_size != start_insert_index)
result->insertRangeFrom(nested_column, start_insert_index, null_map_data_size - start_insert_index);
{
if (result_nullable)
result_nullable->insertRangeFromNotNullable(nested_column, start_insert_index, null_map_data_size - start_insert_index);
else
result->insertRangeFrom(nested_column, start_insert_index, null_map_data_size - start_insert_index);
}

return result;
}
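
Editor's note: the new branch avoids routing a non-nullable range through ColumnNullable's generic insertRangeFrom, which expects a nullable source. A sketch of the fast path in isolation (names are illustrative):

    void appendRange(IColumn & dst, const IColumn & src_not_nullable, size_t offset, size_t length)
    {
        if (auto * dst_nullable = typeid_cast<ColumnNullable *>(&dst))
            /// copies the data and extends the null map with zeros in one call
            dst_nullable->insertRangeFromNotNullable(src_not_nullable, offset, length);
        else
            dst.insertRangeFrom(src_not_nullable, offset, length);
    }
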
@ -1026,7 +1026,6 @@ void Context::addQueryFactoriesInfo(QueryLogFactories factory_type, const String

StoragePtr Context::executeTableFunction(const ASTPtr & table_expression)
{
/// Slightly suboptimal.
auto hash = table_expression->getTreeHash();
String key = toString(hash.first) + '_' + toString(hash.second);

@ -1035,9 +1034,20 @@ StoragePtr Context::executeTableFunction(const ASTPtr & table_expression)
if (!res)
{
TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().get(table_expression, shared_from_this());

/// Run it and remember the result
res = table_function_ptr->execute(table_expression, shared_from_this(), table_function_ptr->getName());

/// Since ITableFunction::parseArguments() may change table_expression, e.g.:
///
/// remote('127.1', system.one) -> remote('127.1', 'system.one'),
///
auto new_hash = table_expression->getTreeHash();
if (hash != new_hash)
{
key = toString(new_hash.first) + '_' + toString(new_hash.second);
table_function_results[key] = res;
}

return res;
}

return res;
@ -6,6 +6,7 @@
#include <Databases/IDatabase.h>
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/Context.h>
#include <Interpreters/FunctionNameNormalizer.h>
#include <Interpreters/MutationsInterpreter.h>
#include <Interpreters/QueryLog.h>
#include <Interpreters/executeDDLQueryOnCluster.h>
@ -44,6 +45,7 @@ InterpreterAlterQuery::InterpreterAlterQuery(const ASTPtr & query_ptr_, ContextP

BlockIO InterpreterAlterQuery::execute()
{
FunctionNameNormalizer().visit(query_ptr.get());
const auto & alter = query_ptr->as<ASTAlterQuery &>();
if (alter.alter_object == ASTAlterQuery::AlterObjectType::DATABASE)
return executeToDatabase(alter);
@ -48,6 +48,7 @@
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeAggregateFunction.h>

#include <Databases/DatabaseFactory.h>
#include <Databases/DatabaseReplicated.h>
@ -454,6 +455,10 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription(
{
column_type = DataTypeFactory::instance().get(col_decl.type);

const auto * aggregate_function_type = typeid_cast<const DataTypeAggregateFunction *>(column_type.get());
if (attach && aggregate_function_type && aggregate_function_type->isVersioned())
aggregate_function_type->setVersion(0, /* if_empty */true);

if (col_decl.null_modifier)
{
if (column_type->isNullable())
@ -320,6 +320,12 @@ static void onExceptionBeforeStart(const String & query_for_logging, ContextPtr
span.attribute_names.push_back("clickhouse.query_id");
span.attribute_values.push_back(elem.client_info.current_query_id);

span.attribute_names.push_back("clickhouse.exception");
span.attribute_values.push_back(elem.exception);

span.attribute_names.push_back("clickhouse.exception_code");
span.attribute_values.push_back(elem.exception_code);

if (!context->query_trace_context.tracestate.empty())
{
span.attribute_names.push_back("clickhouse.tracestate");
@ -794,6 +794,10 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
command->children.push_back(command->constraint_decl);
if (command->constraint)
command->children.push_back(command->constraint);
if (command->projection_decl)
command->children.push_back(command->projection_decl);
if (command->projection)
command->children.push_back(command->projection);
if (command->predicate)
command->children.push_back(command->predicate);
if (command->update_assignments)
@ -1458,8 +1458,8 @@ struct WindowFunction
size_t alignOfData() const override { return 1; }
void add(AggregateDataPtr __restrict, const IColumn **, size_t, Arena *) const override { fail(); }
void merge(AggregateDataPtr __restrict, ConstAggregateDataPtr, Arena *) const override { fail(); }
void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &) const override { fail(); }
void deserialize(AggregateDataPtr __restrict, ReadBuffer &, Arena *) const override { fail(); }
void serialize(ConstAggregateDataPtr __restrict, WriteBuffer &, std::optional<size_t>) const override { fail(); }
void deserialize(AggregateDataPtr __restrict, ReadBuffer &, std::optional<size_t>, Arena *) const override { fail(); }
void insertResultInto(AggregateDataPtr __restrict, IColumn &, Arena *) const override { fail(); }
};
@ -1,41 +0,0 @@
#include <Storages/JoinSettings.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/ASTFunction.h>
#include <Common/Exception.h>


namespace DB
{

namespace ErrorCodes
{
extern const int UNKNOWN_SETTING;
}

IMPLEMENT_SETTINGS_TRAITS(joinSettingsTraits, LIST_OF_JOIN_SETTINGS)

void JoinSettings::loadFromQuery(ASTStorage & storage_def)
{
if (storage_def.settings)
{
try
{
applyChanges(storage_def.settings->changes);
}
catch (Exception & e)
{
if (e.code() == ErrorCodes::UNKNOWN_SETTING)
e.addMessage("for storage " + storage_def.engine->name);
throw;
}
}
else
{
auto settings_ast = std::make_shared<ASTSetQuery>();
settings_ast->is_standalone = false;
storage_def.set(storage_def.settings, settings_ast);
}
}

}
@ -1,30 +0,0 @@
#pragma once

#include <Core/BaseSettings.h>
#include <Core/Settings.h>


namespace DB
{
class ASTStorage;


#define JOIN_RELATED_SETTINGS(M) \
M(Bool, persistent, true, "Disable setting to avoid the overhead of writing to disk for StorageJoin", 0)

#define LIST_OF_JOIN_SETTINGS(M) \
JOIN_RELATED_SETTINGS(M) \
FORMAT_FACTORY_SETTINGS(M)

DECLARE_SETTINGS_TRAITS(joinSettingsTraits, LIST_OF_JOIN_SETTINGS)


/** Settings for the Join engine.
* Could be loaded from a CREATE TABLE query (SETTINGS clause).
*/
struct JoinSettings : public BaseSettings<joinSettingsTraits>
{
void loadFromQuery(ASTStorage & storage_def);
};

}
@ -20,6 +20,7 @@
#include <Compression/getCompressionCodecForFile.h>
#include <Parsers/queryToString.h>
#include <DataTypes/NestedUtils.h>
#include <DataTypes/DataTypeAggregateFunction.h>


namespace CurrentMetrics
@ -1034,6 +1035,13 @@ void IMergeTreeDataPart::loadColumns(bool require)
else
{
loaded_columns.readText(*volume->getDisk()->readFile(path));

for (const auto & column : loaded_columns)
{
const auto * aggregate_function_data_type = typeid_cast<const DataTypeAggregateFunction *>(column.type.get());
if (aggregate_function_data_type && aggregate_function_data_type->isVersioned())
aggregate_function_data_type->setVersion(0, /* if_empty */true);
}
}

setColumns(loaded_columns);
@ -76,18 +76,24 @@ void ReplicatedMergeTreeSink::checkQuorumPrecondition(zkutil::ZooKeeperPtr & zoo
{
quorum_info.status_path = storage.zookeeper_path + "/quorum/status";

Strings replicas = zookeeper->getChildren(fs::path(storage.zookeeper_path) / "replicas");
std::vector<std::future<Coordination::ExistsResponse>> replicas_status_futures;
replicas_status_futures.reserve(replicas.size());
for (const auto & replica : replicas)
if (replica != storage.replica_name)
replicas_status_futures.emplace_back(zookeeper->asyncExists(fs::path(storage.zookeeper_path) / "replicas" / replica / "is_active"));

std::future<Coordination::GetResponse> is_active_future = zookeeper->asyncTryGet(storage.replica_path + "/is_active");
std::future<Coordination::GetResponse> host_future = zookeeper->asyncTryGet(storage.replica_path + "/host");

/// List of live replicas. All of them register an ephemeral node for leader_election.
size_t active_replicas = 1; /// Assume current replica is active (will check below)
for (auto & status : replicas_status_futures)
if (status.get().error == Coordination::Error::ZOK)
++active_replicas;

Coordination::Stat leader_election_stat;
zookeeper->get(storage.zookeeper_path + "/leader_election", &leader_election_stat);

if (leader_election_stat.numChildren < static_cast<int32_t>(quorum))
throw Exception("Number of alive replicas ("
+ toString(leader_election_stat.numChildren) + ") is less than requested quorum (" + toString(quorum) + ").",
ErrorCodes::TOO_FEW_LIVE_REPLICAS);
if (active_replicas < quorum)
throw Exception(ErrorCodes::TOO_FEW_LIVE_REPLICAS, "Number of alive replicas ({}) is less than requested quorum ({}).",
active_replicas, quorum);

/** Is there a quorum for the last part for which a quorum is needed?
* Write of all the parts with the included quorum is linearly ordered.
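The hunk above replaces the leader_election child count with an explicit count of live replicas gathered from asynchronous is_active checks. A minimal sketch of the same fire-then-count pattern, assuming a stand-in is_active callable in place of zookeeper->asyncExists:

import random  # unused here; kept out — sketch needs only futures
from concurrent.futures import ThreadPoolExecutor

def check_quorum(replicas, own_name, is_active, quorum):
    with ThreadPoolExecutor() as pool:
        # Fire all existence checks up front, then block on the results.
        futures = [pool.submit(is_active, r) for r in replicas if r != own_name]
        active = 1 + sum(1 for f in futures if f.result())  # assume self is active
    if active < quorum:
        raise RuntimeError("Number of alive replicas ({}) is less than requested quorum ({}).".format(active, quorum))

check_quorum(['r1', 'r2', 'r3'], 'r1', lambda r: r != 'r3', quorum=2)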
@ -261,9 +261,7 @@ void registerStorageJoin(StorageFactory & factory)
disk_name = setting.value.get<String>();
else if (setting.name == "persistent")
{
auto join_settings = std::make_unique<JoinSettings>();
join_settings->loadFromQuery(*args.storage_def);
persistent = join_settings->persistent;
persistent = setting.value.get<bool>();
}
else
throw Exception("Unknown setting " + setting.name + " for storage " + args.engine_name, ErrorCodes::BAD_ARGUMENTS);
@ -5,7 +5,6 @@
#include <Common/RWLock.h>
#include <Storages/StorageSet.h>
#include <Storages/TableLockHolder.h>
#include <Storages/JoinSettings.h>
#include <Parsers/ASTTablesInSelectQuery.h>


@ -17,6 +17,7 @@ from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
from tee_popen import TeePopen
from ccache_utils import get_ccache_if_not_exists, upload_ccache

NAME = 'Fast test (actions)'

@ -87,7 +88,12 @@ if __name__ == "__main__":
os.makedirs(output_path)

cache_path = os.path.join(caches_path, "fasttest")

logging.info("Will try to fetch cache for our build")
get_ccache_if_not_exists(cache_path, s3_helper, pr_info.number, temp_path)

if not os.path.exists(cache_path):
logging.info("cache was not fetched, will create empty dir")
os.makedirs(cache_path)

repo_path = os.path.join(temp_path, "fasttest-repo")
@ -138,6 +144,9 @@ if __name__ == "__main__":
else:
state, description, test_results, additional_logs = process_results(output_path)

logging.info("Will upload cache")
upload_ccache(cache_path, s3_helper, pr_info.number, temp_path)

ch_helper = ClickHouseHelper()
mark_flaky_tests(ch_helper, NAME, test_results)

@ -49,7 +49,9 @@ class S3Helper():
else:
logging.info("No content type provied for %s", file_path)
else:
if s3_path.endswith("txt") or s3_path.endswith("log") or ".log." in s3_path or s3_path.endswith("err") or s3_path.endswith("out"):
is_log = s3_path.endswith("log") or ".log." in s3_path
is_text = s3_path.endswith("txt") or is_log or s3_path.endswith("err") or s3_path.endswith("out")
if not s3_path.endswith('.gz') and (is_text or is_log):
logging.info("Going to compress file log file %s to %s", file_path, file_path + ".gz")
compress_file_fast(file_path, file_path + ".gz")
file_path += ".gz"
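The refactoring above pulls the file-type test into named is_log/is_text flags and skips keys that already carry a .gz suffix. A minimal sketch of the resulting predicate, with needs_compression as a hypothetical helper name (not the module's API):

def needs_compression(s3_path):
    # Logs and other text artifacts are gzipped before upload, unless the
    # key is already compressed.
    is_log = s3_path.endswith("log") or ".log." in s3_path
    is_text = s3_path.endswith("txt") or is_log or s3_path.endswith("err") or s3_path.endswith("out")
    return not s3_path.endswith('.gz') and (is_text or is_log)

assert needs_compression("clickhouse-server.log")
assert needs_compression("stderr.err")
assert not needs_compression("clickhouse-server.log.gz")
assert not needs_compression("report.html")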
34
tests/ci/worker/init_fuzzer_unit_tester.sh
Normal file
@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -uo pipefail

echo "Running init script"
export DEBIAN_FRONTEND=noninteractive
export RUNNER_HOME=/home/ubuntu/actions-runner

export RUNNER_URL="https://github.com/ClickHouse"
# Funny fact, but metadata service has fixed IP
export INSTANCE_ID=`curl -s http://169.254.169.254/latest/meta-data/instance-id`

while true; do
runner_pid=`pgrep run.sh`
echo "Got runner pid $runner_pid"

cd $RUNNER_HOME
if [ -z "$runner_pid" ]; then
echo "Receiving token"
RUNNER_TOKEN=`/usr/local/bin/aws ssm get-parameter --name github_runner_registration_token --with-decryption --output text --query Parameter.Value`

echo "Will try to remove runner"
sudo -u ubuntu ./config.sh remove --token $RUNNER_TOKEN ||:

echo "Going to configure runner"
sudo -u ubuntu ./config.sh --url $RUNNER_URL --token $RUNNER_TOKEN --name $INSTANCE_ID --runnergroup Default --labels 'self-hosted,Linux,X64,fuzzer-unit-tester' --work _work

echo "Run"
sudo -u ubuntu ./run.sh &
sleep 15
else
echo "Runner is working with pid $runner_pid, nothing to do"
sleep 10
fi
done
@ -371,9 +371,10 @@ class TestCase:
else:
# If --database is not specified, we will create temporary database with unique name
# And we will recreate and drop it for each test
def random_str(length=8):
def random_str(length=6):
alphabet = string.ascii_lowercase + string.digits
return ''.join(random.choice(alphabet) for _ in range(length))
# NOTE: it is important not to use default random generator, since it shares state.
return ''.join(random.SystemRandom().choice(alphabet) for _ in range(length))

database = 'test_{suffix}'.format(suffix=random_str())

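The NOTE in the hunk above is the point of the change: the module-level random generator shares state, so forked test runners can draw identical database suffixes. A minimal standalone sketch of the fixed helper:

import random
import string

def random_str(length=6):
    alphabet = string.ascii_lowercase + string.digits
    # random.SystemRandom() draws from os.urandom, so workers cannot
    # inherit and replay the same generator state.
    return ''.join(random.SystemRandom().choice(alphabet) for _ in range(length))

print('test_{}'.format(random_str()))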
@ -127,11 +127,11 @@ def _check_timeout_and_exception(node, user, query_base, query):

extra_repeats = 1
# Table function remote() are executed two times.
# It tries to get table stucture from remote shards.
# On 'node'2 it will firsty try to get structure from 'node1' (which is not available),
# so so threre are two extra conection attempts for 'node2' and 'remote'
# It tries to get table structure from remote shards.
# On 'node2' it will firstly try to get structure from 'node1' (which is not available),
# so there are 1 extra connection attempts for 'node2' and 'remote'
if node.name == 'node2' and query_base == 'remote':
extra_repeats = 3
extra_repeats = 2

expected_timeout = EXPECTED_BEHAVIOR[user]['timeout'] * repeats * extra_repeats

@ -0,0 +1 @@
#!/usr/bin/env python3
@ -0,0 +1,22 @@
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>

<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
119
tests/integration/test_keeper_incorrect_config/test.py
Normal file
@ -0,0 +1,119 @@
#!/usr/bin/env python3

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True)

@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()

yield cluster

finally:
cluster.shutdown()


DUPLICATE_ID_CONFIG = """
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>

<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
<server>
<id>1</id>
<hostname>node2</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
"""

DUPLICATE_ENDPOINT_CONFIG = """
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>

<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
"""

NORMAL_CONFIG = """
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>

<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
"""

def test_duplicate_endpoint(started_cluster):
node1.stop_clickhouse()
node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ENDPOINT_CONFIG)

with pytest.raises(Exception):
node1.start_clickhouse(start_wait_sec=10)

node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ID_CONFIG)
with pytest.raises(Exception):
node1.start_clickhouse(start_wait_sec=10)

node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", NORMAL_CONFIG)
node1.start_clickhouse()

assert node1.query("SELECT 1") == "1\n"
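The test above expects the server to refuse both a duplicated server id and a duplicated endpoint in raft_configuration. A minimal sketch (validate_raft_servers is a hypothetical helper, not the server's actual code) of the invariant being exercised:

def validate_raft_servers(servers):
    # Every raft server needs a unique id and a unique hostname:port endpoint.
    ids = [s['id'] for s in servers]
    endpoints = [(s['hostname'], s['port']) for s in servers]
    if len(set(ids)) != len(ids):
        raise ValueError("duplicate server id in raft_configuration")
    if len(set(endpoints)) != len(endpoints):
        raise ValueError("duplicate endpoint in raft_configuration")

try:
    validate_raft_servers([{'id': 1, 'hostname': 'node1', 'port': 9234},
                           {'id': 1, 'hostname': 'node2', 'port': 9234}])
except ValueError as e:
    print(e)  # duplicate server id in raft_configuration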
@ -180,12 +180,16 @@ def check_tables_are_synchronized(table_name, order_by='key', postgres_database=
else:
result = instance.query('select * from {}.`{}.{}` order by {};'.format(materialized_database, schema_name, table_name, order_by))

try_num = 0
while result != expected:
time.sleep(0.5)
if len(schema_name) == 0:
result = instance.query('select * from {}.{} order by {};'.format(materialized_database, table_name, order_by))
else:
result = instance.query('select * from {}.`{}.{}` order by {};'.format(materialized_database, schema_name, table_name, order_by))
try_num += 1
if try_num > 30:
break

assert(result == expected)

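The hunk above bounds the previously unbounded wait loop at 30 retries. A minimal sketch of the same poll-until-match pattern, with wait_for as a hypothetical helper:

import time

def wait_for(fetch, expected, retries=30, delay=0.5):
    # Re-run the query until it matches or the attempt budget is exhausted.
    result = fetch()
    try_num = 0
    while result != expected:
        time.sleep(delay)
        result = fetch()
        try_num += 1
        if try_num > retries:
            break
    return result

assert wait_for(lambda: "1\n", "1\n") == "1\n"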
@ -212,6 +216,7 @@ def test_add_new_table_to_replication(started_cluster):
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
cursor.execute('DROP TABLE IF EXISTS test_table')
NUM_TABLES = 5

for i in range(NUM_TABLES):
@ -293,6 +298,7 @@ def test_remove_table_from_replication(started_cluster):
port=started_cluster.postgres_port,
database=True)
cursor = conn.cursor()
cursor.execute('DROP TABLE IF EXISTS test_table')
NUM_TABLES = 5

for i in range(NUM_TABLES):
@ -352,8 +358,9 @@ def test_predefined_connection_configuration(started_cluster):
cursor = conn.cursor()
cursor.execute(f'DROP TABLE IF EXISTS test_table')
cursor.execute(f'CREATE TABLE test_table (key integer PRIMARY KEY, value integer)')
cursor.execute(f'INSERT INTO test_table SELECT 1, 2')

instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1)")
instance.query("CREATE DATABASE test_database ENGINE = MaterializedPostgreSQL(postgres1) SETTINGS materialized_postgresql_tables_list='test_table'")
check_tables_are_synchronized("test_table");
drop_materialized_db()
cursor.execute('DROP TABLE IF EXISTS test_table')
11
tests/integration/test_version_update/configs/log_conf.xml
Normal file
@ -0,0 +1,11 @@
<yandex>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/log.log</log>
<errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
</logger>
</yandex>
@ -1,10 +1,50 @@
import pytest

from helpers.cluster import ClickHouseCluster

from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry
cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance('node1', with_zookeeper=True, image='yandex/clickhouse-server', tag='21.2', with_installed_binary=True, stay_alive=True)

node1 = cluster.add_instance('node1', stay_alive=True)

node2 = cluster.add_instance('node2', with_zookeeper=True, image='yandex/clickhouse-server', tag='21.2', with_installed_binary=True, stay_alive=True)

# Use differents nodes because if there is node.restart_from_latest_version(), then in later tests
# it will be with latest version, but shouldn't, order of tests in CI is shuffled.
node3 = cluster.add_instance('node3', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True)
node4 = cluster.add_instance('node4', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True)
node5 = cluster.add_instance('node5', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True)
node6 = cluster.add_instance('node6', image='yandex/clickhouse-server', tag='21.5', with_installed_binary=True, stay_alive=True)



def insert_data(node, table_name='test_table', n=1, col2=1):
node.query(""" INSERT INTO {}
SELECT toDateTime(NOW()), {},
sumMapState(arrayMap(i -> 1, range(300)), arrayMap(i -> 1, range(300)))
FROM numbers({});""".format(table_name, col2, n))


def create_table(node, name='test_table', version=None):
node.query("DROP TABLE IF EXISTS {};".format(name))
if version is None:
node.query("""
CREATE TABLE {}
(
`col1` DateTime,
`col2` Int64,
`col3` AggregateFunction(sumMap, Array(UInt8), Array(UInt8))
)
ENGINE = AggregatingMergeTree() ORDER BY (col1, col2) """.format(name))
else:
node.query("""
CREATE TABLE {}
(
`col1` DateTime,
`col2` Int64,
`col3` AggregateFunction({}, sumMap, Array(UInt8), Array(UInt8))
)
ENGINE = AggregatingMergeTree() ORDER BY (col1, col2) """.format(name, version))


@pytest.fixture(scope="module")
@ -12,17 +52,165 @@ def start_cluster():
try:
cluster.start()
yield cluster

finally:
cluster.shutdown()


def test_modulo_partition_key_after_update(start_cluster):
node1.query("CREATE TABLE test (id Int64, v UInt64, value String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/table1', '1', v) PARTITION BY id % 20 ORDER BY (id, v)")
node1.query("INSERT INTO test SELECT number, number, toString(number) FROM numbers(10)")
expected = node1.query("SELECT number, number, toString(number) FROM numbers(10)")
partition_data = node1.query("SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition")
assert(expected == node1.query("SELECT * FROM test ORDER BY id"))
node1.restart_with_latest_version(signal=9)
assert(expected == node1.query("SELECT * FROM test ORDER BY id"))
assert(partition_data == node1.query("SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition"))
def test_modulo_partition_key_issue_23508(start_cluster):
node2.query("CREATE TABLE test (id Int64, v UInt64, value String) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/table1', '1', v) PARTITION BY id % 20 ORDER BY (id, v)")
node2.query("INSERT INTO test SELECT number, number, toString(number) FROM numbers(10)")

expected = node2.query("SELECT number, number, toString(number) FROM numbers(10)")
partition_data = node2.query("SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition")
assert(expected == node2.query("SELECT * FROM test ORDER BY id"))

node2.restart_with_latest_version()

assert(expected == node2.query("SELECT * FROM test ORDER BY id"))
assert(partition_data == node2.query("SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition"))


# Test from issue 16587
def test_aggregate_function_versioning_issue_16587(start_cluster):
for node in [node1, node3]:
node.query("DROP TABLE IF EXISTS test_table;")
node.query("""
CREATE TABLE test_table (`col1` DateTime, `col2` Int64)
ENGINE = MergeTree() ORDER BY col1""")
node.query("insert into test_table select '2020-10-26 00:00:00', 1929292 from numbers(300)")

expected = "([1],[600])"

result_on_old_version = node3.query("select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);").strip()
assert(result_on_old_version != expected)

result_on_new_version = node1.query("select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);").strip()
assert(result_on_new_version == expected)


def test_aggregate_function_versioning_fetch_data_from_old_to_new_server(start_cluster):
for node in [node1, node4]:
create_table(node)
insert_data(node)

expected = "([1],[300])"

new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(new_server_data == expected)

old_server_data = node4.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(old_server_data != expected)

data_from_old_to_new_server = node1.query("select finalizeAggregation(col3) from remote('node4', default.test_table);").strip()
assert(data_from_old_to_new_server == old_server_data)


def test_aggregate_function_versioning_server_upgrade(start_cluster):
for node in [node1, node5]:
create_table(node)
insert_data(node1, col2=5)
insert_data(node5, col2=1)

# Serialization with version 0, server does not support versioning of aggregate function states.
old_server_data = node5.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(old_server_data == "([1],[44])")
create = node5.query("describe table default.test_table;").strip()
assert(create.strip().endswith("col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))"))
print('Ok 1')

# Upgrade server.
node5.restart_with_latest_version()

# Deserialized with version 0, server supports versioning.
upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(upgraded_server_data == "([1],[44])")
create = node5.query("describe table default.test_table;").strip()
assert(create.strip().endswith("col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))"))
print('Ok 2')

create = node1.query("describe table default.test_table;").strip()
print(create)
assert(create.strip().endswith("col3\tAggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))"))

# Data from upgraded server to new server. Deserialize with version 0.
data_from_upgraded_to_new_server = node1.query("select finalizeAggregation(col3) from remote('node5', default.test_table);").strip()
assert(data_from_upgraded_to_new_server == upgraded_server_data == "([1],[44])")
print('Ok 3')

# Data is serialized according to version 0 (though one of the states is version 1, but result is version 0).
upgraded_server_data = node5.query("select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);").strip()
assert(upgraded_server_data == "([1],[44])\n([1],[44])")
print('Ok 4')

# Check insertion after server upgarde.
insert_data(node5, col2=2)

# Check newly inserted data is still serialized with 0 version.
upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table order by col2;").strip()
assert(upgraded_server_data == "([1],[44])\n([1],[44])")
print('Ok 5')

# New table has latest version.
new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(new_server_data == "([1],[300])")
print('Ok 6')

# Insert from new server (with version 1) to upgraded server (where version will be 0), result version 0.
node1.query("insert into table function remote('node5', default.test_table) select * from default.test_table;").strip()
upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table order by col2;").strip()
assert(upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])")
print('Ok 7')

# But new table gets data with latest version.
insert_data(node1)
new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(new_server_data == "([1],[300])\n([1],[300])")
print('Ok 8')

# Create table with column implicitly with older version (version 0).
create_table(node1, name='test_table_0', version=0)
insert_data(node1, table_name='test_table_0', col2=3)
data = node1.query("select finalizeAggregation(col3) from default.test_table_0;").strip()
assert(data == "([1],[44])")
print('Ok')

# Insert from new server to upgraded server to a new table but the version was set implicitly to 0, so data version 0.
node1.query("insert into table function remote('node5', default.test_table) select * from default.test_table_0;").strip()
upgraded_server_data = node5.query("select finalizeAggregation(col3) from default.test_table order by col2;").strip()
assert(upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])")
print('Ok')


def test_aggregate_function_versioning_persisting_metadata(start_cluster):
for node in [node1, node6]:
create_table(node)
insert_data(node)
data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(data == "([1],[300])")
data = node6.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(data == "([1],[44])")

node6.restart_with_latest_version()

for node in [node1, node6]:
node.query("DETACH TABLE test_table")
node.query("ATTACH TABLE test_table")

for node in [node1, node6]:
insert_data(node)

new_server_data = node1.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(new_server_data == "([1],[300])\n([1],[300])")

upgraded_server_data = node6.query("select finalizeAggregation(col3) from default.test_table;").strip()
assert(upgraded_server_data == "([1],[44])\n([1],[44])")

for node in [node1, node6]:
node.restart_clickhouse()
insert_data(node)

result = node1.query("select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);").strip()
assert(result == "([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])")

result = node6.query("select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);").strip()
assert(result == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])")
@ -1,13 +1,11 @@
===http===
{"query":"select 1 from remote('127.0.0.2', system, one) format Null\n","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1}
{"query":"DESC TABLE system.one","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1}
{"query":"DESC TABLE system.one","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1}
{"query":"SELECT 1 FROM system.one","status":"QueryFinish","tracestate":"some custom state","sorted_by_start_time":1}
{"query":"DESC TABLE system.one","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1}
{"query":"DESC TABLE system.one","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1}
{"query":"SELECT 1 FROM system.one","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1}
{"query":"select 1 from remote('127.0.0.2', system, one) format Null\n","query_status":"QueryFinish","tracestate":"some custom state","sorted_by_finish_time":1}
{"total spans":"4","unique spans":"4","unique non-zero parent spans":"3"}
{"total spans":"3","unique spans":"3","unique non-zero parent spans":"3"}
{"initial query spans with proper parent":"1"}
{"unique non-empty tracestate values":"1"}
===native===
@ -0,0 +1,2 @@
CREATE TABLE default.x\n(\n    `i` Int32,\n    INDEX mm rand() TYPE minmax GRANULARITY 1,\n    INDEX nn rand() TYPE minmax GRANULARITY 1,\n    PROJECTION p\n    (\n        SELECT max(i)\n    ),\n    PROJECTION p2\n    (\n        SELECT min(i)\n    )\n)\nENGINE = ReplicatedMergeTree(\'/clickhouse/tables/default/x\', \'r\')\nORDER BY i\nSETTINGS index_granularity = 8192
metadata format version: 1\ndate column: \nsampling expression: \nindex granularity: 8192\nmode: 0\nsign column: \nprimary key: i\ndata format version: 1\npartition key: \nindices: mm rand() TYPE minmax GRANULARITY 1, nn rand() TYPE minmax GRANULARITY 1\nprojections: p(SELECT max(i)), p2(SELECT min(i))\ngranularity bytes: 10485760\n
@ -0,0 +1,13 @@
-- Tags: zookeeper, no-replicated-database, no-parallel

drop table if exists x;

create table x(i int, index mm RAND() type minmax granularity 1, projection p (select MAX(i))) engine ReplicatedMergeTree('/clickhouse/tables/{database}/x', 'r') order by i;

alter table x add index nn RAND() type minmax granularity 1, add projection p2 (select MIN(i));

show create x;

select value from system.zookeeper WHERE name = 'metadata' and path = '/clickhouse/tables/'||currentDatabase()||'/x';

drop table x;