fix conflict

taiyang-li 2022-01-10 10:43:13 +08:00
commit 1b4b727d79
138 changed files with 3169 additions and 1360 deletions

contrib/NuRaft vendored

@ -1 +1 @@
Subproject commit ff100a8713146e1ca4b4158dd6cc4eef9af47fc3
Subproject commit c2043aa250e53ad5cf75e596e319d587af4dcb3c

View File

@ -103,6 +103,10 @@
"name": "clickhouse/mysql-golang-client",
"dependent": []
},
"docker/test/integration/dotnet_client": {
"name": "clickhouse/dotnet-client",
"dependent": []
},
"docker/test/integration/mysql_java_client": {
"name": "clickhouse/mysql-java-client",
"dependent": []

View File

@ -0,0 +1,2 @@
bin/
obj/

View File

@ -0,0 +1,10 @@
# docker build .
# docker run -it --rm --network=host 14f23e59669c dotnet run --host localhost --port 8123 --user default --database default
FROM mcr.microsoft.com/dotnet/sdk:3.1
WORKDIR /client
COPY *.cs *.csproj /client/
ARG VERSION=4.1.0
RUN dotnet add package ClickHouse.Client -v ${VERSION}

View File

@ -0,0 +1,90 @@
using System;
using System.Threading.Tasks;
using ClickHouse.Client.ADO;
using ClickHouse.Client.Utility;

namespace clickhouse.test
{
    class Program
    {
        static async Task Main(string[] args)
        {
            try
            {
                using var connection = new ClickHouseConnection(GetConnectionString(args));

                await connection.ExecuteStatementAsync("CREATE DATABASE IF NOT EXISTS test");
                await connection.ExecuteStatementAsync("TRUNCATE TABLE IF EXISTS test.dotnet_test");
                await connection.ExecuteStatementAsync("CREATE TABLE IF NOT EXISTS test.dotnet_test (`age` Int32, `name` String) Engine = Memory");

                using var command = connection.CreateCommand();
                command.AddParameter("name", "Linus Torvalds");
                command.AddParameter("age", 51);
                command.CommandText = "INSERT INTO test.dotnet_test VALUES({age:Int32}, {name:String})";
                await command.ExecuteNonQueryAsync();

                using var result1 = await connection.ExecuteReaderAsync("SELECT * FROM test.dotnet_test");
                while (result1.Read())
                {
                    var values = new object[result1.FieldCount];
                    result1.GetValues(values);
                    foreach (var row in values)
                    {
                        Console.WriteLine(row);
                    }
                }

                using var result2 = await connection.ExecuteReaderAsync(selectSql);
                while (result2.Read())
                {
                    var values = new object[result2.FieldCount];
                    result2.GetValues(values);
                    foreach (var row in values)
                    {
                        Console.WriteLine(row);
                    }
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine(e);
                Environment.ExitCode = 1;
            }
        }

        private static string GetConnectionString(string[] args)
        {
            var builder = new ClickHouseConnectionStringBuilder();
            int i = 0;
            while (i < args.Length)
            {
                switch (args[i])
                {
                    case "--host":
                        builder.Host = args[++i];
                        break;
                    case "--port":
                        builder.Port = UInt16.Parse(args[++i]);
                        break;
                    case "--user":
                        builder.Username = args[++i];
                        break;
                    case "--password":
                        builder.Password = args[++i];
                        break;
                    case "--database":
                        builder.Database = args[++i];
                        break;
                    default:
                        i++;
                        break;
                }
            }
            return builder.ToString();
        }

        private static string selectSql = @"SELECT NULL, toInt8(-8), toUInt8(8), toInt16(-16), toUInt16(16), toInt16(-32), toUInt16(32), toInt64(-64), toUInt64(64), toFloat32(32e6), toFloat32(-32e6), toFloat64(64e6), toFloat64(-64e6), 'TestString', toFixedString('ASD',3), toFixedString('ASD',5), toUUID('00000000-0000-0000-0000-000000000000'), toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'), toIPv4('1.2.3.4'), toIPv4('255.255.255.255'), CAST('a', 'Enum(\'a\' = 1, \'b\' = 2)'), CAST('a', 'Enum8(\'a\' = -1, \'b\' = 127)'), CAST('a', 'Enum16(\'a\' = -32768, \'b\' = 32767)'), array(1, 2, 3), array('a', 'b', 'c'), array(1, 2, NULL), toInt32OrNull('123'), toInt32OrNull(NULL), CAST(NULL AS Nullable(DateTime)), CAST(NULL AS LowCardinality(Nullable(String))), toLowCardinality('lowcardinality'), tuple(1, 'a', 8), tuple(123, tuple(5, 'a', 7)), toDateOrNull('1999-11-12'), toDateTime('1988-08-28 11:22:33'), toDateTime64('2043-03-01 18:34:04.4444444', 9), toDecimal32(123.45, 3), toDecimal32(-123.45, 3), toDecimal64(1.2345, 7), toDecimal64(-1.2345, 7), toDecimal128(12.34, 9), toDecimal128(-12.34, 9), toIPv6('2001:0db8:85a3:0000:0000:8a2e:0370:7334')";
    }
}

View File

@ -0,0 +1,13 @@
<Project Sdk="Microsoft.NET.Sdk">

    <PropertyGroup>
        <OutputType>Exe</OutputType>
        <TargetFramework>netcoreapp3.1</TargetFramework>
    </PropertyGroup>

    <ItemGroup>
        <PackageReference Include="clickhouse.client" Version="4.1.0" />
        <PackageReference Include="dapper" Version="2.0.30" />
    </ItemGroup>

</Project>

View File

@ -0,0 +1,6 @@
version: '2.3'
services:
  dotnet1:
    image: clickhouse/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest}
    # to keep container running
    command: sleep infinity

View File

@ -39,6 +39,7 @@ export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge
export CLICKHOUSE_LIBRARY_BRIDGE_BINARY_PATH=/clickhouse-library-bridge
export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
export DOCKER_DOTNET_CLIENT_TAG=${DOCKER_DOTNET_CLIENT_TAG:=latest}
export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest}
export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}

View File

@ -1,3 +1,3 @@
wget 'https://builds.clickhouse.com/master/freebsd/clickhouse'
fetch 'https://builds.clickhouse.com/master/freebsd/clickhouse'
chmod a+x ./clickhouse
sudo ./clickhouse install
su -m root -c './clickhouse install'

View File

@ -7,7 +7,7 @@ toc_title: MaterializedPostgreSQL
Creates a ClickHouse database with tables from a PostgreSQL database. First, a database with the `MaterializedPostgreSQL` engine creates a snapshot of the PostgreSQL database and loads the required tables. The required tables can include any subset of tables from any subset of schemas in the specified database. Along with the snapshot, the database engine acquires the LSN, and once the initial dump of tables is performed it starts pulling updates from the WAL. After the database is created, tables newly added to the PostgreSQL database are not automatically added to replication. They have to be added manually with the `ATTACH TABLE db.table` query.
Replication is implemented with the PostgreSQL Logical Replication Protocol, which does not allow replicating DDL but makes it possible to know when replication-breaking changes happen (column type changes, adding/removing columns). Such changes are detected, and the corresponding tables stop receiving updates. Such tables can be automatically reloaded in the background if the required setting is turned on. The safest way for now is to use `ATTACH`/`DETACH` queries to reload a table completely. If DDL does not break replication (for example, renaming a column), the table will still receive updates (insertion is done by position).
Replication is implemented with the PostgreSQL Logical Replication Protocol, which does not allow replicating DDL but makes it possible to know when replication-breaking changes happen (column type changes, adding/removing columns). Such changes are detected, and the corresponding tables stop receiving updates. Such tables can be automatically reloaded in the background if the required setting is turned on (can be used starting from 22.1). The safest way for now is to use `ATTACH`/`DETACH` queries to reload a table completely. If DDL does not break replication (for example, renaming a column), the table will still receive updates (insertion is done by position).
## Creating a Database {#creating-a-database}
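As a rough sketch of how such a database is created (host, database name, and credentials below are placeholders, not values taken from this commit):

``` sql
CREATE DATABASE postgres_db
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'postgres_database', 'postgres_user', 'password');
```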
@ -46,7 +46,7 @@ After `MaterializedPostgreSQL` database is created, it does not automatically de
ATTACH TABLE postgres_database.new_table;
```
Warning: before version 21.13, adding a table to replication left an undeleted temporary replication slot (named `{db_name}_ch_replication_slot_tmp`). If attaching tables in a ClickHouse version before 21.13, make sure to delete it manually (`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`), otherwise disk usage will grow. The issue is fixed in 21.13.
Warning: before version 22.1, adding a table to replication left an undeleted temporary replication slot (named `{db_name}_ch_replication_slot_tmp`). If attaching tables in a ClickHouse version before 22.1, make sure to delete it manually (`SELECT pg_drop_replication_slot('{db_name}_ch_replication_slot_tmp')`), otherwise disk usage will grow. The issue is fixed in 22.1.
## Dynamically removing tables from replication {#dynamically-removing-table-from-replication}
@ -156,6 +156,8 @@ Default value: empty list. (Default schema is used)
4. materialized_postgresql_allow_automatic_update {#materialized-postgresql-allow-automatic-update}
Do not use this setting before version 22.1.
Allows reloading the table in the background when schema changes are detected. DDL queries on the PostgreSQL side are not replicated via the ClickHouse [MaterializedPostgreSQL](../../engines/database-engines/materialized-postgresql.md) engine, because this is not allowed by the PostgreSQL logical replication protocol, but the fact of DDL changes is detected transactionally. In this case, the default behaviour is to stop replicating those tables once DDL is detected. However, if this setting is enabled, then instead of stopping the replication of those tables, they will be reloaded in the background via a database snapshot without data loss, and replication will continue for them.
Possible values:
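For illustration, a hedged sketch of turning the setting on at database creation time; host, names, and credentials are placeholders, and the `SETTINGS` clause placement is assumed from the surrounding documentation:

``` sql
CREATE DATABASE postgres_db
ENGINE = MaterializedPostgreSQL('postgres-host:5432', 'postgres_database', 'postgres_user', 'password')
SETTINGS materialized_postgresql_allow_automatic_update = 1;
```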

View File

@ -1288,6 +1288,20 @@ Example
<mysql_port>9004</mysql_port>
```
## postgresql_port {#server_configuration_parameters-postgresql_port}
Port for communicating with clients over PostgreSQL protocol.
**Possible values**
Positive integer.
Example
``` xml
<postgresql_port>9005</postgresql_port>
```
## tmp_path {#tmp-path}
Path to temporary data for processing large queries.

View File

@ -27,6 +27,10 @@ An example of changing the settings for a specific table with the `ALTER TABLE .
``` sql
ALTER TABLE foo
MODIFY SETTING max_suspicious_broken_parts = 100;
-- reset to default (use value from system.merge_tree_settings)
ALTER TABLE foo
RESET SETTING max_suspicious_broken_parts;
```
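To inspect the default that `RESET SETTING` falls back to, one can query the system table mentioned in the comment above (a minimal sketch):

``` sql
SELECT name, value
FROM system.merge_tree_settings
WHERE name = 'max_suspicious_broken_parts';
```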
## parts_to_throw_insert {#parts-to-throw-insert}

View File

@ -124,7 +124,7 @@ Setting fields:
- `format` — The file format. All the formats described in [Formats](../../../interfaces/formats.md#formats) are supported.
- `command_termination_timeout` — executable script should contain main read-write loop. After dictionary is destroyed, pipe is closed, and executable file will have `command_termination_timeout` seconds to shutdown, before ClickHouse will send SIGTERM signal to child process. Specified in seconds. Default value is 10. Optional parameter.
- `command_read_timeout` - timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter.
- `command_read_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter.
- `command_write_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter.
- `implicit_key` — The executable source file can return only values, and the correspondence to the requested keys is determined implicitly — by the order of rows in the result. Default value is false.
- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder. Additional script arguments can be specified using whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `0`. Optional parameter.
- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`.
@ -159,7 +159,7 @@ Setting fields:
- `command_termination_timeout` — executable script should contain main read-write loop. After dictionary is destroyed, pipe is closed, and executable file will have `command_termination_timeout` seconds to shutdown, before ClickHouse will send SIGTERM signal to child process. Specified in seconds. Default value is 10. Optional parameter.
- `max_command_execution_time` — Maximum executable script command execution time for processing block of data. Specified in seconds. Default value is 10. Optional parameter.
- `command_read_timeout` - timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter.
- `command_read_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter.
- `command_write_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter.
- `implicit_key` — The executable source file can return only values, and the correspondence to the requested keys is determined implicitly — by the order of rows in the result. Default value is false. Optional parameter.
- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder. Additional script arguments can be specified using whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `1`. Optional parameter.
- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`.
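As a non-authoritative sketch of how these fields fit together in the DDL form of an executable dictionary source; the lowercase key names are assumed to mirror the XML fields listed above, and `my_script.sh` is a placeholder:

``` sql
CREATE DICTIONARY my_dict
(
    id UInt64,
    value String
)
PRIMARY KEY id
SOURCE(EXECUTABLE(
    command 'my_script.sh'
    format 'TabSeparated'
    command_read_timeout 10000
    command_write_timeout 10000))
LAYOUT(FLAT())
LIFETIME(300);
```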

View File

@ -1392,12 +1392,24 @@ Returns the first element in the `arr1` array for which `func` returns something
Note that `arrayFirst` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can't be omitted.
## arrayLast(func, arr1, …) {#array-last}
Returns the last element in the `arr1` array for which `func` returns something other than 0.
Note that `arrayLast` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can't be omitted.
## arrayFirstIndex(func, arr1, …) {#array-first-index}
Returns the index of the first element in the `arr1` array for which `func` returns something other than 0.
Note that `arrayFirstIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can't be omitted.
## arrayLastIndex(func, arr1, …) {#array-last-index}
Returns the index of the last element in the `arr1` array for which `func` returns something other than 0.
Note that `arrayLastIndex` is a [higher-order function](../../sql-reference/functions/index.md#higher-order-functions). You must pass a lambda function to it as the first argument, and it can't be omitted.
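For illustration, a small query exercising these higher-order functions together (the input values are only an example, not part of this change):

``` sql
SELECT
    arrayLast(x -> x % 2 = 0, [1, 2, 3, 4, 5]) AS last_even,        -- 4
    arrayFirstIndex(x -> x > 2, [1, 2, 3, 4, 5]) AS first_index,    -- 3
    arrayLastIndex(x -> x > 2, [1, 2, 3, 4, 5]) AS last_index;      -- 5
```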
## arrayMin {#array-min}
Returns the minimum of elements in the source array.

View File

@ -217,8 +217,8 @@ Result:
``` text
(0,'2019-05-20') 0 \N \N (NULL,NULL)
(1,'2019-05-20') 1 First First ('First','First')
(2,'2019-05-20') 0 \N \N (NULL,NULL)
(3,'2019-05-20') 0 \N \N (NULL,NULL)
(2,'2019-05-20') 1 Second \N ('Second',NULL)
(3,'2019-05-20') 1 Third Third ('Third','Third')
(4,'2019-05-20') 0 \N \N (NULL,NULL)
```

View File

@ -81,7 +81,7 @@ A function configuration contains the following settings:
- `max_command_execution_time` - maximum execution time in seconds for processing block of data. This setting is valid for `executable_pool` commands only. Optional. Default value is `10`.
- `command_termination_timeout` - time in seconds during which a command should finish after its pipe is closed. After that time `SIGTERM` is sent to the process executing the command. Optional. Default value is `10`.
- `command_read_timeout` - timeout for reading data from command stdout in milliseconds. Default value 10000. Optional parameter.
- `command_read_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter.
- `command_write_timeout` - timeout for writing data to command stdin in milliseconds. Default value 10000. Optional parameter.
- `pool_size` - the size of a command pool. Optional. Default value is `16`.
- `send_chunk_header` - controls whether to send row count before sending a chunk of data to process. Optional. Default value is `false`.
- `execute_direct` - If `execute_direct` = `1`, then `command` will be searched inside user_scripts folder. Additional script arguments can be specified using whitespace separator. Example: `script_name arg1 arg2`. If `execute_direct` = `0`, `command` is passed as argument for `bin/sh -c`. Default value is `1`. Optional parameter.

View File

@ -344,9 +344,9 @@ SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val);
Result:
``` text
┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.11100 │ Nullable(Decimal(9, 5)) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.111 │ Nullable(Decimal(9, 5)) │
└────────┴────────────────────────────────────────────────────┘
```
Query:
@ -451,9 +451,9 @@ SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val);
Result:
``` text
┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.11100 │ Decimal(9, 5) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴────────────────────────────────────────────────────┘
```
Query:

View File

@ -360,6 +360,21 @@ SELECT decodeURLComponent('http://127.0.0.1:8123/?query=SELECT%201%3B') AS Decod
└────────────────────────────────────────┘
```
### decodeURLFormComponent(URL) {#decodeurlformcomponenturl}
Returns the decoded URL. Follows RFC 1866: a plain plus (`+`) is decoded as a space (` `).
Example:
``` sql
SELECT decodeURLFormComponent('http://127.0.0.1:8123/?query=SELECT%201+2%2B3') AS DecodedURL;
```
``` text
┌─DecodedURL────────────────────────────────┐
│ http://127.0.0.1:8123/?query=SELECT 1 2+3 │
└───────────────────────────────────────────┘
```
### netloc {#netloc}
Extracts network locality (`username:password@host:port`) from a URL.
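For example (the URL and the output shown in the comment are illustrative, based on the description above):

``` sql
SELECT netloc('http://paul:secret@example.com:8080/page?param=1');
-- paul:secret@example.com:8080
```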

View File

@ -170,9 +170,9 @@ SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val)
```
``` text
┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.11100 │ Nullable(Decimal(9, 5)) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.111 │ Nullable(Decimal(9, 5)) │
└────────┴────────────────────────────────────────────────────┘
```
``` sql
@ -214,9 +214,9 @@ SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val)
```
``` text
┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.11100 │ Decimal(9, 5) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴────────────────────────────────────────────────────┘
```
``` sql

View File

@ -343,9 +343,9 @@ SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val);
Результат:
``` text
┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.11100 │ Nullable(Decimal(9, 5)) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.111 │ Nullable(Decimal(9, 5)) │
└────────┴────────────────────────────────────────────────────┘
```
Запрос:
@ -449,9 +449,9 @@ SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val);
Результат:
``` text
┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.11100 │ Decimal(9, 5) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴────────────────────────────────────────────────────┘
```
Запрос:

View File

@ -167,9 +167,9 @@ SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val)
```
``` text
┌──────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.11100 │ Nullable(Decimal(9, 5)) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.111 │ Nullable(Decimal(9, 5)) │
└────────┴────────────────────────────────────────────────────┘
```
``` sql
@ -210,9 +210,9 @@ SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val)
```
``` text
┌──────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.11100 │ Decimal(9, 5) │
└──────────┴────────────────────────────────────────────────────┘
┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴────────────────────────────────────────────────────┘
```
``` sql

View File

@ -153,10 +153,12 @@ static void createGroup(const String & group_name)
if (!group_name.empty())
{
#if defined(OS_DARWIN)
// TODO: implement.
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unable to create a group in macOS");
#elif defined(OS_FREEBSD)
std::string command = fmt::format("pw groupadd {}", group_name);
fmt::print(" {}\n", command);
executeScript(command);
#else
std::string command = fmt::format("groupadd -r {}", group_name);
fmt::print(" {}\n", command);
@ -170,10 +172,14 @@ static void createUser(const String & user_name, [[maybe_unused]] const String &
if (!user_name.empty())
{
#if defined(OS_DARWIN)
// TODO: implement.
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unable to create a user in macOS");
#elif defined(OS_FREEBSD)
std::string command = group_name.empty()
? fmt::format("pw useradd -s /bin/false -d /nonexistent -n {}", user_name)
: fmt::format("pw useradd -s /bin/false -d /nonexistent -g {} -n {}", group_name, user_name);
fmt::print(" {}\n", command);
executeScript(command);
#else
std::string command = group_name.empty()
? fmt::format("useradd -r --shell /bin/false --home-dir /nonexistent --user-group {}", user_name)
@ -185,6 +191,20 @@ static void createUser(const String & user_name, [[maybe_unused]] const String &
}
static std::string formatWithSudo(std::string command, bool needed = true)
{
if (!needed)
return command;
#if defined(OS_FREEBSD)
/// FreeBSD does not have 'sudo' installed.
return fmt::format("su -m root -c '{}'", command);
#else
return fmt::format("sudo {}", command);
#endif
}
int mainEntryClickHouseInstall(int argc, char ** argv)
{
try
@ -207,10 +227,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
if (options.count("help"))
{
std::cout << "Usage: "
<< (getuid() == 0 ? "" : "sudo ")
<< argv[0]
<< " install [options]\n";
std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " install [options]", getuid() != 0) << '\n';
std::cout << desc << '\n';
return 1;
}
@ -233,6 +250,9 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
path.pop_back();
fs::path binary_self_path(path);
#elif defined(OS_FREEBSD)
/// https://stackoverflow.com/questions/1023306/finding-current-executables-path-without-proc-self-exe
fs::path binary_self_path = argc >= 1 ? argv[0] : "/proc/curproc/file";
#else
fs::path binary_self_path = "/proc/self/exe";
#endif
@ -314,7 +334,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
catch (const Exception & e)
{
if (e.code() == ErrorCodes::CANNOT_OPEN_FILE && geteuid() != 0)
std::cerr << "Install must be run as root: sudo ./clickhouse install\n";
std::cerr << "Install must be run as root: " << formatWithSudo("./clickhouse install") << '\n';
throw;
}
@ -824,9 +844,10 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
fmt::print(
"\nClickHouse has been successfully installed.\n"
"\nRestart clickhouse-server with:\n"
" sudo clickhouse restart\n"
" {}\n"
"\nStart clickhouse-client with:\n"
" clickhouse-client{}\n\n",
formatWithSudo("clickhouse restart"),
maybe_password);
}
else
@ -834,9 +855,10 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
fmt::print(
"\nClickHouse has been successfully installed.\n"
"\nStart clickhouse-server with:\n"
" sudo clickhouse start\n"
" {}\n"
"\nStart clickhouse-client with:\n"
" clickhouse-client{}\n\n",
formatWithSudo("clickhouse start"),
maybe_password);
}
}
@ -845,7 +867,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
std::cerr << getCurrentExceptionMessage(false) << '\n';
if (getuid() != 0)
std::cerr << "\nRun with sudo.\n";
std::cerr << "\nRun with " << formatWithSudo("...") << "\n";
return getCurrentExceptionCode();
}
@ -901,6 +923,9 @@ namespace
if (!user.empty())
{
#if defined(OS_FREEBSD)
command = fmt::format("su -m '{}' -c '{}'", user, command);
#else
bool may_need_sudo = geteuid() != 0;
if (may_need_sudo)
{
@ -910,7 +935,10 @@ namespace
command = fmt::format("sudo -u '{}' {}", user, command);
}
else
{
command = fmt::format("su -s /bin/sh '{}' -c '{}'", user, command);
}
#endif
}
fmt::print("Will run {}\n", command);
@ -1114,10 +1142,7 @@ int mainEntryClickHouseStart(int argc, char ** argv)
if (options.count("help"))
{
std::cout << "Usage: "
<< (getuid() == 0 ? "" : "sudo ")
<< argv[0]
<< " start\n";
std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " start", getuid() != 0) << '\n';
return 1;
}
@ -1155,10 +1180,7 @@ int mainEntryClickHouseStop(int argc, char ** argv)
if (options.count("help"))
{
std::cout << "Usage: "
<< (getuid() == 0 ? "" : "sudo ")
<< argv[0]
<< " stop\n";
std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " stop", getuid() != 0) << '\n';
return 1;
}
@ -1191,10 +1213,7 @@ int mainEntryClickHouseStatus(int argc, char ** argv)
if (options.count("help"))
{
std::cout << "Usage: "
<< (getuid() == 0 ? "" : "sudo ")
<< argv[0]
<< " status\n";
std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " status", getuid() != 0) << '\n';
return 1;
}
@ -1233,10 +1252,7 @@ int mainEntryClickHouseRestart(int argc, char ** argv)
if (options.count("help"))
{
std::cout << "Usage: "
<< (getuid() == 0 ? "" : "sudo ")
<< argv[0]
<< " restart\n";
std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " restart", getuid() != 0) << '\n';
return 1;
}

View File

@ -42,11 +42,14 @@ bool ACLMap::ACLsComparator::operator()(const Coordination::ACLs & left, const C
uint64_t ACLMap::convertACLs(const Coordination::ACLs & acls)
{
if (acls.empty())
return 0;
if (acl_to_num.count(acls))
return acl_to_num[acls];
/// Start from one
auto index = acl_to_num.size() + 1;
auto index = max_acl_id++;
acl_to_num[acls] = index;
num_to_acl[index] = acls;
@ -69,6 +72,7 @@ void ACLMap::addMapping(uint64_t acls_id, const Coordination::ACLs & acls)
{
num_to_acl[acls_id] = acls;
acl_to_num[acls] = acls_id;
max_acl_id = std::max(acls_id + 1, max_acl_id); /// max_acl_id points to the next free slot
}
void ACLMap::addUsage(uint64_t acl_id)

View File

@ -31,6 +31,7 @@ private:
ACLToNumMap acl_to_num;
NumToACLMap num_to_acl;
UsageCounter usage_counter;
uint64_t max_acl_id{1};
public:
/// Convert ACL to number. If it's a new ACL, then add it to the map
@ -43,7 +44,7 @@ public:
/// Mapping from numbers to ACLs vectors. Used during serialization.
const NumToACLMap & getMapping() const { return num_to_acl; }
/// Add mapping to ACLMap. Used during deserialization.
/// Add mapping to ACLMap. Used during deserialization from snapshot.
void addMapping(uint64_t acls_id, const Coordination::ACLs & acls);
/// Add/remove usage of some id. Used to remove unused ACLs.

View File

@ -41,6 +41,7 @@ const String KeeperConfigurationAndSettings::DEFAULT_FOUR_LETTER_WORD_CMD = "con
KeeperConfigurationAndSettings::KeeperConfigurationAndSettings()
: server_id(NOT_EXIST)
, enable_ipv6(true)
, tcp_port(NOT_EXIST)
, tcp_port_secure(NOT_EXIST)
, standalone_keeper(false)
@ -67,6 +68,9 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
writeText("server_id=", buf);
write_int(server_id);
writeText("enable_ipv6=", buf);
write_bool(enable_ipv6);
if (tcp_port != NOT_EXIST)
{
writeText("tcp_port=", buf);
@ -156,6 +160,8 @@ KeeperConfigurationAndSettings::loadFromConfig(const Poco::Util::AbstractConfigu
ret->server_id = config.getInt("keeper_server.server_id");
ret->standalone_keeper = standalone_keeper_;
ret->enable_ipv6 = config.getBool("keeper_server.enable_ipv6", true);
if (config.has("keeper_server.tcp_port"))
{
ret->tcp_port = config.getInt("keeper_server.tcp_port");

View File

@ -63,6 +63,7 @@ struct KeeperConfigurationAndSettings
KeeperConfigurationAndSettings();
int server_id;
bool enable_ipv6;
int tcp_port;
int tcp_port_secure;

View File

@ -276,7 +276,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
try
{
LOG_DEBUG(log, "Waiting server to initialize");
server->startup();
server->startup(configuration_and_settings->enable_ipv6);
LOG_DEBUG(log, "Server initialized, waiting for quorum");
if (!start_async)

View File

@ -107,7 +107,7 @@ KeeperServer::KeeperServer(
LOG_WARNING(log, "Quorum reads enabled, Keeper will work slower.");
}
void KeeperServer::startup()
void KeeperServer::startup(bool enable_ipv6)
{
state_machine->init();
@ -171,13 +171,14 @@ void KeeperServer::startup()
#endif
}
launchRaftServer(params, asio_opts);
launchRaftServer(enable_ipv6, params, asio_opts);
if (!raft_instance)
throw Exception(ErrorCodes::RAFT_ERROR, "Cannot allocate RAFT instance");
}
void KeeperServer::launchRaftServer(
bool enable_ipv6,
const nuraft::raft_params & params,
const nuraft::asio_service::options & asio_opts)
{
@ -192,7 +193,7 @@ void KeeperServer::launchRaftServer(
nuraft::ptr<nuraft::logger> logger = nuraft::cs_new<LoggerWrapper>("RaftInstance", coordination_settings->raft_logs_level);
asio_service = nuraft::cs_new<nuraft::asio_service>(asio_opts, logger);
asio_listener = asio_service->create_rpc_listener(state_manager->getPort(), logger);
asio_listener = asio_service->create_rpc_listener(state_manager->getPort(), logger, enable_ipv6);
if (!asio_listener)
return;

View File

@ -44,6 +44,7 @@ private:
/// Almost copy-paste from nuraft::launcher, but with separated server init and start
/// Allows to avoid race conditions.
void launchRaftServer(
bool enable_ipv6,
const nuraft::raft_params & params,
const nuraft::asio_service::options & asio_opts);
@ -57,7 +58,7 @@ public:
SnapshotsQueue & snapshots_queue_);
/// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings.
void startup();
void startup(bool enable_ipv6 = true);
/// Put local read request and execute in state machine directly and response into
/// responses queue

View File

@ -182,19 +182,19 @@ StoragePtr DatabasePostgreSQL::fetchTable(const String & table_name, ContextPtr,
return StoragePtr{};
auto connection_holder = pool->get();
auto columns = fetchPostgreSQLTableStructure(connection_holder->get(), table_name, configuration.schema).columns;
auto columns_info = fetchPostgreSQLTableStructure(connection_holder->get(), table_name, configuration.schema).physical_columns;
if (!columns)
if (!columns_info)
return StoragePtr{};
auto storage = StoragePostgreSQL::create(
StorageID(database_name, table_name), pool, table_name,
ColumnsDescription{*columns}, ConstraintsDescription{}, String{}, configuration.schema, configuration.on_conflict);
ColumnsDescription{columns_info->columns}, ConstraintsDescription{}, String{}, configuration.schema, configuration.on_conflict);
if (cache_tables)
cached_tables[table_name] = storage;
return storage;
return std::move(storage);
}
if (table_checked || checkPostgresTable(table_name))
@ -414,7 +414,7 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co
assert(storage_engine_arguments->children.size() >= 2);
storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, std::make_shared<ASTLiteral>(table_id.table_name));
return create_table_query;
return std::move(create_table_query);
}

View File

@ -15,7 +15,7 @@
#include <Common/quoteString.h>
#include <Core/PostgreSQL/Utils.h>
#include <base/FnTraits.h>
#include <IO/ReadHelpers.h>
namespace DB
{
@ -155,10 +155,11 @@ static DataTypePtr convertPostgreSQLDataType(String & type, Fn<void()> auto && r
template<typename T>
std::shared_ptr<NamesAndTypesList> readNamesAndTypesList(
T & tx, const String & postgres_table, const String & query, bool use_nulls, bool only_names_and_types)
PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList(
T & tx, const String & postgres_table, const String & query, bool use_nulls, bool only_names_and_types)
{
auto columns = NamesAndTypes();
PostgreSQLTableStructure::Attributes attributes;
try
{
@ -180,14 +181,22 @@ std::shared_ptr<NamesAndTypesList> readNamesAndTypesList(
}
else
{
std::tuple<std::string, std::string, std::string, uint16_t> row;
std::tuple<std::string, std::string, std::string, uint16_t, std::string, std::string> row;
while (stream >> row)
{
auto data_type = convertPostgreSQLDataType(std::get<1>(row),
recheck_array,
use_nulls && (std::get<2>(row) == "f"), /// 'f' means that postgres `not_null` is false, i.e. value is nullable
std::get<3>(row));
auto data_type = convertPostgreSQLDataType(
std::get<1>(row), recheck_array,
use_nulls && (std::get<2>(row) == /* not nullable */"f"),
std::get<3>(row));
columns.push_back(NameAndTypePair(std::get<0>(row), data_type));
attributes.emplace_back(
PostgreSQLTableStructure::PGAttribute{
.atttypid = parse<int>(std::get<4>(row)),
.atttypmod = parse<int>(std::get<5>(row)),
});
++i;
}
}
@ -226,7 +235,9 @@ std::shared_ptr<NamesAndTypesList> readNamesAndTypesList(
throw;
}
return !columns.empty() ? std::make_shared<NamesAndTypesList>(columns.begin(), columns.end()) : nullptr;
return !columns.empty()
? std::make_shared<PostgreSQLTableStructure::ColumnsInfo>(NamesAndTypesList(columns.begin(), columns.end()), std::move(attributes))
: nullptr;
}
@ -244,14 +255,14 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure(
std::string query = fmt::format(
"SELECT attname AS name, format_type(atttypid, atttypmod) AS type, "
"attnotnull AS not_null, attndims AS dims "
"attnotnull AS not_null, attndims AS dims, atttypid as type_id, atttypmod as type_modifier "
"FROM pg_attribute "
"WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) "
"AND NOT attisdropped AND attnum > 0", where);
table.columns = readNamesAndTypesList(tx, postgres_table, query, use_nulls, false);
table.physical_columns = readNamesAndTypesList(tx, postgres_table, query, use_nulls, false);
if (!table.columns)
if (!table.physical_columns)
throw Exception(ErrorCodes::UNKNOWN_TABLE, "PostgreSQL table {} does not exist", postgres_table);
if (with_primary_key)

View File

@ -12,9 +12,24 @@ namespace DB
struct PostgreSQLTableStructure
{
std::shared_ptr<NamesAndTypesList> columns = nullptr;
std::shared_ptr<NamesAndTypesList> primary_key_columns = nullptr;
std::shared_ptr<NamesAndTypesList> replica_identity_columns = nullptr;
struct PGAttribute
{
Int32 atttypid;
Int32 atttypmod;
};
using Attributes = std::vector<PGAttribute>;
struct ColumnsInfo
{
NamesAndTypesList columns;
Attributes attributes;
ColumnsInfo(NamesAndTypesList && columns_, Attributes && attributes_) : columns(columns_), attributes(attributes_) {}
};
using ColumnsInfoPtr = std::shared_ptr<ColumnsInfo>;
ColumnsInfoPtr physical_columns;
ColumnsInfoPtr primary_key_columns;
ColumnsInfoPtr replica_identity_columns;
};
using PostgreSQLTableStructurePtr = std::unique_ptr<PostgreSQLTableStructure>;

View File

@ -13,6 +13,7 @@
#include <Dictionaries/ICacheDictionaryStorage.h>
#include <Dictionaries/DictionaryHelpers.h>
namespace DB
{
@ -308,7 +309,7 @@ private:
if (was_inserted)
{
if constexpr (std::is_same_v<KeyType, StringRef>)
cell.key = copyStringInArena(key);
cell.key = copyStringInArena(arena, key);
else
cell.key = key;
@ -332,8 +333,7 @@ private:
else if constexpr (std::is_same_v<ElementType, StringRef>)
{
const String & string_value = column_value.get<String>();
StringRef string_value_ref = StringRef {string_value.data(), string_value.size()};
StringRef inserted_value = copyStringInArena(string_value_ref);
StringRef inserted_value = copyStringInArena(arena, string_value);
container.back() = inserted_value;
}
else
@ -353,7 +353,7 @@ private:
{
char * data = const_cast<char *>(cell.key.data);
arena.free(data, cell.key.size);
cell.key = copyStringInArena(key);
cell.key = copyStringInArena(arena, key);
}
else
cell.key = key;
@ -379,8 +379,7 @@ private:
else if constexpr (std::is_same_v<ElementType, StringRef>)
{
const String & string_value = column_value.get<String>();
StringRef string_ref_value = StringRef {string_value.data(), string_value.size()};
StringRef inserted_value = copyStringInArena(string_ref_value);
StringRef inserted_value = copyStringInArena(arena, string_value);
if (!cell_was_default)
{
@ -423,7 +422,7 @@ private:
if (was_inserted)
{
if constexpr (std::is_same_v<KeyType, StringRef>)
cell.key = copyStringInArena(key);
cell.key = copyStringInArena(arena, key);
else
cell.key = key;
@ -463,7 +462,7 @@ private:
{
char * data = const_cast<char *>(cell.key.data);
arena.free(data, cell.key.size);
cell.key = copyStringInArena(key);
cell.key = copyStringInArena(arena, key);
}
else
cell.key = key;
@ -526,16 +525,6 @@ private:
return const_cast<std::decay_t<decltype(*this)> *>(this)->template getAttributeContainer(attribute_index, std::forward<GetContainerFunc>(func));
}
StringRef copyStringInArena(StringRef value_to_copy)
{
size_t value_to_copy_size = value_to_copy.size;
char * place_for_key = arena.alloc(value_to_copy_size);
memcpy(reinterpret_cast<void *>(place_for_key), reinterpret_cast<const void *>(value_to_copy.data), value_to_copy_size);
StringRef updated_value{place_for_key, value_to_copy_size};
return updated_value;
}
template<typename ValueType>
using ContainerType = std::conditional_t<
std::is_same_v<ValueType, Field> || std::is_same_v<ValueType, Array>,

View File

@ -28,6 +28,10 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
"host", "port", "user", "password", "db", "database", "table",
"update_field", "update_tag", "invalidate_query", "query", "where", "name", "secure"};
namespace
{
constexpr size_t MAX_CONNECTIONS = 16;
@ -235,9 +239,11 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory)
std::string db = config.getString(settings_config_prefix + ".db", default_database);
std::string table = config.getString(settings_config_prefix + ".table", "");
UInt16 port = static_cast<UInt16>(config.getUInt(settings_config_prefix + ".port", default_port));
auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key); };
auto named_collection = created_from_ddl ?
getExternalDataSourceConfiguration(config, settings_config_prefix, global_context) : std::nullopt;
auto named_collection = created_from_ddl
? getExternalDataSourceConfiguration(config, settings_config_prefix, global_context, has_config_key)
: std::nullopt;
if (named_collection)
{

View File

@ -623,6 +623,17 @@ void mergeBlockWithPipe(
}
}
template <typename Arena>
static StringRef copyStringInArena(Arena & arena, StringRef value)
{
size_t key_size = value.size;
char * place_for_key = arena.alloc(key_size);
memcpy(reinterpret_cast<void *>(place_for_key), reinterpret_cast<const void *>(value.data), key_size);
StringRef result{place_for_key, key_size};
return result;
}
/**
* Returns ColumnVector data as PaddedPodArray.

View File

@ -2,7 +2,6 @@
#include <Core/Defines.h>
#include <Common/HashTable/HashMap.h>
#include <DataTypes/DataTypesDecimal.h>
#include <Functions/FunctionHelpers.h>
#include <Dictionaries/DictionaryFactory.h>

View File

@ -3,15 +3,12 @@
#include <atomic>
#include <variant>
#include <vector>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnString.h>
#include <Common/Arena.h>
#include <Core/Block.h>
#include <Common/HashTable/HashMap.h>
#include "DictionaryStructure.h"
#include "IDictionary.h"
#include "IDictionarySource.h"
#include "DictionaryHelpers.h"
#include <Dictionaries/DictionaryStructure.h>
#include <Dictionaries/IDictionary.h>
#include <Dictionaries/IDictionarySource.h>
#include <Dictionaries/DictionaryHelpers.h>
namespace DB
{

View File

@ -399,9 +399,6 @@ void FlatDictionary::calculateBytesAllocated()
}
bucket_count = container.capacity();
if constexpr (std::is_same_v<ValueType, StringRef>)
bytes_allocated += sizeof(Arena) + attribute.string_arena->size();
};
callOnDictionaryAttributeType(attribute.type, type_call);
@ -414,12 +411,14 @@ void FlatDictionary::calculateBytesAllocated()
if (update_field_loaded_block)
bytes_allocated += update_field_loaded_block->allocatedBytes();
bytes_allocated += string_arena.size();
}
FlatDictionary::Attribute FlatDictionary::createAttribute(const DictionaryAttribute & dictionary_attribute)
{
auto is_nullable_set = dictionary_attribute.is_nullable ? std::make_optional<NullableSet>() : std::optional<NullableSet>{};
Attribute attribute{dictionary_attribute.underlying_type, std::move(is_nullable_set), {}, {}};
Attribute attribute{dictionary_attribute.underlying_type, std::move(is_nullable_set), {}};
auto type_call = [&](const auto & dictionary_attribute_type)
{
@ -427,9 +426,6 @@ FlatDictionary::Attribute FlatDictionary::createAttribute(const DictionaryAttrib
using AttributeType = typename Type::AttributeType;
using ValueType = DictionaryValueType<AttributeType>;
if constexpr (std::is_same_v<ValueType, StringRef>)
attribute.string_arena = std::make_unique<Arena>();
attribute.container.emplace<ContainerType<ValueType>>(configuration.initial_array_size, ValueType());
};
@ -510,8 +506,8 @@ void FlatDictionary::setAttributeValueImpl(Attribute & attribute, UInt64 key, co
template <>
void FlatDictionary::setAttributeValueImpl<String>(Attribute & attribute, UInt64 key, const String & value)
{
const auto * string_in_arena = attribute.string_arena->insert(value.data(), value.size());
setAttributeValueImpl(attribute, key, StringRef{string_in_arena, value.size()});
auto arena_value = copyStringInArena(string_arena, value);
setAttributeValueImpl(attribute, key, arena_value);
}
void FlatDictionary::setAttributeValue(Attribute & attribute, const UInt64 key, const Field & value)

View File

@ -133,8 +133,6 @@ private:
ContainerType<StringRef>,
ContainerType<Array>>
container;
std::unique_ptr<Arena> string_arena;
};
void createAttributes();
@ -176,6 +174,7 @@ private:
mutable std::atomic<size_t> found_count{0};
BlockPtr update_field_loaded_block;
Arena string_arena;
};
}

View File

@ -352,8 +352,7 @@ void HashedArrayDictionary<dictionary_key_type>::createAttributes()
using ValueType = DictionaryValueType<AttributeType>;
auto is_index_null = dictionary_attribute.is_nullable ? std::make_optional<std::vector<bool>>() : std::optional<std::vector<bool>>{};
std::unique_ptr<Arena> string_arena = std::is_same_v<AttributeType, String> ? std::make_unique<Arena>() : nullptr;
Attribute attribute{dictionary_attribute.underlying_type, AttributeContainerType<ValueType>(), std::move(is_index_null), std::move(string_arena)};
Attribute attribute{dictionary_attribute.underlying_type, AttributeContainerType<ValueType>(), std::move(is_index_null)};
attributes.emplace_back(std::move(attribute));
};
@ -431,7 +430,7 @@ void HashedArrayDictionary<dictionary_key_type>::blockToAttributes(const Block &
}
if constexpr (std::is_same_v<KeyType, StringRef>)
key = copyKeyInArena(key);
key = copyStringInArena(string_arena, key);
key_attribute.container.insert({key, element_count});
@ -466,11 +465,7 @@ void HashedArrayDictionary<dictionary_key_type>::blockToAttributes(const Block &
if constexpr (std::is_same_v<AttributeValueType, StringRef>)
{
String & value_to_insert = column_value_to_insert.get<String>();
size_t value_to_insert_size = value_to_insert.size();
const char * string_in_arena = attribute.string_arena->insert(value_to_insert.data(), value_to_insert_size);
StringRef string_in_arena_reference = StringRef{string_in_arena, value_to_insert_size};
StringRef string_in_arena_reference = copyStringInArena(string_arena, value_to_insert);
attribute_container.back() = string_in_arena_reference;
}
else
@ -676,16 +671,6 @@ void HashedArrayDictionary<dictionary_key_type>::getItemsImpl(
}
}
template <DictionaryKeyType dictionary_key_type>
StringRef HashedArrayDictionary<dictionary_key_type>::copyKeyInArena(StringRef key)
{
size_t key_size = key.size;
char * place_for_key = complex_key_arena.alloc(key_size);
memcpy(reinterpret_cast<void *>(place_for_key), reinterpret_cast<const void *>(key.data), key_size);
StringRef updated_key{place_for_key, key_size};
return updated_key;
}
template <DictionaryKeyType dictionary_key_type>
void HashedArrayDictionary<dictionary_key_type>::loadData()
{
@ -742,21 +727,15 @@ void HashedArrayDictionary<dictionary_key_type>::calculateBytesAllocated()
}
bucket_count = container.capacity();
if constexpr (std::is_same_v<ValueType, StringRef>)
bytes_allocated += sizeof(Arena) + attribute.string_arena->size();
};
callOnDictionaryAttributeType(attribute.type, type_call);
if (attribute.string_arena)
bytes_allocated += attribute.string_arena->size();
if (attribute.is_index_null.has_value())
bytes_allocated += (*attribute.is_index_null).size();
}
bytes_allocated += complex_key_arena.size();
bytes_allocated += string_arena.size();
if (update_field_loaded_block)
bytes_allocated += update_field_loaded_block->allocatedBytes();

View File

@ -155,7 +155,6 @@ private:
container;
std::optional<std::vector<bool>> is_index_null;
std::unique_ptr<Arena> string_arena;
};
struct KeyAttribute final
@ -205,8 +204,6 @@ private:
void resize(size_t added_rows);
StringRef copyKeyInArena(StringRef key);
const DictionaryStructure dict_struct;
const DictionarySourcePtr source_ptr;
const HashedArrayDictionaryStorageConfiguration configuration;
@ -222,7 +219,7 @@ private:
mutable std::atomic<size_t> found_count{0};
BlockPtr update_field_loaded_block;
Arena complex_key_arena;
Arena string_arena;
};
extern template class HashedArrayDictionary<DictionaryKeyType::Simple>;

View File

@ -239,7 +239,7 @@ ColumnPtr HashedDictionary<dictionary_key_type, sparse>::getHierarchy(ColumnPtr
if (it != parent_keys_map.end())
result = getValueFromCell(it);
keys_found +=result.has_value();
keys_found += result.has_value();
return result;
};
@ -354,8 +354,7 @@ void HashedDictionary<dictionary_key_type, sparse>::createAttributes()
using ValueType = DictionaryValueType<AttributeType>;
auto is_nullable_set = dictionary_attribute.is_nullable ? std::make_optional<NullableSet>() : std::optional<NullableSet>{};
std::unique_ptr<Arena> string_arena = std::is_same_v<AttributeType, String> ? std::make_unique<Arena>() : nullptr;
Attribute attribute{dictionary_attribute.underlying_type, std::move(is_nullable_set), CollectionType<ValueType>(), std::move(string_arena)};
Attribute attribute{dictionary_attribute.underlying_type, std::move(is_nullable_set), CollectionType<ValueType>()};
attributes.emplace_back(std::move(attribute));
};
@ -449,7 +448,7 @@ void HashedDictionary<dictionary_key_type, sparse>::blockToAttributes(const Bloc
}
if constexpr (std::is_same_v<KeyType, StringRef>)
key = copyKeyInArena(key);
key = copyStringInArena(string_arena, key);
attribute_column.get(key_index, column_value_to_insert);
@ -463,12 +462,8 @@ void HashedDictionary<dictionary_key_type, sparse>::blockToAttributes(const Bloc
if constexpr (std::is_same_v<AttributeValueType, StringRef>)
{
String & value_to_insert = column_value_to_insert.get<String>();
size_t value_to_insert_size = value_to_insert.size();
const char * string_in_arena = attribute.string_arena->insert(value_to_insert.data(), value_to_insert_size);
StringRef string_in_arena_reference = StringRef{string_in_arena, value_to_insert_size};
container.insert({key, string_in_arena_reference});
StringRef arena_value = copyStringInArena(string_arena, value_to_insert);
container.insert({key, arena_value});
}
else
{
@ -548,16 +543,6 @@ void HashedDictionary<dictionary_key_type, sparse>::getItemsImpl(
found_count.fetch_add(keys_found, std::memory_order_relaxed);
}
template <DictionaryKeyType dictionary_key_type, bool sparse>
StringRef HashedDictionary<dictionary_key_type, sparse>::copyKeyInArena(StringRef key)
{
size_t key_size = key.size;
char * place_for_key = complex_key_arena.alloc(key_size);
memcpy(reinterpret_cast<void *>(place_for_key), reinterpret_cast<const void *>(key.data), key_size);
StringRef updated_key{place_for_key, key_size};
return updated_key;
}
template <DictionaryKeyType dictionary_key_type, bool sparse>
void HashedDictionary<dictionary_key_type, sparse>::loadData()
{
@ -631,16 +616,13 @@ void HashedDictionary<dictionary_key_type, sparse>::calculateBytesAllocated()
}
});
if (attributes[i].string_arena)
bytes_allocated += attributes[i].string_arena->size();
bytes_allocated += sizeof(attributes[i].is_nullable_set);
if (attributes[i].is_nullable_set.has_value())
bytes_allocated = attributes[i].is_nullable_set->getBufferSizeInBytes();
}
bytes_allocated += complex_key_arena.size();
bytes_allocated += string_arena.size();
if (update_field_loaded_block)
bytes_allocated += update_field_loaded_block->allocatedBytes();

View File

@ -173,8 +173,6 @@ private:
CollectionType<StringRef>,
CollectionType<Array>>
container;
std::unique_ptr<Arena> string_arena;
};
void createAttributes();
@ -202,8 +200,6 @@ private:
void resize(size_t added_rows);
StringRef copyKeyInArena(StringRef key);
const DictionaryStructure dict_struct;
const DictionarySourcePtr source_ptr;
const HashedDictionaryStorageConfiguration configuration;
@ -217,7 +213,7 @@ private:
mutable std::atomic<size_t> found_count{0};
BlockPtr update_field_loaded_block;
Arena complex_key_arena;
Arena string_arena;
};
extern template class HashedDictionary<DictionaryKeyType::Simple, false>;

View File

@ -8,6 +8,9 @@
namespace DB
{
static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
"host", "port", "user", "password", "db", "database", "uri", "collection", "name", "method"};
void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
{
auto create_mongo_db_dictionary = [](
@ -21,7 +24,8 @@ void registerDictionarySourceMongoDB(DictionarySourceFactory & factory)
{
const auto config_prefix = root_config_prefix + ".mongodb";
ExternalDataSourceConfiguration configuration;
auto named_collection = getExternalDataSourceConfiguration(config, config_prefix, context);
auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key); };
auto named_collection = getExternalDataSourceConfiguration(config, config_prefix, context, has_config_key);
if (named_collection)
{
configuration = *named_collection;

View File

@ -30,6 +30,18 @@ namespace ErrorCodes
extern const int UNSUPPORTED_METHOD;
}
static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
"host", "port", "user", "password",
"db", "database", "table", "schema",
"update_field", "invalidate_query", "priority",
"update_tag", "dont_check_update_time",
"query", "where", "name" /* name_collection */, "socket",
"share_connection", "fail_on_connection_loss", "close_connection",
"ssl_ca", "ssl_cert", "ssl_key",
"enable_local_infile", "opt_reconnect",
"connect_timeout", "mysql_connect_timeout",
"mysql_rw_timeout", "rw_timeout"};
void registerDictionarySourceMysql(DictionarySourceFactory & factory)
{
auto create_table_source = [=]([[maybe_unused]] const DictionaryStructure & dict_struct,
@ -48,8 +60,11 @@ void registerDictionarySourceMysql(DictionarySourceFactory & factory)
auto settings_config_prefix = config_prefix + ".mysql";
std::shared_ptr<mysqlxx::PoolWithFailover> pool;
auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key) || key.starts_with("replica"); };
StorageMySQLConfiguration configuration;
auto named_collection = created_from_ddl ? getExternalDataSourceConfiguration(config, settings_config_prefix, global_context) : std::nullopt;
auto named_collection = created_from_ddl
? getExternalDataSourceConfiguration(config, settings_config_prefix, global_context, has_config_key)
: std::nullopt;
if (named_collection)
{
configuration.set(*named_collection);

View File

@ -3,16 +3,14 @@
#include <atomic>
#include <variant>
#include <Core/Block.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnString.h>
#include <Common/Arena.h>
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/multi_polygon.hpp>
#include "DictionaryStructure.h"
#include "IDictionary.h"
#include "IDictionarySource.h"
#include "DictionaryHelpers.h"
#include <Dictionaries/DictionaryStructure.h>
#include <Dictionaries/IDictionary.h>
#include <Dictionaries/IDictionarySource.h>
#include <Dictionaries/DictionaryHelpers.h>
namespace DB
{

View File

@ -28,6 +28,10 @@ namespace ErrorCodes
static const UInt64 max_block_size = 8192;
static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
"host", "port", "user", "password", "db", "database", "table", "schema",
"update_field", "update_tag", "invalidate_query", "query", "where", "name", "priority"};
namespace
{
ExternalQueryBuilder makeExternalQueryBuilder(const DictionaryStructure & dict_struct, const String & schema, const String & table, const String & query, const String & where)
@ -185,8 +189,8 @@ void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory)
{
#if USE_LIBPQXX
const auto settings_config_prefix = config_prefix + ".postgresql";
auto configuration = getExternalDataSourceConfigurationByPriority(config, settings_config_prefix, context);
auto has_config_key = [](const String & key) { return dictionary_allowed_keys.contains(key) || key.starts_with("replica"); };
auto configuration = getExternalDataSourceConfigurationByPriority(config, settings_config_prefix, context, has_config_key);
auto pool = std::make_shared<postgres::PoolWithFailover>(
configuration.replicas_configurations,
context->getSettingsRef().postgresql_connection_pool_size,

View File

@ -345,9 +345,6 @@ void RangeHashedDictionary<dictionary_key_type>::calculateBytesAllocated()
const auto & collection = std::get<CollectionType<ValueType>>(attribute.maps);
bytes_allocated += sizeof(CollectionType<ValueType>) + collection.getBufferSizeInBytes();
bucket_count = collection.getBufferSizeInCells();
if constexpr (std::is_same_v<ValueType, StringRef>)
bytes_allocated += sizeof(Arena) + attribute.string_arena->size();
};
callOnDictionaryAttributeType(attribute.type, type_call);
@ -358,12 +355,14 @@ void RangeHashedDictionary<dictionary_key_type>::calculateBytesAllocated()
if (update_field_loaded_block)
bytes_allocated += update_field_loaded_block->allocatedBytes();
bytes_allocated += string_arena.size();
}
template <DictionaryKeyType dictionary_key_type>
typename RangeHashedDictionary<dictionary_key_type>::Attribute RangeHashedDictionary<dictionary_key_type>::createAttribute(const DictionaryAttribute & dictionary_attribute)
{
Attribute attribute{dictionary_attribute.underlying_type, dictionary_attribute.is_nullable, {}, {}};
Attribute attribute{dictionary_attribute.underlying_type, dictionary_attribute.is_nullable, {}};
auto type_call = [&](const auto &dictionary_attribute_type)
{
@ -371,9 +370,6 @@ typename RangeHashedDictionary<dictionary_key_type>::Attribute RangeHashedDictio
using AttributeType = typename Type::AttributeType;
using ValueType = DictionaryValueType<AttributeType>;
if constexpr (std::is_same_v<AttributeType, String>)
attribute.string_arena = std::make_unique<Arena>();
attribute.maps = CollectionType<ValueType>();
};
@ -544,7 +540,7 @@ void RangeHashedDictionary<dictionary_key_type>::blockToAttributes(const Block &
}
if constexpr (std::is_same_v<KeyType, StringRef>)
key = copyKeyInArena(key);
key = copyStringInArena(string_arena, key);
setAttributeValue(attribute, key, Range{lower_bound, upper_bound}, attribute_column[key_index]);
keys_extractor.rollbackCurrentKey();
@ -572,8 +568,7 @@ void RangeHashedDictionary<dictionary_key_type>::setAttributeValueImpl(Attribute
if constexpr (std::is_same_v<T, String>)
{
const auto & string = value.get<String>();
const auto * string_in_arena = attribute.string_arena->insert(string.data(), string.size());
const StringRef string_ref{string_in_arena, string.size()};
StringRef string_ref = copyStringInArena(string_arena, string);
value_to_insert = Value<ValueType>{ range, { string_ref }};
}
else
@ -671,16 +666,6 @@ void RangeHashedDictionary<dictionary_key_type>::getKeysAndDates(
}
}
template <DictionaryKeyType dictionary_key_type>
StringRef RangeHashedDictionary<dictionary_key_type>::copyKeyInArena(StringRef key)
{
size_t key_size = key.size;
char * place_for_key = complex_key_arena.alloc(key_size);
memcpy(reinterpret_cast<void *>(place_for_key), reinterpret_cast<const void *>(key.data), key_size);
StringRef updated_key{place_for_key, key_size};
return updated_key;
}
template <DictionaryKeyType dictionary_key_type>
template <typename RangeType>
PaddedPODArray<Int64> RangeHashedDictionary<dictionary_key_type>::makeDateKeys(

View File

@ -139,7 +139,6 @@ private:
CollectionType<StringRef>,
CollectionType<Array>>
maps;
std::unique_ptr<Arena> string_arena;
};
void createAttributes();
@ -162,9 +161,9 @@ private:
void blockToAttributes(const Block & block);
template <typename T>
static void setAttributeValueImpl(Attribute & attribute, KeyType key, const Range & range, const Field & value);
void setAttributeValueImpl(Attribute & attribute, KeyType key, const Range & range, const Field & value);
static void setAttributeValue(Attribute & attribute, KeyType key, const Range & range, const Field & value);
void setAttributeValue(Attribute & attribute, KeyType key, const Range & range, const Field & value);
template <typename RangeType>
void getKeysAndDates(
@ -184,8 +183,6 @@ private:
const PaddedPODArray<RangeType> & block_start_dates,
const PaddedPODArray<RangeType> & block_end_dates) const;
StringRef copyKeyInArena(StringRef key);
const DictionaryStructure dict_struct;
const DictionarySourcePtr source_ptr;
const DictionaryLifetime dict_lifetime;
@ -200,6 +197,7 @@ private:
size_t bucket_count = 0;
mutable std::atomic<size_t> query_count{0};
mutable std::atomic<size_t> found_count{0};
Arena string_arena;
};
}
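For context: the per-attribute Arena and the bespoke copyKeyInArena helper above are replaced by a single shared string_arena plus a generic copyStringInArena utility. A minimal sketch of what such a helper does, assuming only an arena that exposes a raw alloc(size) method (ToyArena and the exact signature are illustrative stand-ins, not the real ClickHouse API):

#include <cstring>
#include <memory>
#include <string_view>
#include <vector>

// Toy arena: hands out stable chunks of memory that live as long as the arena itself.
struct ToyArena
{
    std::vector<std::unique_ptr<char[]>> chunks;

    char * alloc(size_t size)
    {
        chunks.emplace_back(std::make_unique<char[]>(size));
        return chunks.back().get();
    }
};

// Copy a string into the arena and return a view pointing at the stable copy,
// so keys and string attribute values can outlive the block they came from.
std::string_view copyStringInArena(ToyArena & arena, std::string_view value)
{
    char * place = arena.alloc(value.size());
    std::memcpy(place, value.data(), value.size());
    return {place, value.size()};
}

int main()
{
    ToyArena arena;
    auto copy = copyStringInArena(arena, "StartDate");
    return copy.size() == 9 ? 0 : 1;
}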

View File

@ -1148,10 +1148,7 @@ private:
if constexpr (dictionary_key_type == DictionaryKeyType::Complex)
{
/// Copy complex key into arena and put in cache
size_t key_size = key.size;
char * place_for_key = complex_key_arena.alloc(key_size);
memcpy(reinterpret_cast<void *>(place_for_key), reinterpret_cast<const void *>(key.data), key_size);
KeyType updated_key{place_for_key, key_size};
KeyType updated_key = copyStringInArena(complex_key_arena, key);
ssd_cache_key.key = updated_key;
}

View File

@ -35,7 +35,13 @@ namespace ErrorCodes
namespace
{
using NamesToTypeNames = std::unordered_map<std::string, std::string>;
struct AttributeConfiguration
{
std::string type;
std::string expression;
};
using AttributeNameToConfiguration = std::unordered_map<std::string, AttributeConfiguration>;
/// Get value from field and convert it to string.
/// Also remove quotes from strings.
@ -46,6 +52,21 @@ String getFieldAsString(const Field & field)
return applyVisitor(FieldVisitorToString(), field);
}
String getAttributeExpression(const ASTDictionaryAttributeDeclaration * dict_attr)
{
if (!dict_attr->expression)
return {};
/// EXPRESSION PROPERTY should be expression or string
String expression_str;
if (const auto * literal = dict_attr->expression->as<ASTLiteral>(); literal && literal->value.getType() == Field::Types::String)
expression_str = getFieldAsString(literal->value);
else
expression_str = queryToString(dict_attr->expression);
return expression_str;
}
using namespace Poco;
using namespace Poco::XML;
@ -63,20 +84,19 @@ void buildLifetimeConfiguration(
AutoPtr<Element> root,
const ASTDictionaryLifetime * lifetime)
{
if (!lifetime)
return;
if (lifetime)
{
AutoPtr<Element> lifetime_element(doc->createElement("lifetime"));
AutoPtr<Element> min_element(doc->createElement("min"));
AutoPtr<Element> max_element(doc->createElement("max"));
AutoPtr<Text> min_sec(doc->createTextNode(toString(lifetime->min_sec)));
min_element->appendChild(min_sec);
AutoPtr<Text> max_sec(doc->createTextNode(toString(lifetime->max_sec)));
max_element->appendChild(max_sec);
lifetime_element->appendChild(min_element);
lifetime_element->appendChild(max_element);
root->appendChild(lifetime_element);
}
AutoPtr<Element> lifetime_element(doc->createElement("lifetime"));
AutoPtr<Element> min_element(doc->createElement("min"));
AutoPtr<Element> max_element(doc->createElement("max"));
AutoPtr<Text> min_sec(doc->createTextNode(toString(lifetime->min_sec)));
min_element->appendChild(min_sec);
AutoPtr<Text> max_sec(doc->createTextNode(toString(lifetime->max_sec)));
max_element->appendChild(max_sec);
lifetime_element->appendChild(min_element);
lifetime_element->appendChild(max_element);
root->appendChild(lifetime_element);
}
/* Transforms next definition
@ -105,40 +125,43 @@ void buildLayoutConfiguration(
AutoPtr<Element> layout_type_element(doc->createElement(layout->layout_type));
layout_element->appendChild(layout_type_element);
if (layout->parameters)
if (!layout->parameters)
return;
for (const auto & param : layout->parameters->children)
{
for (const auto & param : layout->parameters->children)
const ASTPair * pair = param->as<ASTPair>();
if (!pair)
{
const ASTPair * pair = param->as<ASTPair>();
if (!pair)
{
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Dictionary layout parameters must be key/value pairs, got '{}' instead",
param->formatForErrorMessage());
}
const ASTLiteral * value_literal = pair->second->as<ASTLiteral>();
if (!value_literal)
{
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS,
"Dictionary layout parameter value must be a literal, got '{}' instead",
pair->second->formatForErrorMessage());
}
const auto value_field = value_literal->value;
if (value_field.getType() != Field::Types::UInt64
&& value_field.getType() != Field::Types::String)
{
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS,
"Dictionary layout parameter value must be an UInt64 or String, got '{}' instead",
value_field.getTypeName());
}
AutoPtr<Element> layout_type_parameter_element(doc->createElement(pair->first));
AutoPtr<Text> value_to_append(doc->createTextNode(toString(value_field)));
layout_type_parameter_element->appendChild(value_to_append);
layout_type_element->appendChild(layout_type_parameter_element);
throw DB::Exception(
ErrorCodes::BAD_ARGUMENTS,
"Dictionary layout parameters must be key/value pairs, got '{}' instead",
param->formatForErrorMessage());
}
const ASTLiteral * value_literal = pair->second->as<ASTLiteral>();
if (!value_literal)
{
throw DB::Exception(
ErrorCodes::BAD_ARGUMENTS,
"Dictionary layout parameter value must be a literal, got '{}' instead",
pair->second->formatForErrorMessage());
}
const auto value_field = value_literal->value;
if (value_field.getType() != Field::Types::UInt64 && value_field.getType() != Field::Types::String)
{
throw DB::Exception(
ErrorCodes::BAD_ARGUMENTS,
"Dictionary layout parameter value must be an UInt64 or String, got '{}' instead",
value_field.getTypeName());
}
AutoPtr<Element> layout_type_parameter_element(doc->createElement(pair->first));
AutoPtr<Text> value_to_append(doc->createTextNode(toString(value_field)));
layout_type_parameter_element->appendChild(value_to_append);
layout_type_element->appendChild(layout_type_parameter_element);
}
}
@ -149,10 +172,10 @@ void buildLayoutConfiguration(
* <range_min><name>StartDate</name></range_min>
* <range_max><name>EndDate</name></range_max>
*/
void buildRangeConfiguration(AutoPtr<Document> doc, AutoPtr<Element> root, const ASTDictionaryRange * range, const NamesToTypeNames & all_attrs)
void buildRangeConfiguration(AutoPtr<Document> doc, AutoPtr<Element> root, const ASTDictionaryRange * range, const AttributeNameToConfiguration & all_attrs)
{
// appends <key><name>value</name></key> to root
auto append_element = [&doc, &root](const std::string & key, const std::string & name, const std::string & type)
auto append_element = [&doc, &root](const std::string & key, const std::string & name, const AttributeConfiguration & configuration)
{
AutoPtr<Element> element(doc->createElement(key));
AutoPtr<Element> name_node(doc->createElement("name"));
@ -161,22 +184,33 @@ void buildRangeConfiguration(AutoPtr<Document> doc, AutoPtr<Element> root, const
element->appendChild(name_node);
AutoPtr<Element> type_node(doc->createElement("type"));
AutoPtr<Text> type_text(doc->createTextNode(type));
AutoPtr<Text> type_text(doc->createTextNode(configuration.type));
type_node->appendChild(type_text);
element->appendChild(type_node);
if (!configuration.expression.empty())
{
AutoPtr<Element> expression_node(doc->createElement("expression"));
AutoPtr<Text> expression_text(doc->createTextNode(configuration.expression));
expression_node->appendChild(expression_text);
element->appendChild(expression_node);
}
root->appendChild(element);
};
if (!all_attrs.count(range->min_attr_name))
auto range_min_attribute_it = all_attrs.find(range->min_attr_name);
if (range_min_attribute_it == all_attrs.end())
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION,
"MIN ({}) attribute is not defined in the dictionary attributes", range->min_attr_name);
if (!all_attrs.count(range->max_attr_name))
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION,
"MAX ({}) attribute is not defined in the dictionary attributes", range->max_attr_name);
"MIN {} attribute is not defined in the dictionary attributes", range->min_attr_name);
append_element("range_min", range->min_attr_name, all_attrs.at(range->min_attr_name));
append_element("range_max", range->max_attr_name, all_attrs.at(range->max_attr_name));
auto range_max_attribute_it = all_attrs.find(range->max_attr_name);
if (range_max_attribute_it == all_attrs.end())
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION,
"MAX {} attribute is not defined in the dictionary attributes", range->max_attr_name);
append_element("range_min", range->min_attr_name, range_min_attribute_it->second);
append_element("range_max", range->max_attr_name, range_max_attribute_it->second);
}
@ -199,25 +233,14 @@ void buildAttributeExpressionIfNeeded(
AutoPtr<Element> root,
const ASTDictionaryAttributeDeclaration * dict_attr)
{
if (dict_attr->expression != nullptr)
{
AutoPtr<Element> expression_element(doc->createElement("expression"));
if (!dict_attr->expression)
return;
/// EXPRESSION PROPERTY should be expression or string
String expression_str;
if (const auto * literal = dict_attr->expression->as<ASTLiteral>();
literal && literal->value.getType() == Field::Types::String)
{
expression_str = getFieldAsString(literal->value);
}
else
expression_str = queryToString(dict_attr->expression);
AutoPtr<Text> expression(doc->createTextNode(expression_str));
expression_element->appendChild(expression);
root->appendChild(expression_element);
}
AutoPtr<Element> expression_element(doc->createElement("expression"));
String expression_str = getAttributeExpression(dict_attr);
AutoPtr<Text> expression(doc->createTextNode(expression_str));
expression_element->appendChild(expression);
root->appendChild(expression_element);
}
/** Transforms single dictionary attribute to configuration
@ -373,25 +396,28 @@ void buildPrimaryKeyConfiguration(
/** Transforms list of ASTDictionaryAttributeDeclarations to list of dictionary attributes
*/
NamesToTypeNames buildDictionaryAttributesConfiguration(
AttributeNameToConfiguration buildDictionaryAttributesConfiguration(
AutoPtr<Document> doc,
AutoPtr<Element> root,
const ASTExpressionList * dictionary_attributes,
const Names & key_columns)
{
const auto & children = dictionary_attributes->children;
NamesToTypeNames attributes_names_and_types;
AttributeNameToConfiguration attributes_name_to_configuration;
for (const auto & child : children)
{
const ASTDictionaryAttributeDeclaration * dict_attr = child->as<const ASTDictionaryAttributeDeclaration>();
if (!dict_attr->type)
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Dictionary attribute must have a type");
attributes_names_and_types.emplace(dict_attr->name, queryToString(dict_attr->type));
AttributeConfiguration attribute_configuration {queryToString(dict_attr->type), getAttributeExpression(dict_attr)};
attributes_name_to_configuration.emplace(dict_attr->name, std::move(attribute_configuration));
if (std::find(key_columns.begin(), key_columns.end(), dict_attr->name) == key_columns.end())
buildSingleAttribute(doc, root, dict_attr);
}
return attributes_names_and_types;
return attributes_name_to_configuration;
}
/** Transform function with key-value arguments to configuration
@ -513,10 +539,10 @@ void checkAST(const ASTCreateQuery & query)
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Cannot create dictionary with empty source");
}
void checkPrimaryKey(const NamesToTypeNames & all_attrs, const Names & key_attrs)
void checkPrimaryKey(const AttributeNameToConfiguration & all_attrs, const Names & key_attrs)
{
for (const auto & key_attr : key_attrs)
if (all_attrs.count(key_attr) == 0)
if (all_attrs.find(key_attr) == all_attrs.end())
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Unknown key attribute '{}'", key_attr);
}

View File

@ -71,6 +71,20 @@ public:
void startup() override;
void applyNewSettings(const Poco::Util::AbstractConfiguration & config, ContextPtr context, const String & config_prefix, const DisksMap & map) override;
std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const override { return delegate->readMetaFile(path, settings, size); }
std::unique_ptr<WriteBufferFromFileBase> writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode) override { return delegate->writeMetaFile(path, buf_size, mode); }
void removeMetaFileIfExists(const String & path) override { delegate->removeMetaFileIfExists(path); }
UInt32 getRefCount(const String & path) const override { return delegate->getRefCount(path); }
protected:
Executor & getExecutor() override;

View File

@ -86,4 +86,28 @@ SyncGuardPtr IDisk::getDirectorySyncGuard(const String & /* path */) const
return nullptr;
}
std::unique_ptr<ReadBufferFromFileBase> IDisk::readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const
{
LOG_TRACE(&Poco::Logger::get("IDisk"), "Read local metafile: {}", path);
return readFile(path, settings, size);
}
std::unique_ptr<WriteBufferFromFileBase> IDisk::writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode)
{
LOG_TRACE(&Poco::Logger::get("IDisk"), "Write local metafile: {}", path);
return writeFile(path, buf_size, mode);
}
void IDisk::removeMetaFileIfExists(const String & path)
{
LOG_TRACE(&Poco::Logger::get("IDisk"), "Remove local metafile: {}", path);
removeFileIfExists(path);
}
}
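The meta-file methods added here follow a virtual-default-plus-override pattern: IDisk treats metadata files as ordinary local files, while IDiskRemote (further below) redirects the same calls to its dedicated metadata disk. A hedged, self-contained sketch of that pattern with toy types (ToyDisk and ToyRemoteDisk are illustrative, not the real interfaces):

#include <iostream>
#include <string>

struct ToyDisk
{
    virtual ~ToyDisk() = default;

    // Default: metadata files are ordinary local files on this disk.
    virtual std::string readMetaFile(const std::string & path) const
    {
        return "local:" + path;
    }
};

struct ToyRemoteDisk : ToyDisk
{
    // Remote disks keep metadata on a separate local metadata disk,
    // so the override redirects there instead of touching remote data.
    std::string readMetaFile(const std::string & path) const override
    {
        return "metadata_disk:" + path;
    }
};

int main()
{
    ToyRemoteDisk remote;
    const ToyDisk & disk = remote;
    std::cout << disk.readMetaFile("format_version.txt") << '\n'; // metadata_disk:format_version.txt
}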

View File

@ -248,6 +248,28 @@ public:
/// Applies new settings for disk in runtime.
virtual void applyNewSettings(const Poco::Util::AbstractConfiguration &, ContextPtr, const String &, const DisksMap &) {}
/// Open the local file for reading and return a ReadBufferFromFileBase object.
/// Overridden in IDiskRemote.
/// Used for working with custom metadata.
virtual std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const;
/// Open the local file for writing and return a WriteBufferFromFileBase object.
/// Overridden in IDiskRemote.
/// Used for working with custom metadata.
virtual std::unique_ptr<WriteBufferFromFileBase> writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode);
virtual void removeMetaFileIfExists(const String & path);
/// Return reference count for remote FS.
/// Overridden in IDiskRemote.
virtual UInt32 getRefCount(const String &) const { return 0; }
protected:
friend class DiskDecorator;

View File

@ -484,6 +484,7 @@ bool IDiskRemote::tryReserve(UInt64 bytes)
String IDiskRemote::getUniqueId(const String & path) const
{
LOG_TRACE(log, "Remote path: {}, Path: {}", remote_fs_root_path, path);
Metadata metadata(remote_fs_root_path, metadata_disk, path);
String id;
if (!metadata.remote_fs_objects.empty())
@ -500,4 +501,34 @@ AsynchronousReaderPtr IDiskRemote::getThreadPoolReader()
return reader;
}
std::unique_ptr<ReadBufferFromFileBase> IDiskRemote::readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const
{
LOG_TRACE(log, "Read metafile: {}", path);
return metadata_disk->readFile(path, settings, size);
}
std::unique_ptr<WriteBufferFromFileBase> IDiskRemote::writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode)
{
LOG_TRACE(log, "Write metafile: {}", path);
return metadata_disk->writeFile(path, buf_size, mode);
}
void IDiskRemote::removeMetaFileIfExists(const String & path)
{
LOG_TRACE(log, "Remove metafile: {}", path);
return metadata_disk->removeFileIfExists(path);
}
UInt32 IDiskRemote::getRefCount(const String & path) const
{
auto meta = readMeta(path);
return meta.ref_count;
}
}

View File

@ -136,6 +136,21 @@ public:
static AsynchronousReaderPtr getThreadPoolReader();
virtual std::unique_ptr<ReadBufferFromFileBase> readMetaFile(
const String & path,
const ReadSettings & settings,
std::optional<size_t> size) const override;
virtual std::unique_ptr<WriteBufferFromFileBase> writeMetaFile(
const String & path,
size_t buf_size,
WriteMode mode) override;
virtual void removeMetaFileIfExists(
const String & path) override;
UInt32 getRefCount(const String & path) const override;
protected:
Poco::Logger * log;
const String name;

View File

@ -32,7 +32,7 @@ public:
explicit AsynchronousReadIndirectBufferFromRemoteFS(
AsynchronousReaderPtr reader_, const ReadSettings & settings_,
std::shared_ptr<ReadBufferFromRemoteFSGather> impl_,
size_t min_bytes_for_seek = 1024 * 1024);
size_t min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE);
~AsynchronousReadIndirectBufferFromRemoteFS() override;

View File

@ -12,7 +12,7 @@ namespace ErrorCodes
}
/// We assume that size of the dst buf isn't less than src_size.
static size_t decodeURL(const char * src, size_t src_size, char * dst)
static size_t decodeURL(const char * src, size_t src_size, char * dst, bool plus_as_space)
{
const char * src_prev_pos = src;
const char * src_curr_pos = src;
@ -21,12 +21,28 @@ static size_t decodeURL(const char * src, size_t src_size, char * dst)
while (true)
{
src_curr_pos = find_first_symbols<'%'>(src_curr_pos, src_end);
src_curr_pos = find_first_symbols<'%', '+'>(src_curr_pos, src_end);
if (src_curr_pos == src_end)
{
break;
}
else if (*src_curr_pos == '+')
{
if (!plus_as_space)
{
++src_curr_pos;
continue;
}
size_t bytes_to_copy = src_curr_pos - src_prev_pos;
memcpySmallAllowReadWriteOverflow15(dst_pos, src_prev_pos, bytes_to_copy);
dst_pos += bytes_to_copy;
++src_curr_pos;
src_prev_pos = src_curr_pos;
*dst_pos = ' ';
++dst_pos;
}
else if (src_end - src_curr_pos < 3)
{
src_curr_pos = src_end;
@ -67,6 +83,7 @@ static size_t decodeURL(const char * src, size_t src_size, char * dst)
/// Percent decode of URL data.
template <bool plus_as_space>
struct DecodeURLComponentImpl
{
static void vector(const ColumnString::Chars & data, const ColumnString::Offsets & offsets,
@ -83,7 +100,7 @@ struct DecodeURLComponentImpl
{
const char * src_data = reinterpret_cast<const char *>(&data[prev_offset]);
size_t src_size = offsets[i] - prev_offset;
size_t dst_size = decodeURL(src_data, src_size, reinterpret_cast<char *>(res_data.data() + res_offset));
size_t dst_size = decodeURL(src_data, src_size, reinterpret_cast<char *>(res_data.data() + res_offset), plus_as_space);
res_offset += dst_size;
res_offsets[i] = res_offset;
@ -101,11 +118,14 @@ struct DecodeURLComponentImpl
struct NameDecodeURLComponent { static constexpr auto name = "decodeURLComponent"; };
using FunctionDecodeURLComponent = FunctionStringToString<DecodeURLComponentImpl, NameDecodeURLComponent>;
struct NameDecodeURLFormComponent { static constexpr auto name = "decodeURLFormComponent"; };
using FunctionDecodeURLComponent = FunctionStringToString<DecodeURLComponentImpl<false>, NameDecodeURLComponent>;
using FunctionDecodeURLFormComponent = FunctionStringToString<DecodeURLComponentImpl<true>, NameDecodeURLFormComponent>;
void registerFunctionDecodeURLComponent(FunctionFactory & factory)
{
factory.registerFunction<FunctionDecodeURLComponent>();
factory.registerFunction<FunctionDecodeURLFormComponent>();
}
}
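To make the new plus_as_space flag concrete: decodeURLComponent leaves '+' untouched, while the new decodeURLFormComponent additionally turns '+' into a space on top of ordinary percent-decoding. A simplified standalone decoder with the same two behaviours (an illustration of the semantics only, not the find_first_symbols-based implementation above):

#include <cctype>
#include <iostream>
#include <string>

// Decode %XX escapes; optionally treat '+' as a space (application/x-www-form-urlencoded style).
static std::string decodeURL(const std::string & src, bool plus_as_space)
{
    auto hex = [](char c) -> int
    {
        if (c >= '0' && c <= '9') return c - '0';
        c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        return -1;
    };

    std::string dst;
    dst.reserve(src.size());
    for (size_t i = 0; i < src.size(); ++i)
    {
        if (src[i] == '+' && plus_as_space)
        {
            dst += ' ';
        }
        else if (src[i] == '%' && i + 2 < src.size() && hex(src[i + 1]) >= 0 && hex(src[i + 2]) >= 0)
        {
            dst += static_cast<char>(hex(src[i + 1]) * 16 + hex(src[i + 2]));
            i += 2;
        }
        else
        {
            dst += src[i]; // '+' without plus_as_space and malformed escapes pass through unchanged in this sketch
        }
    }
    return dst;
}

int main()
{
    std::cout << decodeURL("a%20b+c", /*plus_as_space=*/ false) << '\n'; // "a b+c"
    std::cout << decodeURL("a%20b+c", /*plus_as_space=*/ true)  << '\n'; // "a b c"
}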

View File

@ -11,7 +11,14 @@ namespace ErrorCodes
extern const int ILLEGAL_COLUMN;
}
struct ArrayFirstImpl
enum class ArrayFirstLastStrategy
{
First,
Last
};
template <ArrayFirstLastStrategy strategy>
struct ArrayFirstLastImpl
{
static bool needBoolean() { return false; }
static bool needExpression() { return true; }
@ -40,15 +47,23 @@ struct ArrayFirstImpl
auto out = data.cloneEmpty();
out->reserve(data.size());
size_t pos{};
for (auto offset : offsets)
size_t offsets_size = offsets.size();
for (size_t offset_index = 0; offset_index < offsets_size; ++offset_index)
{
if (offset - pos > 0)
out->insert(data[pos]);
else
out->insertDefault();
size_t start_offset = offsets[offset_index - 1];
size_t end_offset = offsets[offset_index];
pos = offset;
if (end_offset > start_offset)
{
if constexpr (strategy == ArrayFirstLastStrategy::First)
out->insert(data[start_offset]);
else
out->insert(data[end_offset - 1]);
}
else
{
out->insertDefault();
}
}
return out;
@ -67,18 +82,36 @@ struct ArrayFirstImpl
auto out = data.cloneEmpty();
out->reserve(data.size());
size_t pos{};
for (auto offset : offsets)
size_t offsets_size = offsets.size();
for (size_t offset_index = 0; offset_index < offsets_size; ++offset_index)
{
auto exists = false;
for (; pos < offset; ++pos)
size_t start_offset = offsets[offset_index - 1];
size_t end_offset = offsets[offset_index];
bool exists = false;
if constexpr (strategy == ArrayFirstLastStrategy::First)
{
if (filter[pos])
for (; start_offset != end_offset; ++start_offset)
{
out->insert(data[pos]);
exists = true;
pos = offset;
break;
if (filter[start_offset])
{
out->insert(data[start_offset]);
exists = true;
break;
}
}
}
else
{
for (; end_offset != start_offset; --end_offset)
{
if (filter[end_offset - 1])
{
out->insert(data[end_offset - 1]);
exists = true;
break;
}
}
}
@ -91,11 +124,17 @@ struct ArrayFirstImpl
};
struct NameArrayFirst { static constexpr auto name = "arrayFirst"; };
using ArrayFirstImpl = ArrayFirstLastImpl<ArrayFirstLastStrategy::First>;
using FunctionArrayFirst = FunctionArrayMapped<ArrayFirstImpl, NameArrayFirst>;
struct NameArrayLast { static constexpr auto name = "arrayLast"; };
using ArrayLastImpl = ArrayFirstLastImpl<ArrayFirstLastStrategy::Last>;
using FunctionArrayLast = FunctionArrayMapped<ArrayLastImpl, NameArrayLast>;
void registerFunctionArrayFirst(FunctionFactory & factory)
{
factory.registerFunction<FunctionArrayFirst>();
factory.registerFunction<FunctionArrayLast>();
}
}
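For readers unfamiliar with the flattened array layout used above: all elements live in one data column and offsets[i] is the cumulative end of the i-th array, so the i-th array occupies [offsets[i - 1], offsets[i]). A minimal sketch of the first/last selection over that layout, handling the first boundary explicitly instead of relying on padded offsets:

#include <iostream>
#include <optional>
#include <vector>

// Pick the first or last element of each array in a flattened (data, offsets) representation.
static std::vector<std::optional<int>> firstOrLast(
    const std::vector<int> & data, const std::vector<size_t> & offsets, bool last)
{
    std::vector<std::optional<int>> result;
    result.reserve(offsets.size());
    size_t start = 0;
    for (size_t end : offsets)
    {
        if (end > start)
            result.push_back(last ? data[end - 1] : data[start]);
        else
            result.push_back(std::nullopt); // empty array -> default value in the real function
        start = end;
    }
    return result;
}

int main()
{
    // Represents [[1, 2, 3], [], [7]]
    std::vector<int> data{1, 2, 3, 7};
    std::vector<size_t> offsets{3, 3, 4};
    for (auto v : firstOrLast(data, offsets, /*last=*/ true))
        std::cout << (v ? std::to_string(*v) : "NULL") << ' '; // 3 NULL 7
    std::cout << '\n';
}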

View File

@ -1,90 +0,0 @@
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnsNumber.h>
#include "FunctionArrayMapped.h"
#include <Functions/FunctionFactory.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
}
struct ArrayFirstIndexImpl
{
static bool needBoolean() { return false; }
static bool needExpression() { return true; }
static bool needOneArray() { return false; }
static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & /*array_element*/)
{
return std::make_shared<DataTypeUInt32>();
}
static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped)
{
const auto * column_filter = typeid_cast<const ColumnUInt8 *>(&*mapped);
if (!column_filter)
{
const auto * column_filter_const = checkAndGetColumnConst<ColumnUInt8>(&*mapped);
if (!column_filter_const)
throw Exception("Unexpected type of filter column", ErrorCodes::ILLEGAL_COLUMN);
if (column_filter_const->getValue<UInt8>())
{
const auto & offsets = array.getOffsets();
auto out_column = ColumnUInt32::create(offsets.size());
auto & out_index = out_column->getData();
size_t pos{};
for (size_t i = 0; i < offsets.size(); ++i)
{
out_index[i] = offsets[i] - pos > 0;
pos = offsets[i];
}
return out_column;
}
else
return DataTypeUInt32().createColumnConst(array.size(), 0u);
}
const auto & filter = column_filter->getData();
const auto & offsets = array.getOffsets();
auto out_column = ColumnUInt32::create(offsets.size());
auto & out_index = out_column->getData();
size_t pos{};
for (size_t i = 0; i < offsets.size(); ++i)
{
UInt32 index{};
for (size_t idx{1}; pos < offsets[i]; ++pos, ++idx)
{
if (filter[pos])
{
index = idx;
pos = offsets[i];
break;
}
}
out_index[i] = index;
}
return out_column;
}
};
struct NameArrayFirstIndex { static constexpr auto name = "arrayFirstIndex"; };
using FunctionArrayFirstIndex = FunctionArrayMapped<ArrayFirstIndexImpl, NameArrayFirstIndex>;
void registerFunctionArrayFirstIndex(FunctionFactory & factory)
{
factory.registerFunction<FunctionArrayFirstIndex>();
}
}

View File

@ -0,0 +1,134 @@
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnsNumber.h>
#include "FunctionArrayMapped.h"
#include <Functions/FunctionFactory.h>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
}
enum class ArrayFirstLastIndexStrategy
{
First,
Last
};
template <ArrayFirstLastIndexStrategy strategy>
struct ArrayFirstLastIndexImpl
{
static bool needBoolean() { return false; }
static bool needExpression() { return true; }
static bool needOneArray() { return false; }
static DataTypePtr getReturnType(const DataTypePtr & /*expression_return*/, const DataTypePtr & /*array_element*/)
{
return std::make_shared<DataTypeUInt32>();
}
static ColumnPtr execute(const ColumnArray & array, ColumnPtr mapped)
{
const auto * column_filter = typeid_cast<const ColumnUInt8 *>(&*mapped);
if (!column_filter)
{
const auto * column_filter_const = checkAndGetColumnConst<ColumnUInt8>(&*mapped);
if (!column_filter_const)
throw Exception("Unexpected type of filter column", ErrorCodes::ILLEGAL_COLUMN);
if (column_filter_const->getValue<UInt8>())
{
const auto & offsets = array.getOffsets();
auto out_column = ColumnUInt32::create(offsets.size());
auto & out_index = out_column->getData();
size_t offsets_size = offsets.size();
for (size_t offset_index = 0; offset_index < offsets_size; ++offset_index)
{
size_t start_offset = offsets[offset_index - 1];
size_t end_offset = offsets[offset_index];
if (end_offset > start_offset)
{
if constexpr (strategy == ArrayFirstLastIndexStrategy::First)
out_index[offset_index] = 1;
else
out_index[offset_index] = end_offset - start_offset;
}
else
{
out_index[offset_index] = 0;
}
}
return out_column;
}
else
{
return DataTypeUInt32().createColumnConst(array.size(), 0u);
}
}
const auto & filter = column_filter->getData();
const auto & offsets = array.getOffsets();
size_t offsets_size = offsets.size();
auto out_column = ColumnUInt32::create(offsets_size);
auto & out_index = out_column->getData();
for (size_t offset_index = 0; offset_index < offsets_size; ++offset_index)
{
size_t start_offset = offsets[offset_index - 1];
size_t end_offset = offsets[offset_index];
size_t result_index = 0;
if constexpr (strategy == ArrayFirstLastIndexStrategy::First)
{
for (size_t index = 1; start_offset != end_offset; ++start_offset, ++index)
{
if (filter[start_offset])
{
result_index = index;
break;
}
}
}
else
{
for (size_t index = end_offset - start_offset; end_offset != start_offset; --end_offset, --index)
{
if (filter[end_offset - 1])
{
result_index = index;
break;
}
}
}
out_index[offset_index] = result_index;
}
return out_column;
}
};
struct NameArrayFirstIndex { static constexpr auto name = "arrayFirstIndex"; };
using ArrayFirstIndexImpl = ArrayFirstLastIndexImpl<ArrayFirstLastIndexStrategy::First>;
using FunctionArrayFirstIndex = FunctionArrayMapped<ArrayFirstIndexImpl, NameArrayFirstIndex>;
struct NameArrayLastIndex { static constexpr auto name = "arrayLastIndex"; };
using ArrayLastIndexImpl = ArrayFirstLastIndexImpl<ArrayFirstLastIndexStrategy::Last>;
using FunctionArrayLastIndex = FunctionArrayMapped<ArrayLastIndexImpl, NameArrayLastIndex>;
void registerFunctionArrayFirstIndex(FunctionFactory & factory)
{
factory.registerFunction<FunctionArrayFirstIndex>();
factory.registerFunction<FunctionArrayLastIndex>();
}
}
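The index variant walks the same offsets but reports a 1-based position inside each array, with 0 meaning no element matched. A short sketch of that convention, assuming a plain per-element predicate in place of the lambda column:

#include <functional>
#include <iostream>
#include <vector>

// 1-based index of the first element of each array satisfying pred, or 0 if none.
static std::vector<size_t> firstIndex(
    const std::vector<int> & data, const std::vector<size_t> & offsets, const std::function<bool(int)> & pred)
{
    std::vector<size_t> result;
    size_t start = 0;
    for (size_t end : offsets)
    {
        size_t found = 0;
        for (size_t pos = start; pos < end; ++pos)
        {
            if (pred(data[pos]))
            {
                found = pos - start + 1;
                break;
            }
        }
        result.push_back(found);
        start = end;
    }
    return result;
}

int main()
{
    // Represents [[5, 8, 9], [], [2]]; predicate: element > 7
    std::vector<int> data{5, 8, 9, 2};
    std::vector<size_t> offsets{3, 3, 4};
    for (size_t idx : firstIndex(data, offsets, [](int x) { return x > 7; }))
        std::cout << idx << ' '; // 2 0 0
    std::cout << '\n';
}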

View File

@ -0,0 +1,80 @@
#include <Columns/ColumnString.h>
#include <DataTypes/DataTypeString.h>
#include <IO/WriteHelpers.h>
#include <Functions/FunctionFactory.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
class FunctionMonthName : public IFunction
{
public:
static constexpr auto name = "monthName";
static constexpr auto month_str = "month";
static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionMonthName>(context); }
explicit FunctionMonthName(ContextPtr context_)
: function_resolver(FunctionFactory::instance().get("dateName", std::move(context_)))
{}
String getName() const override { return name; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
size_t getNumberOfArguments() const override { return 1; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() != 1)
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be 1",
getName(),
toString(arguments.size()));
WhichDataType argument_type(arguments[0].type);
if (!argument_type.isDate() && !argument_type.isDateTime() && !argument_type.isDateTime64())
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type of argument of function {}, should be Date, DateTime or DateTime64",
getName());
return std::make_shared<DataTypeString>();
}
ColumnPtr executeImpl(
const ColumnsWithTypeAndName & arguments,
const DataTypePtr & result_type,
size_t input_rows_count) const override
{
auto month_column = DataTypeString().createColumnConst(arguments[0].column->size(), month_str);
ColumnsWithTypeAndName temporary_columns
{
ColumnWithTypeAndName(month_column, std::make_shared<DataTypeString>(), ""),
arguments[0]
};
auto date_name_func = function_resolver->build(temporary_columns);
return date_name_func->execute(temporary_columns, result_type, input_rows_count);
}
private:
FunctionOverloadResolverPtr function_resolver;
};
void registerFunctionMonthName(FunctionFactory & factory)
{
factory.registerFunction<FunctionMonthName>(FunctionFactory::CaseInsensitive);
}
}
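monthName simply forwards to dateName with a constant 'month' first argument, so the observable behaviour is mapping a Date/DateTime to its English month name. A tiny sketch of that mapping (purely illustrative, independent of the FunctionFactory plumbing):

#include <array>
#include <iostream>
#include <string>

// English month name for a 1-based month number, mirroring what monthName(date) yields.
static std::string monthName(unsigned month)
{
    static const std::array<const char *, 12> names{
        "January", "February", "March", "April", "May", "June",
        "July", "August", "September", "October", "November", "December"};
    return (month >= 1 && month <= 12) ? names[month - 1] : "";
}

int main()
{
    std::cout << monthName(4) << '\n'; // April
}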

View File

@ -65,6 +65,7 @@ void registerFunctionSubtractQuarters(FunctionFactory &);
void registerFunctionSubtractYears(FunctionFactory &);
void registerFunctionDateDiff(FunctionFactory &);
void registerFunctionDateName(FunctionFactory &);
void registerFunctionMonthName(FunctionFactory &);
void registerFunctionToTimeZone(FunctionFactory &);
void registerFunctionFormatDateTime(FunctionFactory &);
void registerFunctionFromModifiedJulianDay(FunctionFactory &);
@ -136,6 +137,7 @@ void registerFunctionsDateTime(FunctionFactory & factory)
registerFunctionSubtractYears(factory);
registerFunctionDateDiff(factory);
registerFunctionDateName(factory);
registerFunctionMonthName(factory);
registerFunctionToTimeZone(factory);
registerFunctionFormatDateTime(factory);
registerFunctionFromModifiedJulianDay(factory);

View File

@ -697,6 +697,10 @@ ASTs ActionsMatcher::doUntuple(const ASTFunction * function, ActionsMatcher::Dat
for (const auto & name [[maybe_unused]] : tuple_type->getElementNames())
{
auto tuple_ast = function->arguments->children[0];
/// This transformation can lead to exponential growth of AST size, let's check it.
tuple_ast->checkSize(data.getContext()->getSettingsRef().max_ast_elements);
if (tid != 0)
tuple_ast = tuple_ast->clone();

View File

@ -3,6 +3,7 @@
namespace DB
{
bool ParserKeyword::parseImpl(Pos & pos, [[maybe_unused]] ASTPtr & node, Expected & expected)
{
if (pos->type != TokenType::BareWord)
@ -36,4 +37,5 @@ bool ParserKeyword::parseImpl(Pos & pos, [[maybe_unused]] ASTPtr & node, Expecte
return true;
}
}

File diff suppressed because it is too large

View File

@ -228,49 +228,6 @@ protected:
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
ASTPtr createFunctionCast(const ASTPtr & expr_ast, const ASTPtr & type_ast);
class ParserCastAsExpression : public IParserBase
{
protected:
const char * getName() const override { return "CAST AS expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
class ParserSubstringExpression : public IParserBase
{
protected:
const char * getName() const override { return "SUBSTRING expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
class ParserTrimExpression : public IParserBase
{
protected:
const char * getName() const override { return "TRIM expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
class ParserExtractExpression : public IParserBase
{
protected:
const char * getName() const override { return "EXTRACT expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
class ParserDateAddExpression : public IParserBase
{
protected:
const char * getName() const override { return "DATE_ADD expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
class ParserDateDiffExpression : public IParserBase
{
protected:
const char * getName() const override { return "DATE_DIFF expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
/** NULL literal.
*/
class ParserNull : public IParserBase
@ -319,17 +276,6 @@ protected:
};
/**
* Parse query with EXISTS expression.
*/
class ParserExistsExpression : public IParserBase
{
protected:
const char * getName() const override { return "exists expression"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
/** An array or tuple of literals.
* Arrays can also be parsed as an application of [] operator and tuples as an application of 'tuple' function.
* But parsing the whole array/tuple as a whole constant seriously speeds up the analysis of expressions in the case of very large collection.
@ -521,4 +467,6 @@ protected:
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
ASTPtr createFunctionCast(const ASTPtr & expr_ast, const ASTPtr & type_ast);
}

View File

@ -689,7 +689,7 @@ bool ParserUnaryExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
bool ParserCastExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ASTPtr expr_ast;
if (!elem_parser.parse(pos, expr_ast, expected))
if (!ParserExpressionElement().parse(pos, expr_ast, expected))
return false;
ASTPtr type_ast;

View File

@ -203,9 +203,6 @@ protected:
/// Example: "[1, 1 + 1, 1 + 2]::Array(UInt8)"
class ParserCastExpression : public IParserBase
{
private:
ParserExpressionElement elem_parser;
protected:
const char * getName() const override { return "CAST expression"; }

View File

@ -60,7 +60,9 @@ public:
uint32_t depth = 0;
uint32_t max_depth = 0;
Pos(Tokens & tokens_, uint32_t max_depth_) : TokenIterator(tokens_), max_depth(max_depth_) {}
Pos(Tokens & tokens_, uint32_t max_depth_) : TokenIterator(tokens_), max_depth(max_depth_)
{
}
ALWAYS_INLINE void increaseDepth()
{

View File

@ -17,7 +17,7 @@ public:
Pos begin = pos;
bool res = func();
if (!res)
pos = begin;
pos = begin;
return res;
}
@ -31,7 +31,7 @@ public:
bool res = func();
pos.decreaseDepth();
if (!res)
pos = begin;
pos = begin;
return res;
}

View File

@ -289,8 +289,6 @@ void ReadFromRemote::initializePipeline(QueryPipelineBuilder & pipeline, const B
{
for (const auto & shard : shards)
{
auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>();
if (shard.lazy)
addLazyPipe(pipes, shard, /*coordinator=*/nullptr, /*pool*/{}, /*replica_info*/std::nullopt);
else

View File

@ -325,7 +325,7 @@ void TCPHandler::runImpl()
if (state.is_cancelled)
return std::nullopt;
sendMergeTreeReadTaskRequstAssumeLocked(std::move(request));
sendMergeTreeReadTaskRequestAssumeLocked(std::move(request));
return receivePartitionMergeTreeReadTaskResponseAssumeLocked();
});
@ -805,7 +805,7 @@ void TCPHandler::sendReadTaskRequestAssumeLocked()
}
void TCPHandler::sendMergeTreeReadTaskRequstAssumeLocked(PartitionReadRequest request)
void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(PartitionReadRequest request)
{
writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out);
request.serialize(*out);

View File

@ -239,7 +239,7 @@ private:
void sendEndOfStream();
void sendPartUUIDs();
void sendReadTaskRequestAssumeLocked();
void sendMergeTreeReadTaskRequstAssumeLocked(PartitionReadRequest request);
void sendMergeTreeReadTaskRequestAssumeLocked(PartitionReadRequest request);
void sendProfileInfo(const ProfileInfo & info);
void sendTotals(const Block & totals);
void sendExtremes(const Block & extremes);

View File

@ -16,6 +16,8 @@
#include <Storages/Kafka/KafkaSettings.h>
#endif
#include <re2/re2.h>
namespace DB
{
@ -24,6 +26,12 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
static const std::unordered_set<std::string_view> dictionary_allowed_keys = {
"host", "port", "user", "password", "db",
"database", "table", "schema", "replica",
"update_field", "update_tag", "invalidate_query", "query",
"where", "name", "secure", "uri", "collection"};
String ExternalDataSourceConfiguration::toString() const
{
WriteBufferFromOwnString configuration_info;
@ -159,10 +167,23 @@ std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const
return std::nullopt;
}
static void validateConfigKeys(
const Poco::Util::AbstractConfiguration & dict_config, const String & config_prefix, HasConfigKeyFunc has_config_key_func)
{
Poco::Util::AbstractConfiguration::Keys config_keys;
dict_config.keys(config_prefix, config_keys);
for (const auto & config_key : config_keys)
{
if (!has_config_key_func(config_key))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected key `{}` in dictionary source configuration", config_key);
}
}
std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguration(
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context)
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix,
ContextPtr context, HasConfigKeyFunc has_config_key)
{
validateConfigKeys(dict_config, dict_config_prefix, has_config_key);
ExternalDataSourceConfiguration configuration;
auto collection_name = dict_config.getString(dict_config_prefix + ".name", "");
@ -170,6 +191,7 @@ std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguratio
{
const auto & config = context->getConfigRef();
const auto & collection_prefix = fmt::format("named_collections.{}", collection_name);
validateConfigKeys(dict_config, collection_prefix, has_config_key);
if (!config.has(collection_prefix))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", collection_name);
@ -178,14 +200,15 @@ std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguratio
configuration.port = dict_config.getInt(dict_config_prefix + ".port", config.getUInt(collection_prefix + ".port", 0));
configuration.username = dict_config.getString(dict_config_prefix + ".user", config.getString(collection_prefix + ".user", ""));
configuration.password = dict_config.getString(dict_config_prefix + ".password", config.getString(collection_prefix + ".password", ""));
configuration.database = dict_config.getString(dict_config_prefix + ".db", config.getString(collection_prefix + ".database", ""));
configuration.database = dict_config.getString(dict_config_prefix + ".db", config.getString(dict_config_prefix + ".database",
config.getString(collection_prefix + ".db", config.getString(collection_prefix + ".database", ""))));
configuration.table = dict_config.getString(dict_config_prefix + ".table", config.getString(collection_prefix + ".table", ""));
configuration.schema = dict_config.getString(dict_config_prefix + ".schema", config.getString(collection_prefix + ".schema", ""));
if (configuration.host.empty() || configuration.port == 0 || configuration.username.empty() || configuration.table.empty())
{
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Named collection of connection parameters is missing some of the parameters and dictionary parameters are added");
"Named collection of connection parameters is missing some of the parameters and dictionary parameters are not added");
}
return configuration;
}
@ -194,11 +217,12 @@ std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguratio
ExternalDataSourcesByPriority getExternalDataSourceConfigurationByPriority(
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context)
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context, HasConfigKeyFunc has_config_key)
{
validateConfigKeys(dict_config, dict_config_prefix, has_config_key);
ExternalDataSourceConfiguration common_configuration;
auto named_collection = getExternalDataSourceConfiguration(dict_config, dict_config_prefix, context);
auto named_collection = getExternalDataSourceConfiguration(dict_config, dict_config_prefix, context, has_config_key);
if (named_collection)
{
common_configuration = *named_collection;
@ -209,7 +233,7 @@ ExternalDataSourcesByPriority getExternalDataSourceConfigurationByPriority(
common_configuration.port = dict_config.getUInt(dict_config_prefix + ".port", 0);
common_configuration.username = dict_config.getString(dict_config_prefix + ".user", "");
common_configuration.password = dict_config.getString(dict_config_prefix + ".password", "");
common_configuration.database = dict_config.getString(dict_config_prefix + ".db", "");
common_configuration.database = dict_config.getString(dict_config_prefix + ".db", dict_config.getString(dict_config_prefix + ".database", ""));
common_configuration.table = dict_config.getString(fmt::format("{}.table", dict_config_prefix), "");
common_configuration.schema = dict_config.getString(fmt::format("{}.schema", dict_config_prefix), "");
}
@ -233,8 +257,9 @@ ExternalDataSourcesByPriority getExternalDataSourceConfigurationByPriority(
{
ExternalDataSourceConfiguration replica_configuration(common_configuration);
String replica_name = dict_config_prefix + "." + config_key;
size_t priority = dict_config.getInt(replica_name + ".priority", 0);
validateConfigKeys(dict_config, replica_name, has_config_key);
size_t priority = dict_config.getInt(replica_name + ".priority", 0);
replica_configuration.host = dict_config.getString(replica_name + ".host", common_configuration.host);
replica_configuration.port = dict_config.getUInt(replica_name + ".port", common_configuration.port);
replica_configuration.username = dict_config.getString(replica_name + ".user", common_configuration.username);

View File

@ -64,8 +64,11 @@ struct ExternalDataSourceConfig
*/
std::optional<ExternalDataSourceConfig> getExternalDataSourceConfiguration(const ASTs & args, ContextPtr context, bool is_database_engine = false, bool throw_on_no_collection = true);
using HasConfigKeyFunc = std::function<bool(const String &)>;
std::optional<ExternalDataSourceConfiguration> getExternalDataSourceConfiguration(
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context);
const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix,
ContextPtr context, HasConfigKeyFunc has_config_key);
/// Highest priority is 0, the bigger the number in map, the less the priority.
@ -80,7 +83,7 @@ struct ExternalDataSourcesByPriority
};
ExternalDataSourcesByPriority
getExternalDataSourceConfigurationByPriority(const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context);
getExternalDataSourceConfigurationByPriority(const Poco::Util::AbstractConfiguration & dict_config, const String & dict_config_prefix, ContextPtr context, HasConfigKeyFunc has_config_key);
struct URLBasedDataSourceConfiguration

View File

@ -1284,6 +1284,14 @@ void IMergeTreeDataPart::renameTo(const String & new_relative_path, bool remove_
storage.lockSharedData(*this);
}
void IMergeTreeDataPart::cleanupOldName(const String & old_part_name) const
{
if (name == old_part_name)
return;
storage.unlockSharedData(*this, old_part_name);
}
std::optional<bool> IMergeTreeDataPart::keepSharedDataInDecoupledStorage() const
{
/// NOTE: It's needed for zero-copy replication
@ -1764,6 +1772,12 @@ String IMergeTreeDataPart::getUniqueId() const
}
UInt32 IMergeTreeDataPart::getNumberOfRefereneces() const
{
return volume->getDisk()->getRefCount(fs::path(getFullRelativePath()) / "checksums.txt");
}
String IMergeTreeDataPart::getZeroLevelPartBlockID() const
{
if (info.level != 0)

View File

@ -346,6 +346,9 @@ public:
/// Changes only relative_dir_name, you need to update other metadata (name, is_temp) explicitly
virtual void renameTo(const String & new_relative_path, bool remove_new_dir_if_exists) const;
/// Cleanup shared locks made with old name after part renaming
virtual void cleanupOldName(const String & old_part_name) const;
/// Makes clone of a part in detached/ directory via hard links
virtual void makeCloneInDetached(const String & prefix, const StorageMetadataPtr & metadata_snapshot) const;
@ -412,8 +415,8 @@ public:
/// part creation (using alter query with materialize_ttl setting).
bool checkAllTTLCalculated(const StorageMetadataPtr & metadata_snapshot) const;
/// Return some uniq string for file
/// Required for distinguish different copies of the same part on S3
/// Return some unique string for file.
/// Required to distinguish different copies of the same part on remote FS.
String getUniqueId() const;
/// Get checksums of metadata file in part directory
@ -422,6 +425,10 @@ public:
/// Check metadata in cache is consistent with actual metadata on disk(if use_metadata_cache is true)
std::unordered_map<String, uint128> checkMetadata() const;
/// Return hardlink count for part.
/// Required to keep data on remote FS when part has shadow copies.
UInt32 getNumberOfRefereneces() const;
protected:
/// Total size of all columns, calculated once in calcuateColumnSizesOnDisk

View File

@ -22,6 +22,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER;
extern const int LOGICAL_ERROR;
extern const int QUERY_WAS_CANCELLED;
}
@ -131,8 +132,9 @@ bool MergeTreeBaseSelectProcessor::getTaskFromBuffer()
if (Status::Accepted == res)
return true;
/// To avoid any possibility of ignoring cancellation, exception will be thrown.
if (Status::Cancelled == res)
break;
throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Query had been cancelled");
}
return false;
}
@ -165,8 +167,18 @@ Chunk MergeTreeBaseSelectProcessor::generate()
{
while (!isCancelled())
{
if ((!task || task->isFinished()) && !getNewTask())
return {};
try
{
if ((!task || task->isFinished()) && !getNewTask())
return {};
}
catch (const Exception & e)
{
/// See MergeTreeBaseSelectProcessor::getTaskFromBuffer()
if (e.code() == ErrorCodes::QUERY_WAS_CANCELLED)
return {};
throw;
}
auto res = readFromPart();
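The pattern introduced here: the task producer throws QUERY_WAS_CANCELLED so cancellation cannot be silently swallowed, and generate() translates exactly that error back into end-of-stream. A condensed sketch of the same convert-cancellation-to-exception-and-catch idea with toy types (not the real processor classes):

#include <iostream>
#include <optional>
#include <stdexcept>

struct QueryCancelled : std::runtime_error
{
    QueryCancelled() : std::runtime_error("Query had been cancelled") {}
};

// Producer: either yields a task or throws on cancellation,
// so a caller cannot accidentally keep working after a cancel.
std::optional<int> getNewTask(bool cancelled)
{
    if (cancelled)
        throw QueryCancelled();
    return 42;
}

std::optional<int> generateOne(bool cancelled)
{
    try
    {
        return getNewTask(cancelled);
    }
    catch (const QueryCancelled &)
    {
        return std::nullopt; // cancellation ends the stream quietly; other errors propagate
    }
}

int main()
{
    std::cout << (generateOne(false).has_value() ? "chunk" : "done") << '\n'; // chunk
    std::cout << (generateOne(true).has_value() ? "chunk" : "done") << '\n';  // done
}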

View File

@ -63,6 +63,7 @@
#include <boost/range/adaptor/filtered.hpp>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <base/insertAtEnd.h>
#include <base/scope_guard_safe.h>
@ -2475,6 +2476,8 @@ bool MergeTreeData::renameTempPartAndReplace(
MergeTreePartInfo part_info = part->info;
String part_name;
String old_part_name = part->name;
if (DataPartPtr existing_part_in_partition = getAnyPartInPartition(part->info.partition_id, lock))
{
if (part->partition.value != existing_part_in_partition->partition.value)
@ -2538,6 +2541,7 @@ bool MergeTreeData::renameTempPartAndReplace(
/// So, we maintain invariant: if a non-temporary part in filesystem then it is in data_parts
///
/// If out_transaction is null, we commit the part to the active set immediately, else add it to the transaction.
part->name = part_name;
part->info = part_info;
part->is_temp = false;
@ -2586,6 +2590,9 @@ bool MergeTreeData::renameTempPartAndReplace(
out_covered_parts->emplace_back(std::move(covered_part));
}
/// Cleanup shared locks made with old name
part->cleanupOldName(old_part_name);
return true;
}
@ -3924,8 +3931,8 @@ void MergeTreeData::dropDetached(const ASTPtr & partition, bool part, ContextPtr
for (auto & [old_name, new_name, disk] : renamed_parts.old_and_new_names)
{
disk->removeRecursive(fs::path(relative_data_path) / "detached" / new_name / "");
LOG_DEBUG(log, "Dropped detached part {}", old_name);
bool keep_shared = removeDetachedPart(disk, fs::path(relative_data_path) / "detached" / new_name / "", old_name, false);
LOG_DEBUG(log, "Dropped detached part {}, keep shared data: {}", old_name, keep_shared);
old_name.clear();
}
}
@ -5213,7 +5220,9 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
LOG_DEBUG(log, "Freezing part {} snapshot will be placed at {}", part->name, backup_path);
part->volume->getDisk()->createDirectories(backup_path);
auto disk = part->volume->getDisk();
disk->createDirectories(backup_path);
String src_part_path = part->getFullRelativePath();
String backup_part_path = fs::path(backup_path) / relative_data_path / part->relative_path;
@ -5224,16 +5233,20 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
src_part_path = fs::path(relative_data_path) / flushed_part_path / "";
}
localBackup(part->volume->getDisk(), src_part_path, backup_part_path);
localBackup(disk, src_part_path, backup_part_path);
part->volume->getDisk()->removeFileIfExists(fs::path(backup_part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
// Store metadata for replicated table.
// Do nothing for non-replicated.
createAndStoreFreezeMetadata(disk, part, backup_part_path);
disk->removeFileIfExists(fs::path(backup_part_path) / IMergeTreeDataPart::DELETE_ON_DESTROY_MARKER_FILE_NAME);
part->is_frozen.store(true, std::memory_order_relaxed);
result.push_back(PartitionCommandResultInfo{
.partition_id = part->info.partition_id,
.part_name = part->name,
.backup_path = fs::path(part->volume->getDisk()->getPath()) / backup_path,
.part_backup_path = fs::path(part->volume->getDisk()->getPath()) / backup_part_path,
.backup_path = fs::path(disk->getPath()) / backup_path,
.part_backup_path = fs::path(disk->getPath()) / backup_part_path,
.backup_name = backup_name,
});
++parts_processed;
@ -5243,6 +5256,11 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
return result;
}
void MergeTreeData::createAndStoreFreezeMetadata(DiskPtr, DataPartPtr, String) const
{
}
PartitionCommandsResultInfo MergeTreeData::unfreezePartition(
const ASTPtr & partition,
const String & backup_name,
@ -5260,6 +5278,13 @@ PartitionCommandsResultInfo MergeTreeData::unfreezeAll(
return unfreezePartitionsByMatcher([] (const String &) { return true; }, backup_name, local_context);
}
bool MergeTreeData::removeDetachedPart(DiskPtr disk, const String & path, const String &, bool)
{
disk->removeRecursive(path);
return false;
}
PartitionCommandsResultInfo MergeTreeData::unfreezePartitionsByMatcher(MatcherFn matcher, const String & backup_name, ContextPtr)
{
auto backup_path = fs::path("shadow") / escapeForFileName(backup_name) / relative_data_path;
@ -5288,7 +5313,7 @@ PartitionCommandsResultInfo MergeTreeData::unfreezePartitionsByMatcher(MatcherFn
const auto & path = it->path();
disk->removeRecursive(path);
bool keep_shared = removeDetachedPart(disk, path, partition_directory, true);
result.push_back(PartitionCommandResultInfo{
.partition_id = partition_id,
@ -5298,7 +5323,7 @@ PartitionCommandsResultInfo MergeTreeData::unfreezePartitionsByMatcher(MatcherFn
.backup_name = backup_name,
});
LOG_DEBUG(log, "Unfreezed part by path {}", disk->getPath() + path);
LOG_DEBUG(log, "Unfreezed part by path {}, keep shared data: {}", disk->getPath() + path, keep_shared);
}
}

View File

@ -875,10 +875,21 @@ public:
/// Overridden in StorageReplicatedMergeTree
virtual bool unlockSharedData(const IMergeTreeDataPart &) const { return true; }
/// Remove lock with old name for shared data part after rename
virtual bool unlockSharedData(const IMergeTreeDataPart &, const String &) const { return true; }
/// Fetch part only if some replica has it on shared storage like S3
/// Overridden in StorageReplicatedMergeTree
virtual bool tryToFetchIfShared(const IMergeTreeDataPart &, const DiskPtr &, const String &) { return false; }
/// Check shared data usage on other replicas for detached/freezed part
/// Remove local files and remote files if needed
virtual bool removeDetachedPart(DiskPtr disk, const String & path, const String & part_name, bool is_freezed);
/// Store metadata for replicated tables
/// Do nothing for non-replicated tables
virtual void createAndStoreFreezeMetadata(DiskPtr disk, DataPartPtr part, String backup_part_path) const;
/// Parts that currently submerging (merging to bigger parts) or emerging
/// (to be appeared after merging finished). These two variables have to be used
/// with `currently_submerging_emerging_mutex`.

View File

@ -125,8 +125,10 @@ struct Settings;
M(UInt64, concurrent_part_removal_threshold, 100, "Activate concurrent part removal (see 'max_part_removal_threads') only if the number of inactive data parts is at least this.", 0) \
M(String, storage_policy, "default", "Name of storage disk policy", 0) \
M(Bool, allow_nullable_key, false, "Allow Nullable types as primary keys.", 0) \
M(Bool, allow_remote_fs_zero_copy_replication, true, "Allow Zero-copy replication over remote fs", 0) \
M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm", 0) \
M(Bool, allow_remote_fs_zero_copy_replication, true, "Allow Zero-copy replication over remote fs.", 0) \
M(String, remote_fs_zero_copy_zookeeper_path, "/clickhouse/zero_copy", "ZooKeeper path for Zero-copy table-independent info.", 0) \
M(Bool, remote_fs_zero_copy_path_compatible_mode, false, "Run zero-copy in compatible mode during conversion process.", 0) \
M(Bool, remove_empty_parts, true, "Remove empty parts after they were pruned by TTL, mutation, or collapsing merge algorithm.", 0) \
M(Bool, assign_part_uuids, false, "Generate UUIDs for parts. Before enabling check that all replicas support new format.", 0) \
M(Int64, max_partitions_to_read, -1, "Limit the max number of partitions that can be accessed in one query. <= 0 means unlimited. This setting is the default that can be overridden by the query-level setting with the same name.", 0) \
M(UInt64, max_concurrent_queries, 0, "Max number of concurrently executed queries related to the MergeTree table (0 - disabled). Queries will still be limited by other max_concurrent_queries settings.", 0) \

View File

@ -228,6 +228,8 @@ void ReplicatedMergeTreeSink::commitPart(
bool is_already_existing_part = false;
String old_part_name = part->name;
while (true)
{
/// Obtain incremental block number and lock it. The lock holds our intention to add the block to the filesystem.
@ -508,6 +510,9 @@ void ReplicatedMergeTreeSink::commitPart(
waitForQuorum(zookeeper, part->name, quorum_info.status_path, quorum_info.is_active_node_value);
}
/// Cleanup shared locks made with old name
part->cleanupOldName(old_part_name);
}
void ReplicatedMergeTreeSink::onStart()

View File

@ -18,6 +18,7 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int POSTGRESQL_REPLICATION_INTERNAL_ERROR;
}
MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer(
@ -29,7 +30,7 @@ MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer(
const size_t max_block_size_,
bool schema_as_a_part_of_table_name_,
bool allow_automatic_update_,
Storages storages_,
StorageInfos storages_info_,
const String & name_for_logger)
: log(&Poco::Logger::get("PostgreSQLReplicaConsumer(" + name_for_logger + ")"))
, context(context_)
@ -41,7 +42,6 @@ MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer(
, max_block_size(max_block_size_)
, schema_as_a_part_of_table_name(schema_as_a_part_of_table_name_)
, allow_automatic_update(allow_automatic_update_)
, storages(storages_)
{
final_lsn = start_lsn;
auto tx = std::make_shared<pqxx::nontransaction>(connection->getRef());
@ -49,19 +49,28 @@ MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer(
LOG_TRACE(log, "Starting replication. LSN: {} (last: {})", getLSNValue(current_lsn), getLSNValue(final_lsn));
tx->commit();
for (const auto & [table_name, storage] : storages)
{
buffers.emplace(table_name, Buffer(storage));
}
for (const auto & [table_name, storage_info] : storages_info_)
storages.emplace(table_name, storage_info);
}
void MaterializedPostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storage)
MaterializedPostgreSQLConsumer::StorageData::StorageData(const StorageInfo & storage_info)
: storage(storage_info.storage), buffer(storage_info.storage->getInMemoryMetadataPtr(), storage_info.attributes)
{
auto table_id = storage_info.storage->getStorageID();
LOG_TRACE(&Poco::Logger::get("StorageMaterializedPostgreSQL"),
"New buffer for table {}, number of attributes: {}, number of columns: {}, structure: {}",
table_id.getNameForLogs(), buffer.attributes.size(), buffer.getColumnsNum(), buffer.description.sample_block.dumpStructure());
}
MaterializedPostgreSQLConsumer::StorageData::Buffer::Buffer(
StorageMetadataPtr storage_metadata, const PostgreSQLTableStructure::Attributes & attributes_)
: attributes(attributes_)
{
const auto storage_metadata = storage->getInMemoryMetadataPtr();
const Block sample_block = storage_metadata->getSampleBlock();
/// Need to clear type, because in description.init() the types are appended (emplace_back)
/// Need to clear type, because in description.init() the types are appended
description.types.clear();
description.init(sample_block);
@ -69,13 +78,13 @@ void MaterializedPostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storag
const auto & storage_columns = storage_metadata->getColumns().getAllPhysical();
auto insert_columns = std::make_shared<ASTExpressionList>();
auto table_id = storage->getStorageID();
LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLBuffer"), "New buffer for table {}.{} ({}), structure: {}",
table_id.database_name, table_id.table_name, toString(table_id.uuid), sample_block.dumpStructure());
auto columns_num = description.sample_block.columns();
assert(columns_num == storage_columns.size());
if (attributes.size() + 2 != columns_num) /// +2 because sign and version columns
throw Exception(ErrorCodes::LOGICAL_ERROR, "Columns number mismatch. Attributes: {}, buffer: {}",
attributes.size(), columns_num);
assert(description.sample_block.columns() == storage_columns.size());
size_t idx = 0;
for (const auto & column : storage_columns)
{
if (description.types[idx].first == ExternalResultDescription::ValueType::vtArray)
@ -85,37 +94,45 @@ void MaterializedPostgreSQLConsumer::Buffer::createEmptyBuffer(StoragePtr storag
insert_columns->children.emplace_back(std::make_shared<ASTIdentifier>(column.name));
}
columnsAST = std::move(insert_columns);
columns_ast = std::move(insert_columns);
}
void MaterializedPostgreSQLConsumer::insertValue(Buffer & buffer, const std::string & value, size_t column_idx)
void MaterializedPostgreSQLConsumer::insertValue(StorageData::Buffer & buffer, const std::string & value, size_t column_idx)
{
const auto & sample = buffer.description.sample_block.getByPosition(column_idx);
bool is_nullable = buffer.description.types[column_idx].second;
if (is_nullable)
try
{
ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*buffer.columns[column_idx]);
const auto & data_type = assert_cast<const DataTypeNullable &>(*sample.type);
if (is_nullable)
{
ColumnNullable & column_nullable = assert_cast<ColumnNullable &>(*buffer.columns[column_idx]);
const auto & data_type = assert_cast<const DataTypeNullable &>(*sample.type);
insertPostgreSQLValue(
column_nullable.getNestedColumn(), value,
buffer.description.types[column_idx].first, data_type.getNestedType(), buffer.array_info, column_idx);
insertPostgreSQLValue(
column_nullable.getNestedColumn(), value,
buffer.description.types[column_idx].first, data_type.getNestedType(), buffer.array_info, column_idx);
column_nullable.getNullMapData().emplace_back(0);
column_nullable.getNullMapData().emplace_back(0);
}
else
{
insertPostgreSQLValue(
*buffer.columns[column_idx], value,
buffer.description.types[column_idx].first, sample.type,
buffer.array_info, column_idx);
}
}
else
catch (const pqxx::conversion_error & e)
{
insertPostgreSQLValue(
*buffer.columns[column_idx], value,
buffer.description.types[column_idx].first, sample.type,
buffer.array_info, column_idx);
LOG_ERROR(log, "Conversion failed while inserting PostgreSQL value {}, will insert default value. Error: {}", value, e.what());
insertDefaultValue(buffer, column_idx);
}
}
void MaterializedPostgreSQLConsumer::insertDefaultValue(Buffer & buffer, size_t column_idx)
void MaterializedPostgreSQLConsumer::insertDefaultValue(StorageData::Buffer & buffer, size_t column_idx)
{
const auto & sample = buffer.description.sample_block.getByPosition(column_idx);
insertDefaultPostgreSQLValue(*buffer.columns[column_idx], *sample.column);
@ -186,10 +203,16 @@ Int8 MaterializedPostgreSQLConsumer::readInt8(const char * message, size_t & pos
void MaterializedPostgreSQLConsumer::readTupleData(
Buffer & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value)
StorageData::Buffer & buffer, const char * message, size_t & pos, [[maybe_unused]] size_t size, PostgreSQLQuery type, bool old_value)
{
Int16 num_columns = readInt16(message, pos, size);
/// Sanity check. In fact, it was already checked.
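/// The nested ReplacingMergeTree table carries two extra service columns (_sign and _version) on top of the PostgreSQL attributes, hence the "+2" below.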
if (static_cast<size_t>(num_columns) + 2 != buffer.getColumnsNum()) /// +2 -- sign and version columns
throw Exception(ErrorCodes::POSTGRESQL_REPLICATION_INTERNAL_ERROR,
"Number of columns does not match. Got: {}, expected {}, current buffer structure: {}",
num_columns, buffer.getColumnsNum(), buffer.description.sample_block.dumpStructure());
auto proccess_column_value = [&](Int8 identifier, Int16 column_idx)
{
switch (identifier)
@ -202,8 +225,15 @@ void MaterializedPostgreSQLConsumer::readTupleData(
case 't': /// Text formatted value
{
Int32 col_len = readInt32(message, pos, size);
String value;
/// Sanity check for protocol misuse.
/// PostgreSQL uses a fixed page size (commonly 8 kB), and does not allow tuples to span multiple pages.
static constexpr Int32 sanity_check_max_col_len = 1024 * 8 * 2; /// *2 -- just in case.
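/// I.e. a single text value longer than 16 KiB is treated as a protocol error.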
if (unlikely(col_len > sanity_check_max_col_len))
throw Exception(ErrorCodes::POSTGRESQL_REPLICATION_INTERNAL_ERROR,
"Column length is suspiciously long: {}", col_len);
String value;
for (Int32 i = 0; i < col_len; ++i)
value += readInt8(message, pos, size);
@ -276,19 +306,20 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
{
Int32 relation_id = readInt32(replication_message, pos, size);
const auto & table_name = relation_id_to_name[relation_id];
/// FIXME: If the table name is empty here, it means we failed to load it, but it was included in the publication. Need to remove it?
if (table_name.empty())
LOG_WARNING(log, "No table mapping for relation id: {}. Probably table failed to be loaded", relation_id);
{
LOG_ERROR(log, "No table mapping for relation id: {}. It's a bug", relation_id);
return;
}
if (!isSyncAllowed(relation_id, table_name))
return;
Int8 new_tuple = readInt8(replication_message, pos, size);
auto buffer = buffers.find(table_name);
assert(buffer != buffers.end());
auto & buffer = storages.find(table_name)->second.buffer;
if (new_tuple)
readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::INSERT);
readTupleData(buffer, replication_message, pos, size, PostgreSQLQuery::INSERT);
break;
}
@ -296,15 +327,16 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
{
Int32 relation_id = readInt32(replication_message, pos, size);
const auto & table_name = relation_id_to_name[relation_id];
/// FIXME: If the table name is empty here, it means we failed to load it, but it was included in the publication. Need to remove it?
if (table_name.empty())
LOG_WARNING(log, "No table mapping for relation id: {}. Probably table failed to be loaded", relation_id);
{
LOG_ERROR(log, "No table mapping for relation id: {}. It's a bug", relation_id);
return;
}
if (!isSyncAllowed(relation_id, table_name))
return;
auto buffer = buffers.find(table_name);
assert(buffer != buffers.end());
auto & buffer = storages.find(table_name)->second.buffer;
auto proccess_identifier = [&](Int8 identifier) -> bool
{
@ -319,13 +351,13 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
/// it is much more efficient to use replica identity index, but support all possible cases.
case 'O':
{
readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE, true);
readTupleData(buffer, replication_message, pos, size, PostgreSQLQuery::UPDATE, true);
break;
}
case 'N':
{
/// New row.
readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::UPDATE);
readTupleData(buffer, replication_message, pos, size, PostgreSQLQuery::UPDATE);
read_next = false;
break;
}
@ -347,9 +379,11 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
{
Int32 relation_id = readInt32(replication_message, pos, size);
const auto & table_name = relation_id_to_name[relation_id];
/// FIXME: If the table name is empty here, it means we failed to load it, but it was included in the publication. Need to remove it?
if (table_name.empty())
LOG_WARNING(log, "No table mapping for relation id: {}. Probably table failed to be loaded", relation_id);
{
LOG_ERROR(log, "No table mapping for relation id: {}. It's a bug", relation_id);
return;
}
if (!isSyncAllowed(relation_id, table_name))
return;
@ -357,10 +391,8 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
/// 0 or 1 if replica identity is set to full. For now only default replica identity is supported (with primary keys).
readInt8(replication_message, pos, size);
auto buffer = buffers.find(table_name);
assert(buffer != buffers.end());
readTupleData(buffer->second, replication_message, pos, size, PostgreSQLQuery::DELETE);
auto & buffer = storages.find(table_name)->second.buffer;
readTupleData(buffer, replication_message, pos, size, PostgreSQLQuery::DELETE);
break;
}
case 'C': // Commit
@ -379,7 +411,6 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
Int32 relation_id = readInt32(replication_message, pos, size);
String relation_namespace, relation_name;
readString(replication_message, pos, size, relation_namespace);
readString(replication_message, pos, size, relation_name);
@ -389,22 +420,26 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
else
table_name = relation_name;
if (!relation_id_to_name.contains(relation_id))
relation_id_to_name[relation_id] = table_name;
if (!isSyncAllowed(relation_id, relation_name))
return;
if (storages.find(table_name) == storages.end())
auto storage_iter = storages.find(table_name);
if (storage_iter == storages.end())
{
markTableAsSkipped(relation_id, table_name);
/// TODO: This can happen if we created a publication with this table but then got an exception that this
/// FIXME: This can happen if we created a publication with this table but then got an exception that this
/// table has primary key or something else.
LOG_ERROR(log,
"Storage for table {} does not exist, but is included in replication stream. (Storages number: {})",
"Storage for table {} does not exist, but is included in replication stream. (Storages number: {})"
"Please manually remove this table from replication (DETACH TABLE query) to avoid redundant replication",
table_name, storages.size());
markTableAsSkipped(relation_id, table_name);
return;
}
assert(buffers.contains(table_name));
auto & buffer = storage_iter->second.buffer;
/// 'd' - default (primary key if any)
/// 'n' - nothing
@ -412,7 +447,6 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
/// 'i' - user defined index with indisreplident set
/// Only 'd' and 'i' - are supported.
char replica_identity = readInt8(replication_message, pos, size);
if (replica_identity != 'd' && replica_identity != 'i')
{
LOG_WARNING(log,
@ -423,25 +457,29 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
Int16 num_columns = readInt16(replication_message, pos, size);
Int32 data_type_id;
Int32 type_modifier; /// For example, n in varchar(n)
bool new_relation_definition = false;
if (schema_data.find(relation_id) == schema_data.end())
{
relation_id_to_name[relation_id] = table_name;
schema_data.emplace(relation_id, SchemaData(num_columns));
new_relation_definition = true;
}
auto & current_schema_data = schema_data.find(relation_id)->second;
if (current_schema_data.number_of_columns != num_columns)
if (static_cast<size_t>(num_columns) + 2 != buffer.getColumnsNum()) /// +2 -- sign and version columns
{
markTableAsSkipped(relation_id, table_name);
return;
}
if (static_cast<size_t>(num_columns) != buffer.attributes.size())
{
#ifndef NDEBUG
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Mismatch in attributes size. Got {}, expected {}. It's a bug. Current buffer structure: {}",
num_columns, buffer.attributes.size(), buffer.description.sample_block.dumpStructure());
#else
LOG_ERROR(log, "Mismatch in attributes size. Got {}, expected {}. It's a bug. Current buffer structure: {}",
num_columns, buffer.attributes.size(), buffer.description.sample_block.dumpStructure());
markTableAsSkipped(relation_id, table_name);
return;
#endif
}
Int32 data_type_id;
Int32 type_modifier; /// For example, n in varchar(n)
for (uint16_t i = 0; i < num_columns; ++i)
{
String column_name;
@ -451,23 +489,14 @@ void MaterializedPostgreSQLConsumer::processReplicationMessage(const char * repl
data_type_id = readInt32(replication_message, pos, size);
type_modifier = readInt32(replication_message, pos, size);
if (new_relation_definition)
if (buffer.attributes[i].atttypid != data_type_id || buffer.attributes[i].atttypmod != type_modifier)
{
current_schema_data.column_identifiers.emplace_back(std::make_pair(data_type_id, type_modifier));
}
else
{
if (current_schema_data.column_identifiers[i].first != data_type_id
|| current_schema_data.column_identifiers[i].second != type_modifier)
{
markTableAsSkipped(relation_id, table_name);
return;
}
markTableAsSkipped(relation_id, table_name);
return;
}
}
tables_to_sync.insert(table_name);
break;
}
case 'O': // Origin
@ -489,19 +518,19 @@ void MaterializedPostgreSQLConsumer::syncTables()
{
for (const auto & table_name : tables_to_sync)
{
auto & buffer = buffers.find(table_name)->second;
Block result_rows = buffer.description.sample_block.cloneWithColumns(std::move(buffer.columns));
auto & storage_data = storages.find(table_name)->second;
Block result_rows = storage_data.buffer.description.sample_block.cloneWithColumns(std::move(storage_data.buffer.columns));
if (result_rows.rows())
{
auto storage = storages[table_name];
auto storage = storage_data.storage;
auto insert_context = Context::createCopy(context);
insert_context->setInternalQuery(true);
auto insert = std::make_shared<ASTInsertQuery>();
insert->table_id = storage->getStorageID();
insert->columns = buffer.columnsAST;
insert->columns = storage_data.buffer.columns_ast;
InterpreterInsertQuery interpreter(insert, insert_context, true);
auto io = interpreter.execute();
@ -514,7 +543,7 @@ void MaterializedPostgreSQLConsumer::syncTables()
CompletedPipelineExecutor executor(io.pipeline);
executor.execute();
buffer.columns = buffer.description.sample_block.cloneEmptyColumns();
storage_data.buffer.columns = storage_data.buffer.description.sample_block.cloneEmptyColumns();
}
}
@ -599,34 +628,21 @@ bool MaterializedPostgreSQLConsumer::isSyncAllowed(Int32 relation_id, const Stri
void MaterializedPostgreSQLConsumer::markTableAsSkipped(Int32 relation_id, const String & relation_name)
{
/// Empty lsn string means - continue waiting for valid lsn.
skip_list.insert({relation_id, ""});
skip_list.insert({relation_id, ""}); /// Empty lsn string means - continue waiting for valid lsn.
storages.erase(relation_name);
if (storages.count(relation_name))
{
/// Erase cached schema identifiers. It will be updated again once table is allowed back into replication stream
/// and it receives first data after update.
schema_data.erase(relation_id);
/// Clear table buffer.
auto & buffer = buffers.find(relation_name)->second;
buffer.columns = buffer.description.sample_block.cloneEmptyColumns();
if (allow_automatic_update)
LOG_TRACE(log, "Table {} (relation_id: {}) is skipped temporarily. It will be reloaded in the background", relation_name, relation_id);
else
LOG_WARNING(log, "Table {} (relation_id: {}) is skipped, because table schema has changed", relation_name, relation_id);
}
if (allow_automatic_update)
LOG_TRACE(log, "Table {} (relation_id: {}) is skipped temporarily. It will be reloaded in the background", relation_name, relation_id);
else
LOG_WARNING(log, "Table {} (relation_id: {}) is skipped, because table schema has changed", relation_name, relation_id);
}
void MaterializedPostgreSQLConsumer::addNested(const String & postgres_table_name, StoragePtr nested_storage, const String & table_start_lsn)
void MaterializedPostgreSQLConsumer::addNested(
const String & postgres_table_name, StorageInfo nested_storage_info, const String & table_start_lsn)
{
/// Cache new pointer to replacingMergeTree table.
storages.emplace(postgres_table_name, nested_storage);
/// Add new in-memory buffer.
buffers.emplace(postgres_table_name, Buffer(nested_storage));
assert(!storages.contains(postgres_table_name));
storages.emplace(postgres_table_name, nested_storage_info);
/// Replication consumer will read the WAL and check whether it is allowed to start applying
/// changes to this table.
@ -634,14 +650,10 @@ void MaterializedPostgreSQLConsumer::addNested(const String & postgres_table_nam
}
void MaterializedPostgreSQLConsumer::updateNested(const String & table_name, StoragePtr nested_storage, Int32 table_id, const String & table_start_lsn)
void MaterializedPostgreSQLConsumer::updateNested(const String & table_name, StorageInfo nested_storage_info, Int32 table_id, const String & table_start_lsn)
{
/// Cache new pointer to replacingMergeTree table.
storages[table_name] = nested_storage;
/// Create a new empty buffer (with updated metadata), where data is first loaded before syncing into actual table.
auto & buffer = buffers.find(table_name)->second;
buffer.createEmptyBuffer(nested_storage);
assert(!storages.contains(table_name));
storages.emplace(table_name, nested_storage_info);
/// Set start position to valid lsn. Before it was an empty string. Further read for table allowed, if it has a valid lsn.
skip_list[table_id] = table_start_lsn;
@ -651,7 +663,6 @@ void MaterializedPostgreSQLConsumer::updateNested(const String & table_name, Sto
void MaterializedPostgreSQLConsumer::removeNested(const String & postgres_table_name)
{
storages.erase(postgres_table_name);
buffers.erase(postgres_table_name);
deleted_tables.insert(postgres_table_name);
}
@ -706,7 +717,17 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot()
current_lsn = (*row)[0];
lsn_value = getLSNValue(current_lsn);
processReplicationMessage((*row)[1].c_str(), (*row)[1].size());
try
{
// LOG_DEBUG(log, "Current message: {}", (*row)[1]);
processReplicationMessage((*row)[1].c_str(), (*row)[1].size());
}
catch (const Exception & e)
{
if (e.code() == ErrorCodes::POSTGRESQL_REPLICATION_INTERNAL_ERROR)
continue;
throw;
}
}
}
catch (const Exception &)
@ -737,11 +758,6 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot()
LOG_ERROR(log, "Conversion error: {}", e.what());
return false;
}
catch (const pqxx::in_doubt_error & e)
{
LOG_ERROR(log, "PostgreSQL library has some doubts: {}", e.what());
return false;
}
catch (const pqxx::internal_error & e)
{
LOG_ERROR(log, "PostgreSQL library internal error: {}", e.what());
@ -749,16 +765,8 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot()
}
catch (...)
{
/// Since reading is done from a background task, it is important to catch any possible error
/// in order to understand why something does not work.
try
{
std::rethrow_exception(std::current_exception());
}
catch (const std::exception& e)
{
LOG_ERROR(log, "Unexpected error: {}", e.what());
}
tryLogCurrentException(__PRETTY_FUNCTION__);
return false;
}
if (!tables_to_sync.empty())
@ -770,6 +778,11 @@ bool MaterializedPostgreSQLConsumer::readFromReplicationSlot()
bool MaterializedPostgreSQLConsumer::consume(std::vector<std::pair<Int32, String>> & skipped_tables)
{
/// Read up to max_block_size changes (approximately - in some cases there might be more).
/// false: no data was read, reschedule.
/// true: some data was read, schedule as soon as possible.
auto read_next = readFromReplicationSlot();
/// Check if there are tables, which are skipped from being updated by changes from replication stream,
/// because schema changes were detected. Update them, if it is allowed.
if (allow_automatic_update && !skip_list.empty())
@ -786,10 +799,6 @@ bool MaterializedPostgreSQLConsumer::consume(std::vector<std::pair<Int32, String
}
}
/// Read up to max_block_size changed (approximately - in same cases might be more).
/// false: no data was read, reschedule.
/// true: some data was read, schedule as soon as possible.
auto read_next = readFromReplicationSlot();
return read_next;
}

View File

@ -8,36 +8,78 @@
#include <base/logger_useful.h>
#include <Storages/IStorage.h>
#include <Parsers/ASTExpressionList.h>
#include <Databases/PostgreSQL/fetchPostgreSQLTableStructure.h>
namespace DB
{
struct SettingChange;
struct StorageInfo
{
StoragePtr storage;
PostgreSQLTableStructure::Attributes attributes;
StorageInfo(StoragePtr storage_, const PostgreSQLTableStructure::Attributes & attributes_)
: storage(storage_), attributes(attributes_) {}
};
using StorageInfos = std::unordered_map<String, StorageInfo>;
class MaterializedPostgreSQLConsumer
{
public:
using Storages = std::unordered_map<String, StoragePtr>;
private:
struct StorageData
{
struct Buffer
{
ExternalResultDescription description;
MutableColumns columns;
/// Needed to pass to insert query columns list in syncTables().
std::shared_ptr<ASTExpressionList> columns_ast;
/// Needed for insertPostgreSQLValue() method to parse array
std::unordered_map<size_t, PostgreSQLArrayInfo> array_info;
/// To validate ddl.
PostgreSQLTableStructure::Attributes attributes;
Buffer(StorageMetadataPtr storage_metadata, const PostgreSQLTableStructure::Attributes & attributes_);
size_t getColumnsNum() const
{
const auto & sample_block = description.sample_block;
return sample_block.columns();
}
};
StoragePtr storage;
Buffer buffer;
explicit StorageData(const StorageInfo & storage_info);
StorageData(const StorageData & other) = delete;
};
using Storages = std::unordered_map<String, StorageData>;
public:
MaterializedPostgreSQLConsumer(
ContextPtr context_,
std::shared_ptr<postgres::Connection> connection_,
const String & replication_slot_name_,
const String & publication_name_,
const String & start_lsn,
const size_t max_block_size_,
size_t max_block_size_,
bool schema_as_a_part_of_table_name_,
bool allow_automatic_update_,
Storages storages_,
StorageInfos storages_,
const String & name_for_logger);
bool consume(std::vector<std::pair<Int32, String>> & skipped_tables);
/// Called from reloadFromSnapshot by replication handler. This method is needed to move a table back into synchronization
/// process if it was skipped due to schema changes.
void updateNested(const String & table_name, StoragePtr nested_storage, Int32 table_id, const String & table_start_lsn);
void updateNested(const String & table_name, StorageInfo nested_storage_info, Int32 table_id, const String & table_start_lsn);
void addNested(const String & postgres_table_name, StoragePtr nested_storage, const String & table_start_lsn);
void addNested(const String & postgres_table_name, StorageInfo nested_storage_info, const String & table_start_lsn);
void removeNested(const String & postgres_table_name);
@ -55,25 +97,8 @@ private:
bool isSyncAllowed(Int32 relation_id, const String & relation_name);
struct Buffer
{
ExternalResultDescription description;
MutableColumns columns;
/// Needed to pass to insert query columns list in syncTables().
std::shared_ptr<ASTExpressionList> columnsAST;
/// Needed for insertPostgreSQLValue() method to parse array
std::unordered_map<size_t, PostgreSQLArrayInfo> array_info;
Buffer(StoragePtr storage) { createEmptyBuffer(storage); }
void createEmptyBuffer(StoragePtr storage);
};
using Buffers = std::unordered_map<String, Buffer>;
static void insertDefaultValue(Buffer & buffer, size_t column_idx);
static void insertValue(Buffer & buffer, const std::string & value, size_t column_idx);
static void insertDefaultValue(StorageData::Buffer & buffer, size_t column_idx);
void insertValue(StorageData::Buffer & buffer, const std::string & value, size_t column_idx);
enum class PostgreSQLQuery
{
@ -82,7 +107,7 @@ private:
DELETE
};
void readTupleData(Buffer & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false);
void readTupleData(StorageData::Buffer & buffer, const char * message, size_t & pos, size_t size, PostgreSQLQuery type, bool old_value = false);
template<typename T>
static T unhexN(const char * message, size_t pos, size_t n);
@ -95,7 +120,7 @@ private:
void markTableAsSkipped(Int32 relation_id, const String & relation_name);
/// lsn - log sequence number, like a WAL offset (64 bit).
Int64 getLSNValue(const std::string & lsn)
static Int64 getLSNValue(const std::string & lsn)
{
UInt32 upper_half, lower_half;
std::sscanf(lsn.data(), "%X/%X", &upper_half, &lower_half);
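The rest of the function body falls outside this hunk; assuming the usual composition (upper_half << 32) + lower_half, a minimal standalone sketch of the conversion follows (the LSN value below is made up):

    // Minimal sketch, not the actual ClickHouse implementation.
    #include <cstdint>
    #include <cstdio>

    static int64_t lsn_to_int64(const char * lsn)
    {
        uint32_t upper_half = 0;
        uint32_t lower_half = 0;
        std::sscanf(lsn, "%X/%X", &upper_half, &lower_half);
        return (static_cast<int64_t>(upper_half) << 32) + lower_half;
    }

    int main()
    {
        // "16/B374D848" -> (0x16 << 32) + 0xB374D848 == 0x16B374D848
        std::printf("%llX\n", static_cast<unsigned long long>(lsn_to_int64("16/B374D848")));
    }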
@ -125,28 +150,11 @@ private:
/// Holds `postgres_table_name` set.
std::unordered_set<std::string> tables_to_sync;
/// `postgres_table_name` -> ReplacingMergeTree table.
/// `postgres_table_name` -> StorageData.
Storages storages;
/// `postgres_table_name` -> In-memory buffer.
Buffers buffers;
std::unordered_map<Int32, String> relation_id_to_name;
struct SchemaData
{
Int16 number_of_columns;
/// data_type_id and type_modifier
std::vector<std::pair<Int32, Int32>> column_identifiers;
SchemaData(Int16 number_of_columns_) : number_of_columns(number_of_columns_) {}
};
/// Cache for table schema data to be able to detect schema changes, because ddl is not
/// replicated with postgresql logical replication protocol, but some table schema info
/// is received if it is the first time we received dml message for given relation in current session or
/// if relation definition has changed since the last relation definition message.
std::unordered_map<Int32, SchemaData> schema_data;
/// `postgres_relation_id` -> `start_lsn`
/// skip_list contains relation ids for tables on which ddl was performed, which can break synchronization.
/// These breaking changes are detected in the replication stream via the corresponding relation message, and the table is added to the skip list.

View File

@ -175,6 +175,7 @@ void PostgreSQLReplicationHandler::shutdown()
startup_task->deactivate();
consumer_task->deactivate();
cleanup_task->deactivate();
consumer.reset(); /// Clear shared pointers to inner storages.
}
@ -185,7 +186,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
createPublicationIfNeeded(tx);
/// List of nested tables (table_name -> nested_storage), which is passed to replication consumer.
std::unordered_map<String, StoragePtr> nested_storages;
std::unordered_map<String, StorageInfo> nested_storages;
/// snapshot_name is initialized only if a new replication slot is created.
/// start_lsn is initialized in two places:
@ -220,7 +221,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
{
try
{
nested_storages[table_name] = loadFromSnapshot(*tmp_connection, snapshot_name, table_name, storage->as<StorageMaterializedPostgreSQL>());
nested_storages.emplace(table_name, loadFromSnapshot(*tmp_connection, snapshot_name, table_name, storage->as<StorageMaterializedPostgreSQL>()));
}
catch (Exception & e)
{
@ -262,7 +263,12 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error)
auto * materialized_storage = storage->as <StorageMaterializedPostgreSQL>();
try
{
nested_storages[table_name] = materialized_storage->getNested();
auto [postgres_table_schema, postgres_table_name] = getSchemaAndTableName(table_name);
auto table_structure = fetchPostgreSQLTableStructure(tx, postgres_table_name, postgres_table_schema, true, true, true);
if (!table_structure.physical_columns)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No columns");
auto storage_info = StorageInfo(materialized_storage->getNested(), table_structure.physical_columns->attributes);
nested_storages.emplace(table_name, std::move(storage_info));
}
catch (Exception & e)
{
@ -315,7 +321,7 @@ ASTPtr PostgreSQLReplicationHandler::getCreateNestedTableQuery(StorageMaterializ
}
StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(postgres::Connection & connection, String & snapshot_name, const String & table_name,
StorageInfo PostgreSQLReplicationHandler::loadFromSnapshot(postgres::Connection & connection, String & snapshot_name, const String & table_name,
StorageMaterializedPostgreSQL * materialized_storage)
{
auto tx = std::make_shared<pqxx::ReplicationTransaction>(connection.getRef());
@ -329,8 +335,13 @@ StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(postgres::Connection &
query_str = fmt::format("SELECT * FROM {}", quoted_name);
LOG_DEBUG(log, "Loading PostgreSQL table {}.{}", postgres_database, quoted_name);
auto table_structure = fetchTableStructure(*tx, table_name);
if (!table_structure->physical_columns)
throw Exception(ErrorCodes::LOGICAL_ERROR, "No table attributes");
auto table_attributes = table_structure->physical_columns->attributes;
auto table_override = tryGetTableOverride(current_database_name, table_name);
materialized_storage->createNestedIfNeeded(fetchTableStructure(*tx, table_name), table_override ? table_override->as<ASTTableOverride>() : nullptr);
materialized_storage->createNestedIfNeeded(std::move(table_structure), table_override ? table_override->as<ASTTableOverride>() : nullptr);
auto nested_storage = materialized_storage->getNested();
auto insert = std::make_shared<ASTInsertQuery>();
@ -355,7 +366,7 @@ StoragePtr PostgreSQLReplicationHandler::loadFromSnapshot(postgres::Connection &
auto nested_table_id = nested_storage->getStorageID();
LOG_DEBUG(log, "Loaded table {}.{} (uuid: {})", nested_table_id.database_name, nested_table_id.table_name, toString(nested_table_id.uuid));
return nested_storage;
return StorageInfo(nested_storage, std::move(table_attributes));
}
@ -787,9 +798,6 @@ std::set<String> PostgreSQLReplicationHandler::fetchTablesFromPublication(pqxx::
PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure(
pqxx::ReplicationTransaction & tx, const std::string & table_name) const
{
if (!is_materialized_postgresql_database)
return nullptr;
PostgreSQLTableStructure structure;
try
{
@ -815,7 +823,7 @@ void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPost
LOG_TRACE(log, "Adding table `{}` to replication", postgres_table_name);
postgres::Connection replication_connection(connection_info, /* replication */true);
String snapshot_name, start_lsn;
StoragePtr nested_storage;
StorageInfo nested_storage_info{ nullptr, {} };
{
auto tx = std::make_shared<pqxx::nontransaction>(replication_connection.getRef());
@ -831,8 +839,8 @@ void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPost
throw Exception(ErrorCodes::LOGICAL_ERROR, "Internal table was not created");
postgres::Connection tmp_connection(connection_info);
nested_storage = loadFromSnapshot(tmp_connection, snapshot_name, postgres_table_name, materialized_storage);
materialized_storage->set(nested_storage);
nested_storage_info = loadFromSnapshot(tmp_connection, snapshot_name, postgres_table_name, materialized_storage);
materialized_storage->set(nested_storage_info.storage);
}
{
@ -841,7 +849,7 @@ void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPost
}
/// Pass storage to consumer and lsn position, from which to start receiving replication messages for this table.
consumer->addNested(postgres_table_name, nested_storage, start_lsn);
consumer->addNested(postgres_table_name, nested_storage_info, start_lsn);
LOG_TRACE(log, "Table `{}` successfully added to replication", postgres_table_name);
}
catch (...)
@ -914,8 +922,8 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector<std::pai
auto temp_materialized_storage = materialized_storage->createTemporary();
/// This snapshot is valid up to the end of the transaction, which exported it.
StoragePtr temp_nested_storage = loadFromSnapshot(tmp_connection, snapshot_name, table_name,
temp_materialized_storage->as <StorageMaterializedPostgreSQL>());
auto [temp_nested_storage, table_attributes] = loadFromSnapshot(
tmp_connection, snapshot_name, table_name, temp_materialized_storage->as <StorageMaterializedPostgreSQL>());
auto table_id = materialized_storage->getNestedStorageID();
auto temp_table_id = temp_nested_storage->getStorageID();
@ -949,7 +957,7 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vector<std::pai
nested_storage->getStorageID().getNameForLogs(), nested_sample_block.dumpStructure());
/// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position.
consumer->updateNested(table_name, nested_storage, relation_id, start_lsn);
consumer->updateNested(table_name, StorageInfo(nested_storage, std::move(table_attributes)), relation_id, start_lsn);
auto table_to_drop = DatabaseCatalog::instance().getTable(StorageID(temp_table_id.database_name, temp_table_id.table_name, table_id.uuid), nested_context);
auto drop_table_id = table_to_drop->getStorageID();

View File

@ -87,7 +87,7 @@ private:
void consumerFunc();
StoragePtr loadFromSnapshot(postgres::Connection & connection, std::string & snapshot_name, const String & table_name, StorageMaterializedPostgreSQL * materialized_storage);
StorageInfo loadFromSnapshot(postgres::Connection & connection, std::string & snapshot_name, const String & table_name, StorageMaterializedPostgreSQL * materialized_storage);
void reloadFromSnapshot(const std::vector<std::pair<Int32, String>> & relation_data);

View File

@ -365,7 +365,7 @@ ASTPtr StorageMaterializedPostgreSQL::getColumnDeclaration(const DataTypePtr & d
ast_expression->name = "DateTime64";
ast_expression->arguments = std::make_shared<ASTExpressionList>();
ast_expression->arguments->children.emplace_back(std::make_shared<ASTLiteral>(UInt32(6)));
return ast_expression;
return std::move(ast_expression);
}
return std::make_shared<ASTIdentifier>(data_type->getName());
@ -423,7 +423,7 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(
table_id.database_name, table_id.table_name);
}
if (!table_structure->columns && (!table_override || !table_override->columns))
if (!table_structure->physical_columns && (!table_override || !table_override->columns))
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "No columns returned for table {}.{}",
table_id.database_name, table_id.table_name);
@ -465,7 +465,7 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(
}
else
{
ordinary_columns_and_types = *table_structure->columns;
ordinary_columns_and_types = table_structure->physical_columns->columns;
columns_declare_list->set(columns_declare_list->columns, getColumnsExpressionList(ordinary_columns_and_types));
}
@ -475,7 +475,7 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(
}
else
{
ordinary_columns_and_types = *table_structure->columns;
ordinary_columns_and_types = table_structure->physical_columns->columns;
columns_declare_list->set(columns_declare_list->columns, getColumnsExpressionList(ordinary_columns_and_types));
}
@ -485,9 +485,9 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(
NamesAndTypesList merging_columns;
if (table_structure->primary_key_columns)
merging_columns = *table_structure->primary_key_columns;
merging_columns = table_structure->primary_key_columns->columns;
else
merging_columns = *table_structure->replica_identity_columns;
merging_columns = table_structure->replica_identity_columns->columns;
order_by_expression->name = "tuple";
order_by_expression->arguments = std::make_shared<ASTExpressionList>();
@ -524,7 +524,7 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(
storage_metadata.setConstraints(constraints);
setInMemoryMetadata(storage_metadata);
return create_table_query;
return std::move(create_table_query);
}

View File

@ -442,6 +442,8 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree(
createNewZooKeeperNodes();
syncPinnedPartUUIDs();
createTableSharedID();
}
@ -7025,12 +7027,53 @@ void StorageReplicatedMergeTree::startBackgroundMovesIfNeeded()
background_moves_assignee.start();
}
std::unique_ptr<MergeTreeSettings> StorageReplicatedMergeTree::getDefaultSettings() const
{
return std::make_unique<MergeTreeSettings>(getContext()->getReplicatedMergeTreeSettings());
}
String StorageReplicatedMergeTree::getTableSharedID() const
{
return toString(table_shared_id);
}
void StorageReplicatedMergeTree::createTableSharedID()
{
if (table_shared_id != UUIDHelpers::Nil)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Table shared id already initialized");
zkutil::ZooKeeperPtr zookeeper = getZooKeeper();
String zookeeper_table_id_path = fs::path(zookeeper_path) / "table_shared_id";
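/// A single node shared by all replicas of the table; the first replica to create it wins, the rest read its value.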
String id;
if (!zookeeper->tryGet(zookeeper_table_id_path, id))
{
UUID table_id_candidate;
auto storage_id = getStorageID();
if (storage_id.uuid != UUIDHelpers::Nil)
table_id_candidate = storage_id.uuid;
else
table_id_candidate = UUIDHelpers::generateV4();
id = toString(table_id_candidate);
auto code = zookeeper->tryCreate(zookeeper_table_id_path, id, zkutil::CreateMode::Persistent);
if (code == Coordination::Error::ZNODEEXISTS)
{ /// Another replica created the node earlier
id = zookeeper->get(zookeeper_table_id_path);
}
else if (code != Coordination::Error::ZOK)
{
throw zkutil::KeeperException(code, zookeeper_table_id_path);
}
}
table_shared_id = parseFromString<UUID>(id);
}
void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part) const
{
if (!part.volume)
@ -7038,7 +7081,6 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part)
DiskPtr disk = part.volume->getDisk();
if (!disk || !disk->supportZeroCopyReplication())
return;
String zero_copy = fmt::format("zero_copy_{}", toString(disk->getType()));
zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper();
if (!zookeeper)
@ -7047,73 +7089,100 @@ void StorageReplicatedMergeTree::lockSharedData(const IMergeTreeDataPart & part)
String id = part.getUniqueId();
boost::replace_all(id, "/", "_");
String zookeeper_node = fs::path(zookeeper_path) / zero_copy / "shared" / part.name / id / replica_name;
LOG_TRACE(log, "Set zookeeper lock {}", zookeeper_node);
/// In rare case other replica can remove path between createAncestors and createIfNotExists
/// So we make up to 5 attempts
for (int attempts = 5; attempts > 0; --attempts)
Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), disk->getType(), getTableSharedID(),
part.name, zookeeper_path);
for (const auto & zc_zookeeper_path : zc_zookeeper_paths)
{
try
{
zookeeper->createAncestors(zookeeper_node);
zookeeper->createIfNotExists(zookeeper_node, "lock");
break;
}
catch (const zkutil::KeeperException & e)
{
if (e.code == Coordination::Error::ZNONODE)
continue;
throw;
}
String zookeeper_node = fs::path(zc_zookeeper_path) / id / replica_name;
LOG_TRACE(log, "Set zookeeper lock {}", zookeeper_node);
createZeroCopyLockNode(zookeeper, zookeeper_node);
}
}
bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part) const
{
return unlockSharedData(part, part.name);
}
bool StorageReplicatedMergeTree::unlockSharedData(const IMergeTreeDataPart & part, const String & name) const
{
if (!part.volume)
return true;
DiskPtr disk = part.volume->getDisk();
if (!disk || !disk->supportZeroCopyReplication())
return true;
String zero_copy = fmt::format("zero_copy_{}", toString(disk->getType()));
zkutil::ZooKeeperPtr zookeeper = tryGetZooKeeper();
if (!zookeeper)
return true;
String id = part.getUniqueId();
auto ref_count = part.getNumberOfRefereneces();
if (ref_count > 0) /// Keep part shard info for frozen backups
return false;
return unlockSharedDataByID(part.getUniqueId(), getTableSharedID(), name, replica_name, disk, zookeeper, *getSettings(), log,
zookeeper_path);
}
bool StorageReplicatedMergeTree::unlockSharedDataByID(String id, const String & table_uuid, const String & part_name,
const String & replica_name_, DiskPtr disk, zkutil::ZooKeeperPtr zookeeper_ptr, const MergeTreeSettings & settings,
Poco::Logger * logger, const String & zookeeper_path_old)
{
boost::replace_all(id, "/", "_");
String zookeeper_part_node = fs::path(zookeeper_path) / zero_copy / "shared" / part.name;
String zookeeper_part_uniq_node = fs::path(zookeeper_part_node) / id;
String zookeeper_node = fs::path(zookeeper_part_uniq_node) / replica_name;
Strings zc_zookeeper_paths = getZeroCopyPartPath(settings, disk->getType(), table_uuid, part_name, zookeeper_path_old);
LOG_TRACE(log, "Remove zookeeper lock {}", zookeeper_node);
bool res = true;
zookeeper->tryRemove(zookeeper_node);
Strings children;
zookeeper->tryGetChildren(zookeeper_part_uniq_node, children);
if (!children.empty())
for (const auto & zc_zookeeper_path : zc_zookeeper_paths)
{
LOG_TRACE(log, "Found zookeper locks for {}", zookeeper_part_uniq_node);
return false;
String zookeeper_part_uniq_node = fs::path(zc_zookeeper_path) / id;
String zookeeper_node = fs::path(zookeeper_part_uniq_node) / replica_name_;
LOG_TRACE(logger, "Remove zookeeper lock {}", zookeeper_node);
zookeeper_ptr->tryRemove(zookeeper_node);
Strings children;
zookeeper_ptr->tryGetChildren(zookeeper_part_uniq_node, children);
if (!children.empty())
{
LOG_TRACE(logger, "Found zookeper locks for {}", zookeeper_part_uniq_node);
res = false;
continue;
}
auto e = zookeeper_ptr->tryRemove(zookeeper_part_uniq_node);
LOG_TRACE(logger, "Remove parent zookeeper lock {} : {}", zookeeper_part_uniq_node, e != Coordination::Error::ZNOTEMPTY);
/// Even when we have a lock with the same part name but a different uniq id, we can remove the files on S3
children.clear();
String zookeeper_part_node = fs::path(zookeeper_part_uniq_node).parent_path();
zookeeper_ptr->tryGetChildren(zookeeper_part_node, children);
if (children.empty())
{
/// Cleanup after last uniq removing
e = zookeeper_ptr->tryRemove(zookeeper_part_node);
LOG_TRACE(logger, "Remove parent zookeeper lock {} : {}", zookeeper_part_node, e != Coordination::Error::ZNOTEMPTY);
}
else
{
LOG_TRACE(logger, "Can't remove parent zookeeper lock {} : {}", zookeeper_part_node, children.size());
for (auto & c : children)
{
LOG_TRACE(logger, "Child node {}", c);
}
}
}
zookeeper->tryRemove(zookeeper_part_uniq_node);
/// Even when we have lock with same part name, but with different uniq, we can remove files on S3
children.clear();
zookeeper->tryGetChildren(zookeeper_part_node, children);
if (children.empty())
/// Cleanup after last uniq removing
zookeeper->tryRemove(zookeeper_part_node);
return true;
return res;
}
@ -7146,20 +7215,24 @@ String StorageReplicatedMergeTree::getSharedDataReplica(
if (!zookeeper)
return best_replica;
String zero_copy = fmt::format("zero_copy_{}", toString(disk_type));
String zookeeper_part_node = fs::path(zookeeper_path) / zero_copy / "shared" / part.name;
Strings zc_zookeeper_paths = getZeroCopyPartPath(*getSettings(), disk_type, getTableSharedID(), part.name,
zookeeper_path);
Strings ids;
zookeeper->tryGetChildren(zookeeper_part_node, ids);
std::set<String> replicas;
Strings replicas;
for (const auto & id : ids)
for (const auto & zc_zookeeper_path : zc_zookeeper_paths)
{
String zookeeper_part_uniq_node = fs::path(zookeeper_part_node) / id;
Strings id_replicas;
zookeeper->tryGetChildren(zookeeper_part_uniq_node, id_replicas);
LOG_TRACE(log, "Found zookeper replicas for {}: {}", zookeeper_part_uniq_node, id_replicas.size());
replicas.insert(replicas.end(), id_replicas.begin(), id_replicas.end());
Strings ids;
zookeeper->tryGetChildren(zc_zookeeper_path, ids);
for (const auto & id : ids)
{
String zookeeper_part_uniq_node = fs::path(zc_zookeeper_path) / id;
Strings id_replicas;
zookeeper->tryGetChildren(zookeeper_part_uniq_node, id_replicas);
LOG_TRACE(log, "Found zookeper replicas for {}: {}", zookeeper_part_uniq_node, id_replicas.size());
replicas.insert(id_replicas.begin(), id_replicas.end());
}
}
LOG_TRACE(log, "Found zookeper replicas for part {}: {}", part.name, replicas.size());
@ -7212,24 +7285,45 @@ String StorageReplicatedMergeTree::getSharedDataReplica(
return best_replica;
}
String StorageReplicatedMergeTree::findReplicaHavingPart(
const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_)
Strings StorageReplicatedMergeTree::getZeroCopyPartPath(const MergeTreeSettings & settings, DiskType disk_type, const String & table_uuid,
const String & part_name, const String & zookeeper_path_old)
{
Strings replicas = zookeeper_->getChildren(fs::path(zookeeper_path_) / "replicas");
Strings res;
String zero_copy = fmt::format("zero_copy_{}", toString(disk_type));
String new_path = fs::path(settings.remote_fs_zero_copy_zookeeper_path.toString()) / zero_copy / table_uuid / part_name;
res.push_back(new_path);
if (settings.remote_fs_zero_copy_path_compatible_mode && !zookeeper_path_old.empty())
{ /// Compatibility mode for cluster with old and new versions
String old_path = fs::path(zookeeper_path_old) / zero_copy / "shared" / part_name;
res.push_back(old_path);
}
return res;
}
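For illustration, here is a minimal standalone sketch of the two candidate lock paths this returns for an S3 disk; the root, table uuid and part name below are made up, and the second entry appears only in compatible mode:

    // Hypothetical values; not the actual ClickHouse code.
    #include <filesystem>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        namespace fs = std::filesystem;
        const std::string zero_copy_root = "/clickhouse/zero_copy";           // assumed remote_fs_zero_copy_zookeeper_path
        const std::string zookeeper_path_old = "/clickhouse/tables/01/test";  // assumed table path in ZooKeeper
        const std::string table_uuid = "8f0c1e2a-5cb3-11e7-907b-a6006ad3dba0";
        const std::string part_name = "all_0_0_0";
        const std::string zero_copy = "zero_copy_s3";                         // "zero_copy_" + disk type

        std::vector<std::string> res;
        res.push_back((fs::path(zero_copy_root) / zero_copy / table_uuid / part_name).string());
        // Added only when remote_fs_zero_copy_path_compatible_mode is enabled:
        res.push_back((fs::path(zookeeper_path_old) / zero_copy / "shared" / part_name).string());

        for (const auto & path : res)
            std::cout << path << '\n';
    }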
String StorageReplicatedMergeTree::findReplicaHavingPart(
const String & part_name, const String & zookeeper_path_, zkutil::ZooKeeper::Ptr zookeeper_ptr)
{
Strings replicas = zookeeper_ptr->getChildren(fs::path(zookeeper_path_) / "replicas");
/// Select replicas in uniformly random order.
std::shuffle(replicas.begin(), replicas.end(), thread_local_rng);
for (const String & replica : replicas)
{
if (zookeeper_->exists(fs::path(zookeeper_path_) / "replicas" / replica / "parts" / part_name)
&& zookeeper_->exists(fs::path(zookeeper_path_) / "replicas" / replica / "is_active"))
if (zookeeper_ptr->exists(fs::path(zookeeper_path_) / "replicas" / replica / "parts" / part_name)
&& zookeeper_ptr->exists(fs::path(zookeeper_path_) / "replicas" / replica / "is_active"))
return fs::path(zookeeper_path_) / "replicas" / replica;
}
return {};
}
bool StorageReplicatedMergeTree::checkIfDetachedPartExists(const String & part_name)
{
fs::directory_iterator dir_end;
@ -7240,6 +7334,7 @@ bool StorageReplicatedMergeTree::checkIfDetachedPartExists(const String & part_n
return false;
}
bool StorageReplicatedMergeTree::checkIfDetachedPartitionExists(const String & partition_name)
{
fs::directory_iterator dir_end;
@ -7430,4 +7525,180 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP
return true;
}
void StorageReplicatedMergeTree::createZeroCopyLockNode(const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_node)
{
/// In a rare case another replica can remove the path between createAncestors and createIfNotExists,
/// so we make up to 5 attempts.
for (int attempts = 5; attempts > 0; --attempts)
{
try
{
zookeeper->createAncestors(zookeeper_node);
zookeeper->createIfNotExists(zookeeper_node, "lock");
break;
}
catch (const zkutil::KeeperException & e)
{
if (e.code == Coordination::Error::ZNONODE)
continue;
throw;
}
}
}
namespace
{
/// Special metadata used during freeze table. Required for zero-copy
/// replication.
struct FreezeMetaData
{
public:
void fill(const StorageReplicatedMergeTree & storage)
{
is_replicated = storage.supportsReplication();
is_remote = storage.isRemote();
replica_name = storage.getReplicaName();
zookeeper_name = storage.getZooKeeperName();
table_shared_id = storage.getTableSharedID();
}
void save(DiskPtr disk, const String & path) const
{
auto file_path = getFileName(path);
auto buffer = disk->writeMetaFile(file_path, DBMS_DEFAULT_BUFFER_SIZE, WriteMode::Rewrite);
writeIntText(version, *buffer);
buffer->write("\n", 1);
writeBoolText(is_replicated, *buffer);
buffer->write("\n", 1);
writeBoolText(is_remote, *buffer);
buffer->write("\n", 1);
writeString(replica_name, *buffer);
buffer->write("\n", 1);
writeString(zookeeper_name, *buffer);
buffer->write("\n", 1);
writeString(table_shared_id, *buffer);
buffer->write("\n", 1);
}
bool load(DiskPtr disk, const String & path)
{
auto file_path = getFileName(path);
if (!disk->exists(file_path))
return false;
auto buffer = disk->readMetaFile(file_path, ReadSettings(), {});
readIntText(version, *buffer);
if (version != 1)
{
LOG_ERROR(&Poco::Logger::get("FreezeMetaData"), "Unknown freezed metadata version: {}", version);
return false;
}
DB::assertChar('\n', *buffer);
readBoolText(is_replicated, *buffer);
DB::assertChar('\n', *buffer);
readBoolText(is_remote, *buffer);
DB::assertChar('\n', *buffer);
readString(replica_name, *buffer);
DB::assertChar('\n', *buffer);
readString(zookeeper_name, *buffer);
DB::assertChar('\n', *buffer);
readString(table_shared_id, *buffer);
DB::assertChar('\n', *buffer);
return true;
}
static void clean(DiskPtr disk, const String & path)
{
disk->removeMetaFileIfExists(getFileName(path));
}
private:
static String getFileName(const String & path)
{
return fs::path(path) / "frozen_metadata.txt";
}
public:
int version = 1;
bool is_replicated;
bool is_remote;
String replica_name;
String zookeeper_name;
String table_shared_id;
};
}
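For reference, save() writes a small line-oriented text file. Assuming writeBoolText emits '1'/'0', a hypothetical frozen_metadata.txt for a replicated remote table would look roughly like this (every concrete value below is made up):

    1                                      (version)
    1                                      (is_replicated)
    1                                      (is_remote)
    r1                                     (replica_name)
    default                                (zookeeper_name)
    8f0c1e2a-5cb3-11e7-907b-a6006ad3dba0   (table_shared_id)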
bool StorageReplicatedMergeTree::removeDetachedPart(DiskPtr disk, const String & path, const String & part_name, bool is_freezed)
{
if (disk->supportZeroCopyReplication())
{
if (is_freezed)
{
FreezeMetaData meta;
if (meta.load(disk, path))
{
FreezeMetaData::clean(disk, path);
return removeSharedDetachedPart(disk, path, part_name, meta.table_shared_id, meta.zookeeper_name, meta.replica_name, "");
}
}
else
{
String table_id = getTableSharedID();
return removeSharedDetachedPart(disk, path, part_name, table_id, zookeeper_name, replica_name, zookeeper_path);
}
}
disk->removeRecursive(path);
return false;
}
bool StorageReplicatedMergeTree::removeSharedDetachedPart(DiskPtr disk, const String & path, const String & part_name, const String & table_uuid,
const String &, const String & detached_replica_name, const String & detached_zookeeper_path)
{
bool keep_shared = false;
zkutil::ZooKeeperPtr zookeeper = getZooKeeper();
if (zookeeper)
{
fs::path checksums = fs::path(path) / "checksums.txt";
if (disk->exists(checksums))
{
auto ref_count = disk->getRefCount(checksums);
if (ref_count == 0)
{
String id = disk->getUniqueId(checksums);
keep_shared = !StorageReplicatedMergeTree::unlockSharedDataByID(id, table_uuid, part_name,
detached_replica_name, disk, zookeeper, getContext()->getReplicatedMergeTreeSettings(), log,
detached_zookeeper_path);
}
else
keep_shared = true;
}
}
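/// keep_shared == true means the blobs are still referenced from elsewhere, so only the local metadata is removed.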
disk->removeSharedRecursive(path, keep_shared);
return keep_shared;
}
void StorageReplicatedMergeTree::createAndStoreFreezeMetadata(DiskPtr disk, DataPartPtr, String backup_part_path) const
{
if (disk->supportZeroCopyReplication())
{
FreezeMetaData meta;
meta.fill(*this);
meta.save(disk, backup_part_path);
}
}
}

View File

@ -1,6 +1,7 @@
#pragma once
#include <base/shared_ptr_helper.h>
#include <base/UUID.h>
#include <atomic>
#include <pcg_random.hpp>
#include <Storages/IStorage.h>
@ -236,6 +237,16 @@ public:
/// Return false if data is still used by another node
bool unlockSharedData(const IMergeTreeDataPart & part) const override;
/// Remove lock with old name for shared data part after rename
bool unlockSharedData(const IMergeTreeDataPart & part, const String & name) const override;
/// Unlock shared data part in zookeeper by part id
/// Return true if data unlocked
/// Return false if data is still used by another node
static bool unlockSharedDataByID(String id, const String & table_uuid, const String & part_name, const String & replica_name_,
DiskPtr disk, zkutil::ZooKeeperPtr zookeeper_, const MergeTreeSettings & settings, Poco::Logger * logger,
const String & zookeeper_path_old);
/// Fetch part only if some replica has it on shared storage like S3
bool tryToFetchIfShared(const IMergeTreeDataPart & part, const DiskPtr & disk, const String & path) override;
@ -263,6 +274,12 @@ public:
bool createEmptyPartInsteadOfLost(zkutil::ZooKeeperPtr zookeeper, const String & lost_part_name);
// Return default or custom zookeeper name for table
String getZooKeeperName() const { return zookeeper_name; }
// Return table id, common for different replicas
String getTableSharedID() const;
static const String getDefaultZooKeeperName() { return default_zookeeper_name; }
private:
@ -393,6 +410,9 @@ private:
ThrottlerPtr replicated_fetches_throttler;
ThrottlerPtr replicated_sends_throttler;
/// Global ID, synced via ZooKeeper between replicas
UUID table_shared_id;
template <class Func>
void foreachActiveParts(Func && func, bool select_sequential_consistency) const;
@ -722,6 +742,22 @@ private:
PartitionBlockNumbersHolder allocateBlockNumbersInAffectedPartitions(
const MutationCommands & commands, ContextPtr query_context, const zkutil::ZooKeeperPtr & zookeeper) const;
static Strings getZeroCopyPartPath(const MergeTreeSettings & settings, DiskType disk_type, const String & table_uuid,
const String & part_name, const String & zookeeper_path_old);
static void createZeroCopyLockNode(const zkutil::ZooKeeperPtr & zookeeper, const String & zookeeper_node);
bool removeDetachedPart(DiskPtr disk, const String & path, const String & part_name, bool is_freezed) override;
bool removeSharedDetachedPart(DiskPtr disk, const String & path, const String & part_name, const String & table_uuid,
const String & zookeeper_name, const String & replica_name, const String & zookeeper_path);
/// Create freeze metadata for the table and save it on disk next to the frozen part. Required only if zero-copy replication is enabled.
void createAndStoreFreezeMetadata(DiskPtr disk, DataPartPtr part, String backup_part_path) const override;
// Create table id if needed
void createTableSharedID();
protected:
/** If not 'attach', either creates a new table in ZK, or adds a replica to an existing table.
*/

View File

@ -50,6 +50,27 @@ const char * auto_config_build[]
"USE_KRB5", "@USE_KRB5@",
"USE_FILELOG", "@USE_FILELOG@",
"USE_BZIP2", "@USE_BZIP2@",
"USE_AMQPCPP", "@USE_AMQPCPP@",
"USE_ROCKSDB", "@USE_ROCKSDB@",
"USE_NURAFT", "@USE_NURAFT@",
"USE_NLP", "@USE_NLP@",
"USE_SQLITE", "@USE_SQLITE@",
"USE_INTERNAL_LLVM_LIBRARY", "@USE_INTERNAL_LLVM_LIBRARY@",
"USE_OPENCL", "@USE_OPENCL@",
"USE_LIBPQXX", "@USE_LIBPQXX@",
"USE_AZURE_BLOB_STORAGE", "@USE_AZURE_BLOB_STORAGE@",
"USE_INTERNAL_SSL_LIBRARY", "@USE_INTERNAL_SSL_LIBRARY@",
"USE_AWS_S3", "@USE_AWS_S3@",
"USE_CASSANDRA", "@USE_CASSANDRA@",
"USE_YAML_CPP", "@USE_YAML_CPP@",
"USE_INTERNAL_HDFS3_LIBRARY", "@USE_INTERNAL_HDFS3_LIBRARY@",
"CLICKHOUSE_SPLIT_BINARY", "@CLICKHOUSE_SPLIT_BINARY@",
"USE_SENTRY", "@USE_SENTRY@",
"USE_DATASKETCHES", "@USE_DATASKETCHES@",
"USE_AVRO", "@USE_AVRO@",
"USE_ARROW", "@USE_ARROW@",
"USE_ORC", "@USE_ORC@",
"USE_MSGPACK", "@USE_MSGPACK@",
"GIT_HASH", "@GIT_HASH@",
"GIT_BRANCH", R"IRjaNsZIL9Yh7FQ4(@GIT_BRANCH@)IRjaNsZIL9Yh7FQ4",
"GIT_DATE", "@GIT_DATE@",

View File

@ -877,12 +877,12 @@ void StorageWindowView::threadFuncFireEvent()
std::unique_lock lock(fire_signal_mutex);
while (!shutdown_called)
{
LOG_TRACE(log, "Fire events: {}", fire_signal.size());
bool signaled = std::cv_status::no_timeout == fire_signal_condition.wait_for(lock, std::chrono::seconds(5));
if (!signaled)
continue;
LOG_TRACE(log, "Fire events: {}", fire_signal.size());
while (!fire_signal.empty())
{
fire(fire_signal.front());

View File

@ -45,13 +45,13 @@ ColumnsDescription TableFunctionPostgreSQL::getActualTableStructure(ContextPtr c
{
const bool use_nulls = context->getSettingsRef().external_table_functions_use_nulls;
auto connection_holder = connection_pool->get();
auto columns = fetchPostgreSQLTableStructure(
connection_holder->get(), configuration->table, configuration->schema, use_nulls).columns;
auto columns_info = fetchPostgreSQLTableStructure(
connection_holder->get(), configuration->table, configuration->schema, use_nulls).physical_columns;
if (!columns)
if (!columns_info)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table structure not returned");
return ColumnsDescription{*columns};
return ColumnsDescription{columns_info->columns};
}

View File

@ -2,6 +2,7 @@
import logging
import subprocess
import os
import sys
from github import Github
@ -13,6 +14,7 @@ from ssh import SSHKey
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import get_commit
from rerun_helper import RerunHelper
NAME = "Docs Release (actions)"
@ -22,9 +24,12 @@ if __name__ == "__main__":
temp_path = TEMP_PATH
repo_path = REPO_COPY
pr_info = PRInfo(need_changed_files=True)
gh = Github(get_best_robot_token())
pr_info = PRInfo(need_changed_files=True)
rerun_helper = RerunHelper(gh, pr_info, NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
if not os.path.exists(temp_path):
os.makedirs(temp_path)

View File

@ -33,6 +33,7 @@ IMAGES = [
"clickhouse/integration-test",
"clickhouse/kerberos-kdc",
"clickhouse/integration-helper",
"clickhouse/dotnet-client",
]
def get_json_params_dict(check_name, pr_info, docker_images, run_by_hash_total, run_by_hash_num):

View File

@ -69,6 +69,7 @@ TRUSTED_CONTRIBUTORS = {e.lower() for e in [
"zlobober", # Developer of YT
"ilejn", # Arenadata, responsible for Kerberized Kafka
"thomoco", # ClickHouse
"BoloniniD", # Seasoned contributor, HSE
]}

View File

@ -41,6 +41,7 @@ TRUSTED_ORG_IDS = {
NEED_RERUN_WORKFLOWS = {
13241696, # PR
14738810, # DocsRelease
15834118, # Docs
15522500, # MasterCI
15516108, # ReleaseCI
@ -92,6 +93,7 @@ TRUSTED_CONTRIBUTORS = {e.lower() for e in [
"vzakaznikov",
"YiuRULE",
"zlobober", # Developer of YT
"BoloniniD", # Seasoned contributor, HSE
]}

View File

@ -228,6 +228,7 @@ class ClickhouseIntegrationTestsRunner:
"clickhouse/mysql-java-client", "clickhouse/mysql-js-client",
"clickhouse/mysql-php-client", "clickhouse/postgresql-java-client",
"clickhouse/integration-test", "clickhouse/kerberos-kdc",
"clickhouse/dotnet-client",
"clickhouse/integration-helper", ]
@ -252,7 +253,7 @@ class ClickhouseIntegrationTestsRunner:
logging.info("Executing installation cmd %s", cmd)
retcode = subprocess.Popen(cmd, shell=True, stderr=log, stdout=log).wait()
if retcode == 0:
logging.info("Instsallation of %s successfull", full_path)
logging.info("Installation of %s successfull", full_path)
else:
raise Exception("Installation of %s failed", full_path)
break

View File

@ -2256,7 +2256,7 @@ class ClickHouseInstance:
logging.debug('{} log line(s) matching "{}" appeared in a {:.3f} seconds'.format(repetitions, regexp, wait_duration))
return wait_duration
def file_exists(self, path):
def path_exists(self, path):
return self.exec_in_container(
["bash", "-c", "echo $(if [ -e '{}' ]; then echo 'yes'; else echo 'no'; fi)".format(path)]) == 'yes\n'
@ -2694,6 +2694,20 @@ class ClickHouseInstance:
if p.exists(self.path):
shutil.rmtree(self.path)
def wait_for_path_exists(self, path, seconds):
while seconds > 0:
seconds -= 1
if self.path_exists(path):
return
time.sleep(1)
def get_backuped_s3_objects(self, disk, backup_name):
path = f'/var/lib/clickhouse/disks/{disk}/shadow/{backup_name}/store'
self.wait_for_path_exists(path, 10)
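# Collect remote object references of the form r<64 binary digits>-file-<32 lowercase characters> from the backup metadata files.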
command = ['find', path, '-type', 'f',
'-exec', 'grep', '-o', 'r[01]\\{64\\}-file-[[:lower:]]\\{32\\}', '{}', ';']
return self.exec_in_container(command).split('\n')
class ClickHouseKiller(object):
def __init__(self, clickhouse_node):

View File

@ -226,6 +226,8 @@ if __name__ == "__main__":
[image, tag] = img_tag.split(":")
if image == "clickhouse/mysql-golang-client":
env_tags += "-e {}={} ".format("DOCKER_MYSQL_GOLANG_CLIENT_TAG", tag)
elif image == "clickhouse/dotnet-client":
env_tags += "-e {}={} ".format("DOCKER_DOTNET_CLIENT_TAG", tag)
elif image == "clickhouse/mysql-java-client":
env_tags += "-e {}={} ".format("DOCKER_MYSQL_JAVA_CLIENT_TAG", tag)
elif image == "clickhouse/mysql-js-client":
@ -237,7 +239,7 @@ if __name__ == "__main__":
elif image == "clickhouse/integration-test":
env_tags += "-e {}={} ".format("DOCKER_BASE_TAG", tag)
elif image == "clickhouse/kerberos-kdc":
env_tags += "-e {}={}".format("DOCKER_KERBEROS_KDC_TAG", tag)
env_tags += "-e {}={} ".format("DOCKER_KERBEROS_KDC_TAG", tag)
else:
logging.info("Unknown image %s" % (image))

Some files were not shown because too many files have changed in this diff.