Merge remote-tracking branch 'origin/master' into system-querylog-map
Commit: 7083ef45a6

contrib/aws (vendored submodule):
@@ -1 +1 @@
-Subproject commit a220591e335923ce1c19bbf9eb925787f7ab6c13
+Subproject commit 7d48b2c8193679cc4516e5bd68ae4a64b94dae7d
@@ -43,6 +43,7 @@ RUN apt-get update \
     clang-tidy-${LLVM_VERSION} \
     cmake \
     curl \
+    lsof \
     expect \
     fakeroot \
     git \
@@ -21,13 +21,16 @@ function clone
 
 git init
 git remote add origin https://github.com/ClickHouse/ClickHouse
-git fetch --depth=100 origin "$SHA_TO_TEST"
-git fetch --depth=100 origin master # Used to obtain the list of modified or added tests
+# Network is unreliable. GitHub neither.
+for _ in {1..100}; do git fetch --depth=100 origin "$SHA_TO_TEST" && break; sleep 1; done
+# Used to obtain the list of modified or added tests
+for _ in {1..100}; do git fetch --depth=100 origin master && break; sleep 1; done
 
 # If not master, try to fetch pull/.../{head,merge}
 if [ "$PR_TO_TEST" != "0" ]
 then
-    git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*"
+    for _ in {1..100}; do git fetch --depth=100 origin "refs/pull/$PR_TO_TEST/*:refs/heads/pull/$PR_TO_TEST/*" && break; sleep 1; done
 fi
 
 git checkout "$SHA_TO_TEST"
@@ -55,12 +55,11 @@ function run_tests()
         ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip')
     fi
 
-    for _ in $(seq 1 "$NUM_TRIES"); do
-        clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
-        if [ "${PIPESTATUS[0]}" -ne "0" ]; then
-            break;
-        fi
-    done
+    clickhouse-test --testname --shard --zookeeper --hung-check --print-time \
+        --test-runs "$NUM_TRIES" --jobs 4 \
+        "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+        | ts '%Y-%m-%d %H:%M:%S' \
+        | tee -a test_output/test_result.txt
 }
 
 export -f run_tests
@@ -5,16 +5,35 @@ toc_title: QUOTA
 
 # ALTER QUOTA {#alter-quota-statement}
 
-Changes quotas.
+Changes [quotas](../../../operations/access-rights.md#quotas-management).
 
 Syntax:
 
 ``` sql
 ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
     [RENAME TO new_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```
+
+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields in the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields in the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
+
+`ON CLUSTER` clause allows creating quotas on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
+
+**Examples**
+
+Limit the maximum number of queries for the current user with 123 queries in 15 months constraint:
+
+``` sql
+ALTER QUOTA IF EXISTS qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user limit the maximum execution time with half a second in 30 minutes, and limit the maximum number of queries with 321 and the maximum number of errors with 10 in 5 quarters:
+
+``` sql
+ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
+```
@@ -11,19 +11,29 @@ Syntax:
 
 ``` sql
 CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'forwarded ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```
+
+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields in the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields in the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
 
 `ON CLUSTER` clause allows creating quotas on a cluster, see [Distributed DDL](../../../sql-reference/distributed-ddl.md).
 
-## Example {#create-quota-example}
+**Examples**
 
 Limit the maximum number of queries for the current user with 123 queries in 15 months constraint:
 
 ``` sql
-CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER
+CREATE QUOTA qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user limit the maximum execution time with half a second in 30 minutes, and limit the maximum number of queries with 321 and the maximum number of errors with 10 in 5 quarters:
+
+``` sql
+CREATE QUOTA qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
 ```
@@ -5,18 +5,38 @@ toc_title: QUOTA
 
 # ALTER QUOTA {#alter-quota-statement}
 
-Changes the quota.
+Changes the [quota](../../../operations/access-rights.md#quotas-management).
 
-## Syntax {#alter-quota-syntax}
+Syntax:
 
 ``` sql
 ALTER QUOTA [IF EXISTS] name [ON CLUSTER cluster_name]
     [RENAME TO new_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```
 
-[Original article](https://clickhouse.tech/docs/ru/query_language/alter/quota/) <!--hide-->
+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields in the [system.quotas](../../../operations/system-tables/quotas.md) table.
+
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields in the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
+
+The `ON CLUSTER` clause lets you specify the clusters on which the quota is created, see [Distributed DDL queries](../../../sql-reference/distributed-ddl.md).
+
+**Examples**
+
+Limit the maximum number of queries for the current user to at most 123 queries per 15 months:
+
+``` sql
+ALTER QUOTA IF EXISTS qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user, limit the maximum query execution time to at most half a second per 30 minutes, the maximum number of queries to 321, and the maximum number of errors to 10 per 5 quarters:
+
+``` sql
+ALTER QUOTA IF EXISTS qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
+```
+
+[Original article](https://clickhouse.tech/docs/ru/sql-reference/alter/quota/) <!--hide-->
@@ -7,23 +7,34 @@ toc_title: "\u041a\u0432\u043e\u0442\u0430"
 
 Creates a [quota](../../../operations/access-rights.md#quotas-management) that can be assigned to a user or a role.
 
-### Syntax {#create-quota-syntax}
+Syntax:
 
 ``` sql
 CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
-    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
-        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
+    [KEYED BY {user_name | ip_address | client_key | client_key, user_name | client_key, ip_address} | NOT KEYED]
+    [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
+        {MAX { {queries | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |
         NO LIMITS | TRACKING ONLY} [,...]]
     [TO {role [,...] | ALL | ALL EXCEPT role [,...]}]
 ```
+
+Keys `user_name`, `ip_address`, `client_key`, `client_key, user_name` and `client_key, ip_address` correspond to the fields in the [system.quotas](../../../operations/system-tables/quotas.md) table.
 
-### Example {#create-quota-example}
+Parameters `queries`, `errors`, `result_rows`, `result_bytes`, `read_rows`, `read_bytes`, `execution_time` correspond to the fields in the [system.quotas_usage](../../../operations/system-tables/quotas_usage.md) table.
 
-Limit the maximum number of queries for the current user to 123 queries per 15 months:
+The `ON CLUSTER` clause lets you specify the clusters on which the quota is created, see [Distributed DDL queries](../../../sql-reference/distributed-ddl.md).
+
+**Examples**
+
+Limit the maximum number of queries for the current user to at most 123 queries per 15 months:
 
 ``` sql
-CREATE QUOTA qA FOR INTERVAL 15 MONTH MAX QUERIES 123 TO CURRENT_USER
+CREATE QUOTA qA FOR INTERVAL 15 month MAX queries = 123 TO CURRENT_USER;
+```
+
+For the default user, limit the maximum query execution time to at most half a second per 30 minutes, the maximum number of queries to 321, and the maximum number of errors to 10 per 5 quarters:
+
+``` sql
+CREATE QUOTA qB FOR INTERVAL 30 minute MAX execution_time = 0.5, FOR INTERVAL 5 quarter MAX queries = 321, errors = 10 TO default;
 ```
 
 [Original article](https://clickhouse.tech/docs/ru/sql-reference/statements/create/quota)
@@ -118,7 +118,7 @@ for (auto & stream : streams)
     stream.second->finalize();
 ```
 
-**18.** 行的某尾不应该包含空格。
+**18.** 行的末尾不应该包含空格。
 
 **19.** 源文件应该用 UTF-8 编码。
 
@@ -932,6 +932,10 @@ private:
             std::cerr << "Received exception from server (version "
                 << server_version << "):" << std::endl << "Code: "
                 << server_exception->code() << ". " << text << std::endl;
+            if (is_interactive)
+            {
+                std::cerr << std::endl;
+            }
         }
 
         if (client_exception)
@@ -939,6 +943,10 @@ private:
             fmt::print(stderr,
                 "Error on processing query '{}':\n{}\n",
                 full_query, client_exception->message());
+            if (is_interactive)
+            {
+                fmt::print(stderr, "\n");
+            }
         }
 
         // A debug check -- at least some exception must be set, if the error
@@ -139,6 +139,7 @@ class IColumn;
     \
     M(UInt64, min_bytes_to_use_direct_io, 0, "The minimum number of bytes for reading the data with O_DIRECT option during SELECT queries execution. 0 - disabled.", 0) \
     M(UInt64, min_bytes_to_use_mmap_io, 0, "The minimum number of bytes for reading the data with mmap option during SELECT queries execution. 0 - disabled.", 0) \
+    M(Bool, checksum_on_read, true, "Validate checksums on reading. It is enabled by default and should be always enabled in production. Please do not expect any benefits in disabling this setting. It may only be used for experiments and benchmarks. The setting only applicable for tables of MergeTree family. Checksums are always validated for other table engines and when receiving data over network.", 0) \
     \
     M(Bool, force_index_by_date, 0, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
     M(Bool, force_primary_key, 0, "Throw an exception if there is primary key in a table, and it is not used.", 0) \
@@ -29,8 +29,14 @@ namespace ErrorCodes
 
 DataTypePtr DataTypeFactory::get(const String & full_name) const
 {
+    /// Data type parser can be invoked from coroutines with small stack.
+    /// Value 315 is known to cause stack overflow in some test configurations (debug build, sanitizers)
+    /// let's make the threshold significantly lower.
+    /// It is impractical for user to have complex data types with this depth.
+    static constexpr size_t data_type_max_parse_depth = 200;
+
     ParserDataType parser;
-    ASTPtr ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
+    ASTPtr ast = parseQuery(parser, full_name.data(), full_name.data() + full_name.size(), "data type", 0, data_type_max_parse_depth);
     return get(ast);
 }
 
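For context on why the parse depth is capped: every level of nesting in a recursive-descent parser consumes one stack frame, so a bounded depth turns a potential stack overflow into an ordinary error. A minimal self-contained sketch of that idea (generic code, not ClickHouse's actual ParserDataType or parseQuery):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Hypothetical recursive parser for nested types such as "Array(Array(...(UInt8)...))".
// Each nesting level recurses once, so an explicit cap converts a crash into an exception.
static void parseNested(const std::string & s, size_t pos, size_t depth, size_t max_depth)
{
    if (depth > max_depth)
        throw std::runtime_error("nesting is too deep");
    if (s.compare(pos, 6, "Array(") == 0)
        parseNested(s, pos + 6, depth + 1, max_depth);   // recurse into the element type
}

int main()
{
    std::string deep;
    for (int i = 0; i < 1000; ++i)
        deep += "Array(";
    deep += "UInt8";

    try
    {
        parseNested(deep, 0, 0, /*max_depth=*/200);      // capped, as in the patch above
    }
    catch (const std::exception & e)
    {
        std::cout << "rejected gracefully: " << e.what() << '\n';
    }
}
```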
@@ -885,15 +885,17 @@ MutableColumnUniquePtr DataTypeLowCardinality::createColumnUniqueImpl(const IDat
     if (const auto * nullable_type = typeid_cast<const DataTypeNullable *>(&keys_type))
         type = nullable_type->getNestedType().get();
 
-    if (isString(type))
+    WhichDataType which(type);
+
+    if (which.isString())
         return creator(static_cast<ColumnString *>(nullptr));
-    if (isFixedString(type))
+    else if (which.isFixedString())
         return creator(static_cast<ColumnFixedString *>(nullptr));
-    if (typeid_cast<const DataTypeDate *>(type))
+    else if (which.isDate())
        return creator(static_cast<ColumnVector<UInt16> *>(nullptr));
-    if (typeid_cast<const DataTypeDateTime *>(type))
+    else if (which.isDateTime())
         return creator(static_cast<ColumnVector<UInt32> *>(nullptr));
-    if (isColumnedAsNumber(type))
+    else if (which.isInt() || which.isUInt() || which.isFloat())
     {
         MutableColumnUniquePtr column;
         TypeListNativeNumbers::forEach(CreateColumnVector(column, *type, creator));
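The change above replaces a chain of per-type casts with a single classification object. A rough standalone analogue of that pattern (the types and enum here are invented for illustration and are not ClickHouse's IDataType hierarchy):

```cpp
#include <iostream>
#include <typeinfo>

// Classify the object once into a small enum ("which"), then branch on cheap
// enum comparisons instead of probing the object repeatedly with casts.
struct IType { virtual ~IType() = default; };
struct TString : IType {};
struct TDate : IType {};
struct TInt : IType {};

enum class Which { String, Date, Int, Unknown };

Which classify(const IType & t)
{
    if (typeid(t) == typeid(TString)) return Which::String;
    if (typeid(t) == typeid(TDate))   return Which::Date;
    if (typeid(t) == typeid(TInt))    return Which::Int;
    return Which::Unknown;
}

int main()
{
    TDate d;
    Which which = classify(d);          // one classification...
    if (which == Which::String)
        std::cout << "string column\n";
    else if (which == Which::Date)      // ...then simple comparisons
        std::cout << "date column\n";
    else if (which == Which::Int)
        std::cout << "int column\n";
}
```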
@@ -31,6 +31,7 @@ public:
 
     bool canBeUsedInBitOperations() const override { return true; }
     bool canBeInsideNullable() const override { return true; }
+    bool canBeInsideLowCardinality() const override { return false; }
 
     bool canBePromoted() const override { return false; }
 };
@@ -112,32 +112,33 @@ void registerDiskS3(DiskFactory & factory)
         Poco::File disk{context.getPath() + "disks/" + name};
         disk.createDirectories();
 
-        Aws::Client::ClientConfiguration cfg;
+        S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
+            context.getRemoteHostFilter(),
+            context.getGlobalContext().getSettingsRef().s3_max_redirects);
 
         S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint")));
         if (uri.key.back() != '/')
             throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
 
-        cfg.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 10000);
-        cfg.httpRequestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
-        cfg.maxConnections = config.getUInt(config_prefix + ".max_connections", 100);
-        cfg.endpointOverride = uri.endpoint;
+        client_configuration.connectTimeoutMs = config.getUInt(config_prefix + ".connect_timeout_ms", 10000);
+        client_configuration.httpRequestTimeoutMs = config.getUInt(config_prefix + ".request_timeout_ms", 5000);
+        client_configuration.maxConnections = config.getUInt(config_prefix + ".max_connections", 100);
+        client_configuration.endpointOverride = uri.endpoint;
 
         auto proxy_config = getProxyConfiguration(config_prefix, config);
         if (proxy_config)
-            cfg.perRequestConfiguration = [proxy_config](const auto & request) { return proxy_config->getConfiguration(request); };
+            client_configuration.perRequestConfiguration = [proxy_config](const auto & request) { return proxy_config->getConfiguration(request); };
 
-        cfg.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(
+        client_configuration.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(
             config.getUInt(config_prefix + ".retry_attempts", 10));
 
         auto client = S3::ClientFactory::instance().create(
-            cfg,
+            client_configuration,
             uri.is_virtual_hosted_style,
             config.getString(config_prefix + ".access_key_id", ""),
             config.getString(config_prefix + ".secret_access_key", ""),
-            config.getBool(config_prefix + ".use_environment_credentials", config.getBool("s3.use_environment_credentials", false)),
-            context.getRemoteHostFilter(),
-            context.getGlobalContext().getSettingsRef().s3_max_redirects);
+            config.getBool(config_prefix + ".use_environment_credentials", config.getBool("s3.use_environment_credentials", false))
+        );
 
         String metadata_path = config.getString(config_prefix + ".metadata_path", context.getPath() + "disks/" + name + "/");
 
@@ -95,9 +95,11 @@ void registerFunctionsConversion(FunctionFactory & factory)
     factory.registerFunction<FunctionToUUIDOrNull>();
 
     factory.registerFunction<FunctionParseDateTimeBestEffort>();
-    factory.registerFunction<FunctionParseDateTimeBestEffortUS>();
     factory.registerFunction<FunctionParseDateTimeBestEffortOrZero>();
     factory.registerFunction<FunctionParseDateTimeBestEffortOrNull>();
+    factory.registerFunction<FunctionParseDateTimeBestEffortUS>();
+    factory.registerFunction<FunctionParseDateTimeBestEffortUSOrZero>();
+    factory.registerFunction<FunctionParseDateTimeBestEffortUSOrNull>();
     factory.registerFunction<FunctionParseDateTime32BestEffort>();
     factory.registerFunction<FunctionParseDateTime32BestEffortOrZero>();
     factory.registerFunction<FunctionParseDateTime32BestEffortOrNull>();
@@ -955,6 +955,12 @@ struct ConvertThroughParsing
                     vec_to[i] = res;
                 }
             }
+            else if constexpr (parsing_mode == ConvertFromStringParsingMode::BestEffortUS)
+            {
+                time_t res;
+                parsed = tryParseDateTimeBestEffortUS(res, read_buffer, *local_time_zone, *utc_time_zone);
+                vec_to[i] = res;
+            }
             else
             {
                 if constexpr (to_datetime64)
@@ -2003,9 +2009,11 @@ using FunctionToDecimal256OrNull = FunctionConvertFromString<DataTypeDecimal<Dec
 using FunctionToUUIDOrNull = FunctionConvertFromString<DataTypeUUID, NameToUUIDOrNull, ConvertFromStringExceptionMode::Null>;
 
 struct NameParseDateTimeBestEffort { static constexpr auto name = "parseDateTimeBestEffort"; };
-struct NameParseDateTimeBestEffortUS { static constexpr auto name = "parseDateTimeBestEffortUS"; };
 struct NameParseDateTimeBestEffortOrZero { static constexpr auto name = "parseDateTimeBestEffortOrZero"; };
 struct NameParseDateTimeBestEffortOrNull { static constexpr auto name = "parseDateTimeBestEffortOrNull"; };
+struct NameParseDateTimeBestEffortUS { static constexpr auto name = "parseDateTimeBestEffortUS"; };
+struct NameParseDateTimeBestEffortUSOrZero { static constexpr auto name = "parseDateTimeBestEffortUSOrZero"; };
+struct NameParseDateTimeBestEffortUSOrNull { static constexpr auto name = "parseDateTimeBestEffortUSOrNull"; };
 struct NameParseDateTime32BestEffort { static constexpr auto name = "parseDateTime32BestEffort"; };
 struct NameParseDateTime32BestEffortOrZero { static constexpr auto name = "parseDateTime32BestEffortOrZero"; };
 struct NameParseDateTime32BestEffortOrNull { static constexpr auto name = "parseDateTime32BestEffortOrNull"; };
@@ -2016,13 +2024,18 @@ struct NameParseDateTime64BestEffortOrNull { static constexpr auto name = "parse
 
 using FunctionParseDateTimeBestEffort = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTimeBestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>;
-using FunctionParseDateTimeBestEffortUS = FunctionConvertFromString<
-    DataTypeDateTime, NameParseDateTimeBestEffortUS, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffortUS>;
 using FunctionParseDateTimeBestEffortOrZero = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTimeBestEffortOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffort>;
 using FunctionParseDateTimeBestEffortOrNull = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTimeBestEffortOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffort>;
 
+using FunctionParseDateTimeBestEffortUS = FunctionConvertFromString<
+    DataTypeDateTime, NameParseDateTimeBestEffortUS, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffortUS>;
+using FunctionParseDateTimeBestEffortUSOrZero = FunctionConvertFromString<
+    DataTypeDateTime, NameParseDateTimeBestEffortUSOrZero, ConvertFromStringExceptionMode::Zero, ConvertFromStringParsingMode::BestEffortUS>;
+using FunctionParseDateTimeBestEffortUSOrNull = FunctionConvertFromString<
+    DataTypeDateTime, NameParseDateTimeBestEffortUSOrNull, ConvertFromStringExceptionMode::Null, ConvertFromStringParsingMode::BestEffortUS>;
+
 using FunctionParseDateTime32BestEffort = FunctionConvertFromString<
     DataTypeDateTime, NameParseDateTime32BestEffort, ConvertFromStringExceptionMode::Throw, ConvertFromStringParsingMode::BestEffort>;
 using FunctionParseDateTime32BestEffortOrZero = FunctionConvertFromString<
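The aliases above stamp out a family of named functions from one conversion template by combining an exception-mode policy with a parsing-mode policy. A hedged, generic sketch of that technique (plain integer parsing stands in for the real date-time conversion, and all names here are illustrative only):

```cpp
#include <iostream>
#include <optional>
#include <string>

// One template, specialised by "what to do on error" and "how to parse",
// yields a whole family of conversion functions.
enum class ExceptionMode { Throw, Zero, Null };
enum class ParsingMode { Basic, BestEffortUS };

template <ExceptionMode on_error, ParsingMode mode>
std::optional<int> parseInt(const std::string & s)
{
    try
    {
        // ParsingMode could select a more forgiving parser; both behave the same here.
        return std::stoi(s);
    }
    catch (...)
    {
        if constexpr (on_error == ExceptionMode::Throw)
            throw;
        else if constexpr (on_error == ExceptionMode::Zero)
            return 0;
        else
            return std::nullopt;   // Null mode
    }
}

int main()
{
    auto ok      = parseInt<ExceptionMode::Zero, ParsingMode::Basic>("42");
    auto missing = parseInt<ExceptionMode::Null, ParsingMode::BestEffortUS>("oops");
    std::cout << *ok << ' ' << (missing ? "value" : "null") << '\n';   // prints "42 null"
}
```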
@@ -6,13 +6,11 @@
 
 #include <utility>
 #include <IO/HTTPCommon.h>
-#include <IO/S3/SessionAwareAwsStream.h>
 #include <IO/WriteBufferFromString.h>
 #include <IO/Operators.h>
 #include <Common/Stopwatch.h>
 #include <aws/core/http/HttpRequest.h>
 #include <aws/core/http/HttpResponse.h>
-#include <aws/core/http/standard/StandardHttpResponse.h>
 #include <aws/core/monitoring/HttpClientMetrics.h>
 #include <aws/core/utils/ratelimiter/RateLimiterInterface.h>
 #include "Poco/StreamCopier.h"
@@ -49,11 +47,9 @@ namespace DB::S3
 {
 
 PocoHTTPClientConfiguration::PocoHTTPClientConfiguration(
-    const Aws::Client::ClientConfiguration & cfg,
     const RemoteHostFilter & remote_host_filter_,
     unsigned int s3_max_redirects_)
-    : Aws::Client::ClientConfiguration(cfg)
-    , remote_host_filter(remote_host_filter_)
+    : remote_host_filter(remote_host_filter_)
     , s3_max_redirects(s3_max_redirects_)
 {
 }
@@ -90,29 +86,19 @@ PocoHTTPClient::PocoHTTPClient(const PocoHTTPClientConfiguration & clientConfigu
 {
 }
 
-std::shared_ptr<Aws::Http::HttpResponse> PocoHTTPClient::MakeRequest(
-    Aws::Http::HttpRequest & request,
-    Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
-    Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const
-{
-    auto response = Aws::MakeShared<Aws::Http::Standard::StandardHttpResponse>("PocoHTTPClient", request);
-    makeRequestInternal(request, response, readLimiter, writeLimiter);
-    return response;
-}
-
 std::shared_ptr<Aws::Http::HttpResponse> PocoHTTPClient::MakeRequest(
     const std::shared_ptr<Aws::Http::HttpRequest> & request,
     Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
     Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const
 {
-    auto response = Aws::MakeShared<Aws::Http::Standard::StandardHttpResponse>("PocoHTTPClient", request);
+    auto response = Aws::MakeShared<PocoHTTPResponse>("PocoHTTPClient", request);
     makeRequestInternal(*request, response, readLimiter, writeLimiter);
     return response;
 }
 
 void PocoHTTPClient::makeRequestInternal(
     Aws::Http::HttpRequest & request,
-    std::shared_ptr<Aws::Http::Standard::StandardHttpResponse> & response,
+    std::shared_ptr<PocoHTTPResponse> & response,
     Aws::Utils::RateLimits::RateLimiterInterface *,
     Aws::Utils::RateLimits::RateLimiterInterface *) const
 {
@@ -278,7 +264,7 @@ void PocoHTTPClient::makeRequestInternal(
                 }
             }
             else
-                response->GetResponseStream().SetUnderlyingStream(std::make_shared<SessionAwareAwsStream<decltype(session)>>(session, response_body_stream));
+                response->SetResponseBody(response_body_stream, session);
 
             return;
         }
@@ -2,9 +2,12 @@
 
 #include <Common/RemoteHostFilter.h>
 #include <IO/ConnectionTimeouts.h>
+#include <IO/HTTPCommon.h>
+#include <IO/S3/SessionAwareIOStream.h>
 #include <aws/core/client/ClientConfiguration.h>
 #include <aws/core/http/HttpClient.h>
 #include <aws/core/http/HttpRequest.h>
+#include <aws/core/http/standard/StandardHttpResponse.h>
 
 namespace Aws::Http::Standard
 {
@@ -18,16 +21,52 @@ class Context;
 
 namespace DB::S3
 {
+class ClientFactory;
+
 struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration
 {
     const RemoteHostFilter & remote_host_filter;
     unsigned int s3_max_redirects;
 
-    PocoHTTPClientConfiguration(const Aws::Client::ClientConfiguration & cfg, const RemoteHostFilter & remote_host_filter_,
-        unsigned int s3_max_redirects_);
-
     void updateSchemeAndRegion();
+
+private:
+    PocoHTTPClientConfiguration(const RemoteHostFilter & remote_host_filter_, unsigned int s3_max_redirects_);
+
+    /// Constructor of Aws::Client::ClientConfiguration must be called after AWS SDK initialization.
+    friend ClientFactory;
+};
+
+class PocoHTTPResponse : public Aws::Http::Standard::StandardHttpResponse
+{
+public:
+    using SessionPtr = PooledHTTPSessionPtr;
+
+    PocoHTTPResponse(const std::shared_ptr<const Aws::Http::HttpRequest> request)
+        : Aws::Http::Standard::StandardHttpResponse(request)
+        , body_stream(request->GetResponseStreamFactory())
+    {
+    }
+
+    void SetResponseBody(Aws::IStream & incoming_stream, SessionPtr & session_)
+    {
+        body_stream = Aws::Utils::Stream::ResponseStream(
+            Aws::New<SessionAwareIOStream<SessionPtr>>("http result streambuf", session_, incoming_stream.rdbuf())
+        );
+    }
+
+    Aws::IOStream & GetResponseBody() const override
+    {
+        return body_stream.GetUnderlyingStream();
+    }
+
+    Aws::Utils::Stream::ResponseStream && SwapResponseStreamOwnership() override
+    {
+        return std::move(body_stream);
+    }
+
+private:
+    Aws::Utils::Stream::ResponseStream body_stream;
 };
 
 class PocoHTTPClient : public Aws::Http::HttpClient
@@ -35,10 +74,6 @@ class PocoHTTPClient : public Aws::Http::HttpClient
 public:
     explicit PocoHTTPClient(const PocoHTTPClientConfiguration & clientConfiguration);
     ~PocoHTTPClient() override = default;
-    std::shared_ptr<Aws::Http::HttpResponse> MakeRequest(
-        Aws::Http::HttpRequest & request,
-        Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
-        Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const override;
 
     std::shared_ptr<Aws::Http::HttpResponse> MakeRequest(
         const std::shared_ptr<Aws::Http::HttpRequest> & request,
@@ -48,7 +83,7 @@ public:
 private:
     void makeRequestInternal(
         Aws::Http::HttpRequest & request,
-        std::shared_ptr<Aws::Http::Standard::StandardHttpResponse> & response,
+        std::shared_ptr<PocoHTTPResponse> & response,
         Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
         Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const;
 
@@ -1,27 +0,0 @@
-#pragma once
-
-#include <IO/HTTPCommon.h>
-
-#include <aws/core/utils/stream/ResponseStream.h>
-
-
-namespace DB::S3
-{
-/**
- * Wrapper of IOStream to store response stream and corresponding HTTP session.
- */
-template <typename Session>
-class SessionAwareAwsStream : public Aws::IStream
-{
-public:
-    SessionAwareAwsStream(Session session_, std::istream & response_stream_)
-        : Aws::IStream(response_stream_.rdbuf()), session(std::move(session_))
-    {
-    }
-
-private:
-    /// Poco HTTP session is holder of response stream.
-    Session session;
-};
-
-}
src/IO/S3/SessionAwareIOStream.h (new file, 26 lines)
@@ -0,0 +1,26 @@
+#pragma once
+
+#include <iostream>
+
+
+namespace DB::S3
+{
+/**
+ * Wrapper of IOStream to store response stream and corresponding HTTP session.
+ */
+template <typename Session>
+class SessionAwareIOStream : public std::iostream
+{
+public:
+    SessionAwareIOStream(Session session_, std::streambuf * sb)
+        : std::iostream(sb)
+        , session(std::move(session_))
+    {
+    }
+
+private:
+    /// Poco HTTP session is holder of response stream.
+    Session session;
+};
+
+}
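The class above only ties the lifetime of an HTTP session to the stream that exposes the response body. A minimal standalone sketch of how such a wrapper behaves, with a stand-in `FakeSession` instead of ClickHouse's pooled Poco session (that substitution is an assumption made purely for illustration):

```cpp
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <utility>

// Stand-in for the pooled Poco HTTP session; prints when it is released.
struct FakeSession
{
    ~FakeSession() { std::cout << "session released\n"; }
};

// Same idea as SessionAwareIOStream above: the stream reads through someone
// else's streambuf, but keeps the session alive as long as the stream lives.
template <typename Session>
class SessionAwareIOStream : public std::iostream
{
public:
    SessionAwareIOStream(Session session_, std::streambuf * sb)
        : std::iostream(sb), session(std::move(session_))
    {
    }

private:
    Session session;
};

int main()
{
    std::istringstream response_body("HTTP response payload");
    {
        SessionAwareIOStream<std::shared_ptr<FakeSession>> stream(
            std::make_shared<FakeSession>(), response_body.rdbuf());

        std::string word;
        stream >> word;                      // read through the wrapper
        std::cout << "read: " << word << '\n';
    }                                        // wrapper destroyed -> session released here
}
```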
@@ -144,7 +144,7 @@ public:
         }
         else if (Aws::Utils::StringUtils::ToLower(ec2_metadata_disabled.c_str()) != "true")
         {
-            Aws::Client::ClientConfiguration aws_client_configuration;
+            DB::S3::PocoHTTPClientConfiguration aws_client_configuration = DB::S3::ClientFactory::instance().createClientConfiguration(remote_host_filter, s3_max_redirects);
 
             /// See MakeDefaultHttpResourceClientConfiguration().
             /// This is part of EC2 metadata client, but unfortunately it can't be accessed from outside
@@ -165,8 +165,7 @@ public:
             aws_client_configuration.requestTimeoutMs = 1000;
             aws_client_configuration.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(1, 1000);
 
-            DB::S3::PocoHTTPClientConfiguration client_configuration(aws_client_configuration, remote_host_filter, s3_max_redirects);
-            auto ec2_metadata_client = std::make_shared<Aws::Internal::EC2MetadataClient>(client_configuration);
+            auto ec2_metadata_client = std::make_shared<Aws::Internal::EC2MetadataClient>(aws_client_configuration);
             auto config_loader = std::make_shared<Aws::Config::EC2InstanceProfileConfigLoader>(ec2_metadata_client);
 
             AddProvider(std::make_shared<Aws::Auth::InstanceProfileCredentialsProvider>(config_loader));
@@ -207,13 +206,32 @@ public:
         return result;
     }
 
+    bool SignRequest(Aws::Http::HttpRequest & request, const char * region, const char * service_name, bool sign_body) const override
+    {
+        auto result = Aws::Client::AWSAuthV4Signer::SignRequest(request, region, service_name, sign_body);
+        for (const auto & header : headers)
+            request.SetHeaderValue(header.name, header.value);
+        return result;
+    }
+
     bool PresignRequest(
         Aws::Http::HttpRequest & request,
         const char * region,
-        const char * serviceName,
         long long expiration_time_sec) const override // NOLINT
     {
-        auto result = Aws::Client::AWSAuthV4Signer::PresignRequest(request, region, serviceName, expiration_time_sec);
+        auto result = Aws::Client::AWSAuthV4Signer::PresignRequest(request, region, expiration_time_sec);
+        for (const auto & header : headers)
+            request.SetHeaderValue(header.name, header.value);
+        return result;
+    }
+
+    bool PresignRequest(
+        Aws::Http::HttpRequest & request,
+        const char * region,
+        const char * service_name,
+        long long expiration_time_sec) const override // NOLINT
+    {
+        auto result = Aws::Client::AWSAuthV4Signer::PresignRequest(request, region, service_name, expiration_time_sec);
         for (const auto & header : headers)
             request.SetHeaderValue(header.name, header.value);
         return result;
@@ -265,33 +283,28 @@ namespace S3
         const RemoteHostFilter & remote_host_filter,
         unsigned int s3_max_redirects)
     {
-        Aws::Client::ClientConfiguration cfg;
+        PocoHTTPClientConfiguration client_configuration(remote_host_filter, s3_max_redirects);
 
         if (!endpoint.empty())
-            cfg.endpointOverride = endpoint;
+            client_configuration.endpointOverride = endpoint;
 
-        return create(cfg,
+        return create(client_configuration,
             is_virtual_hosted_style,
             access_key_id,
             secret_access_key,
-            use_environment_credentials,
-            remote_host_filter,
-            s3_max_redirects);
+            use_environment_credentials);
     }
 
     std::shared_ptr<Aws::S3::S3Client> ClientFactory::create( // NOLINT
-        const Aws::Client::ClientConfiguration & cfg,
+        const PocoHTTPClientConfiguration & cfg_,
         bool is_virtual_hosted_style,
         const String & access_key_id,
         const String & secret_access_key,
-        bool use_environment_credentials,
-        const RemoteHostFilter & remote_host_filter,
-        unsigned int s3_max_redirects)
+        bool use_environment_credentials)
     {
         Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key);
 
-        PocoHTTPClientConfiguration client_configuration(cfg, remote_host_filter, s3_max_redirects);
+        PocoHTTPClientConfiguration client_configuration = cfg_;
 
         client_configuration.updateSchemeAndRegion();
 
         return std::make_shared<Aws::S3::S3Client>(
@@ -301,22 +314,19 @@ namespace S3
             use_environment_credentials), // AWS credentials provider.
             std::move(client_configuration), // Client configuration.
             Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, // Sign policy.
-            is_virtual_hosted_style || cfg.endpointOverride.empty() // Use virtual addressing if endpoint is not specified.
+            is_virtual_hosted_style || client_configuration.endpointOverride.empty() // Use virtual addressing if endpoint is not specified.
         );
     }
 
     std::shared_ptr<Aws::S3::S3Client> ClientFactory::create( // NOLINT
-        const Aws::Client::ClientConfiguration & cfg,
+        const PocoHTTPClientConfiguration & cfg_,
         bool is_virtual_hosted_style,
         const String & access_key_id,
         const String & secret_access_key,
         HeaderCollection headers,
-        bool use_environment_credentials,
-        const RemoteHostFilter & remote_host_filter,
-        unsigned int s3_max_redirects)
+        bool use_environment_credentials)
     {
-        PocoHTTPClientConfiguration client_configuration(cfg, remote_host_filter, s3_max_redirects);
+        PocoHTTPClientConfiguration client_configuration = cfg_;
 
         client_configuration.updateSchemeAndRegion();
 
         Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key);
@@ -329,6 +339,13 @@ namespace S3
         );
     }
 
+    PocoHTTPClientConfiguration ClientFactory::createClientConfiguration( // NOLINT
+        const RemoteHostFilter & remote_host_filter,
+        unsigned int s3_max_redirects)
+    {
+        return PocoHTTPClientConfiguration(remote_host_filter, s3_max_redirects);
+    }
+
     URI::URI(const Poco::URI & uri_)
     {
         /// Case when bucket name represented in domain name of S3 URL.
@@ -7,6 +7,7 @@
 #include <common/types.h>
 #include <aws/core/Aws.h>
 #include <aws/core/client/ClientConfiguration.h>
+#include <IO/S3/PocoHTTPClient.h>
 #include <Poco/URI.h>
 
 namespace Aws::S3
@@ -23,7 +24,6 @@ namespace DB
 
 namespace DB::S3
 {
-
 class ClientFactory
 {
 public:
@@ -41,21 +41,21 @@ public:
         unsigned int s3_max_redirects);
 
     std::shared_ptr<Aws::S3::S3Client> create(
-        const Aws::Client::ClientConfiguration & cfg,
+        const PocoHTTPClientConfiguration & cfg,
         bool is_virtual_hosted_style,
         const String & access_key_id,
         const String & secret_access_key,
-        bool use_environment_credentials,
-        const RemoteHostFilter & remote_host_filter,
-        unsigned int s3_max_redirects);
+        bool use_environment_credentials);
 
     std::shared_ptr<Aws::S3::S3Client> create(
-        const Aws::Client::ClientConfiguration & cfg,
+        const PocoHTTPClientConfiguration & cfg,
         bool is_virtual_hosted_style,
         const String & access_key_id,
         const String & secret_access_key,
         HeaderCollection headers,
-        bool use_environment_credentials,
+        bool use_environment_credentials);
+
+    PocoHTTPClientConfiguration createClientConfiguration(
         const RemoteHostFilter & remote_host_filter,
         unsigned int s3_max_redirects);
 
@@ -654,6 +654,11 @@ bool tryParseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl
     return parseDateTimeBestEffortImpl<bool, false>(res, in, local_time_zone, utc_time_zone, nullptr);
 }
 
+bool tryParseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone)
+{
+    return parseDateTimeBestEffortImpl<bool, true>(res, in, local_time_zone, utc_time_zone, nullptr);
+}
+
 void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone)
 {
     return parseDateTime64BestEffortImpl<void>(res, scale, in, local_time_zone, utc_time_zone);
@@ -57,8 +57,9 @@ class ReadBuffer;
  */
 
 void parseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
-void parseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
 bool tryParseDateTimeBestEffort(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
+void parseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
+bool tryParseDateTimeBestEffortUS(time_t & res, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
 void parseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
 bool tryParseDateTime64BestEffort(DateTime64 & res, UInt32 scale, ReadBuffer & in, const DateLUTImpl & local_time_zone, const DateLUTImpl & utc_time_zone);
 
@@ -821,6 +821,13 @@ ActionsDAGPtr ActionsDAG::merge(ActionsDAG && first, ActionsDAG && second)
 
         first.nodes.splice(first.nodes.end(), std::move(second.nodes));
 
+        /// Here we rebuild index because some string_view from the first map now may point to string from second.
+        ActionsDAG::Index first_index;
+        for (auto * node : first.index)
+            first_index.insert(node);
+
+        first.index.swap(first_index);
+
 #if USE_EMBEDDED_COMPILER
         if (first.compilation_cache == nullptr)
             first.compilation_cache = second.compilation_cache;
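The rebuild above concerns an index keyed by string views into node names. As a general illustration only (these are not the actual ActionsDAG data structures, which is an assumption made for the sketch), a view-keyed index is only as trustworthy as the storage it points into, so after merging node storage it is cheap insurance to re-insert every entry into a fresh index:

```cpp
#include <iostream>
#include <list>
#include <string>
#include <string_view>
#include <unordered_set>

int main()
{
    // Node names live in lists; std::list::splice moves nodes without copying them.
    std::list<std::string> first  = {"plus(a, b)", "minus(a, b)"};
    std::list<std::string> second = {"multiply(a, b)"};

    // Index keyed by string_view into strings owned by the lists.
    std::unordered_set<std::string_view> index;
    for (const auto & name : first)
        index.insert(name);

    // Merge the node storage, as ActionsDAG::merge does with splice.
    first.splice(first.end(), std::move(second));

    // Rebuild the index from the merged storage so every entry is backed by a
    // string that "first" now owns, and the newly merged names become visible.
    std::unordered_set<std::string_view> rebuilt;
    for (const auto & name : first)
        rebuilt.insert(name);
    index.swap(rebuilt);

    std::cout << "index entries: " << index.size() << '\n';   // prints 3
}
```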
@@ -844,7 +844,12 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere(
     step.required_output.push_back(prewhere_column_name);
     step.can_remove_required_output.push_back(true);
 
-    auto filter_type = (*step.actions()->getIndex().find(prewhere_column_name))->result_type;
+    const auto & index = step.actions()->getIndex();
+    auto it = index.find(prewhere_column_name);
+    if (it == index.end())
+        throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown identifier: '{}'", prewhere_column_name);
+
+    auto filter_type = (*it)->result_type;
     if (!filter_type->canBeUsedInBooleanContext())
         throw Exception("Invalid type for filter in PREWHERE: " + filter_type->getName(),
             ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);
@ -944,7 +949,12 @@ bool SelectQueryExpressionAnalyzer::appendWhere(ExpressionActionsChain & chain,
|
|||||||
step.required_output.push_back(where_column_name);
|
step.required_output.push_back(where_column_name);
|
||||||
step.can_remove_required_output = {true};
|
step.can_remove_required_output = {true};
|
||||||
|
|
||||||
auto filter_type = (*step.actions()->getIndex().find(where_column_name))->result_type;
|
const auto & index = step.actions()->getIndex();
|
||||||
|
auto it = index.find(where_column_name);
|
||||||
|
if (it == index.end())
|
||||||
|
throw Exception(ErrorCodes::UNKNOWN_IDENTIFIER, "Unknown identifier: '{}'", where_column_name);
|
||||||
|
|
||||||
|
auto filter_type = (*it)->result_type;
|
||||||
if (!filter_type->canBeUsedInBooleanContext())
|
if (!filter_type->canBeUsedInBooleanContext())
|
||||||
throw Exception("Invalid type for filter in WHERE: " + filter_type->getName(),
|
throw Exception("Invalid type for filter in WHERE: " + filter_type->getName(),
|
||||||
ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);
|
ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);
|
||||||
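Both hunks above replace an unchecked dereference of find() with a lookup that fails loudly when the filter column is missing. A minimal sketch of the same pattern, using std::unordered_map and std::runtime_error in place of the real ActionsDAG index and Exception types (all names here are illustrative):

#include <stdexcept>
#include <string>
#include <unordered_map>

struct TypeInfo { bool can_be_used_in_boolean_context = true; };

// Look the column up first, report a clear error if it is absent, and only
// then dereference the iterator; dereferencing end() is undefined behaviour.
const TypeInfo & getFilterType(const std::unordered_map<std::string, TypeInfo> & index,
                               const std::string & column_name)
{
    auto it = index.find(column_name);
    if (it == index.end())
        throw std::runtime_error("Unknown identifier: '" + column_name + "'");
    return it->second;
}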
@@ -688,7 +688,7 @@ public:
         if constexpr (has_defaults)
             applyLazyDefaults();

-        for (size_t j = 0; j < right_indexes.size(); ++j)
+        for (size_t j = 0, size = right_indexes.size(); j < size; ++j)
             columns[j]->insertFrom(*block.getByPosition(right_indexes[j]).column, row_num);
     }

@@ -701,7 +701,7 @@ public:
     {
         if (lazy_defaults_count)
         {
-            for (size_t j = 0; j < right_indexes.size(); ++j)
+            for (size_t j = 0, size = right_indexes.size(); j < size; ++j)
                 JoinCommon::addDefaultValues(*columns[j], type_name[j].first, lazy_defaults_count);
             lazy_defaults_count = 0;
         }
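The only change in these two hunks is caching the container size in the loop header instead of re-reading right_indexes.size() on every iteration, which also makes it explicit that the bound is not expected to change while iterating. A trivial standalone illustration of the idiom (hypothetical function, not ClickHouse code):

#include <cstddef>
#include <vector>

void process(const std::vector<size_t> & right_indexes)
{
    // Read the size once; the loop body only indexes into the vector.
    for (size_t j = 0, size = right_indexes.size(); j < size; ++j)
        (void)right_indexes[j];  // ... use right_indexes[j] ...
}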
@@ -693,12 +693,18 @@ void TreeRewriterResult::collectUsedColumns(const ASTPtr & query, bool is_select

         if (storage)
         {
-            ss << ", maybe you meant: ";
+            String hint_name{};
             for (const auto & name : columns_context.requiredColumns())
             {
                 auto hints = storage->getHints(name);
                 if (!hints.empty())
-                    ss << " '" << toString(hints) << "'";
+                    hint_name = hint_name + " '" + toString(hints) + "'";
+            }
+
+            if (!hint_name.empty())
+            {
+                ss << ", maybe you meant: ";
+                ss << hint_name;
             }
         }
         else
@@ -14,20 +14,28 @@ namespace
 {

 /// Wrapper to allow mixed lists of nested and normal types.
-class ParserNestedTableOrExpression : public IParserBase
+/// Parameters are either:
+/// - Nested table elements;
+/// - Enum element in form of 'a' = 1;
+/// - literal;
+/// - another data type (or identifier)
+class ParserDataTypeArgument : public IParserBase
 {
 private:
-    const char * getName() const override { return "data type or expression"; }
+    const char * getName() const override { return "data type argument"; }
     bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
     {
-        ParserNestedTable parser1;
+        ParserNestedTable nested_parser;
+        ParserDataType data_type_parser;
+        ParserLiteral literal_parser;

-        if (parser1.parse(pos, node, expected))
-            return true;
+        const char * operators[] = {"=", "equals", nullptr};
+        ParserLeftAssociativeBinaryOperatorList enum_parser(operators, std::make_unique<ParserLiteral>());

-        ParserExpression parser2;
-        return parser2.parse(pos, node, expected);
+        return nested_parser.parse(pos, node, expected)
+            || enum_parser.parse(pos, node, expected)
+            || literal_parser.parse(pos, node, expected)
+            || data_type_parser.parse(pos, node, expected);
     }
 };

@@ -104,7 +112,7 @@ bool ParserDataType::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     ++pos;

     /// Parse optional parameters
-    ParserList args_parser(std::make_unique<ParserNestedTableOrExpression>(), std::make_unique<ParserToken>(TokenType::Comma));
+    ParserList args_parser(std::make_unique<ParserDataTypeArgument>(), std::make_unique<ParserToken>(TokenType::Comma));
     ASTPtr expr_list_args;

     if (!args_parser.parse(pos, expr_list_args, expected))
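The rewritten argument parser simply tries a fixed list of alternatives in order and accepts the first one that matches, so a data type argument may be a nested table element, an enum element like 'a' = 1, a literal, or another data type. A compact sketch of that dispatch shape, with plain std::function callbacks standing in for the real IParser classes (names here are assumptions, not the actual parser API):

#include <functional>
#include <string>
#include <vector>

using TryParse = std::function<bool(const std::string & input)>;

// Return true as soon as one of the alternative sub-parsers accepts the input.
bool parseArgument(const std::string & input, const std::vector<TryParse> & alternatives)
{
    for (const auto & try_parse : alternatives)
        if (try_parse(input))
            return true;
    return false;
}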
@@ -116,12 +116,6 @@ public:
         : Parent(CurrentMetrics::Merge)
     {}

-    void onEntryCreate(const Parent::Entry & entry) override
-    {
-        if (isTTLMergeType(entry->merge_type))
-            ++merges_with_ttl_counter;
-    }
-
     void onEntryDestroy(const Parent::Entry & entry) override
     {
         if (isTTLMergeType(entry->merge_type))
@@ -140,7 +134,20 @@ public:
         }
     }

-    size_t getExecutingMergesWithTTLCount() const
+    /// Merge consists of two parts: assignment and execution. We add a merge to
+    /// the merge list on execution, but check the merge list during merge
+    /// assignment. This leads to a logical race condition (we can assign more
+    /// merges with TTL than allowed). So we "book" a merge with TTL during
+    /// assignment, and remove it from the list after merge execution.
+    ///
+    /// NOTE: Not important for replicated merge tree, we check the count of merges twice:
+    /// in assignment and in the queue before execution.
+    void bookMergeWithTTL()
+    {
+        ++merges_with_ttl_counter;
+    }
+
+    size_t getMergesWithTTLCount() const
     {
         return merges_with_ttl_counter;
     }
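The new comment describes the scheme: the TTL-merge counter is bumped when a merge is assigned ("booked"), not when it starts executing, and — as the surrounding onEntryDestroy code suggests — released when the merge-list entry is destroyed after execution. A self-contained sketch of such a booked counter; the class and method names below (TTLMergeBudget, tryBook, release) are invented for illustration and do not exist in ClickHouse, which only adds bookMergeWithTTL() here.

#include <atomic>
#include <cstddef>

class TTLMergeBudget
{
public:
    explicit TTLMergeBudget(size_t max_merges_) : max_merges(max_merges_) {}

    /// Called during merge assignment; returns false if the pool is already full,
    /// so the limit is enforced before the merge starts executing.
    bool tryBook()
    {
        size_t current = counter.load();
        while (current < max_merges)
            if (counter.compare_exchange_weak(current, current + 1))
                return true;
        return false;
    }

    /// Called when the merge finishes and its list entry is destroyed.
    void release() { --counter; }

    size_t count() const { return counter.load(); }

private:
    const size_t max_merges;
    std::atomic<size_t> counter{0};
};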
@@ -3955,7 +3955,7 @@ NamesAndTypesList MergeTreeData::getVirtuals() const

 size_t MergeTreeData::getTotalMergesWithTTLInMergeList() const
 {
-    return global_context.getMergeList().getExecutingMergesWithTTLCount();
+    return global_context.getMergeList().getMergesWithTTLCount();
 }

 void MergeTreeData::addPartContributionToDataVolume(const DataPartPtr & part)
@@ -547,7 +547,8 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
         .min_bytes_to_use_direct_io = settings.min_bytes_to_use_direct_io,
         .min_bytes_to_use_mmap_io = settings.min_bytes_to_use_mmap_io,
         .max_read_buffer_size = settings.max_read_buffer_size,
-        .save_marks_in_cache = true
+        .save_marks_in_cache = true,
+        .checksum_on_read = settings.checksum_on_read,
     };

     /// PREWHERE
@@ -16,6 +16,8 @@ struct MergeTreeReaderSettings
     bool save_marks_in_cache = false;
     /// Convert old-style nested (single arrays with same prefix, `n.a`, `n.b`...) to subcolumns of data type Nested.
     bool convert_nested_to_subcolumns = false;
+    /// Validate checksums on reading (should be always enabled in production).
+    bool checksum_on_read = true;
 };

 struct MergeTreeWriterSettings
@@ -71,7 +71,8 @@ private:
 public:
     MergeTreeReadPool(
         const size_t threads_, const size_t sum_marks_, const size_t min_marks_for_concurrent_read_,
-        RangesInDataParts && parts_, const MergeTreeData & data_, const StorageMetadataPtr & metadata_snapshot_, const PrewhereInfoPtr & prewhere_info_,
+        RangesInDataParts && parts_, const MergeTreeData & data_, const StorageMetadataPtr & metadata_snapshot_,
+        const PrewhereInfoPtr & prewhere_info_,
         const bool check_columns_, const Names & column_names_,
         const BackoffSettings & backoff_settings_, size_t preferred_block_size_bytes_,
         const bool do_not_steal_tasks_ = false);
@@ -92,6 +92,9 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
         if (profile_callback_)
             buffer->setProfileCallback(profile_callback_, clock_type_);

+        if (!settings.checksum_on_read)
+            buffer->disableChecksumming();
+
         cached_buffer = std::move(buffer);
         data_buffer = cached_buffer.get();
     }
@@ -106,6 +109,9 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
         if (profile_callback_)
             buffer->setProfileCallback(profile_callback_, clock_type_);

+        if (!settings.checksum_on_read)
+            buffer->disableChecksumming();
+
         non_cached_buffer = std::move(buffer);
         data_buffer = non_cached_buffer.get();
     }
@@ -96,6 +96,9 @@ MergeTreeReaderStream::MergeTreeReaderStream(
         if (profile_callback)
             buffer->setProfileCallback(profile_callback, clock_type);

+        if (!settings.checksum_on_read)
+            buffer->disableChecksumming();
+
         cached_buffer = std::move(buffer);
         data_buffer = cached_buffer.get();
     }
@@ -109,6 +112,9 @@ MergeTreeReaderStream::MergeTreeReaderStream(
         if (profile_callback)
             buffer->setProfileCallback(profile_callback, clock_type);

+        if (!settings.checksum_on_read)
+            buffer->disableChecksumming();
+
         non_cached_buffer = std::move(buffer);
         data_buffer = non_cached_buffer.get();
     }
@@ -732,6 +732,10 @@ std::shared_ptr<StorageMergeTree::MergeMutateSelectedEntry> StorageMergeTree::se
             return {};
         }

+        /// Account TTL merge here to avoid exceeding the max_number_of_merges_with_ttl_in_pool limit
+        if (isTTLMergeType(future_part.merge_type))
+            global_context.getMergeList().bookMergeWithTTL();
+
         merging_tagger = std::make_unique<CurrentlyMergingPartsTagger>(future_part, MergeTreeDataMergerMutator::estimateNeededDiskSpace(future_part.parts), *this, metadata_snapshot, false);
         return std::make_shared<MergeMutateSelectedEntry>(future_part, std::move(merging_tagger), MutationCommands{});
     }
@@ -1490,7 +1490,12 @@ bool StorageReplicatedMergeTree::tryExecuteMerge(const LogEntry & entry)
     future_merged_part.updatePath(*this, reserved_space);
     future_merged_part.merge_type = entry.merge_type;

+    /// Account TTL merge
+    if (isTTLMergeType(future_merged_part.merge_type))
+        global_context.getMergeList().bookMergeWithTTL();
+
     auto table_id = getStorageID();
+    /// Add merge to list
     MergeList::EntryPtr merge_entry = global_context.getMergeList().insert(table_id.database_name, table_id.table_name, future_merged_part);

     Transaction transaction(*this);
@@ -222,7 +222,10 @@ StorageS3::StorageS3(
     if (access_key_id_.empty())
         credentials = Aws::Auth::AWSCredentials(std::move(settings.access_key_id), std::move(settings.secret_access_key));

-    Aws::Client::ClientConfiguration client_configuration;
+    S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
+        context_.getRemoteHostFilter(),
+        context_.getGlobalContext().getSettingsRef().s3_max_redirects);
+
     client_configuration.endpointOverride = uri_.endpoint;
     client_configuration.maxConnections = max_connections_;

@@ -232,9 +235,8 @@ StorageS3::StorageS3(
         credentials.GetAWSAccessKeyId(),
         credentials.GetAWSSecretKey(),
         std::move(settings.headers),
-        settings.use_environment_credentials.value_or(global_context.getConfigRef().getBool("s3.use_environment_credentials", false)),
-        context_.getRemoteHostFilter(),
-        context_.getGlobalContext().getSettingsRef().s3_max_redirects);
+        settings.use_environment_credentials.value_or(global_context.getConfigRef().getBool("s3.use_environment_credentials", false))
+    );
 }

@@ -345,9 +345,10 @@ def run_tests_array(all_tests_with_params):
                 SERVER_DIED = True
                 break

+            file_suffix = ('.' + str(os.getpid())) if is_concurrent and args.test_runs > 1 else ''
             reference_file = os.path.join(suite_dir, name) + '.reference'
-            stdout_file = os.path.join(suite_tmp_dir, name) + '.stdout'
-            stderr_file = os.path.join(suite_tmp_dir, name) + '.stderr'
+            stdout_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stdout'
+            stderr_file = os.path.join(suite_tmp_dir, name) + file_suffix + '.stderr'

             proc, stdout, stderr, total_time = run_single_test(args, ext, server_logs_level, client_options, case_file, stdout_file, stderr_file)

@@ -434,6 +435,9 @@ def run_tests_array(all_tests_with_params):
                 if os.path.exists(stderr_file):
                     os.remove(stderr_file)

+            if status and not status.endswith('\n'):
+                status += '\n'
+
             sys.stdout.write(status)
             sys.stdout.flush()
         except KeyboardInterrupt as e:
@@ -721,6 +725,7 @@ def main(args):
         all_tests = [case for case in all_tests if is_test_from_dir(suite_dir, case)]
         if args.test:
             all_tests = [t for t in all_tests if any([re.search(r, t) for r in args.test])]
+        all_tests = all_tests * args.test_runs
         all_tests.sort(key=key_func)

         jobs = args.jobs
@@ -893,6 +898,7 @@ if __name__ == '__main__':
     parser.add_argument('--database', help='Database for tests (random name test_XXXXXX by default)')
     parser.add_argument('--parallel', default='1/1', help='One parallel test run number/total')
     parser.add_argument('-j', '--jobs', default=1, nargs='?', type=int, help='Run all tests in parallel')
+    parser.add_argument('--test-runs', default=1, nargs='?', type=int, help='Run each test many times (useful for e.g. flaky check)')
    parser.add_argument('-U', '--unified', default=3, type=int, help='output NUM lines of unified context')
    parser.add_argument('-r', '--server-check-retries', default=30, type=int, help='Num of tries to execute SELECT 1 before tests started')
    parser.add_argument('--skip-list-path', help="Path to skip-list file")
@@ -38,6 +38,9 @@ class TSV:
     def __str__(self):
         return '\n'.join(self.lines)

+    def __len__(self):
+        return len(self.lines)
+
     @staticmethod
     def toMat(contents):
         return [line.split("\t") for line in contents.split("\n") if line.strip()]
@@ -2,7 +2,7 @@ import time

 import pytest
 from helpers.cluster import ClickHouseCluster
-from helpers.test_tools import assert_eq_with_retry
+from helpers.test_tools import assert_eq_with_retry, TSV

 cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance('node1', main_configs=['configs/fast_background_pool.xml', 'configs/log_conf.xml'], with_zookeeper=True)
@@ -28,12 +28,13 @@ def count_ttl_merges_in_queue(node, table):
     return int(result.strip())


-def count_ttl_merges_in_background_pool(node, table):
-    result = node.query(
-        "SELECT count() FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table))
-    if not result:
-        return 0
-    return int(result.strip())
+def count_ttl_merges_in_background_pool(node, table, level):
+    result = TSV(node.query(
+        "SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table)))
+    count = len(result)
+    if count >= level:
+        print("count_ttl_merges_in_background_pool: merges more than warn level:\n{}".format(result))
+    return count


 def count_regular_merges_in_background_pool(node, table):
@@ -67,7 +68,7 @@ def test_no_ttl_merges_in_busy_pool(started_cluster):

     while count_running_mutations(node1, "test_ttl") < 6:
         print("Mutations count", count_running_mutations(node1, "test_ttl"))
-        assert count_ttl_merges_in_background_pool(node1, "test_ttl") == 0
+        assert count_ttl_merges_in_background_pool(node1, "test_ttl", 1) == 0
         time.sleep(0.5)

     node1.query("SYSTEM START TTL MERGES")
@@ -100,7 +101,7 @@ def test_limited_ttl_merges_in_empty_pool(started_cluster):

     merges_with_ttl_count = set({})
     while True:
-        merges_with_ttl_count.add(count_ttl_merges_in_background_pool(node1, "test_ttl_v2"))
+        merges_with_ttl_count.add(count_ttl_merges_in_background_pool(node1, "test_ttl_v2", 3))
         time.sleep(0.01)
         if node1.query("SELECT COUNT() FROM test_ttl_v2") == "0\n":
             break
@@ -124,7 +125,7 @@ def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster):
     merges_with_ttl_count = set({})
     entries_with_ttl_count = set({})
     while True:
-        merges_with_ttl_count.add(count_ttl_merges_in_background_pool(node1, "replicated_ttl"))
+        merges_with_ttl_count.add(count_ttl_merges_in_background_pool(node1, "replicated_ttl", 3))
         entries_with_ttl_count.add(count_ttl_merges_in_queue(node1, "replicated_ttl"))
         time.sleep(0.01)
         if node1.query("SELECT COUNT() FROM replicated_ttl") == "0\n":
@@ -159,8 +160,8 @@ def test_limited_ttl_merges_two_replicas(started_cluster):
     merges_with_ttl_count_node1 = set({})
     merges_with_ttl_count_node2 = set({})
     while True:
-        merges_with_ttl_count_node1.add(count_ttl_merges_in_background_pool(node1, "replicated_ttl_2"))
-        merges_with_ttl_count_node2.add(count_ttl_merges_in_background_pool(node2, "replicated_ttl_2"))
+        merges_with_ttl_count_node1.add(count_ttl_merges_in_background_pool(node1, "replicated_ttl_2", 3))
+        merges_with_ttl_count_node2.add(count_ttl_merges_in_background_pool(node2, "replicated_ttl_2", 3))
         if node1.query("SELECT COUNT() FROM replicated_ttl_2") == "0\n" and node2.query(
                 "SELECT COUNT() FROM replicated_ttl_2") == "0\n":
             break
@@ -262,18 +262,20 @@ def test_sqlite_odbc_cached_dictionary(started_cluster):
     assert_eq_with_retry(node1, "select dictGetUInt8('sqlite3_odbc_cached', 'Z', toUInt64(1))", "12")


-def test_postgres_odbc_hached_dictionary_with_schema(started_cluster):
+def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
     conn = get_postgres_conn()
     cursor = conn.cursor()
+    cursor.execute("truncate table clickhouse.test_table")
     cursor.execute("insert into clickhouse.test_table values(1, 'hello'),(2, 'world')")
     node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed")
     assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))", "hello")
     assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))", "world")


-def test_postgres_odbc_hached_dictionary_no_tty_pipe_overflow(started_cluster):
+def test_postgres_odbc_hashed_dictionary_no_tty_pipe_overflow(started_cluster):
     conn = get_postgres_conn()
     cursor = conn.cursor()
+    cursor.execute("truncate table clickhouse.test_table")
     cursor.execute("insert into clickhouse.test_table values(3, 'xxx')")
     for i in range(100):
         try:
@@ -18,3 +18,6 @@ c
 d
 cb
 db
+-
+61f0c404-5cb3-11e7-907b-a6006ad3dba0 61f0c404-5cb3-11e7-907b-a6006ad3dba0 61f0c404-5cb3-11e7-907b-a6006ad3dba0
+\N \N \N
@@ -71,3 +71,12 @@ select (toLowCardinality('a') as val) || 'b' group by val;
 select toLowCardinality(z) as val from (select arrayJoin(['c', 'd']) as z) group by val;
 select (toLowCardinality(z) as val) || 'b' from (select arrayJoin(['c', 'd']) as z) group by val;

+select '-';
+drop table if exists lc_str_uuid;
+create table lc_str_uuid(str1 String, str2 LowCardinality(String), str3 StringWithDictionary) ENGINE=Memory;
+select toUUID(str1), toUUID(str2), toUUID(str3) from lc_str_uuid;
+select toUUID(str1, '', NULL), toUUID(str2, '', NULL), toUUID(str3, '', NULL) from lc_str_uuid;
+insert into lc_str_uuid values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', '61f0c404-5cb3-11e7-907b-a6006ad3dba0', '61f0c404-5cb3-11e7-907b-a6006ad3dba0');
+select toUUID(str1), toUUID(str2), toUUID(str3) from lc_str_uuid;
+select toUUID(str1, '', NULL), toUUID(str2, '', NULL), toUUID(str3, '', NULL) from lc_str_uuid;
+drop table if exists lc_str_uuid;
@@ -163,23 +163,23 @@ DROP TABLE IF EXISTS bloom_filter_lc_null_types_test;
 DROP TABLE IF EXISTS bloom_filter_array_lc_null_types_test;

 CREATE TABLE bloom_filter_array_lc_null_types_test (
-    order_key Array(LowCardinality(Nullable((UInt64)))),
+    order_key Array(LowCardinality(Nullable(UInt64))),

-    i8 Array(LowCardinality(Nullable((Int8)))),
-    i16 Array(LowCardinality(Nullable((Int16)))),
-    i32 Array(LowCardinality(Nullable((Int32)))),
-    i64 Array(LowCardinality(Nullable((Int64)))),
-    u8 Array(LowCardinality(Nullable((UInt8)))),
-    u16 Array(LowCardinality(Nullable((UInt16)))),
-    u32 Array(LowCardinality(Nullable((UInt32)))),
-    u64 Array(LowCardinality(Nullable((UInt64)))),
-    f32 Array(LowCardinality(Nullable((Float32)))),
-    f64 Array(LowCardinality(Nullable((Float64)))),
+    i8 Array(LowCardinality(Nullable(Int8))),
+    i16 Array(LowCardinality(Nullable(Int16))),
+    i32 Array(LowCardinality(Nullable(Int32))),
+    i64 Array(LowCardinality(Nullable(Int64))),
+    u8 Array(LowCardinality(Nullable(UInt8))),
+    u16 Array(LowCardinality(Nullable(UInt16))),
+    u32 Array(LowCardinality(Nullable(UInt32))),
+    u64 Array(LowCardinality(Nullable(UInt64))),
+    f32 Array(LowCardinality(Nullable(Float32))),
+    f64 Array(LowCardinality(Nullable(Float64))),

-    date Array(LowCardinality(Nullable((Date)))),
+    date Array(LowCardinality(Nullable(Date))),
     date_time Array(LowCardinality(Nullable(DateTime('Europe/Moscow')))),

-    str Array(LowCardinality(Nullable((String)))),
+    str Array(LowCardinality(Nullable(String))),
     fixed_string Array(LowCardinality(Nullable(FixedString(5)))),
     INDEX idx (i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, date, date_time, str, fixed_string)
         TYPE bloom_filter GRANULARITY 1)
@@ -286,7 +286,7 @@ SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(fixed_string
 DROP TABLE IF EXISTS bloom_filter_array_lc_null_types_test;

 DROP TABLE IF EXISTS bloom_filter_array_offsets_lc_str;
-CREATE TABLE bloom_filter_array_offsets_lc_str (order_key int, str Array(LowCardinality((String))), INDEX idx str TYPE bloom_filter(1.) GRANULARITY 1024) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 1024;
+CREATE TABLE bloom_filter_array_offsets_lc_str (order_key int, str Array(LowCardinality(String)), INDEX idx str TYPE bloom_filter(1.) GRANULARITY 1024) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 1024;
 INSERT INTO bloom_filter_array_offsets_lc_str SELECT number AS i, if(i%2, ['value'], []) FROM system.numbers LIMIT 10000;
 SELECT count() FROM bloom_filter_array_offsets_lc_str WHERE has(str, 'value');
 DROP TABLE IF EXISTS bloom_filter_array_offsets_lc_str;
@@ -1,3 +1,4 @@
+parseDateTimeBestEffortUS
 s a

 1970/01/02 010203Z 1970-01-02 01:02:03
@@ -16,3 +17,24 @@
 12/13/2019 2019-12-13 00:00:00
 13/12/2019 2019-12-13 00:00:00
 03/04/2019 2019-03-04 00:00:00
+parseDateTimeBestEffortUSOrZero parseDateTimeBestEffortUSOrNull
+s a b
+
+1970/01/02 010203Z 1970-01-02 01:02:03 1970-01-02 01:02:03
+01-02-2001 UTC 2001-01-02 00:00:00 2001-01-02 00:00:00
+10.23.1990 1990-10-23 00:00:00 1990-10-23 00:00:00
+01-02-2017 03:04:05+1 2017-01-02 02:04:05 2017-01-02 02:04:05
+01/02/2017 03:04:05+300 2017-01-02 00:04:05 2017-01-02 00:04:05
+01.02.2017 03:04:05GMT 2017-01-02 03:04:05 2017-01-02 03:04:05
+01-02-2017 03:04:05 MSD 2017-01-01 23:04:05 2017-01-01 23:04:05
+01-02-2017 11:04:05 AM 2017-01-02 11:04:05 2017-01-02 11:04:05
+01-02-2017 11:04:05 PM 2017-01-02 23:04:05 2017-01-02 23:04:05
+01-02-2017 12:04:05 AM 2017-01-02 00:04:05 2017-01-02 00:04:05
+01-02-2017 12:04:05 PM 2017-01-02 12:04:05 2017-01-02 12:04:05
+01.02.17 03:04:05 MSD Feb 2017-02-01 23:04:05 2017-02-01 23:04:05
+01/02/2017 03:04:05 MSK 2017-01-02 00:04:05 2017-01-02 00:04:05
+12/13/2019 2019-12-13 00:00:00 2019-12-13 00:00:00
+13/12/2019 2019-12-13 00:00:00 2019-12-13 00:00:00
+03/04/2019 2019-03-04 00:00:00 2019-03-04 00:00:00
+ 1970-01-01 00:00:00 ᴺᵁᴸᴸ
+xyz 1970-01-01 00:00:00 ᴺᵁᴸᴸ
@@ -1,3 +1,5 @@
+SELECT 'parseDateTimeBestEffortUS';
+
 SELECT
     s,
     parseDateTimeBestEffortUS(s, 'UTC') AS a
@@ -22,3 +24,32 @@ FROM
     '03/04/2019'
 ]) AS s)
 FORMAT PrettySpaceNoEscapes;
+
+SELECT 'parseDateTimeBestEffortUSOrZero', 'parseDateTimeBestEffortUSOrNull';
+SELECT
+    s,
+    parseDateTimeBestEffortUSOrZero(s, 'UTC') AS a,
+    parseDateTimeBestEffortUSOrNull(s, 'UTC') AS b
+FROM
+(
+    SELECT arrayJoin([
+    '1970/01/02 010203Z',
+    '01-02-2001 UTC',
+    '10.23.1990',
+    '01-02-2017 03:04:05+1',
+    '01/02/2017 03:04:05+300',
+    '01.02.2017 03:04:05GMT',
+    '01-02-2017 03:04:05 MSD',
+    '01-02-2017 11:04:05 AM',
+    '01-02-2017 11:04:05 PM',
+    '01-02-2017 12:04:05 AM',
+    '01-02-2017 12:04:05 PM',
+    '01.02.17 03:04:05 MSD Feb',
+    '01/02/2017 03:04:05 MSK',
+    '12/13/2019',
+    '13/12/2019',
+    '03/04/2019',
+    '',
+    'xyz'
+]) AS s)
+FORMAT PrettySpaceNoEscapes;
@@ -1,7 +1,7 @@
 DROP TABLE IF EXISTS lc_nullable;

 CREATE TABLE lc_nullable (
-    order_key Array(LowCardinality(Nullable((UInt64)))),
+    order_key Array(LowCardinality(Nullable(UInt64))),

     i8 Array(LowCardinality(Nullable(Int8))),
     i16 Array(LowCardinality(Nullable(Int16))),
@@ -14,10 +14,10 @@ CREATE TABLE lc_nullable (
     f32 Array(LowCardinality(Nullable(Float32))),
     f64 Array(LowCardinality(Nullable(Float64))),

-    date Array(LowCardinality(Nullable((Date)))),
+    date Array(LowCardinality(Nullable(Date))),
     date_time Array(LowCardinality(Nullable(DateTime('Europe/Moscow')))),

-    str Array(LowCardinality(Nullable((String)))),
+    str Array(LowCardinality(Nullable(String))),
     fixed_string Array(LowCardinality(Nullable(FixedString(5))))
 ) ENGINE = MergeTree() ORDER BY order_key;

@@ -0,0 +1 @@
+SELECT [NULL, '25.6', '-0.02', NULL], [NULL], 1024, [NULL, '10485.76', NULL, NULL], [NULL, '-922337203.6854775808', toNullable(NULL)], [NULL] FROM (SELECT [multiIf((number % 1023) = -inf, toString(number), NULL)], NULL, '-1', multiIf((number % NULL) = NULL, toString(number), ''), [NULL, NULL], multiIf((number % NULL) = 65536, toString(number), '') AS s FROM system.numbers) LIMIT 1024 format Null
@@ -0,0 +1 @@
+test
tests/queries/0_stateless/01674_clickhouse_client_query_param_cte.sh (new executable file)
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT --param_paramName="test" -q "WITH subquery AS (SELECT {paramName:String}) SELECT * FROM subquery"
@@ -0,0 +1,5 @@
+drop table if exists tab;
+create table tab (x UInt64, `arr.a` Array(UInt64), `arr.b` Array(UInt64)) engine = MergeTree order by x;
+select x from tab array join arr prewhere x != 0 where arr; -- { serverError 47; }
+select x from tab array join arr prewhere arr where x != 0; -- { serverError 47; }
+drop table if exists tab;
@@ -0,0 +1 @@
+Ok
tests/queries/0_stateless/01675_data_type_coroutine.sh (new executable file)
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+I=0
+while true
+do
+    I=$((I + 1))
+    TYPE=$(perl -e "print 'Array(' x $I; print 'UInt8'; print ')' x $I")
+    ${CLICKHOUSE_CLIENT} --max_parser_depth 1000000 --query "SELECT * FROM remote('127.0.0.{1,2}', generateRandom('x $TYPE', 1, 1, 1)) LIMIT 1 FORMAT Null" 2>&1 | grep -q -F 'Maximum parse depth' && break;
+done
+
+#echo "I = ${I}"
+echo 'Ok'
@@ -196,3 +196,5 @@
 01181_db_atomic_drop_on_cluster
 01658_test_base64Encode_mysql_compatibility
 01659_test_base64Decode_mysql_compatibility
+01675_data_type_coroutine
+01671_aggregate_function_group_bitmap_data
@@ -1,4 +1,4 @@
-/* Заметим, что запросы написаны так, как будто пользователь не понимает смысл символа _ в LIKE выражении. */
+/* Note that queries are written as the user doesn't really understand that the symbol _ has special meaning in LIKE pattern. */
 SELECT count() FROM test.hits WHERE URL LIKE '%/avtomobili_s_probegom/_%__%__%__%';
 SELECT count() FROM test.hits WHERE URL LIKE '/avtomobili_s_probegom/_%__%__%__%';
 SELECT count() FROM test.hits WHERE URL LIKE '%_/avtomobili_s_probegom/_%__%__%__%';
@@ -310,18 +310,22 @@
        "01642_if_nullable_regression",
        "01643_system_suspend",
        "01655_plan_optimizations",
-        "01475_read_subcolumns_storages"
+        "01475_read_subcolumns_storages",
+        "01674_clickhouse_client_query_param_cte"
    ],
    "parallel":
    [
        /// Pessimistic list of tests which work badly in parallel.
        /// Probably they need better investigation.
        "00062_replicated_merge_tree_alter_zookeeper",
+        "00080_show_tables_and_system_tables",
+        "00101_materialized_views_and_insert_without_explicit_database",
        "00109_shard_totals_after_having",
        "00110_external_sort",
        "00116_storage_set",
        "00121_drop_column_zookeeper",
        "00133_long_shard_memory_tracker_and_exception_safety",
+        "00158_buffer_and_nonexistent_table",
        "00180_attach_materialized_view",
        "00226_zookeeper_deduplication_and_unexpected_parts",
        "00236_replicated_drop_on_non_leader_zookeeper",
@@ -342,9 +346,12 @@
        "00571_non_exist_database_when_create_materializ_view",
        "00575_illegal_column_exception_when_drop_depen_column",
        "00599_create_view_with_subquery",
+        "00604_show_create_database",
        "00612_http_max_query_size",
        "00619_union_highlite",
        "00620_optimize_on_nonleader_replica_zookeeper",
+        "00623_truncate_table",
+        "00623_truncate_table_throw_exception",
        "00625_arrays_in_nested",
        "00626_replace_partition_from_table",
        "00626_replace_partition_from_table_zookeeper",
@@ -357,20 +364,28 @@
        "00699_materialized_view_mutations",
        "00701_rollup",
        "00715_fetch_merged_or_mutated_part_zookeeper",
+        "00716_allow_ddl",
+        "00719_parallel_ddl_db",
+        "00740_database_in_nested_view",
+        "00741_client_comment_multiline",
        "00751_default_databasename_for_view",
        "00753_alter_attach",
        "00754_alter_modify_column_partitions",
        "00754_alter_modify_order_by_replicated_zookeeper",
        "00763_long_lock_buffer_alter_destination_table",
+        "00800_versatile_storage_join",
        "00804_test_alter_compression_codecs",
        "00804_test_custom_compression_codecs",
        "00804_test_custom_compression_codes_log_storages",
        "00804_test_delta_codec_compression",
+        "00815_left_join_on_stepanel",
        "00834_cancel_http_readonly_queries_on_client_close",
        "00834_kill_mutation",
        "00834_kill_mutation_replicated_zookeeper",
        "00840_long_concurrent_select_and_drop_deadlock",
+        "00857_global_joinsavel_table_alias",
        "00899_long_attach_memory_limit",
+        "00910_buffer_prewhere",
        "00910_zookeeper_custom_compression_codecs_replicated",
        "00926_adaptive_index_granularity_merge_tree",
        "00926_adaptive_index_granularity_pk",
@@ -388,49 +403,94 @@
        "00988_constraints_replication_zookeeper",
        "00989_parallel_parts_loading",
        "00993_system_parts_race_condition_drop_zookeeper",
+        "01012_show_tables_limit",
        "01013_sync_replica_timeout_zookeeper",
+        "01014_lazy_database_basic",
        "01014_lazy_database_concurrent_recreate_reattach_and_show_tables",
        "01015_attach_part",
+        "01015_database_bad_tables",
        "01018_ddl_dictionaries_concurrent_requrests",
        "01018_ddl_dictionaries_create",
        "01018_ddl_dictionaries_select",
+        "01018_ddl_dictionaries_special",
+        "01018_dictionaries_from_dictionaries",
+        "01018_ip_dictionary",
        "01021_only_tuple_columns",
+        "01023_materialized_view_query_context",
        "01031_mutations_interpreter_and_context",
        "01033_dictionaries_lifetime",
        "01035_concurrent_move_partition_from_table_zookeeper",
+        "01036_no_superfluous_dict_reload_on_create_database",
+        "01036_no_superfluous_dict_reload_on_create_database_2",
+        "01037_polygon_dicts_correctness_all",
+        "01037_polygon_dicts_correctness_fast",
+        "01037_polygon_dicts_simple_functions",
+        "01038_dictionary_lifetime_min_zero_sec",
+        "01040_dictionary_invalidate_query_switchover_long",
+        "01041_create_dictionary_if_not_exists",
+        "01042_system_reload_dictionary_reloads_completely",
+        "01043_dictionary_attribute_properties_values",
+        "01045_dictionaries_restrictions",
        "01045_zookeeper_system_mutations_with_parts_names",
+        "01048_exists_query",
+        "01053_drop_database_mat_view",
        "01053_ssd_dictionary",
+        "01054_cache_dictionary_bunch_update",
+        "01054_cache_dictionary_overflow_cell",
        "01055_compact_parts_1",
+        "01056_create_table_as",
        "01060_avro",
        "01060_shutdown_table_after_detach",
+        "01069_database_memory",
        "01070_materialize_ttl",
        "01070_modify_ttl",
        "01070_mutations_with_dependencies",
        "01071_live_view_detach_dependency",
        "01071_prohibition_secondary_index_with_old_format_merge_tree",
        "01073_attach_if_not_exists",
+        "01073_show_tables_not_like",
+        "01076_cache_dictionary_datarace_exception_ptr",
        "01076_parallel_alter_replicated_zookeeper",
        "01079_parallel_alter_add_drop_column_zookeeper",
        "01079_parallel_alter_detach_table_zookeeper",
+        "01080_check_for_error_incorrect_size_of_nested_column",
        "01083_expressions_in_engine_arguments",
+        "01084_regexp_empty",
        "01085_max_distributed_connections_http",
        "01092_memory_profiler",
        "01098_temporary_and_external_tables",
+        "01103_distributed_product_mode_local_column_renames",
        "01107_atomic_db_detach_attach",
        "01108_restart_replicas_rename_deadlock_zookeeper",
+        "01109_exchange_tables",
        "01110_dictionary_layout_without_arguments",
+        "01113_local_dictionary_type_conversion",
        "01114_database_atomic",
+        "01114_mysql_database_engine_segfault",
+        "01115_join_with_dictionary",
+        "01125_dict_ddl_cannot_add_column",
        "01127_month_partitioning_consistency_select",
        "01130_in_memory_parts_partitons",
        "01135_default_and_alter_zookeeper",
        "01148_zookeeper_path_macros_unfolding",
+        "01150_ddl_guard_rwr",
+        "01185_create_or_replace_table",
        "01190_full_attach_syntax",
+        "01191_rename_dictionary",
+        "01192_rename_database_zookeeper",
        "01193_metadata_loading",
        "01200_mutations_memory_consumption",
+        "01224_no_superfluous_dict_reload",
+        "01225_drop_dictionary_as_table",
+        "01225_show_create_table_from_dictionary",
+        "01231_distributed_aggregation_memory_efficient_mix_levels",
+        "01232_extremes",
        "01238_http_memory_tracking",
        "01249_bad_arguments_for_bloom_filter",
        "01251_dict_is_in_infinite_loop",
+        "01254_dict_create_without_db",
        "01254_dict_load_after_detach_attach",
+        "01257_dictionary_mismatch_types",
        "01259_dictionary_custom_settings_ddl",
        "01267_alter_default_key_columns_zookeeper",
        "01268_dictionary_direct_layout",
@@ -444,18 +504,25 @@
        "01293_system_distribution_queue",
        "01294_lazy_database_concurrent",
        "01294_lazy_database_concurrent_recreate_reattach_and_show_tables",
+        "01294_system_distributed_on_cluster",
+        "01296_create_row_policy_in_current_database",
        "01305_replica_create_drop_zookeeper",
        "01307_multiple_leaders_zookeeper",
        "01318_long_unsuccessful_mutation_zookeeper",
        "01319_manual_write_to_replicas",
+        "01320_create_sync_race_condition_zookeeper",
        "01338_long_select_and_alter",
        "01338_long_select_and_alter_zookeeper",
        "01355_alter_column_with_order",
        "01355_ilike",
        "01357_version_collapsing_attach_detach_zookeeper",
        "01375_compact_parts_codecs",
+        "01376_GROUP_BY_injective_elimination_dictGet",
        "01378_alter_rename_with_ttl_zookeeper",
+        "01383_remote_ambiguous_column_shard",
        "01388_clear_all_columns",
+        "01391_join_on_dict_crash",
+        "01392_column_resolve",
        "01396_inactive_replica_cleanup_nodes_zookeeper",
        "01412_cache_dictionary_race",
        "01414_mutations_and_errors_zookeeper",
@@ -464,20 +531,41 @@
        "01417_freeze_partition_verbose",
        "01417_freeze_partition_verbose_zookeeper",
        "01430_modify_sample_by_zookeeper",
+        "01444_create_table_drop_database_race",
        "01454_storagememory_data_race_challenge",
+        "01455_rank_correlation_spearman",
        "01456_modify_column_type_via_add_drop_update",
        "01457_create_as_table_function_structure",
        "01459_manual_write_to_replicas",
        "01460_DistributedFilesToInsert",
        "01465_ttl_recompression",
+        "01470_show_databases_like",
        "01471_calculate_ttl_during_merge",
+        "01487_distributed_in_not_default_db",
        "01493_alter_remove_properties_zookeeper",
        "01493_storage_set_persistency",
        "01494_storage_join_persistency",
+        "01501_cache_dictionary_all_fields",
+        "01507_clickhouse_server_start_with_embedded_config",
+        "01509_dictionary_preallocate",
+        "01516_create_table_primary_key",
        "01516_drop_table_stress",
+        "01517_drop_mv_with_inner_table",
+        "01526_complex_key_dict_direct_layout",
+        "01527_clickhouse_local_optimize",
+        "01527_dist_sharding_key_dictGet_reload",
+        "01530_drop_database_atomic_sync",
        "01541_max_memory_usage_for_user",
-        "01646_system_restart_replicas_smoke", // system restart replicas is a global query
+        "01542_dictionary_load_exception_race",
+        "01575_disable_detach_table_of_dictionary",
        "01600_count_of_parts_metrics", // tests global system metrics
+        "01600_detach_permanently",
+        "01600_log_queries_with_extensive_info",
+        "01600_multiple_left_join_with_aliases",
+        "01601_detach_permanently",
+        "01602_show_create_view",
+        "01603_rename_overwrite_bug",
+        "01646_system_restart_replicas_smoke", // system restart replicas is a global query
        "attach",
        "ddl_dictionaries",
        "dictionary",