Merge branch 'master' of github.com:ClickHouse/ClickHouse into interactive-mode-for-clickhouse-local

This commit is contained in:
kssenii 2021-08-21 18:22:53 +03:00
commit 0afc23bfa3
107 changed files with 1824 additions and 504 deletions

View File

@ -3,7 +3,7 @@ I hereby agree to the terms of the CLA available at: https://yandex.ru/legal/cla
Changelog category (leave one):
- New Feature
- Improvement
- Bug Fix
- Bug Fix (user-visible misbehaviour in official stable or prestable release)
- Performance Improvement
- Backward Incompatible Change
- Build/Testing/Packaging Improvement

contrib/librdkafka vendored

@ -1 +1 @@
Subproject commit 43491d33ca2826531d1e3cae70d4bf1e5249e3c9
Subproject commit b8554f1682062c85ba519eb54ef2f90e02b812cb

View File

@ -173,6 +173,9 @@ def parse_env_variables(build_type, compiler, sanitizer, package_type, image_typ
cmake_flags.append('-DUSE_GTEST=1')
cmake_flags.append('-DENABLE_TESTS=1')
cmake_flags.append('-DENABLE_EXAMPLES=1')
cmake_flags.append('-DENABLE_FUZZING=1')
# For fuzzing needs
cmake_flags.append('-DUSE_YAML_CPP=1')
# Don't stop on first error to find more clang-tidy errors in one run.
result.append('NINJA_FLAGS=-k0')

View File

@ -628,9 +628,6 @@ cat analyze/errors.log >> report/errors.log ||:
cat profile-errors.log >> report/errors.log ||:
clickhouse-local --query "
-- We use decimals specifically to get fixed-point, fixed-width formatting.
set output_format_decimal_trailing_zeros = 1;
create view query_display_names as select * from
file('analyze/query-display-names.tsv', TSV,
'test text, query_index int, query_display_name text')
@ -644,6 +641,7 @@ create view partial_query_times as select * from
-- Report for partial queries that we could only run on the new server (e.g.
-- queries with new functions added in the tested PR).
create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv')
settings output_format_decimal_trailing_zeros = 1
as select toDecimal64(time_median, 3) time,
toDecimal64(time_stddev / time_median, 3) relative_time_stddev,
test, query_index, query_display_name
@ -716,8 +714,9 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
order by test, query_index, metric_name
;
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv') as
with
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv')
settings output_format_decimal_trailing_zeros = 1
as with
-- server_time is sometimes reported as zero (if it's less than 1 ms),
-- so we have to work around this to not get an error about conversion
-- of NaN to decimal.
@ -733,8 +732,9 @@ create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv') as
changed_fail, test, query_index, query_display_name
from queries where changed_show order by abs(diff) desc;
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv') as
select
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv')
settings output_format_decimal_trailing_zeros = 1
as select
toDecimal64(left, 3), toDecimal64(right, 3), toDecimal64(diff, 3),
toDecimal64(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
from queries where unstable_show order by stat_threshold desc;
@ -764,8 +764,9 @@ create view total_speedup as
from test_speedup
;
create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv') as
with
create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv')
settings output_format_decimal_trailing_zeros = 1
as with
(times_speedup >= 1
? '-' || toString(toDecimal64(times_speedup, 3)) || 'x'
: '+' || toString(toDecimal64(1 / times_speedup, 3)) || 'x')
@ -791,8 +792,9 @@ create view total_client_time_per_query as select *
from file('analyze/client-times.tsv', TSV,
'test text, query_index int, client float, server float');
create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv') as
select client, server, toDecimal64(client/server, 3) p,
create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv')
settings output_format_decimal_trailing_zeros = 1
as select client, server, toDecimal64(client/server, 3) p,
test, query_display_name
from total_client_time_per_query left join query_display_names using (test, query_index)
where p > toDecimal64(1.02, 3) order by p desc;
@ -877,8 +879,9 @@ create view test_times_view_total as
from test_times_view
;
create table test_times_report engine File(TSV, 'report/test-times.tsv') as
select
create table test_times_report engine File(TSV, 'report/test-times.tsv')
settings output_format_decimal_trailing_zeros = 1
as select
test,
toDecimal64(real, 3),
toDecimal64(total_client_time, 3),
@ -896,8 +899,9 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv') as
;
-- report for all queries page, only main metric
create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as
with
create table all_tests_report engine File(TSV, 'report/all-queries.tsv')
settings output_format_decimal_trailing_zeros = 1
as with
-- server_time is sometimes reported as zero (if it's less than 1 ms),
-- so we have to work around this to not get an error about conversion
-- of NaN to decimal.
@ -978,9 +982,6 @@ for version in {right,left}
do
rm -rf data
clickhouse-local --query "
-- We use decimals specifically to get fixed-point, fixed-width formatting.
set output_format_decimal_trailing_zeros = 1;
create view query_profiles as
with 0 as left, 1 as right
select * from file('analyze/query-profiles.tsv', TSV,
@ -1063,9 +1064,10 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
;
create table metric_devation engine File(TSVWithNamesAndTypes,
'report/metric-deviation.$version.tsv') as
'report/metric-deviation.$version.tsv')
settings output_format_decimal_trailing_zeros = 1
-- first goes the key used to split the file with grep
select test, query_index, query_display_name,
as select test, query_index, query_display_name,
toDecimal64(d, 3) d, q, metric
from (
select
@ -1176,9 +1178,6 @@ rm -rf metrics ||:
mkdir metrics
clickhouse-local --query "
-- We use decimals specifically to get fixed-point, fixed-width formatting.
set output_format_decimal_trailing_zeros = 1;
create view right_async_metric_log as
select * from file('right-async-metric-log.tsv', TSVWithNamesAndTypes,
'$(cat right-async-metric-log.tsv.columns)')
@ -1196,8 +1195,9 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
;
-- Show metrics that have changed
create table changes engine File(TSV, 'metrics/changes.tsv') as
select metric, left, right,
create table changes engine File(TSV, 'metrics/changes.tsv')
settings output_format_decimal_trailing_zeros = 1
as select metric, left, right,
toDecimal64(diff, 3), toDecimal64(times_diff, 3)
from (
select metric, median(left) as left, median(right) as right,
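The change repeated throughout this script is the same: the session-wide `set output_format_decimal_trailing_zeros = 1;` near the top of each `clickhouse-local` invocation is removed, and the setting is attached only to the report tables that need fixed-width decimal output, via a per-table `settings` clause. A minimal sketch of the before/after shape (the table name and column here are illustrative, not taken from the script):
``` sql
-- before: one session-level setting affects every statement in the script
set output_format_decimal_trailing_zeros = 1;
create table example_report engine File(TSV, 'report/example.tsv')
    as select toDecimal64(1.5, 3) value;

-- after: the setting is scoped to the individual report table
create table example_report engine File(TSV, 'report/example.tsv')
    settings output_format_decimal_trailing_zeros = 1
    as select toDecimal64(1.5, 3) value;
```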

View File

@ -105,7 +105,7 @@ We use the `Decimal` data type to store prices. Everything else is quite straightforward.
## Import Data
Upload data into ClickHouse in parallel:
Upload data into ClickHouse:
```
clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO dish FORMAT CSVWithNames" < Dish.csv

View File

@ -114,5 +114,5 @@ Seamless migration from ZooKeeper to `clickhouse-keeper` is impossible, you have
clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots
```
4. Copy the snapshot to `clickhouse-server` nodes with a configured `keeper` or start `clickhouse-keeper` instead of ZooKeeper. The snapshot must persist only on the leader node; the leader will sync it automatically to the other nodes.
4. Copy the snapshot to `clickhouse-server` nodes with a configured `keeper` or start `clickhouse-keeper` instead of ZooKeeper. The snapshot must be present on all nodes, otherwise empty nodes can be faster and one of them can become the leader.

View File

@ -1339,3 +1339,149 @@ Result:
│ 2,"good" │
└───────────────────────────────────────────┘
```
## snowflakeToDateTime {#snowflakeToDateTime}
Extracts the time from a snowflake id and returns it as a DateTime value.
**Syntax**
``` sql
snowflakeToDateTime(value [, time_zone])
```
**Parameters**
- `value` — `snowflake id`, Int64 value.
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value. Optional. [String](../../sql-reference/data-types/string.md).
**Returned value**
- value converted to the `DateTime` data type.
**Example**
Query:
``` sql
SELECT snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC');
```
Result:
``` text
┌─snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC')─┐
│ 2021-08-15 10:57:56 │
└──────────────────────────────────────────────────────────────────┘
```
## snowflakeToDateTime64 {#snowflakeToDateTime64}
Extracts the time from a snowflake id and returns it as a DateTime64 value.
**Syntax**
``` sql
snowflakeToDateTime64(value [, time_zone])
```
**Parameters**
- `value` — `snowflake id`, Int64 value.
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value. Optional. [String](../../sql-reference/data-types/string.md).
**Returned value**
- value converted to the `DateTime64` data type.
**Example**
Query:
``` sql
SELECT snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC');
```
Result:
``` text
┌─snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC')─┐
│ 2021-08-15 10:58:19.841 │
└────────────────────────────────────────────────────────────────────┘
```
## dateTimeToSnowflake {#dateTimeToSnowflake}
Converts a DateTime value to the first snowflake id at the given time.
**Syntax**
``` sql
dateTimeToSnowflake(value)
```
**Parameters**
- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md).
**Returned value**
- `value` converted to the `Int64` data type as the first snowflake id at that time.
**Example**
Query:
``` sql
WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt
SELECT dateTimeToSnowflake(dt);
```
Result:
``` text
┌─dateTimeToSnowflake(dt)─┐
│ 1426860702823350272 │
└─────────────────────────┘
```
## dateTime64ToSnowflake {#dateTime64ToSnowflake}
Converts a DateTime64 value to the first snowflake id at the given time.
**Syntax**
``` sql
dateTime64ToSnowflake(value)
```
**Parameters**
- `value` — Date and time. [DateTime64](../../sql-reference/data-types/datetime64.md).
**Returned value**
- `value` converted to the `Int64` data type as the first snowflake id at that time.
**Example**
Query:
``` sql
WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64
SELECT dateTime64ToSnowflake(dt64);
```
Result:
``` text
┌─dateTime64ToSnowflake(dt64)─┐
│ 1426860704886947840 │
└─────────────────────────────┘
```
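Taken together, `dateTimeToSnowflake` and `snowflakeToDateTime` are inverses at second precision, since the id stores milliseconds since the Twitter epoch shifted left by 22 bits. A quick round-trip check (a sketch reusing the timestamp from the examples above; it is expected to return 1):

``` sql
WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt
SELECT snowflakeToDateTime(dateTimeToSnowflake(dt), 'Asia/Shanghai') = dt;
```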

View File

@ -1,8 +1,8 @@
# 功能与Yandex的工作。梅特里卡词典 {#functions-for-working-with-yandex-metrica-dictionaries}
# 使用 Yandex.Metrica 字典函数 {#functions-for-working-with-yandex-metrica-dictionaries}
为了使下面的功能正常工作,服务器配置必须指定获取所有Yandex的路径和地址。梅特里卡字典. 字典在任何这些函数的第一次调用时加载。 如果无法加载引用列表,则会引发异常。
为了使下面的功能正常工作,服务器配置必须指定获取所有 Yandex.Metrica 字典的路径和地址。Yandex.Metrica 字典在任何这些函数的第一次调用时加载。 如果无法加载引用列表,则会引发异常。
For information about creating reference lists, see the section «Dictionaries».
有关创建引用列表的信息,请参阅 «字典» 部分.
## 多个地理基 {#multiple-geobases}
@ -17,18 +17,18 @@ ClickHouse支持同时使用多个备选地理基区域层次结构
所有字典都在运行时重新加载每隔一定数量的秒重新加载一次如builtin_dictionaries_reload_interval config参数中定义或默认情况下每小时一次。 但是,可用字典列表在服务器启动时定义一次。
All functions for working with regions have an optional argument at the end — the dictionary key. It is referred to as the geobase.
所有处理区域的函数都在末尾有一个可选参数—字典键。它被称为地基。
示例:
regionToCountry(RegionID) — Uses the default dictionary: /opt/geo/regions_hierarchy.txt
regionToCountry(RegionID, '') — Uses the default dictionary: /opt/geo/regions_hierarchy.txt
regionToCountry(RegionID, 'ua') — Uses the dictionary for the 'ua' key: /opt/geo/regions_hierarchy_ua.txt
regionToCountry(RegionID) 使用默认路径: /opt/geo/regions_hierarchy.txt
regionToCountry(RegionID, '') 使用默认路径: /opt/geo/regions_hierarchy.txt
regionToCountry(RegionID, 'ua') 使用字典中的'ua' 键: /opt/geo/regions_hierarchy_ua.txt
### ツ环板(ョツ嘉ッツ偲青regionシツ氾カツ鉄ツ工ツ渉\]) {#regiontocityid-geobase}
### regionToCity(id[, geobase]) {#regiontocityid-geobase}
Accepts a UInt32 number — the region ID from the Yandex geobase. If this region is a city or part of a city, it returns the region ID for the appropriate city. Otherwise, returns 0.
从 Yandex geobase 接收一个 UInt32 数字类型的区域ID 。如果该区域是一个城市或城市的一部分它将返回相应城市的区域ID。否则,返回0。
### 虏茅驴麓卤戮碌禄路戮鲁拢\]) {#regiontoareaid-geobase}
### regionToArea(id[, geobase]) {#regiontoareaid-geobase}
将区域转换为区域地理数据库中的类型5。 在所有其他方式,这个功能是一样的 regionToCity.
@ -84,36 +84,58 @@ LIMIT 15
│ Federation of Bosnia and Herzegovina │
└──────────────────────────────────────────────────────────┘
### 虏茅驴麓卤戮碌禄路戮鲁拢(陆毛隆隆(803)888-8325\]) {#regiontocountryid-geobase}
### regionToCountry(id[, geobase]) {#regiontocountryid-geobase}
将区域转换为国家。 在所有其他方式,这个功能是一样的 regionToCity.
示例: `regionToCountry(toUInt32(213)) = 225` 转换莫斯科213到俄罗斯225
### 掳胫((禄脢鹿脷露胫鲁隆鹿((酶-11-16""\[脪陆,ase\]) {#regiontocontinentid-geobase}
### regionToContinent(id[, geobase]) {#regiontocontinentid-geobase}
将区域转换为大陆。 在所有其他方式,这个功能是一样的 regionToCity.
示例: `regionToContinent(toUInt32(213)) = 10001` 将莫斯科213转换为欧亚大陆10001
### ツ环板(ョツ嘉ッツ偲青regionャツ静ャツ青サツ催ャツ渉\]) {#regiontopopulationid-geobase}
### regionToTopContinent (#regiontotopcontinent) {#regiontotopcontinent-regiontotopcontinent}
查找该区域层次结构中最高的大陆。
**语法**
``` sql
regionToTopContinent(id[, geobase])
```
**参数**
- `id` — Yandex geobase 的区域 ID. [UInt32](../../sql-reference/data-types/int-uint.md).
- `geobase` — 字典的建. 参阅 [Multiple Geobases](#multiple-geobases). [String](../../sql-reference/data-types/string.md). 可选.
**返回值**
- 顶级大陆的标识符(当您在区域层次结构中攀爬时,是后者)。
- 0如果没有。
类型: `UInt32`.
### regionToPopulation(id\[, geobase\]) {#regiontopopulationid-geobase}
获取区域的人口。
The population can be recorded in files with the geobase. See the section «External dictionaries».
人口可以记录在文件与地球基。请参阅«外部词典»部分。
如果没有为该区域记录人口则返回0。
在Yandex地理数据库中可能会为子区域记录人口但不会为父区域记录人口。
### regionIn(lhs,rhs\[,地理数据库\]) {#regioninlhs-rhs-geobase}
检查是否 lhs 属于一个区域 rhs 区域。 如果属于UInt8则返回等于1的数字如果不属于则返回0。
The relationship is reflexive — any region also belongs to itself.
这种关系是反射的——任何地区也属于自己。
### ツ暗ェツ氾环催ツ団ツ法ツ人\]) {#regionhierarchyid-geobase}
### regionHierarchy(id\[, geobase\]) {#regionhierarchyid-geobase}
Accepts a UInt32 number — the region ID from the Yandex geobase. Returns an array of region IDs consisting of the passed region and all parents along the chain.
从 Yandex geobase 接收一个 UInt32 数字类型的区域ID。返回一个区域ID数组由传递的区域和链上的所有父节点组成。
示例: `regionHierarchy(toUInt32(213)) = [213,1,3,225,10001,10000]`.
### 地区名称(id\[,郎\]) {#regiontonameid-lang}
### regionToName(id\[, lang\]) {#regiontonameid-lang}
Accepts a UInt32 number — the region ID from the Yandex geobase. A string with the name of the language can be passed as a second argument. Supported languages are: ru, en, ua, uk, by, kz, tr. If the second argument is omitted, the language ru is used. If the language is not supported, an exception is thrown. Returns a string — the name of the region in the corresponding language. If the region with the specified ID doesn't exist, an empty string is returned.
从 Yandex geobase 接收一个 UInt32 数字类型的区域ID。带有语言名称的字符串可以作为第二个参数传递。支持的语言有:ru, en, ua, uk, by, kz, tr。如果省略第二个参数则使用' ru '语言。如果不支持该语言,则抛出异常。返回一个字符串-对应语言的区域名称。如果指定ID的区域不存在则返回一个空字符串。
`ua``uk` 都意味着乌克兰。

View File

@ -92,6 +92,7 @@ namespace ErrorCodes
extern const int TOO_DEEP_RECURSION;
extern const int NETWORK_ERROR;
extern const int UNRECOGNIZED_ARGUMENTS;
extern const int AUTHENTICATION_FAILED;
}
@ -572,31 +573,50 @@ void Client::connect()
<< connection_parameters.host << ":" << connection_parameters.port
<< (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl;
connection = std::make_unique<Connection>(
connection_parameters.host,
connection_parameters.port,
connection_parameters.default_database,
connection_parameters.user,
connection_parameters.password,
"", /* cluster */
"", /* cluster_secret */
"client",
connection_parameters.compression,
connection_parameters.security);
String server_name;
UInt64 server_version_major = 0;
UInt64 server_version_minor = 0;
UInt64 server_version_patch = 0;
if (max_client_network_bandwidth)
try
{
ThrottlerPtr throttler = std::make_shared<Throttler>(max_client_network_bandwidth, 0, "");
connection->setThrottler(throttler);
}
connection = std::make_unique<Connection>(
connection_parameters.host,
connection_parameters.port,
connection_parameters.default_database,
connection_parameters.user,
connection_parameters.password,
"", /* cluster */
"", /* cluster_secret */
"client",
connection_parameters.compression,
connection_parameters.security);
connection->getServerVersion(
connection_parameters.timeouts, server_name, server_version_major, server_version_minor, server_version_patch, server_revision);
if (max_client_network_bandwidth)
{
ThrottlerPtr throttler = std::make_shared<Throttler>(max_client_network_bandwidth, 0, "");
connection->setThrottler(throttler);
}
connection->getServerVersion(
connection_parameters.timeouts, server_name, server_version_major, server_version_minor, server_version_patch, server_revision);
}
catch (const Exception & e)
{
/// It is typical when users install ClickHouse, type some password and instantly forget it.
if ((connection_parameters.user.empty() || connection_parameters.user == "default")
&& e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
{
std::cerr << std::endl
<< "If you have installed ClickHouse and forgot password you can reset it in the configuration file." << std::endl
<< "The password for default user is typically located at /etc/clickhouse-server/users.d/default-password.xml" << std::endl
<< "and deleting this file will reset the password." << std::endl
<< "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed." << std::endl
<< std::endl;
}
throw;
}
server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch);

View File

@ -54,7 +54,6 @@
#include <Interpreters/ExternalLoaderXMLConfigRepository.h>
#include <Interpreters/InterserverCredentials.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Interpreters/Session.h>
#include <Access/AccessControlManager.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/System/attachSystemTables.h>
@ -1431,7 +1430,6 @@ if (ThreadFuzzer::instance().isEffective())
/// Must be done after initialization of `servers`, because async_metrics will access `servers` variable from its thread.
async_metrics.start();
Session::startupNamedSessions();
{
String level_str = config().getString("text_log.level", "");

View File

@ -304,7 +304,7 @@ size_t ColumnUnique<ColumnType>::uniqueInsert(const Field & x)
if (x.getType() == Field::Types::Null)
return getNullValueIndex();
if (isNumeric())
if (valuesHaveFixedSize())
return uniqueInsertData(&x.reinterpret<char>(), size_of_value_if_fixed);
auto & val = x.get<String>();

View File

@ -80,8 +80,3 @@ target_link_libraries (average PRIVATE clickhouse_common_io)
add_executable (shell_command_inout shell_command_inout.cpp)
target_link_libraries (shell_command_inout PRIVATE clickhouse_common_io)
if (ENABLE_FUZZING)
add_executable(YAML_fuzzer YAML_fuzzer.cpp ${SRCS})
target_link_libraries(YAML_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})
endif ()

View File

@ -1,39 +0,0 @@
#include <iostream>
#include <fstream>
#include <string>
#include <cstdio>
#include <time.h>
#include <filesystem>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
/// How to test:
/// build ClickHouse with YAML_fuzzer.cpp
/// ./YAML_fuzzer YAML_CORPUS
/// where YAML_CORPUS is a directory with different YAML configs for libfuzzer
char file_name[L_tmpnam];
if (!std::tmpnam(file_name))
{
std::cerr << "Cannot create temp file!\n";
return 1;
}
std::string input = std::string(reinterpret_cast<const char*>(data), size);
DB::YAMLParser parser;
{
std::ofstream temp_file(file_name);
temp_file << input;
}
try
{
DB::YAMLParser::parse(std::string(file_name));
}
catch (...)
{
std::cerr << "YAML_fuzzer failed: " << getCurrentExceptionMessage() << std::endl;
return 1;
}
return 0;
}

View File

@ -1,3 +1,18 @@
if(ENABLE_EXAMPLES)
if (ENABLE_FUZZING)
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
add_headers_and_sources(fuzz_compression .)
# Remove this file, because it has dependencies on DataTypes
list(REMOVE_ITEM ${fuzz_compression_sources} CompressionFactoryAdditions.cpp)
add_library(fuzz_compression ${fuzz_compression_headers} ${fuzz_compression_sources})
target_link_libraries(fuzz_compression PUBLIC clickhouse_parsers clickhouse_common_io common lz4)
endif()
if (ENABLE_EXAMPLES)
add_subdirectory(examples)
endif()
if (ENABLE_FUZZING)
add_subdirectory(fuzzers)
endif()

View File

@ -22,13 +22,10 @@ namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int UNKNOWN_CODEC;
extern const int BAD_ARGUMENTS;
extern const int UNEXPECTED_AST_STRUCTURE;
extern const int DATA_TYPE_CANNOT_HAVE_ARGUMENTS;
}
static constexpr auto DEFAULT_CODEC_NAME = "Default";
CompressionCodecPtr CompressionCodecFactory::getDefaultCodec() const
{
return default_codec;
@ -49,184 +46,6 @@ CompressionCodecPtr CompressionCodecFactory::get(const String & family_name, std
}
}
void CompressionCodecFactory::validateCodec(
const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs) const
{
if (family_name.empty())
throw Exception("Compression codec name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
if (level)
{
auto literal = std::make_shared<ASTLiteral>(static_cast<UInt64>(*level));
validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)),
{}, sanity_check, allow_experimental_codecs);
}
else
{
auto identifier = std::make_shared<ASTIdentifier>(Poco::toUpper(family_name));
validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier),
{}, sanity_check, allow_experimental_codecs);
}
}
ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
const ASTPtr & ast, const IDataType * column_type, bool sanity_check, bool allow_experimental_codecs) const
{
if (const auto * func = ast->as<ASTFunction>())
{
ASTPtr codecs_descriptions = std::make_shared<ASTExpressionList>();
bool is_compression = false;
bool has_none = false;
std::optional<size_t> generic_compression_codec_pos;
std::set<size_t> post_processing_codecs;
bool can_substitute_codec_arguments = true;
for (size_t i = 0, size = func->arguments->children.size(); i < size; ++i)
{
const auto & inner_codec_ast = func->arguments->children[i];
String codec_family_name;
ASTPtr codec_arguments;
if (const auto * family_name = inner_codec_ast->as<ASTIdentifier>())
{
codec_family_name = family_name->name();
codec_arguments = {};
}
else if (const auto * ast_func = inner_codec_ast->as<ASTFunction>())
{
codec_family_name = ast_func->name;
codec_arguments = ast_func->arguments;
}
else
throw Exception("Unexpected AST element for compression codec", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
/// Default codec replaced with current default codec which may depend on different
/// settings (and properties of data) in runtime.
CompressionCodecPtr result_codec;
if (codec_family_name == DEFAULT_CODEC_NAME)
{
if (codec_arguments != nullptr)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"{} codec cannot have any arguments, it's just an alias for codec specified in config.xml", DEFAULT_CODEC_NAME);
result_codec = default_codec;
codecs_descriptions->children.emplace_back(std::make_shared<ASTIdentifier>(DEFAULT_CODEC_NAME));
}
else
{
if (column_type)
{
CompressionCodecPtr prev_codec;
IDataType::StreamCallbackWithType callback = [&](
const ISerialization::SubstreamPath & substream_path, const IDataType & substream_type)
{
if (ISerialization::isSpecialCompressionAllowed(substream_path))
{
result_codec = getImpl(codec_family_name, codec_arguments, &substream_type);
/// Case for column Tuple, which compressed with codec which depends on data type, like Delta.
/// We cannot substitute parameters for such codecs.
if (prev_codec && prev_codec->getHash() != result_codec->getHash())
can_substitute_codec_arguments = false;
prev_codec = result_codec;
}
};
ISerialization::SubstreamPath stream_path;
column_type->enumerateStreams(column_type->getDefaultSerialization(), callback, stream_path);
if (!result_codec)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find any substream with data type for type {}. It's a bug", column_type->getName());
}
else
{
result_codec = getImpl(codec_family_name, codec_arguments, nullptr);
}
if (!allow_experimental_codecs && result_codec->isExperimental())
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Codec {} is experimental and not meant to be used in production."
" You can enable it with the 'allow_experimental_codecs' setting.",
codec_family_name);
codecs_descriptions->children.emplace_back(result_codec->getCodecDesc());
}
is_compression |= result_codec->isCompression();
has_none |= result_codec->isNone();
if (!generic_compression_codec_pos && result_codec->isGenericCompression())
generic_compression_codec_pos = i;
if (result_codec->isPostProcessing())
post_processing_codecs.insert(i);
}
String codec_description = queryToString(codecs_descriptions);
if (sanity_check)
{
if (codecs_descriptions->children.size() > 1 && has_none)
throw Exception(
"It does not make sense to have codec NONE along with other compression codecs: " + codec_description
+ ". (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).",
ErrorCodes::BAD_ARGUMENTS);
/// Allow to explicitly specify single NONE codec if user don't want any compression.
/// But applying other transformations solely without compression (e.g. Delta) does not make sense.
/// It's okay to apply post-processing codecs solely without anything else.
if (!is_compression && !has_none && post_processing_codecs.size() != codecs_descriptions->children.size())
throw Exception(
"Compression codec " + codec_description
+ " does not compress anything."
" You may want to add generic compression algorithm after other transformations, like: "
+ codec_description
+ ", LZ4."
" (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).",
ErrorCodes::BAD_ARGUMENTS);
/// It does not make sense to apply any non-post-processing codecs
/// after post-processing one.
if (!post_processing_codecs.empty() &&
*post_processing_codecs.begin() != codecs_descriptions->children.size() - post_processing_codecs.size())
throw Exception("The combination of compression codecs " + codec_description + " is meaningless,"
" because it does not make sense to apply any non-post-processing codecs after"
" post-processing ones. (Note: you can enable setting 'allow_suspicious_codecs'"
" to skip this check).", ErrorCodes::BAD_ARGUMENTS);
/// It does not make sense to apply any transformations after generic compression algorithm
/// So, generic compression can be only one and only at the end.
if (generic_compression_codec_pos &&
*generic_compression_codec_pos != codecs_descriptions->children.size() - 1 - post_processing_codecs.size())
throw Exception("The combination of compression codecs " + codec_description + " is meaningless,"
" because it does not make sense to apply any transformations after generic compression algorithm."
" (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).", ErrorCodes::BAD_ARGUMENTS);
}
/// For columns with nested types like Tuple(UInt32, UInt64) we
/// obviously cannot substitute parameters for codecs which depend on
/// data type, because for the first column Delta(4) is suitable and
/// Delta(8) for the second. So we should leave codec description as is
/// and deduce them in get method for each subtype separately. For all
/// other types it's better to substitute parameters, for better
/// readability and backward compatibility.
if (can_substitute_codec_arguments)
{
std::shared_ptr<ASTFunction> result = std::make_shared<ASTFunction>();
result->name = "CODEC";
result->arguments = codecs_descriptions;
return result;
}
else
{
return ast;
}
}
throw Exception("Unknown codec family: " + queryToString(ast), ErrorCodes::UNKNOWN_CODEC);
}
CompressionCodecPtr CompressionCodecFactory::get(
const ASTPtr & ast, const IDataType * column_type, CompressionCodecPtr current_default, bool only_generic) const

View File

@ -14,6 +14,8 @@
namespace DB
{
static constexpr auto DEFAULT_CODEC_NAME = "Default";
class ICompressionCodec;
using CompressionCodecPtr = std::shared_ptr<ICompressionCodec>;

View File

@ -0,0 +1,214 @@
/**
* This file contains part of the CompressionCodecFactory method definitions and
* is needed only because they have dependencies on DataTypes.
* They are not useful for fuzzers, so we leave them in another translation unit.
*/
#include <Compression/CompressionFactory.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/parseQuery.h>
#include <Parsers/queryToString.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/NestedUtils.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeNested.h>
#include <Common/Exception.h>
namespace DB
{
namespace ErrorCodes
{
extern const int UNEXPECTED_AST_STRUCTURE;
extern const int UNKNOWN_CODEC;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
}
void CompressionCodecFactory::validateCodec(
const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs) const
{
if (family_name.empty())
throw Exception("Compression codec name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
if (level)
{
auto literal = std::make_shared<ASTLiteral>(static_cast<UInt64>(*level));
validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)),
{}, sanity_check, allow_experimental_codecs);
}
else
{
auto identifier = std::make_shared<ASTIdentifier>(Poco::toUpper(family_name));
validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier),
{}, sanity_check, allow_experimental_codecs);
}
}
ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
const ASTPtr & ast, const IDataType * column_type, bool sanity_check, bool allow_experimental_codecs) const
{
if (const auto * func = ast->as<ASTFunction>())
{
ASTPtr codecs_descriptions = std::make_shared<ASTExpressionList>();
bool is_compression = false;
bool has_none = false;
std::optional<size_t> generic_compression_codec_pos;
std::set<size_t> post_processing_codecs;
bool can_substitute_codec_arguments = true;
for (size_t i = 0, size = func->arguments->children.size(); i < size; ++i)
{
const auto & inner_codec_ast = func->arguments->children[i];
String codec_family_name;
ASTPtr codec_arguments;
if (const auto * family_name = inner_codec_ast->as<ASTIdentifier>())
{
codec_family_name = family_name->name();
codec_arguments = {};
}
else if (const auto * ast_func = inner_codec_ast->as<ASTFunction>())
{
codec_family_name = ast_func->name;
codec_arguments = ast_func->arguments;
}
else
throw Exception("Unexpected AST element for compression codec", ErrorCodes::UNEXPECTED_AST_STRUCTURE);
/// Default codec replaced with current default codec which may depend on different
/// settings (and properties of data) in runtime.
CompressionCodecPtr result_codec;
if (codec_family_name == DEFAULT_CODEC_NAME)
{
if (codec_arguments != nullptr)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"{} codec cannot have any arguments, it's just an alias for codec specified in config.xml", DEFAULT_CODEC_NAME);
result_codec = default_codec;
codecs_descriptions->children.emplace_back(std::make_shared<ASTIdentifier>(DEFAULT_CODEC_NAME));
}
else
{
if (column_type)
{
CompressionCodecPtr prev_codec;
IDataType::StreamCallbackWithType callback = [&](
const ISerialization::SubstreamPath & substream_path, const IDataType & substream_type)
{
if (ISerialization::isSpecialCompressionAllowed(substream_path))
{
result_codec = getImpl(codec_family_name, codec_arguments, &substream_type);
/// Case for column Tuple, which compressed with codec which depends on data type, like Delta.
/// We cannot substitute parameters for such codecs.
if (prev_codec && prev_codec->getHash() != result_codec->getHash())
can_substitute_codec_arguments = false;
prev_codec = result_codec;
}
};
ISerialization::SubstreamPath stream_path;
column_type->enumerateStreams(column_type->getDefaultSerialization(), callback, stream_path);
if (!result_codec)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find any substream with data type for type {}. It's a bug", column_type->getName());
}
else
{
result_codec = getImpl(codec_family_name, codec_arguments, nullptr);
}
if (!allow_experimental_codecs && result_codec->isExperimental())
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Codec {} is experimental and not meant to be used in production."
" You can enable it with the 'allow_experimental_codecs' setting.",
codec_family_name);
codecs_descriptions->children.emplace_back(result_codec->getCodecDesc());
}
is_compression |= result_codec->isCompression();
has_none |= result_codec->isNone();
if (!generic_compression_codec_pos && result_codec->isGenericCompression())
generic_compression_codec_pos = i;
if (result_codec->isPostProcessing())
post_processing_codecs.insert(i);
}
String codec_description = queryToString(codecs_descriptions);
if (sanity_check)
{
if (codecs_descriptions->children.size() > 1 && has_none)
throw Exception(
"It does not make sense to have codec NONE along with other compression codecs: " + codec_description
+ ". (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).",
ErrorCodes::BAD_ARGUMENTS);
/// Allow to explicitly specify single NONE codec if user don't want any compression.
/// But applying other transformations solely without compression (e.g. Delta) does not make sense.
/// It's okay to apply post-processing codecs solely without anything else.
if (!is_compression && !has_none && post_processing_codecs.size() != codecs_descriptions->children.size())
throw Exception(
"Compression codec " + codec_description
+ " does not compress anything."
" You may want to add generic compression algorithm after other transformations, like: "
+ codec_description
+ ", LZ4."
" (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).",
ErrorCodes::BAD_ARGUMENTS);
/// It does not make sense to apply any non-post-processing codecs
/// after post-processing one.
if (!post_processing_codecs.empty() &&
*post_processing_codecs.begin() != codecs_descriptions->children.size() - post_processing_codecs.size())
throw Exception("The combination of compression codecs " + codec_description + " is meaningless,"
" because it does not make sense to apply any non-post-processing codecs after"
" post-processing ones. (Note: you can enable setting 'allow_suspicious_codecs'"
" to skip this check).", ErrorCodes::BAD_ARGUMENTS);
/// It does not make sense to apply any transformations after generic compression algorithm
/// So, generic compression can be only one and only at the end.
if (generic_compression_codec_pos &&
*generic_compression_codec_pos != codecs_descriptions->children.size() - 1 - post_processing_codecs.size())
throw Exception("The combination of compression codecs " + codec_description + " is meaningless,"
" because it does not make sense to apply any transformations after generic compression algorithm."
" (Note: you can enable setting 'allow_suspicious_codecs' to skip this check).", ErrorCodes::BAD_ARGUMENTS);
}
/// For columns with nested types like Tuple(UInt32, UInt64) we
/// obviously cannot substitute parameters for codecs which depend on
/// data type, because for the first column Delta(4) is suitable and
/// Delta(8) for the second. So we should leave codec description as is
/// and deduce them in get method for each subtype separately. For all
/// other types it's better to substitute parameters, for better
/// readability and backward compatibility.
if (can_substitute_codec_arguments)
{
std::shared_ptr<ASTFunction> result = std::make_shared<ASTFunction>();
result->name = "CODEC";
result->arguments = codecs_descriptions;
return result;
}
else
{
return ast;
}
}
throw Exception("Unknown codec family: " + queryToString(ast), ErrorCodes::UNKNOWN_CODEC);
}
}
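The sanity checks above are what reject meaningless codec chains at `CREATE TABLE` time. A hedged illustration of declarations that pass and fail those checks (table and column names are made up for the example):

``` sql
-- Accepted: a transformation followed by a generic compression codec.
CREATE TABLE codec_ok (x UInt64 CODEC(Delta(8), LZ4)) ENGINE = MergeTree ORDER BY x;

-- Rejected by the sanity check with the default allow_suspicious_codecs = 0,
-- because Delta alone "does not compress anything".
CREATE TABLE codec_suspicious (x UInt64 CODEC(Delta(8))) ENGINE = MergeTree ORDER BY x;
```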

View File

@ -439,11 +439,14 @@ bool NO_INLINE decompressImpl(
{
s = *ip++;
length += s;
} while (unlikely(s == 255));
} while (unlikely(s == 255 && ip < input_end));
};
/// Get literal length.
if (unlikely(ip >= input_end))
return false;
const unsigned token = *ip++;
length = token >> 4;
if (length == 0x0F)
@ -464,18 +467,18 @@ bool NO_INLINE decompressImpl(
/// output: xyzHello, w
/// ^-op (we will overwrite excessive bytes on next iteration)
{
auto * target = std::min(copy_end, output_end);
wildCopy<copy_amount>(op, ip, target); /// Here we can write up to copy_amount - 1 bytes after buffer.
if (unlikely(copy_end > output_end))
return false;
if (target == output_end)
return true;
}
wildCopy<copy_amount>(op, ip, copy_end); /// Here we can write up to copy_amount - 1 bytes after buffer.
if (copy_end == output_end)
return true;
ip += length;
op = copy_end;
if (unlikely(ip > input_end))
if (unlikely(ip + 1 >= input_end))
return false;
/// Get match offset.
@ -528,8 +531,9 @@ bool NO_INLINE decompressImpl(
copy<copy_amount>(op, match); /// copy_amount + copy_amount - 1 - 4 * 2 bytes after buffer.
if (length > copy_amount * 2)
{
auto * target = std::min(copy_end, output_end);
wildCopy<copy_amount>(op + copy_amount, match + copy_amount, target);
if (unlikely(copy_end > output_end))
return false;
wildCopy<copy_amount>(op + copy_amount, match + copy_amount, copy_end);
}
op = copy_end;

View File

@ -3,8 +3,3 @@ target_link_libraries (compressed_buffer PRIVATE dbms)
add_executable (cached_compressed_read_buffer cached_compressed_read_buffer.cpp)
target_link_libraries (cached_compressed_read_buffer PRIVATE dbms)
if (ENABLE_FUZZING)
add_executable (compressed_buffer_fuzzer compressed_buffer_fuzzer.cpp)
target_link_libraries (compressed_buffer_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
endif ()

View File

@ -0,0 +1,2 @@
add_executable (compressed_buffer_fuzzer compressed_buffer_fuzzer.cpp)
target_link_libraries (compressed_buffer_fuzzer PRIVATE fuzz_compression clickhouse_common_io ${LIB_FUZZING_ENGINE})

View File

@ -1,3 +1,7 @@
if (ENABLE_EXAMPLES)
add_subdirectory(examples)
endif ()
if (ENABLE_FUZZING)
add_subdirectory(fuzzers)
endif()

View File

@ -110,7 +110,7 @@ void insertPostgreSQLValue(
readDateTime64Text(time, 6, in, assert_cast<const DataTypeDateTime64 *>(data_type.get())->getTimeZone());
if (time < 0)
time = 0;
assert_cast<ColumnDecimal<Decimal64> &>(column).insertValue(time);
assert_cast<DataTypeDateTime64::ColumnType &>(column).insertValue(time);
break;
}
case ExternalResultDescription::ValueType::vtDecimal32: [[fallthrough]];

View File

@ -114,6 +114,7 @@ class IColumn;
M(UInt64, group_by_two_level_threshold_bytes, 50000000, "From what size of the aggregation state in bytes, a two-level aggregation begins to be used. 0 - the threshold is not set. Two-level aggregation is used when at least one of the thresholds is triggered.", 0) \
M(Bool, distributed_aggregation_memory_efficient, true, "Is the memory-saving mode of distributed aggregation enabled.", 0) \
M(UInt64, aggregation_memory_efficient_merge_threads, 0, "Number of threads to use for merge intermediate aggregation results in memory efficient mode. When bigger, then more memory is consumed. 0 means - same as 'max_threads'.", 0) \
M(Bool, enable_positional_arguments, false, "Enable positional arguments in ORDER BY, GROUP BY and LIMIT BY", 0) \
\
M(UInt64, max_parallel_replicas, 1, "The maximum number of replicas of each shard used when the query is executed. For consistency (to get different parts of the same partition), this option only works for the specified sampling key. The lag of the replicas is not controlled.", 0) \
M(UInt64, parallel_replicas_count, 0, "", 0) \
@ -252,6 +253,7 @@ class IColumn;
M(Bool, use_index_for_in_with_subqueries, true, "Try using an index if there is a subquery or a table expression on the right side of the IN operator.", 0) \
M(Bool, joined_subquery_requires_alias, true, "Force joined subqueries and table functions to have aliases for correct name qualification.", 0) \
M(Bool, empty_result_for_aggregation_by_empty_set, false, "Return empty result when aggregating without keys on empty set.", 0) \
M(Bool, empty_result_for_aggregation_by_constant_keys_on_empty_set, true, "Return empty result when aggregating by constant keys on empty set.", 0) \
M(Bool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to executed distributed DDL queries.", 0) \
M(Bool, allow_suspicious_codecs, false, "If it is set to true, allow to specify meaningless compression codecs.", 0) \
M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \
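The new `enable_positional_arguments` setting added above lets `GROUP BY`, `ORDER BY` and `LIMIT BY` refer to SELECT-list entries by their 1-based position; in this first version only plain column references in the SELECT list can be addressed this way (see the `checkPositionalArgument` helper later in this commit). A minimal usage sketch against a throwaway table:

``` sql
SET enable_positional_arguments = 1;

CREATE TABLE t (a UInt32, b UInt32) ENGINE = Memory;
INSERT INTO t VALUES (1, 10), (1, 20), (2, 30);

-- 1 refers to the first SELECT expression (a), 2 to the second (b).
SELECT a, max(b) FROM t GROUP BY 1;
SELECT a, b FROM t ORDER BY 2 DESC LIMIT 1 BY 1;
```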

View File

@ -8,11 +8,6 @@ target_link_libraries (field PRIVATE dbms)
add_executable (string_ref_hash string_ref_hash.cpp)
target_link_libraries (string_ref_hash PRIVATE clickhouse_common_io)
if (ENABLE_FUZZING)
add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp)
target_link_libraries (names_and_types_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
endif ()
add_executable (mysql_protocol mysql_protocol.cpp)
target_link_libraries (mysql_protocol PRIVATE dbms)
if(USE_SSL)

View File

@ -0,0 +1,2 @@
add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp)
target_link_libraries (names_and_types_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})

View File

@ -26,23 +26,6 @@ namespace ErrorCodes
IDataType::~IDataType() = default;
String IDataType::getName() const
{
if (custom_name)
{
return custom_name->getName();
}
else
{
return doGetName();
}
}
String IDataType::doGetName() const
{
return getFamilyName();
}
void IDataType::updateAvgValueSizeHint(const IColumn & column, double & avg_value_size_hint)
{
/// Update the average value size hint if amount of read rows isn't too small

View File

@ -62,7 +62,13 @@ public:
/// static constexpr bool is_parametric = false;
/// Name of data type (examples: UInt64, Array(String)).
String getName() const;
String getName() const
{
if (custom_name)
return custom_name->getName();
else
return doGetName();
}
/// Name of data type family (example: FixedString, Array).
virtual const char * getFamilyName() const = 0;
@ -105,7 +111,7 @@ public:
void enumerateStreams(const SerializationPtr & serialization, const StreamCallbackWithType & callback) const { enumerateStreams(serialization, callback, {}); }
protected:
virtual String doGetName() const;
virtual String doGetName() const { return getFamilyName(); }
virtual SerializationPtr doGetDefaultSerialization() const = 0;
DataTypePtr getTypeForSubstream(const ISerialization::SubstreamPath & substream_path) const;

View File

@ -161,9 +161,6 @@ DictionaryStructure::DictionaryStructure(const Poco::Util::AbstractConfiguration
}
}
if (attributes.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Dictionary has no attributes defined");
if (config.getBool(config_prefix + ".layout.ip_trie.access_to_key_from_attributes", false))
access_to_key_from_attributes = true;
}

View File

@ -496,9 +496,6 @@ void checkAST(const ASTCreateQuery & query)
if (!query.is_dictionary || query.dictionary == nullptr)
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Cannot convert dictionary to configuration from non-dictionary AST.");
if (query.dictionary_attributes_list == nullptr || query.dictionary_attributes_list->children.empty())
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Cannot create dictionary with empty attributes list");
if (query.dictionary->layout == nullptr)
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Cannot create dictionary with empty layout");
@ -512,8 +509,6 @@ void checkAST(const ASTCreateQuery & query)
if (query.dictionary->source == nullptr)
throw Exception(ErrorCodes::INCORRECT_DICTIONARY_DEFINITION, "Cannot create dictionary with empty source");
/// Range can be empty
}
void checkPrimaryKey(const NamesToTypeNames & all_attrs, const Names & key_attrs)

View File

@ -0,0 +1,207 @@
#pragma once
#include <Functions/extractTimeZoneFromFunctionArguments.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionHelpers.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnsNumber.h>
#include <common/arithmeticOverflow.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
/** According to Twitter's post on Snowflake, we can extract the timestamp for a snowflake ID by right shifting
* the snowflake ID by 22 bits (10 bits machine ID and 12 bits sequence ID) and adding the Twitter epoch time of 1288834974657.
* https://en.wikipedia.org/wiki/Snowflake_ID
* https://blog.twitter.com/engineering/en_us/a/2010/announcing-snowflake
* https://ws-dl.blogspot.com/2019/08/2019-08-03-tweetedat-finding-tweet.html
*/
static constexpr long snowflake_epoch = 1288834974657L;
static constexpr int time_shift = 22;
class FunctionDateTimeToSnowflake : public IFunction
{
private:
const char * name;
public:
FunctionDateTimeToSnowflake(const char * name_) : name(name_) { }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 1; }
bool isVariadic() const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (!isDateTime(arguments[0].type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The only argument for function {} must be DateTime", name);
return std::make_shared<DataTypeInt64>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto & src = arguments[0];
const auto & col = *src.column;
auto res_column = ColumnInt64::create(input_rows_count);
auto & result_data = res_column->getData();
const auto & source_data = typeid_cast<const ColumnUInt32 &>(col).getData();
for (size_t i = 0; i < input_rows_count; ++i)
{
result_data[i] = (Int64(source_data[i]) * 1000 - snowflake_epoch) << time_shift;
}
return res_column;
}
};
class FunctionSnowflakeToDateTime : public IFunction
{
private:
const char * name;
public:
FunctionSnowflakeToDateTime(const char * name_) : name(name_) { }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 0; }
bool isVariadic() const override { return true; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() < 1 || arguments.size() > 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes one or two arguments", name);
if (!typeid_cast<const DataTypeInt64 *>(arguments[0].type.get()))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The first argument for function {} must be Int64", name);
std::string timezone;
if (arguments.size() == 2)
timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0);
return std::make_shared<DataTypeDateTime>(timezone);
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto & src = arguments[0];
const auto & col = *src.column;
auto res_column = ColumnUInt32::create(input_rows_count);
auto & result_data = res_column->getData();
const auto & source_data = typeid_cast<const ColumnInt64 &>(col).getData();
for (size_t i = 0; i < input_rows_count; ++i)
{
result_data[i] = ((source_data[i] >> time_shift) + snowflake_epoch) / 1000;
}
return res_column;
}
};
class FunctionDateTime64ToSnowflake : public IFunction
{
private:
const char * name;
public:
FunctionDateTime64ToSnowflake(const char * name_) : name(name_) { }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 1; }
bool isVariadic() const override { return false; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (!isDateTime64(arguments[0].type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The only argument for function {} must be DateTime64", name);
return std::make_shared<DataTypeInt64>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto & src = arguments[0];
const auto & col = *src.column;
auto res_column = ColumnInt64::create(input_rows_count);
auto & result_data = res_column->getData();
const auto & source_data = typeid_cast<const ColumnDecimal<DateTime64> &>(col).getData();
for (size_t i = 0; i < input_rows_count; ++i)
{
result_data[i] = (source_data[i] - snowflake_epoch) << time_shift;
}
return res_column;
}
};
class FunctionSnowflakeToDateTime64 : public IFunction
{
private:
const char * name;
public:
FunctionSnowflakeToDateTime64(const char * name_) : name(name_) { }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 0; }
bool isVariadic() const override { return true; }
bool useDefaultImplementationForConstants() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() < 1 || arguments.size() > 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} takes one or two arguments", name);
if (!typeid_cast<const DataTypeInt64 *>(arguments[0].type.get()))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "The first argument for function {} must be Int64", name);
std::string timezone;
if (arguments.size() == 2)
timezone = extractTimeZoneNameFromFunctionArguments(arguments, 1, 0);
return std::make_shared<DataTypeDateTime64>(3, timezone);
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
const auto & src = arguments[0];
const auto & col = *src.column;
auto res_column = ColumnDecimal<DateTime64>::create(input_rows_count, 3);
auto & result_data = res_column->getData();
const auto & source_data = typeid_cast<const ColumnInt64 &>(col).getData();
for (size_t i = 0; i < input_rows_count; ++i)
{
result_data[i] = (source_data[i] >> time_shift) + snowflake_epoch;
}
return res_column;
}
};
}
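All four function classes above implement the arithmetic from the header comment: shift the id right by 22 bits, add the Twitter epoch in milliseconds, and divide by 1000 for second precision (or keep milliseconds for DateTime64). A hedged sanity check of that arithmetic expressed with generic SQL functions, which should agree with the `snowflakeToDateTime` example in the documentation above (2021-08-15 10:57:56 UTC):

``` sql
SELECT toDateTime(
    intDiv(bitShiftRight(CAST('1426860702823350272', 'Int64'), 22) + 1288834974657, 1000),
    'UTC');
```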

View File

@ -51,6 +51,7 @@ void registerFunctionBitHammingDistance(FunctionFactory & factory);
void registerFunctionTupleHammingDistance(FunctionFactory & factory);
void registerFunctionsStringHash(FunctionFactory & factory);
void registerFunctionValidateNestedArraySizes(FunctionFactory & factory);
void registerFunctionsSnowflake(FunctionFactory & factory);
#if !defined(ARCADIA_BUILD)
void registerFunctionBayesAB(FunctionFactory &);
#endif
@ -115,6 +116,7 @@ void registerFunctions()
registerFunctionTupleHammingDistance(factory);
registerFunctionsStringHash(factory);
registerFunctionValidateNestedArraySizes(factory);
registerFunctionsSnowflake(factory);
#if !defined(ARCADIA_BUILD)
registerFunctionBayesAB(factory);

View File

@ -0,0 +1,22 @@
namespace DB
{
class FunctionFactory;
void registerDateTimeToSnowflake(FunctionFactory &);
void registerSnowflakeToDateTime(FunctionFactory &);
void registerDateTime64ToSnowflake(FunctionFactory &);
void registerSnowflakeToDateTime64(FunctionFactory &);
void registerFunctionsSnowflake(FunctionFactory & factory)
{
registerDateTimeToSnowflake(factory);
registerSnowflakeToDateTime(factory);
registerDateTime64ToSnowflake(factory);
registerSnowflakeToDateTime64(factory);
}
}

View File

@ -0,0 +1,34 @@
#include <Functions/FunctionSnowflake.h>
#include <Functions/FunctionFactory.h>
namespace DB
{
void registerDateTimeToSnowflake(FunctionFactory & factory)
{
factory.registerFunction("dateTimeToSnowflake",
[](ContextPtr){ return std::make_unique<FunctionToOverloadResolverAdaptor>(
std::make_shared<FunctionDateTimeToSnowflake>("dateTimeToSnowflake")); });
}
void registerDateTime64ToSnowflake(FunctionFactory & factory)
{
factory.registerFunction("dateTime64ToSnowflake",
[](ContextPtr){ return std::make_unique<FunctionToOverloadResolverAdaptor>(
std::make_shared<FunctionDateTime64ToSnowflake>("dateTime64ToSnowflake")); });
}
void registerSnowflakeToDateTime(FunctionFactory & factory)
{
factory.registerFunction("snowflakeToDateTime",
[](ContextPtr){ return std::make_unique<FunctionToOverloadResolverAdaptor>(
std::make_shared<FunctionSnowflakeToDateTime>("snowflakeToDateTime")); });
}
void registerSnowflakeToDateTime64(FunctionFactory & factory)
{
factory.registerFunction("snowflakeToDateTime64",
[](ContextPtr){ return std::make_unique<FunctionToOverloadResolverAdaptor>(
std::make_shared<FunctionSnowflakeToDateTime64>("snowflakeToDateTime64")); });
}
}

View File

@ -59,6 +59,7 @@
#include <Interpreters/Context.h>
#include <Interpreters/DDLWorker.h>
#include <Interpreters/DDLTask.h>
#include <Interpreters/Session.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/UncompressedCache.h>
#include <IO/MMappedFileCache.h>
@ -273,6 +274,8 @@ struct ContextSharedPart
return;
shutdown_called = true;
Session::shutdownNamedSessions();
/** After system_logs have been shut down it is guaranteed that no system table gets created or written to.
* Note that part changes at shutdown won't be logged to part log.
*/
@ -1800,8 +1803,8 @@ std::shared_ptr<Cluster> Context::getCluster(const std::string & cluster_name) c
auto res = getClusters()->getCluster(cluster_name);
if (res)
return res;
res = tryGetReplicatedDatabaseCluster(cluster_name);
if (!cluster_name.empty())
res = tryGetReplicatedDatabaseCluster(cluster_name);
if (res)
return res;

View File

@ -162,6 +162,36 @@ ExpressionAnalyzer::ExpressionAnalyzer(
analyzeAggregation();
}
static ASTPtr checkPositionalArgument(ASTPtr argument, const ASTSelectQuery * select_query, ASTSelectQuery::Expression expression)
{
auto columns = select_query->select()->children;
/// Case when GROUP BY element is position.
/// Do not consider case when GROUP BY element is not a literal, but expression, even if all values are constants.
if (const auto * ast_literal = typeid_cast<const ASTLiteral *>(argument.get()))
{
auto which = ast_literal->value.getType();
if (which == Field::Types::UInt64)
{
auto pos = ast_literal->value.get<UInt64>();
if (pos > 0 && pos <= columns.size())
{
const auto & column = columns[--pos];
if (const auto * literal_ast = typeid_cast<const ASTIdentifier *>(column.get()))
{
return std::make_shared<ASTIdentifier>(literal_ast->name());
}
else
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal value for positional argument in {}",
ASTSelectQuery::expressionToString(expression));
}
}
/// Do not throw if out of bounds, see appendUnusedGroupByColumn.
}
}
return nullptr;
}
void ExpressionAnalyzer::analyzeAggregation()
{
@ -238,13 +268,22 @@ void ExpressionAnalyzer::analyzeAggregation()
{
NameSet unique_keys;
ASTs & group_asts = select_query->groupBy()->children;
for (ssize_t i = 0; i < ssize_t(group_asts.size()); ++i)
{
ssize_t size = group_asts.size();
getRootActionsNoMakeSet(group_asts[i], true, temp_actions, false);
if (getContext()->getSettingsRef().enable_positional_arguments)
{
auto new_argument = checkPositionalArgument(group_asts[i], select_query, ASTSelectQuery::Expression::GROUP_BY);
if (new_argument)
group_asts[i] = new_argument;
}
const auto & column_name = group_asts[i]->getColumnName();
const auto * node = temp_actions->tryFindInIndex(column_name);
if (!node)
throw Exception("Unknown identifier (in GROUP BY): " + column_name, ErrorCodes::UNKNOWN_IDENTIFIER);
@ -1223,11 +1262,20 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai
bool with_fill = false;
NameSet order_by_keys;
for (auto & child : select_query->orderBy()->children)
{
const auto * ast = child->as<ASTOrderByElement>();
auto * ast = child->as<ASTOrderByElement>();
if (!ast || ast->children.empty())
throw Exception("Bad order expression AST", ErrorCodes::UNKNOWN_TYPE_OF_AST_NODE);
if (getContext()->getSettingsRef().enable_positional_arguments)
{
auto new_argument = checkPositionalArgument(ast->children.at(0), select_query, ASTSelectQuery::Expression::ORDER_BY);
if (new_argument)
ast->children[0] = new_argument;
}
ASTPtr order_expression = ast->children.at(0);
step.addRequiredOutput(order_expression->getColumnName());
@ -1277,8 +1325,16 @@ bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain
aggregated_names.insert(column.name);
}
for (const auto & child : select_query->limitBy()->children)
auto & children = select_query->limitBy()->children;
for (auto & child : children)
{
if (getContext()->getSettingsRef().enable_positional_arguments)
{
auto new_argument = checkPositionalArgument(child, select_query, ASTSelectQuery::Expression::LIMIT_BY);
if (new_argument)
child = new_argument;
}
auto child_name = child->getColumnName();
if (!aggregated_names.count(child_name))
step.addRequiredOutput(std::move(child_name));
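For illustration, a compact sketch of the positional-argument resolution wired in above, assuming the enable_positional_arguments setting referenced in the code and a hypothetical table t(a, b):
SET enable_positional_arguments = 1;
-- the literal 1 is replaced by the first SELECT expression, i.e. the identifier a,
-- in GROUP BY, ORDER BY and LIMIT BY alike
SELECT a, count() FROM t GROUP BY 1 ORDER BY 1 LIMIT 1 BY 1;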

View File

@ -2080,7 +2080,9 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
settings.group_by_two_level_threshold,
settings.group_by_two_level_threshold_bytes,
settings.max_bytes_before_external_group_by,
settings.empty_result_for_aggregation_by_empty_set || (keys.empty() && query_analyzer->hasConstAggregationKeys()),
settings.empty_result_for_aggregation_by_empty_set
|| (settings.empty_result_for_aggregation_by_constant_keys_on_empty_set && keys.empty()
&& query_analyzer->hasConstAggregationKeys()),
context->getTemporaryVolume(),
settings.max_threads,
settings.min_free_disk_space_for_temporary_data,
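A short sketch of what the extended condition controls, mirroring the regression test added later in this diff: aggregation over a constant key on an empty set returns no rows unless the new setting is switched off.
SELECT 1 AS a, count() FROM numbers(10) WHERE 0 GROUP BY a;  -- empty result
SELECT 1 AS a, count() FROM numbers(10) WHERE 0 GROUP BY a
SETTINGS empty_result_for_aggregation_by_constant_keys_on_empty_set = 0;  -- pre-existing single-row result is restored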

View File

@ -54,17 +54,17 @@ class NamedSessionsStorage
public:
using Key = NamedSessionKey;
static NamedSessionsStorage & instance()
{
static NamedSessionsStorage the_instance;
return the_instance;
}
~NamedSessionsStorage()
{
try
{
{
std::lock_guard lock{mutex};
quit = true;
}
cond.notify_one();
thread.join();
shutdown();
}
catch (...)
{
@ -72,6 +72,20 @@ public:
}
}
void shutdown()
{
{
std::lock_guard lock{mutex};
sessions.clear();
if (!thread.joinable())
return;
quit = true;
}
cond.notify_one();
thread.join();
}
/// Find existing session or create a new.
std::pair<std::shared_ptr<NamedSessionData>, bool> acquireSession(
const ContextPtr & global_context,
@ -94,6 +108,10 @@ public:
auto context = Context::createCopy(global_context);
it = sessions.insert(std::make_pair(key, std::make_shared<NamedSessionData>(key, context, timeout, *this))).first;
const auto & session = it->second;
if (!thread.joinable())
thread = ThreadFromGlobalPool{&NamedSessionsStorage::cleanThread, this};
return {session, true};
}
else
@ -156,11 +174,9 @@ private:
{
setThreadName("SessionCleaner");
std::unique_lock lock{mutex};
while (true)
while (!quit)
{
auto interval = closeSessions(lock);
if (cond.wait_for(lock, interval, [this]() -> bool { return quit; }))
break;
}
@ -208,8 +224,8 @@ private:
std::mutex mutex;
std::condition_variable cond;
std::atomic<bool> quit{false};
ThreadFromGlobalPool thread{&NamedSessionsStorage::cleanThread, this};
ThreadFromGlobalPool thread;
bool quit = false;
};
@ -218,13 +234,12 @@ void NamedSessionData::release()
parent.releaseSession(*this);
}
std::optional<NamedSessionsStorage> Session::named_sessions = std::nullopt;
void Session::startupNamedSessions()
void Session::shutdownNamedSessions()
{
named_sessions.emplace();
NamedSessionsStorage::instance().shutdown();
}
Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_)
: global_context(global_context_)
{
@ -317,15 +332,13 @@ ContextMutablePtr Session::makeSessionContext(const String & session_id_, std::c
throw Exception("Session context already exists", ErrorCodes::LOGICAL_ERROR);
if (query_context_created)
throw Exception("Session context must be created before any query context", ErrorCodes::LOGICAL_ERROR);
if (!named_sessions)
throw Exception("Support for named sessions is not enabled", ErrorCodes::LOGICAL_ERROR);
/// Make a new session context OR
/// if the `session_id` and `user_id` were used before then just get a previously created session context.
std::shared_ptr<NamedSessionData> new_named_session;
bool new_named_session_created = false;
std::tie(new_named_session, new_named_session_created)
= named_sessions->acquireSession(global_context, user_id.value_or(UUID{}), session_id_, timeout_, session_check_);
= NamedSessionsStorage::instance().acquireSession(global_context, user_id.value_or(UUID{}), session_id_, timeout_, session_check_);
auto new_session_context = new_named_session->context;
new_session_context->makeSessionContext();

View File

@ -28,9 +28,8 @@ using UserPtr = std::shared_ptr<const User>;
class Session
{
public:
/// Allow to use named sessions. The thread will be run to cleanup sessions after timeout has expired.
/// The method must be called at the server startup.
static void startupNamedSessions();
/// Stops using named sessions. The method must be called at the server shutdown.
static void shutdownNamedSessions();
Session(const ContextPtr & global_context_, ClientInfo::Interface interface_);
Session(Session &&);
@ -83,8 +82,6 @@ private:
String session_id;
std::shared_ptr<NamedSessionData> named_session;
bool named_session_created = false;
static std::optional<NamedSessionsStorage> named_sessions;
};
}

View File

@ -69,7 +69,9 @@ const std::unordered_set<String> possibly_injective_function_names
void appendUnusedGroupByColumn(ASTSelectQuery * select_query, const NameSet & source_columns)
{
/// You must insert a constant that is not the name of the column in the table. Such a case is rare, but it happens.
UInt64 unused_column = 0;
/// Also start unused_column integer from source_columns.size() + 1, because lower numbers ([1, source_columns.size()])
/// might be in positional GROUP BY.
UInt64 unused_column = source_columns.size() + 1;
String unused_column_name = toString(unused_column);
while (source_columns.count(unused_column_name))
@ -111,6 +113,8 @@ void optimizeGroupBy(ASTSelectQuery * select_query, const NameSet & source_colum
group_exprs.pop_back();
};
const auto & settings = context->getSettingsRef();
/// iterate over each GROUP BY expression, eliminate injective function calls and literals
for (size_t i = 0; i < group_exprs.size();)
{
@ -166,7 +170,22 @@ void optimizeGroupBy(ASTSelectQuery * select_query, const NameSet & source_colum
}
else if (is_literal(group_exprs[i]))
{
remove_expr_at_index(i);
bool keep_position = false;
if (settings.enable_positional_arguments)
{
const auto & value = group_exprs[i]->as<ASTLiteral>()->value;
if (value.getType() == Field::Types::UInt64)
{
auto pos = value.get<UInt64>();
if (pos > 0 && pos <= select_query->children.size())
keep_position = true;
}
}
if (keep_position)
++i;
else
remove_expr_at_index(i);
}
else
{
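To make keep_position concrete, a hedged sketch over a hypothetical table t: with positional arguments enabled, a small UInt64 literal in GROUP BY is kept so that it can later resolve to a column position, while other literals are still eliminated.
SET enable_positional_arguments = 1;
SELECT a, count() FROM t GROUP BY 1;       -- the literal 1 is preserved and resolves to column a
SELECT a, count() FROM t GROUP BY a, 'x';  -- the string literal is still removed by optimizeGroupBy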

View File

@ -7,6 +7,7 @@
#include <Common/quoteString.h>
#include <IO/Operators.h>
#include <re2/re2.h>
#include <stack>
namespace DB
@ -40,10 +41,18 @@ void ASTColumnsApplyTransformer::formatImpl(const FormatSettings & settings, For
if (!column_name_prefix.empty())
settings.ostr << "(";
settings.ostr << func_name;
if (parameters)
parameters->formatImpl(settings, state, frame);
if (lambda)
{
lambda->formatImpl(settings, state, frame);
}
else
{
settings.ostr << func_name;
if (parameters)
parameters->formatImpl(settings, state, frame);
}
if (!column_name_prefix.empty())
settings.ostr << ", '" << column_name_prefix << "')";
@ -64,9 +73,33 @@ void ASTColumnsApplyTransformer::transform(ASTs & nodes) const
else
name = column->getColumnName();
}
auto function = makeASTFunction(func_name, column);
function->parameters = parameters;
column = function;
if (lambda)
{
auto body = lambda->as<const ASTFunction &>().arguments->children.at(1)->clone();
std::stack<ASTPtr> stack;
stack.push(body);
while (!stack.empty())
{
auto ast = stack.top();
stack.pop();
for (auto & child : ast->children)
{
if (auto arg_name = tryGetIdentifierName(child); arg_name && arg_name == lambda_arg)
{
child = column->clone();
continue;
}
stack.push(child);
}
}
column = body;
}
else
{
auto function = makeASTFunction(func_name, column);
function->parameters = parameters;
column = function;
}
if (!column_name_prefix.empty())
column->setAlias(column_name_prefix + name);
}

View File

@ -25,13 +25,22 @@ public:
auto res = std::make_shared<ASTColumnsApplyTransformer>(*this);
if (parameters)
res->parameters = parameters->clone();
if (lambda)
res->lambda = lambda->clone();
return res;
}
void transform(ASTs & nodes) const override;
// Case 1 APPLY (quantile(0.9))
String func_name;
String column_name_prefix;
ASTPtr parameters;
// Case 2 APPLY (x -> quantile(0.9)(x))
ASTPtr lambda;
String lambda_arg;
String column_name_prefix;
protected:
void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
};
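The two cases noted in the comments correspond to queries like the following sketch (the second line repeats the functional test added further down in this diff):
SELECT COLUMNS('bytes') APPLY formatReadableSize FROM columns_transformers;  -- Case 1
SELECT * APPLY x -> argMax(x, number) FROM numbers(1);                       -- Case 2 (lambda)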

View File

@ -35,6 +35,44 @@ public:
SETTINGS
};
static String expressionToString(Expression expr)
{
switch (expr)
{
case Expression::WITH:
return "WITH";
case Expression::SELECT:
return "SELECT";
case Expression::TABLES:
return "TABLES";
case Expression::PREWHERE:
return "PREWHERE";
case Expression::WHERE:
return "WHERE";
case Expression::GROUP_BY:
return "GROUP BY";
case Expression::HAVING:
return "HAVING";
case Expression::WINDOW:
return "WINDOW";
case Expression::ORDER_BY:
return "ORDER BY";
case Expression::LIMIT_BY_OFFSET:
return "LIMIT BY OFFSET";
case Expression::LIMIT_BY_LENGTH:
return "LIMIT BY LENGTH";
case Expression::LIMIT_BY:
return "LIMIT BY";
case Expression::LIMIT_OFFSET:
return "LIMIT OFFSET";
case Expression::LIMIT_LENGTH:
return "LIMIT LENGTH";
case Expression::SETTINGS:
return "SETTINGS";
}
return "";
}
/** Get the text that identifies this element. */
String getID(char) const override { return "SelectQuery"; }

View File

@ -12,3 +12,7 @@ endif ()
if(ENABLE_EXAMPLES)
add_subdirectory(examples)
endif()
if (ENABLE_FUZZING)
add_subdirectory(fuzzers)
endif()

View File

@ -850,15 +850,24 @@ static bool isOneOf(TokenType token)
return ((token == tokens) || ...);
}
bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
/// Parse numbers (including decimals), strings and arrays of them.
/// Parse numbers (including decimals), strings, arrays and tuples of them.
const char * data_begin = pos->begin;
const char * data_end = pos->end;
bool is_string_literal = pos->type == TokenType::StringLiteral;
if (pos->type == TokenType::Number || is_string_literal)
if (pos->type == TokenType::Minus)
{
++pos;
if (pos->type != TokenType::Number)
return false;
data_end = pos->end;
++pos;
}
else if (pos->type == TokenType::Number || is_string_literal)
{
++pos;
}
@ -876,7 +885,7 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
}
else if (pos->type == TokenType::ClosingSquareBracket)
{
if (isOneOf<TokenType::Comma, TokenType::OpeningRoundBracket>(last_token))
if (isOneOf<TokenType::Comma, TokenType::OpeningRoundBracket, TokenType::Minus>(last_token))
return false;
if (stack.empty() || stack.back() != TokenType::OpeningSquareBracket)
return false;
@ -884,7 +893,7 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
}
else if (pos->type == TokenType::ClosingRoundBracket)
{
if (isOneOf<TokenType::Comma, TokenType::OpeningSquareBracket>(last_token))
if (isOneOf<TokenType::Comma, TokenType::OpeningSquareBracket, TokenType::Minus>(last_token))
return false;
if (stack.empty() || stack.back() != TokenType::OpeningRoundBracket)
return false;
@ -892,10 +901,15 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
}
else if (pos->type == TokenType::Comma)
{
if (isOneOf<TokenType::OpeningSquareBracket, TokenType::OpeningRoundBracket, TokenType::Comma>(last_token))
if (isOneOf<TokenType::OpeningSquareBracket, TokenType::OpeningRoundBracket, TokenType::Comma, TokenType::Minus>(last_token))
return false;
}
else if (isOneOf<TokenType::Number, TokenType::StringLiteral>(pos->type))
else if (pos->type == TokenType::Number)
{
if (!isOneOf<TokenType::OpeningSquareBracket, TokenType::OpeningRoundBracket, TokenType::Comma, TokenType::Minus>(last_token))
return false;
}
else if (isOneOf<TokenType::StringLiteral, TokenType::Minus>(pos->type))
{
if (!isOneOf<TokenType::OpeningSquareBracket, TokenType::OpeningRoundBracket, TokenType::Comma>(last_token))
return false;
@ -915,6 +929,8 @@ bool ParserCastOperator::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
if (!stack.empty())
return false;
}
else
return false;
ASTPtr type_ast;
if (ParserToken(TokenType::DoubleColon).ignore(pos, expected)
@ -1811,20 +1827,47 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e
with_open_round_bracket = true;
}
ASTPtr lambda;
String lambda_arg;
ASTPtr func_name;
if (!ParserIdentifier().parse(pos, func_name, expected))
return false;
ASTPtr expr_list_args;
if (pos->type == TokenType::OpeningRoundBracket)
auto opos = pos;
if (ParserLambdaExpression().parse(pos, lambda, expected))
{
++pos;
if (!ParserExpressionList(false).parse(pos, expr_list_args, expected))
if (const auto * func = lambda->as<ASTFunction>(); func && func->name == "lambda")
{
const auto * lambda_args_tuple = func->arguments->children.at(0)->as<ASTFunction>();
const ASTs & lambda_arg_asts = lambda_args_tuple->arguments->children;
if (lambda_arg_asts.size() != 1)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "APPLY column transformer can only accept lambda with one argument");
if (auto opt_arg_name = tryGetIdentifierName(lambda_arg_asts[0]); opt_arg_name)
lambda_arg = *opt_arg_name;
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "lambda argument declarations must be identifiers");
}
else
{
lambda = nullptr;
pos = opos;
}
}
if (!lambda)
{
if (!ParserIdentifier().parse(pos, func_name, expected))
return false;
if (pos->type != TokenType::ClosingRoundBracket)
return false;
++pos;
if (pos->type == TokenType::OpeningRoundBracket)
{
++pos;
if (!ParserExpressionList(false).parse(pos, expr_list_args, expected))
return false;
if (pos->type != TokenType::ClosingRoundBracket)
return false;
++pos;
}
}
String column_name_prefix;
@ -1848,8 +1891,16 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e
}
auto res = std::make_shared<ASTColumnsApplyTransformer>();
res->func_name = getIdentifierName(func_name);
res->parameters = expr_list_args;
if (lambda)
{
res->lambda = lambda;
res->lambda_arg = lambda_arg;
}
else
{
res->func_name = getIdentifierName(func_name);
res->parameters = expr_list_args;
}
res->column_name_prefix = column_name_prefix;
node = std::move(res);
return true;

View File

@ -664,10 +664,12 @@ bool ParserUnaryExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
if (pos->type == TokenType::Minus)
{
ParserLiteral lit_p;
Pos begin = pos;
if (ParserCastOperator().parse(pos, node, expected))
return true;
if (lit_p.parse(pos, node, expected))
pos = begin;
if (ParserLiteral().parse(pos, node, expected))
return true;
pos = begin;

View File

@ -8,14 +8,3 @@ target_link_libraries(select_parser PRIVATE clickhouse_parsers)
add_executable(create_parser create_parser.cpp ${SRCS})
target_link_libraries(create_parser PRIVATE clickhouse_parsers)
if (ENABLE_FUZZING)
add_executable(lexer_fuzzer lexer_fuzzer.cpp ${SRCS})
target_link_libraries(lexer_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})
add_executable(select_parser_fuzzer select_parser_fuzzer.cpp ${SRCS})
target_link_libraries(select_parser_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})
add_executable(create_parser_fuzzer create_parser_fuzzer.cpp ${SRCS})
target_link_libraries(create_parser_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})
endif ()

View File

@ -0,0 +1,8 @@
add_executable(lexer_fuzzer lexer_fuzzer.cpp ${SRCS})
target_link_libraries(lexer_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})
add_executable(select_parser_fuzzer select_parser_fuzzer.cpp ${SRCS})
target_link_libraries(select_parser_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})
add_executable(create_parser_fuzzer create_parser_fuzzer.cpp ${SRCS})
target_link_libraries(create_parser_fuzzer PRIVATE clickhouse_parsers ${LIB_FUZZING_ENGINE})

View File

@ -15,7 +15,10 @@ try
DB::ParserCreateQuery parser;
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
DB::formatAST(*ast, std::cerr);
DB::WriteBufferFromOwnString wb;
DB::formatAST(*ast, wb);
std::cerr << wb.str() << std::endl;
return 0;
}

View File

@ -14,7 +14,10 @@ try
DB::ParserQueryWithOutput parser(input.data() + input.size());
DB::ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0);
DB::formatAST(*ast, std::cerr);
DB::WriteBufferFromOwnString wb;
DB::formatAST(*ast, wb);
std::cerr << wb.str() << std::endl;
return 0;
}

View File

@ -1764,21 +1764,21 @@ void registerWindowFunctions(AggregateFunctionFactory & factory)
{
return std::make_shared<WindowFunctionRank>(name, argument_types,
parameters);
}, properties});
}, properties}, AggregateFunctionFactory::CaseInsensitive);
factory.registerFunction("dense_rank", {[](const std::string & name,
const DataTypes & argument_types, const Array & parameters, const Settings *)
{
return std::make_shared<WindowFunctionDenseRank>(name, argument_types,
parameters);
}, properties});
}, properties}, AggregateFunctionFactory::CaseInsensitive);
factory.registerFunction("row_number", {[](const std::string & name,
const DataTypes & argument_types, const Array & parameters, const Settings *)
{
return std::make_shared<WindowFunctionRowNumber>(name, argument_types,
parameters);
}, properties});
}, properties}, AggregateFunctionFactory::CaseInsensitive);
factory.registerFunction("lagInFrame", {[](const std::string & name,
const DataTypes & argument_types, const Array & parameters, const Settings *)
@ -1799,7 +1799,7 @@ void registerWindowFunctions(AggregateFunctionFactory & factory)
{
return std::make_shared<WindowFunctionNthValue>(
name, argument_types, parameters);
}, properties});
}, properties}, AggregateFunctionFactory::CaseInsensitive);
}
}
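Registering rank, dense_rank, row_number and nth_value as case-insensitive means the conventional upper-case spellings resolve as well; a minimal usage sketch (assuming an otherwise default session):
SELECT number, RANK() OVER (ORDER BY number) AS r, DENSE_RANK() OVER (ORDER BY number) AS dr, ROW_NUMBER() OVER (ORDER BY number) AS rn
FROM numbers(3);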

View File

@ -49,27 +49,6 @@
namespace DB
{
namespace
{
std::string formatHTTPErrorResponse(const Poco::Util::AbstractConfiguration& config)
{
std::string result = fmt::format(
"HTTP/1.0 400 Bad Request\r\n\r\n"
"Port {} is for clickhouse-client program\r\n",
config.getString("tcp_port"));
if (config.has("http_port"))
{
result += fmt::format(
"You must use port {} for HTTP.\r\n",
config.getString("http_port"));
}
return result;
}
}
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
@ -925,6 +904,29 @@ bool TCPHandler::receiveProxyHeader()
}
namespace
{
std::string formatHTTPErrorResponseWhenUserIsConnectedToWrongPort(const Poco::Util::AbstractConfiguration& config)
{
std::string result = fmt::format(
"HTTP/1.0 400 Bad Request\r\n\r\n"
"Port {} is for clickhouse-client program\r\n",
config.getString("tcp_port"));
if (config.has("http_port"))
{
result += fmt::format(
"You must use port {} for HTTP.\r\n",
config.getString("http_port"));
}
return result;
}
}
void TCPHandler::receiveHello()
{
/// Receive `hello` packet.
@ -940,9 +942,7 @@ void TCPHandler::receiveHello()
*/
if (packet_type == 'G' || packet_type == 'P')
{
writeString(formatHTTPErrorResponse(server.config()),
*out);
writeString(formatHTTPErrorResponseWhenUserIsConnectedToWrongPort(server.config()), *out);
throw Exception("Client has connected to wrong port", ErrorCodes::CLIENT_HAS_CONNECTED_TO_WRONG_PORT);
}
else

View File

@ -1,6 +1,10 @@
add_subdirectory(MergeTree)
add_subdirectory(System)
if(ENABLE_EXAMPLES)
if (ENABLE_EXAMPLES)
add_subdirectory(examples)
endif()
if (ENABLE_FUZZING)
add_subdirectory(fuzzers)
endif()

View File

@ -48,7 +48,7 @@ WriteBufferToKafkaProducer::WriteBufferToKafkaProducer(
WriteBufferToKafkaProducer::~WriteBufferToKafkaProducer()
{
assert(rows == 0 && chunks.empty());
assert(rows == 0);
}
void WriteBufferToKafkaProducer::countRow(const Columns & columns, size_t current_row)

View File

@ -172,11 +172,21 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup()
storage.cloneReplicaIfNeeded(zookeeper);
storage.queue.load(zookeeper);
try
{
storage.queue.load(zookeeper);
/// pullLogsToQueue() after we mark replica 'is_active' (and after we repair if it was lost);
/// because cleanup_thread doesn't delete log_pointer of active replicas.
storage.queue.pullLogsToQueue(zookeeper, {}, ReplicatedMergeTreeQueue::LOAD);
}
catch (...)
{
std::unique_lock lock(storage.last_queue_update_exception_lock);
storage.last_queue_update_exception = getCurrentExceptionMessage(false);
throw;
}
/// pullLogsToQueue() after we mark replica 'is_active' (and after we repair if it was lost);
/// because cleanup_thread doesn't delete log_pointer of active replicas.
storage.queue.pullLogsToQueue(zookeeper, {}, ReplicatedMergeTreeQueue::LOAD);
storage.queue.removeCurrentPartsFromMutations();
storage.last_queue_update_finish_time.store(time(nullptr));

View File

@ -101,7 +101,7 @@ WriteBufferToRabbitMQProducer::~WriteBufferToRabbitMQProducer()
std::this_thread::sleep_for(std::chrono::milliseconds(CONNECT_SLEEP));
}
assert(rows == 0 && chunks.empty());
assert(rows == 0);
}

View File

@ -327,11 +327,13 @@ StorageDistributed::StorageDistributed(
const String & relative_data_path_,
const DistributedSettings & distributed_settings_,
bool attach_,
ClusterPtr owned_cluster_)
ClusterPtr owned_cluster_,
ASTPtr remote_table_function_ptr_)
: IStorage(id_)
, WithContext(context_->getGlobalContext())
, remote_database(remote_database_)
, remote_table(remote_table_)
, remote_table_function_ptr(remote_table_function_ptr_)
, log(&Poco::Logger::get("StorageDistributed (" + id_.table_name + ")"))
, owned_cluster(std::move(owned_cluster_))
, cluster_name(getContext()->getMacros()->expand(cluster_name_))
@ -363,10 +365,13 @@ StorageDistributed::StorageDistributed(
}
/// Sanity check. Skip check if the table is already created to allow the server to start.
if (!attach_ && !cluster_name.empty())
if (!attach_)
{
size_t num_local_shards = getContext()->getCluster(cluster_name)->getLocalShardCount();
if (num_local_shards && remote_database == id_.database_name && remote_table == id_.table_name)
if (remote_database.empty() && !remote_table_function_ptr && !getCluster()->maybeCrossReplication())
LOG_WARNING(log, "Name of remote database is empty. Default database will be used implicitly.");
size_t num_local_shards = getCluster()->getLocalShardCount();
if (num_local_shards && (remote_database.empty() || remote_database == id_.database_name) && remote_table == id_.table_name)
throw Exception("Distributed table " + id_.table_name + " looks at itself", ErrorCodes::INFINITE_LOOP);
}
}
@ -399,9 +404,9 @@ StorageDistributed::StorageDistributed(
relative_data_path_,
distributed_settings_,
attach,
std::move(owned_cluster_))
std::move(owned_cluster_),
remote_table_function_ptr_)
{
remote_table_function_ptr = std::move(remote_table_function_ptr_);
}
QueryProcessingStage::Enum StorageDistributed::getQueryProcessingStage(
@ -810,9 +815,6 @@ void StorageDistributed::alter(const AlterCommands & params, ContextPtr local_co
void StorageDistributed::startup()
{
if (remote_database.empty() && !remote_table_function_ptr && !getCluster()->maybeCrossReplication())
LOG_WARNING(log, "Name of remote database is empty. Default database will be used implicitly.");
if (!storage_policy)
return;
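A hedged sketch of the strengthened sanity check, matching the updated 00987 test later in this diff: a Distributed table that points at itself through an empty remote database name is now rejected when it is created rather than when it is first queried.
CREATE TABLE distr0 (x UInt8) ENGINE = Distributed(test_shard_localhost, '', distr0); -- { serverError 269 }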

View File

@ -136,7 +136,8 @@ private:
const String & relative_data_path_,
const DistributedSettings & distributed_settings_,
bool attach_,
ClusterPtr owned_cluster_ = {});
ClusterPtr owned_cluster_ = {},
ASTPtr remote_table_function_ptr_ = {});
StorageDistributed(
const StorageID & id_,

View File

@ -3079,6 +3079,12 @@ void StorageReplicatedMergeTree::cloneReplicaIfNeeded(zkutil::ZooKeeperPtr zooke
zookeeper->set(fs::path(replica_path) / "is_lost", "0");
}
String StorageReplicatedMergeTree::getLastQueueUpdateException() const
{
std::unique_lock lock(last_queue_update_exception_lock);
return last_queue_update_exception;
}
void StorageReplicatedMergeTree::queueUpdatingTask()
{
@ -3097,6 +3103,9 @@ void StorageReplicatedMergeTree::queueUpdatingTask()
{
tryLogCurrentException(log, __PRETTY_FUNCTION__);
std::unique_lock lock(last_queue_update_exception_lock);
last_queue_update_exception = getCurrentExceptionMessage(false);
if (e.code == Coordination::Error::ZSESSIONEXPIRED)
{
restarting_thread.wakeup();
@ -3108,6 +3117,10 @@ void StorageReplicatedMergeTree::queueUpdatingTask()
catch (...)
{
tryLogCurrentException(log, __PRETTY_FUNCTION__);
std::unique_lock lock(last_queue_update_exception_lock);
last_queue_update_exception = getCurrentExceptionMessage(false);
queue_updating_task->scheduleAfter(QUEUE_UPDATE_ERROR_SLEEP_MS);
}
}
@ -5565,6 +5578,7 @@ void StorageReplicatedMergeTree::getStatus(Status & res, bool with_zk_fields)
res.log_pointer = 0;
res.total_replicas = 0;
res.active_replicas = 0;
res.last_queue_update_exception = getLastQueueUpdateException();
if (with_zk_fields && !res.is_session_expired)
{

View File

@ -174,6 +174,7 @@ public:
UInt64 absolute_delay;
UInt8 total_replicas;
UInt8 active_replicas;
String last_queue_update_exception;
/// If the error has happened fetching the info from ZooKeeper, this field will be set.
String zookeeper_exception;
std::unordered_map<std::string, bool> replica_is_active;
@ -331,6 +332,10 @@ private:
std::atomic<time_t> last_queue_update_start_time{0};
std::atomic<time_t> last_queue_update_finish_time{0};
mutable std::mutex last_queue_update_exception_lock;
String last_queue_update_exception;
String getLastQueueUpdateException() const;
DataPartsExchange::Fetcher fetcher;
/// When activated, replica is initialized and startup() method could exit

View File

@ -51,6 +51,7 @@ StorageSystemReplicas::StorageSystemReplicas(const StorageID & table_id_)
{ "absolute_delay", std::make_shared<DataTypeUInt64>() },
{ "total_replicas", std::make_shared<DataTypeUInt8>() },
{ "active_replicas", std::make_shared<DataTypeUInt8>() },
{ "last_queue_update_exception", std::make_shared<DataTypeString>() },
{ "zookeeper_exception", std::make_shared<DataTypeString>() },
{ "replica_is_active", std::make_shared<DataTypeMap>(std::make_shared<DataTypeString>(), std::make_shared<DataTypeUInt8>()) }
}));
@ -186,6 +187,7 @@ Pipe StorageSystemReplicas::read(
res_columns[col_num++]->insert(status.absolute_delay);
res_columns[col_num++]->insert(status.total_replicas);
res_columns[col_num++]->insert(status.active_replicas);
res_columns[col_num++]->insert(status.last_queue_update_exception);
res_columns[col_num++]->insert(status.zookeeper_exception);
Map replica_is_active_values;

View File

@ -23,10 +23,3 @@ target_link_libraries (transform_part_zk_nodes
string_utils
)
if (ENABLE_FUZZING)
add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.cpp)
target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
add_executable (columns_description_fuzzer columns_description_fuzzer.cpp)
target_link_libraries (columns_description_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})
endif ()

View File

@ -0,0 +1,11 @@
add_executable (mergetree_checksum_fuzzer
mergetree_checksum_fuzzer.cpp
"${ClickHouse_SOURCE_DIR}/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp"
"${ClickHouse_SOURCE_DIR}/src/Compression/CompressedReadBuffer.cpp"
"${ClickHouse_SOURCE_DIR}/src/Compression/CompressedWriteBuffer.cpp"
)
target_link_libraries (mergetree_checksum_fuzzer PRIVATE clickhouse_common_io fuzz_compression ${LIB_FUZZING_ENGINE})
add_executable (columns_description_fuzzer columns_description_fuzzer.cpp)
target_link_libraries (columns_description_fuzzer PRIVATE dbms ${LIB_FUZZING_ENGINE})

View File

@ -44,15 +44,17 @@ DISTRIBUTED_DDL_TIMEOUT_MSG = "is executing longer than distributed_ddl_task_tim
MESSAGES_TO_RETRY = [
"DB::Exception: ZooKeeper session has been expired",
"DB::Exception: Connection loss",
"Coordination::Exception: Session expired",
"Coordination::Exception: Connection loss",
"Coordination::Exception: Operation timeout",
"DB::Exception: Session expired",
"DB::Exception: Connection loss",
"DB::Exception: Operation timeout",
"Operation timed out",
"ConnectionPoolWithFailover: Connection failed at try",
"DB::Exception: New table appeared in database being dropped or detached. Try again",
"is already started to be removing by another replica right now",
"DB::Exception: Cannot enqueue query",
"Shutdown is called for table", # It happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized.
DISTRIBUTED_DDL_TIMEOUT_MSG # FIXME
]

View File

@ -22,9 +22,9 @@
<operation_timeout_ms>10000</operation_timeout_ms>
<session_timeout_ms>30000</session_timeout_ms>
<heart_beat_interval_ms>1000</heart_beat_interval_ms>
<election_timeout_lower_bound_ms>2000</election_timeout_lower_bound_ms>
<election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
<raft_logs_level>trace</raft_logs_level>
<election_timeout_lower_bound_ms>4000</election_timeout_lower_bound_ms>
<election_timeout_upper_bound_ms>5000</election_timeout_upper_bound_ms>
<raft_logs_level>information</raft_logs_level>
<force_sync>false</force_sync>
<!-- we want all logs for complex problems investigation -->
<reserved_log_items>1000000000000000</reserved_log_items>

View File

@ -474,6 +474,11 @@ class ClickHouseCluster:
cmd += " client"
return cmd
def copy_file_from_container_to_container(self, src_node, src_path, dst_node, dst_path):
fname = os.path.basename(src_path)
run_and_check([f"docker cp {src_node.docker_id}:{src_path} {self.instances_dir}"], shell=True)
run_and_check([f"docker cp {self.instances_dir}/{fname} {dst_node.docker_id}:{dst_path}"], shell=True)
def setup_zookeeper_secure_cmd(self, instance, env_variables, docker_compose_yml_dir):
logging.debug('Setup ZooKeeper Secure')
zookeeper_docker_compose_path = p.join(docker_compose_yml_dir, 'docker_compose_zookeeper_secure.yml')
@ -1836,6 +1841,10 @@ class ClickHouseInstance:
build_opts = self.query("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
return "-fsanitize={}".format(sanitizer_name) in build_opts
def is_debug_build(self):
build_opts = self.query("SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'")
return 'NDEBUG' not in build_opts
def is_built_with_thread_sanitizer(self):
return self.is_built_with_sanitizer('thread')
@ -2024,6 +2033,37 @@ class ClickHouseInstance:
return None
return None
def restart_with_original_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15):
if not self.stay_alive:
raise Exception("Cannot restart not stay alive container")
self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root')
retries = int(stop_start_wait_sec / 0.5)
local_counter = 0
# wait stop
while local_counter < retries:
if not self.get_process_pid("clickhouse server"):
break
time.sleep(0.5)
local_counter += 1
# force kill if server hangs
if self.get_process_pid("clickhouse server"):
# server can die before kill, so don't throw exception, it's expected
self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(9)], nothrow=True, user='root')
if callback_onstop:
callback_onstop(self)
self.exec_in_container(
["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"],
user='root')
self.exec_in_container(["bash", "-c",
"cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"],
user='root')
self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid()))
# wait start
assert_eq_with_retry(self, "select 1", "1", retry_count=retries)
def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15):
if not self.stay_alive:
raise Exception("Cannot restart not stay alive container")
@ -2044,6 +2084,9 @@ class ClickHouseInstance:
if callback_onstop:
callback_onstop(self)
self.exec_in_container(
["bash", "-c", "cp /usr/bin/clickhouse /usr/share/clickhouse_original"],
user='root')
self.exec_in_container(
["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"],
user='root')

View File

@ -53,3 +53,9 @@ def test_backward_compatability(start_cluster):
node1.restart_with_latest_version()
assert (node1.query("SELECT avgMerge(x) FROM state") == '2.5\n')
node1.query("drop table tab")
node1.query("drop table state")
node2.query("drop table tab")
node3.query("drop table tab")
node4.query("drop table tab")

View File

@ -5,7 +5,7 @@
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
cluster = ClickHouseCluster(__file__, name="skipping_indices")
node = cluster.add_instance('node', image='yandex/clickhouse-server', tag='21.6', stay_alive=True, with_installed_binary=True)
@ -41,4 +41,4 @@ def test_index(start_cluster):
node.query("""
SELECT * FROM data WHERE value = 20000 SETTINGS force_data_skipping_indices = 'value_index', max_rows_to_read=1;
DROP TABLE data;
""")
""")

View File

@ -30,3 +30,7 @@ def test_detach_part_wrong_partition_id(start_cluster):
num_detached = node_21_6.query("select count() from system.detached_parts")
assert num_detached == '1\n'
node_21_6.restart_with_original_version()
node_21_6.query("drop table tab SYNC")

View File

@ -27,3 +27,6 @@ def test_select_aggregate_alias_column(start_cluster):
node1.query("select sum(x_alias) from remote('node{1,2}', default, tab)")
node2.query("select sum(x_alias) from remote('node{1,2}', default, tab)")
node1.query("drop table tab")
node2.query("drop table tab")

View File

@ -29,3 +29,5 @@ def test_backward_compatability(start_cluster):
"select s, count() from remote('node{1,2}', default, tab) group by s order by toUInt64(s) limit 50")
print(res)
assert res == ''.join('{}\t2\n'.format(i) for i in range(50))
node1.query("drop table tab")
node2.query("drop table tab")

View File

@ -3,6 +3,7 @@ import os.path
import timeit
import pytest
import logging
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
@ -11,6 +12,8 @@ cluster = ClickHouseCluster(__file__)
NODES = {'node' + str(i): None for i in (1, 2)}
IS_DEBUG = False
CREATE_TABLES_SQL = '''
CREATE DATABASE test;
@ -104,6 +107,11 @@ def started_cluster(request):
try:
cluster.start()
if cluster.instances["node1"].is_debug_build():
global IS_DEBUG
IS_DEBUG = True
logging.warning("Debug build is too slow to show difference in timings. We disable checks.")
for node_id, node in list(NODES.items()):
node.query(CREATE_TABLES_SQL)
node.query(INSERT_SQL_TEMPLATE.format(node_id=node_id))
@ -133,8 +141,9 @@ def _check_timeout_and_exception(node, user, query_base, query):
# And it should timeout no faster than:
measured_timeout = timeit.default_timer() - start
assert expected_timeout - measured_timeout <= TIMEOUT_MEASUREMENT_EPS
assert measured_timeout - expected_timeout <= TIMEOUT_DIFF_UPPER_BOUND[user][query_base]
if not IS_DEBUG:
assert expected_timeout - measured_timeout <= TIMEOUT_MEASUREMENT_EPS
assert measured_timeout - expected_timeout <= TIMEOUT_DIFF_UPPER_BOUND[user][query_base]
# And exception should reflect connection attempts:
_check_exception(exception, repeats)

View File

@ -0,0 +1,37 @@
<yandex>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<snapshot_distance>75</snapshot_distance>
<reserved_log_items>5</reserved_log_items>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<priority>2</priority>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<priority>1</priority>
</server>
</raft_configuration>
</keeper_server>
</yandex>

View File

@ -0,0 +1,37 @@
<yandex>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>2</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<snapshot_distance>75</snapshot_distance>
<reserved_log_items>5</reserved_log_items>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<priority>2</priority>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<priority>1</priority>
</server>
</raft_configuration>
</keeper_server>
</yandex>

View File

@ -0,0 +1,37 @@
<yandex>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>3</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<snapshot_distance>75</snapshot_distance>
<reserved_log_items>5</reserved_log_items>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>44444</port>
<priority>3</priority>
</server>
<server>
<id>2</id>
<hostname>node2</hostname>
<port>44444</port>
<priority>2</priority>
</server>
<server>
<id>3</id>
<hostname>node3</hostname>
<port>44444</port>
<priority>1</priority>
</server>
</raft_configuration>
</keeper_server>
</yandex>

View File

@ -0,0 +1,12 @@
<yandex>
<shutdown_wait_unfinished>3</shutdown_wait_unfinished>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-server/log.log</log>
<errorlog>/var/log/clickhouse-server/log.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<stderr>/var/log/clickhouse-server/stderr.log</stderr>
<stdout>/var/log/clickhouse-server/stdout.log</stdout>
</logger>
</yandex>

View File

@ -0,0 +1,120 @@
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
from multiprocessing.dummy import Pool
from kazoo.client import KazooClient, KazooState
import random
import string
import os
import time
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/keeper_config1.xml'], stay_alive=True)
node2 = cluster.add_instance('node2', main_configs=['configs/keeper_config2.xml'], stay_alive=True)
node3 = cluster.add_instance('node3', main_configs=['configs/keeper_config3.xml'], stay_alive=True)
def start_zookeeper(node):
node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh start'])
def stop_zookeeper(node):
node.exec_in_container(['bash', '-c', '/opt/zookeeper/bin/zkServer.sh stop'])
def clear_zookeeper(node):
node.exec_in_container(['bash', '-c', 'rm -fr /zookeeper/*'])
def restart_and_clear_zookeeper(node):
stop_zookeeper(node)
clear_zookeeper(node)
start_zookeeper(node)
def clear_clickhouse_data(node):
node.exec_in_container(['bash', '-c', 'rm -fr /var/lib/clickhouse/coordination/logs/* /var/lib/clickhouse/coordination/snapshots/*'])
def convert_zookeeper_data(node):
cmd = '/usr/bin/clickhouse keeper-converter --zookeeper-logs-dir /zookeeper/version-2/ --zookeeper-snapshots-dir /zookeeper/version-2/ --output-dir /var/lib/clickhouse/coordination/snapshots'
node.exec_in_container(['bash', '-c', cmd])
return os.path.join('/var/lib/clickhouse/coordination/snapshots', node.exec_in_container(['bash', '-c', 'ls /var/lib/clickhouse/coordination/snapshots']).strip())
def stop_clickhouse(node):
node.stop_clickhouse()
def start_clickhouse(node):
node.start_clickhouse()
def copy_zookeeper_data(make_zk_snapshots, node):
stop_zookeeper(node)
if make_zk_snapshots: # force zookeeper to create snapshot
start_zookeeper(node)
stop_zookeeper(node)
stop_clickhouse(node)
clear_clickhouse_data(node)
convert_zookeeper_data(node)
start_zookeeper(node)
start_clickhouse(node)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_fake_zk(node, timeout=30.0):
_fake_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":9181", timeout=timeout)
_fake_zk_instance.start()
return _fake_zk_instance
def get_genuine_zk(node, timeout=30.0):
_genuine_zk_instance = KazooClient(hosts=cluster.get_instance_ip(node.name) + ":2181", timeout=timeout)
_genuine_zk_instance.start()
return _genuine_zk_instance
def test_snapshot_and_load(started_cluster):
restart_and_clear_zookeeper(node1)
genuine_connection = get_genuine_zk(node1)
for node in [node1, node2, node3]:
print("Stop and clear", node.name, "with dockerid", node.docker_id)
stop_clickhouse(node)
clear_clickhouse_data(node)
for i in range(1000):
genuine_connection.create("/test" + str(i), b"data")
print("Data loaded to zookeeper")
stop_zookeeper(node1)
start_zookeeper(node1)
stop_zookeeper(node1)
print("Data copied to node1")
resulted_path = convert_zookeeper_data(node1)
print("Resulted path", resulted_path)
for node in [node2, node3]:
print("Copy snapshot from", node1.name, "to", node.name)
cluster.copy_file_from_container_to_container(node1, resulted_path, node, '/var/lib/clickhouse/coordination/snapshots')
print("Starting clickhouses")
p = Pool(3)
result = p.map_async(start_clickhouse, [node1, node2, node3])
result.wait()
print("Loading additional data")
fake_zks = [get_fake_zk(node) for node in [node1, node2, node3]]
for i in range(1000):
fake_zk = random.choice(fake_zks)
try:
fake_zk.create("/test" + str(i + 1000), b"data")
except Exception as ex:
print("Got exception:" + str(ex))
print("Final")
fake_zks[0].create("/test10000", b"data")

View File

@ -37,6 +37,9 @@ def cluster():
with_hdfs=True)
logging.info("Starting cluster...")
cluster.start()
if cluster.instances["node1"].is_debug_build():
# https://github.com/ClickHouse/ClickHouse/issues/27814
pytest.skip("libhdfs3 calls rand function which does not pass harmful check in debug build")
logging.info("Cluster started")
fs = HdfsClient(hosts=cluster.hdfs_ip)

View File

@ -180,28 +180,6 @@ def avro_confluent_message(schema_registry_client, value):
})
return serializer.encode_record_with_schema('test_subject', schema, value)
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available() # ensure kafka is alive
kafka_producer_send_heartbeat_msg() # ensure python kafka client is ok
# print("kafka is available - running test")
yield # run test
# Tests
def test_kafka_settings_old_syntax(kafka_cluster):
@ -699,6 +677,8 @@ def describe_consumer_group(kafka_cluster, name):
def kafka_cluster():
try:
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@ -1129,6 +1109,7 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster):
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;

View File

@ -55,6 +55,9 @@ def kafka_produce(kafka_cluster, topic, messages, timestamp=None):
def kafka_cluster():
try:
cluster.start()
if instance.is_debug_build():
# https://github.com/ClickHouse/ClickHouse/issues/27651
pytest.skip("librdkafka calls system function for kinit which does not pass harmful check in debug build")
yield cluster
finally:
cluster.shutdown()

View File

@ -42,10 +42,10 @@ popd > /dev/null
#SCRIPTDIR=`dirname "$SCRIPTPATH"`
SCRIPTDIR=$SCRIPTPATH
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout 2>&1
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=10 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout
cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout 2>&1
cat "$SCRIPTDIR"/00282_merging.sql | $CLICKHOUSE_CLIENT --preferred_block_size_bytes=20 -n > "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout
cmp "$SCRIPTDIR"/00282_merging.reference "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout && echo PASSED || echo FAILED
rm "${CLICKHOUSE_TMP}"/preferred_block_size_bytes.stdout

View File

@ -4,8 +4,7 @@ DROP TABLE IF EXISTS distr2;
CREATE TABLE distr (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr); -- { serverError 269 }
CREATE TABLE distr0 (x UInt8) ENGINE = Distributed(test_shard_localhost, '', distr0);
SELECT * FROM distr0; -- { serverError 581 }
CREATE TABLE distr0 (x UInt8) ENGINE = Distributed(test_shard_localhost, '', distr0); -- { serverError 269 }
CREATE TABLE distr1 (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr2);
CREATE TABLE distr2 (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr1);
@ -13,6 +12,5 @@ CREATE TABLE distr2 (x UInt8) ENGINE = Distributed(test_shard_localhost, current
SELECT * FROM distr1; -- { serverError 581 }
SELECT * FROM distr2; -- { serverError 581 }
DROP TABLE distr0;
DROP TABLE distr1;
DROP TABLE distr2;

View File

@ -1 +1,4 @@
100 10 324 120.00 B 8.00 B 23.00 B
0
SELECT argMax(number, number)
FROM numbers(1)

View File

@ -5,3 +5,6 @@ INSERT INTO columns_transformers VALUES (100, 10, 324, 120, 8, 23);
SELECT * EXCEPT 'bytes', COLUMNS('bytes') APPLY formatReadableSize FROM columns_transformers;
DROP TABLE IF EXISTS columns_transformers;
SELECT * APPLY x->argMax(x, number) FROM numbers(1);
EXPLAIN SYNTAX SELECT * APPLY x->argMax(x, number) FROM numbers(1);

View File

@ -9,7 +9,9 @@ CREATE TABLE tt6
`status` String
)
ENGINE = Distributed('test_shard_localhost', '', 'tt6', rand());
ENGINE = Distributed('test_shard_localhost', '', 'tt7', rand());
CREATE TABLE tt7 as tt6 ENGINE = Distributed('test_shard_localhost', '', 'tt6', rand());
INSERT INTO tt6 VALUES (1, 1, 1, 1, 'ok'); -- { serverError 581 }

View File

@ -0,0 +1,10 @@
-1
SELECT CAST(\'-1\', \'Int32\')
-0.1
SELECT CAST(\'-0.1\', \'Decimal(38, 38)\')
-0.111
SELECT CAST(\'-0.111\', \'Float64\')
[-1,2,-3]
SELECT CAST(\'[-1, 2, -3]\', \'Array(Int32)\')
[-1.1,2,-3]
SELECT CAST(\'[-1.1, 2, -3]\', \'Array(Float64)\')

View File

@ -0,0 +1,14 @@
SELECT -1::Int32;
EXPLAIN SYNTAX SELECT -1::Int32;
SELECT -0.1::Decimal(38, 38);
EXPLAIN SYNTAX SELECT -0.1::Decimal(38, 38);
SELECT -0.111::Float64;
EXPLAIN SYNTAX SELECT -0.111::Float64;
SELECT [-1, 2, -3]::Array(Int32);
EXPLAIN SYNTAX SELECT [-1, 2, -3]::Array(Int32);
SELECT [-1.1, 2, -3]::Array(Float64);
EXPLAIN SYNTAX SELECT [-1.1, 2, -3]::Array(Float64);

View File

@ -8,3 +8,11 @@ Syntax error
Syntax error
Syntax error
Code: 6
Syntax error
Syntax error
Syntax error
Syntax error
Syntax error
Syntax error
Syntax error
Syntax error

View File

@ -15,3 +15,13 @@ $CLICKHOUSE_CLIENT --query="SELECT [1 2]::Array(UInt8)" 2>&1 | grep -o -m1 'Syn
$CLICKHOUSE_CLIENT --query="SELECT 1 4::UInt32" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT '1' '4'::UInt32" 2>&1 | grep -o -m1 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT '1''4'::UInt32" 2>&1 | grep -o -m1 'Code: 6'
$CLICKHOUSE_CLIENT --query="SELECT ::UInt32" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT ::String" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT -::Int32" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT [1, -]::Array(Int32)" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT [1, 3-]::Array(Int32)" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT [-, 2]::Array(Int32)" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT [--, 2]::Array(Int32)" 2>&1 | grep -o 'Syntax error'
$CLICKHOUSE_CLIENT --query="SELECT [1, 2]-::Array(Int32)" 2>&1 | grep -o 'Syntax error'

View File

@ -1,2 +1,4 @@
SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a;
SELECT count() FROM numbers(10) WHERE 0
SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a;
SELECT count() FROM numbers(10) WHERE 0;
SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a SETTINGS empty_result_for_aggregation_by_constant_keys_on_empty_set = 0;

View File

@ -0,0 +1,6 @@
const column
2021-08-15 18:57:56 1426860702823350272
2021-08-15 18:57:56.492 1426860704886947840
non-const column
2021-08-15 18:57:56 1426860702823350272
2021-08-15 18:57:56.492 1426860704886947840

View File

@ -0,0 +1,23 @@
-- Error cases
SELECT dateTimeToSnowflake(); -- {serverError 42}
SELECT dateTime64ToSnowflake(); -- {serverError 42}
SELECT dateTimeToSnowflake('abc'); -- {serverError 43}
SELECT dateTime64ToSnowflake('abc'); -- {serverError 43}
SELECT dateTimeToSnowflake('abc', 123); -- {serverError 42}
SELECT dateTime64ToSnowflake('abc', 123); -- {serverError 42}
SELECT 'const column';
WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt
SELECT dt, dateTimeToSnowflake(dt);
WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64
SELECT dt64, dateTime64ToSnowflake(dt64);
SELECT 'non-const column';
WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS x
SELECT materialize(x) as dt, dateTimeToSnowflake(dt);
WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS x
SELECT materialize(x) as dt64, dateTime64ToSnowflake(dt64);

View File

@ -0,0 +1,3 @@
const column
UTC 1426860704886947840 2021-08-15 10:57:56 DateTime(\'UTC\') 2021-08-15 10:57:56.492 DateTime64(3, \'UTC\')
Asia/Shanghai 1426860704886947840 2021-08-15 18:57:56 DateTime(\'Asia/Shanghai\') 2021-08-15 18:57:56.492 DateTime64(3, \'Asia/Shanghai\')

View File

@ -0,0 +1,32 @@
-- Error cases
SELECT snowflakeToDateTime(); -- {serverError 42}
SELECT snowflakeToDateTime64(); -- {serverError 42}
SELECT snowflakeToDateTime('abc'); -- {serverError 43}
SELECT snowflakeToDateTime64('abc'); -- {serverError 43}
SELECT snowflakeToDateTime('abc', 123); -- {serverError 43}
SELECT snowflakeToDateTime64('abc', 123); -- {serverError 43}
SELECT 'const column';
WITH
CAST(1426860704886947840 AS Int64) AS i64,
'UTC' AS tz
SELECT
tz,
i64,
snowflakeToDateTime(i64, tz) as dt,
toTypeName(dt),
snowflakeToDateTime64(i64, tz) as dt64,
toTypeName(dt64);
WITH
CAST(1426860704886947840 AS Int64) AS i64,
'Asia/Shanghai' AS tz
SELECT
tz,
i64,
snowflakeToDateTime(i64, tz) as dt,
toTypeName(dt),
snowflakeToDateTime64(i64, tz) as dt64,
toTypeName(dt64);

Some files were not shown because too many files have changed in this diff.