mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-21 15:12:02 +00:00

Merge remote-tracking branch 'origin' into integration-2

This commit is contained in commit a87fe5e1bc
2 .github/ISSUE_TEMPLATE/40_bug-report.md vendored
@@ -7,7 +7,7 @@ assignees: ''
---

(you don't have to strictly follow this form)

You have to provide the following information whenever possible.

**Describe the bug**

A clear and concise description of what works not as it is supposed to.
@@ -167,6 +167,7 @@ endif ()

# If turned `ON`, assumes the user has either the system GTest library or the bundled one.
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
+option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)

if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
    # Only for Linux, x86_64.
2 contrib/boost vendored
@@ -1 +1 @@
-Subproject commit a8d43d3142cc6b26fc55bec33f7f6edb1156ab7a
+Subproject commit 9f0ff347e50429686604002d8ad1fd07515c4f31
2 contrib/cyrus-sasl vendored
@@ -1 +1 @@
-Subproject commit 9995bf9d8e14f58934d9313ac64f13780d6dd3c9
+Subproject commit e6466edfd638cc5073debe941c53345b18a09512
@@ -1019,6 +1019,7 @@ done
wait

# Create per-query flamegraphs
+touch report/query-files.txt
IFS=$'\n'
for version in {right,left}
do
@@ -1209,6 +1210,12 @@ function upload_results
        /^metric/ { print old_sha, new_sha, $2, $3 }' \
        | "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV"

+    # Grepping numactl results from log is too crazy, I'll just call it again.
+    "${client[@]}" --query "INSERT INTO run_attributes_v1 FORMAT TSV" <<EOF
+$REF_SHA $SHA_TO_TEST $(numactl --show | sed -n 's/^cpubind:[[:space:]]\+/numactl-cpubind /p')
+$REF_SHA $SHA_TO_TEST $(numactl --hardware | sed -n 's/^available:[[:space:]]\+/numactl-available /p')
+EOF
+
    set -x
}
@@ -81,7 +81,10 @@ def substitute_parameters(query_templates, other_templates = []):
    query_results = []
    other_results = [[]] * (len(other_templates))
    for i, q in enumerate(query_templates):
-        keys = set(n for _, n, _, _ in string.Formatter().parse(q) if n)
+        # We need stable order of keys here, so that the order of substitutions
+        # is always the same, and the query indexes are consistent across test
+        # runs.
+        keys = sorted(set(n for _, n, _, _ in string.Formatter().parse(q) if n))
        values = [available_parameters[k] for k in keys]
        combos = itertools.product(*values)
        for c in combos:
@@ -2,6 +2,7 @@
FROM ubuntu:20.04

RUN apt-get update --yes && env DEBIAN_FRONTEND=noninteractive apt-get install wget unzip git openjdk-14-jdk maven python3 --yes --no-install-recommends

RUN wget https://github.com/sqlancer/sqlancer/archive/master.zip -O /sqlancer.zip
RUN mkdir /sqlancer && \
    cd /sqlancer && \
@@ -4,4 +4,21 @@ toc_priority: 3

# max {#agg_function-max}

-Calculates the maximum.
+Aggregate function that calculates the maximum across a group of values.

Example:

``` sql
SELECT max(salary) FROM employees;
```

``` sql
SELECT department, max(salary) FROM employees GROUP BY department;
```

If you need a non-aggregate function to choose the maximum of two values, see `greatest`:

``` sql
SELECT greatest(a, b) FROM table;
```
@@ -4,4 +4,20 @@ toc_priority: 2

## min {#agg_function-min}

-Calculates the minimum.
+Aggregate function that calculates the minimum across a group of values.

Example:

``` sql
SELECT min(salary) FROM employees;
```

``` sql
SELECT department, min(salary) FROM employees GROUP BY department;
```

If you need a non-aggregate function to choose the minimum of two values, see `least`:

``` sql
SELECT least(a, b) FROM table;
```
@@ -23,7 +23,7 @@ The point in time is saved as a [Unix timestamp](https://en.wikipedia.org/wiki/U

A timezone-agnostic Unix timestamp is stored in tables, and the time zone is used to transform it to text format or back during data import/export, or to make calendar calculations on the values (for example, the `toDate` and `toHour` functions). The time zone is not stored in the rows of the table (or in the resultset), but in the column metadata.

-A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) and also can be queried by `SELECT * FROM system.time_zones`.
+A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) and also can be queried by `SELECT * FROM system.time_zones`. [The list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) is also available at Wikipedia.

You can explicitly set a time zone for `DateTime`-type columns when creating a table. Example: `DateTime('UTC')`. If the time zone isn’t set, ClickHouse uses the value of the [timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) parameter in the server settings or the operating system settings at the moment of the ClickHouse server start.
@@ -134,6 +134,46 @@ Result:
└───────────────────┘
```

## timeZoneOffset {#timezoneoffset}

Returns a timezone offset in seconds from [UTC](https://en.wikipedia.org/wiki/Coordinated_Universal_Time). The function takes into account [daylight saving time](https://en.wikipedia.org/wiki/Daylight_saving_time) and historical timezone changes at the specified date and time.
The [IANA timezone database](https://www.iana.org/time-zones) is used to calculate the offset.

**Syntax**

``` sql
timeZoneOffset(value)
```

Alias: `timezoneOffset`.

**Arguments**

- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

**Returned value**

- Offset from UTC in seconds.

Type: [Int32](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT toDateTime('2021-04-21 10:20:30', 'America/New_York') AS Time, toTypeName(Time) AS Type,
       timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours;
```

Result:

``` text
┌────────────────Time─┬─Type─────────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐
│ 2021-04-21 10:20:30 │ DateTime('America/New_York') │            -14400 │              -4 │
└─────────────────────┴──────────────────────────────┴───────────────────┴─────────────────┘
```

## toYear {#toyear}

Converts a date or date with time to a UInt16 number containing the year number (AD).
@@ -1,8 +0,0 @@
---
toc_priority: 3
---

# max {#agg_function-max}

Calculates the maximum.
1 docs/ru/sql-reference/aggregate-functions/reference/max.md Symbolic link
@@ -0,0 +1 @@
../../../../en/sql-reference/aggregate-functions/reference/max.md
@@ -1,8 +0,0 @@
---
toc_priority: 2
---

## min {#agg_function-min}

Calculates the minimum.
1 docs/ru/sql-reference/aggregate-functions/reference/min.md Symbolic link
@@ -0,0 +1 @@
../../../../en/sql-reference/aggregate-functions/reference/min.md
@@ -20,8 +20,7 @@ DateTime([timezone])
## Usage {#ispolzovanie}

The point in time is saved as a [Unix timestamp](https://ru.wikipedia.org/wiki/Unix-%D0%B2%D1%80%D0%B5%D0%BC%D1%8F), regardless of the time zone and of daylight saving transitions. Additionally, the `DateTime` type can store a time zone that is the same for the entire column and affects how `DateTime` values are displayed in text form and how values given as strings ('2020-01-01 05:00:01') are parsed. The time zone is not stored in the rows of the table (or in a resultset), but in the column metadata.
-A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones).
-The `tzdata` package, containing the [IANA Time Zone Database](https://www.iana.org/time-zones), must be installed on the system. Use the `timedatectl list-timezones` command to list the time zones known to the local system.
+A list of supported time zones can be found in the [IANA Time Zone Database](https://www.iana.org/time-zones) or obtained from the database with `SELECT * FROM system.time_zones`. [The list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) is also available at Wikipedia.

You can explicitly set a time zone for a `DateTime` column when creating a table. If the time zone is not set, ClickHouse uses the value of the [timezone](../../sql-reference/data-types/datetime.md#server_configuration_parameters-timezone) parameter from the server configuration or the operating system settings at the moment the server starts.
@@ -134,6 +134,46 @@ SELECT timezoneOf(now());
└───────────────────┘
```

## timeZoneOffset {#timezoneoffset}

Returns the time zone offset in seconds from [UTC](https://ru.wikipedia.org/wiki/Всемирное_координированное_время). The function takes into account [daylight saving time](https://ru.wikipedia.org/wiki/Летнее_время) and historical time zone changes in effect at the specified date and time.
The [IANA time zone database](https://www.iana.org/time-zones) is used to calculate the offset.

**Syntax**

``` sql
timeZoneOffset(value)
```

Alias: `timezoneOffset`.

**Arguments**

- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

**Returned value**

- Offset from UTC in seconds.

Type: [Int32](../../sql-reference/data-types/int-uint.md).

**Example**

Query:

``` sql
SELECT toDateTime('2021-04-21 10:20:30', 'Europe/Moscow') AS Time, toTypeName(Time) AS Type,
       timeZoneOffset(Time) AS Offset_in_seconds, (Offset_in_seconds / 3600) AS Offset_in_hours;
```

Result:

``` text
┌────────────────Time─┬─Type──────────────────────┬─Offset_in_seconds─┬─Offset_in_hours─┐
│ 2021-04-21 10:20:30 │ DateTime('Europe/Moscow') │             10800 │               3 │
└─────────────────────┴───────────────────────────┴───────────────────┴─────────────────┘
```

## toYear {#toyear}

Converts a date or a date with time to a UInt16 number containing the year number (AD).
@@ -113,6 +113,16 @@ public:
        nested_func->merge(place, rhs, arena);
    }

+    void mergeBatch(
+        size_t batch_size,
+        AggregateDataPtr * places,
+        size_t place_offset,
+        const AggregateDataPtr * rhs,
+        Arena * arena) const override
+    {
+        nested_func->mergeBatch(batch_size, places, place_offset, rhs, arena);
+    }
+
    void serialize(ConstAggregateDataPtr __restrict place, WriteBuffer & buf) const override
    {
        nested_func->serialize(place, buf);
@@ -196,6 +196,18 @@ public:
        place[size_of_data] |= rhs[size_of_data];
    }

+    void mergeBatch(
+        size_t batch_size,
+        AggregateDataPtr * places,
+        size_t place_offset,
+        const AggregateDataPtr * rhs,
+        Arena * arena) const override
+    {
+        nested_function->mergeBatch(batch_size, places, place_offset, rhs, arena);
+        for (size_t i = 0; i < batch_size; ++i)
+            (places[i] + place_offset)[size_of_data] |= rhs[i][size_of_data];
+    }
+
    void serialize(
        ConstAggregateDataPtr place,
        WriteBuffer & buf) const override
@@ -24,6 +24,6 @@ list(REMOVE_ITEM clickhouse_aggregate_functions_headers
add_library(clickhouse_aggregate_functions ${clickhouse_aggregate_functions_sources})
target_link_libraries(clickhouse_aggregate_functions PRIVATE dbms PUBLIC ${CITYHASH_LIBRARIES})

-if(ENABLE_TESTS)
-    add_subdirectory(tests)
+if(ENABLE_EXAMPLES)
+    add_subdirectory(examples)
endif()
@@ -153,6 +153,13 @@ public:
        Arena * arena,
        ssize_t if_argument_pos = -1) const = 0;

+    virtual void mergeBatch(
+        size_t batch_size,
+        AggregateDataPtr * places,
+        size_t place_offset,
+        const AggregateDataPtr * rhs,
+        Arena * arena) const = 0;
+
    /** The same for single place.
      */
    virtual void addBatchSinglePlace(

@@ -279,6 +286,18 @@ public:
        }
    }

+    void mergeBatch(
+        size_t batch_size,
+        AggregateDataPtr * places,
+        size_t place_offset,
+        const AggregateDataPtr * rhs,
+        Arena * arena) const override
+    {
+        for (size_t i = 0; i < batch_size; ++i)
+            if (places[i])
+                static_cast<const Derived *>(this)->merge(places[i] + place_offset, rhs[i], arena);
+    }
+
    void addBatchSinglePlace(
        size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena, ssize_t if_argument_pos = -1) const override
    {
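The default implementation above simply loops over the batch and forwards each non-null destination state to `merge()`; the point of the new virtual is to pay one virtual call per batch instead of one per row when combining partial aggregation states. A minimal standalone sketch of the call shape, using simplified stand-in types (`SumState` is hypothetical; the real interface works on untyped `AggregateDataPtr` buffers and also threads an `Arena`):

``` cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Toy model of IAggregateFunction::mergeBatch: merge a batch of partial
// states (rhs) into their destination states (places) in one call.
struct SumState { long value = 0; };

void mergeBatch(size_t batch_size, SumState ** places, const SumState * const * rhs)
{
    for (size_t i = 0; i < batch_size; ++i)
        if (places[i])                 // same null-place skip as the default implementation
            places[i]->value += rhs[i]->value;
}

int main()
{
    SumState a{1}, b{2};
    SumState ra{10}, rb{20};
    std::vector<SumState *> places{&a, &b};
    std::vector<const SumState *> rhs{&ra, &rb};
    mergeBatch(2, places.data(), rhs.data());
    std::cout << a.value << ' ' << b.value << '\n';  // prints: 11 22
}
```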
@@ -11,6 +11,11 @@
namespace DB
{

+namespace ErrorCodes
+{
+    extern const int BAD_ARGUMENTS;
+}
+
template <typename F>
static Float64 integrateSimpson(Float64 a, Float64 b, F && func)
{
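Only the signature of `integrateSimpson` is visible in this hunk. For context, a self-contained sketch of composite Simpson integration with the same shape; the fixed subdivision count is an assumption, not necessarily what the ClickHouse helper uses internally:

``` cpp
#include <iostream>

// Composite Simpson's rule over [a, b]: weights 1, 4, 2, 4, ..., 4, 1.
template <typename F>
double integrateSimpson(double a, double b, F && func)
{
    const int n = 1000;                       // even number of subintervals (assumed)
    const double h = (b - a) / n;
    double sum = func(a) + func(b);
    for (int i = 1; i < n; ++i)
        sum += func(a + i * h) * (i % 2 ? 4.0 : 2.0);
    return sum * h / 3.0;
}

int main()
{
    // Integral of x^2 over [0, 1] is 1/3.
    std::cout << integrateSimpson(0.0, 1.0, [](double x) { return x * x; }) << '\n';
}
```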
@@ -48,6 +53,11 @@ std::pair<RanksArray, Float64> computeRanksAndTieCorrection(const Values & value
            ++right;
        auto adjusted = (left + right + 1.) / 2.;
        auto count_equal = right - left;

+        /// Scipy implementation throws exception in this case too.
+        if (count_equal == size)
+            throw Exception("All numbers in both samples are identical", ErrorCodes::BAD_ARGUMENTS);
+
        tie_numenator += std::pow(count_equal, 3) - count_equal;
        for (size_t iter = left; iter < right; ++iter)
            out[indexes[iter]] = adjusted;
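The `tie_numenator` accumulator above sums the standard tie-correction term used by rank-based statistical tests: for every run of t equal values it adds t³ − t. A standalone sketch of just that term (hypothetical helper, independent of the surrounding ClickHouse code):

``` cpp
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

// Sum t^3 - t over every run of equal values (Mann-Whitney tie correction).
double tieCorrectionTerm(std::vector<double> values)
{
    std::sort(values.begin(), values.end());
    double term = 0;
    for (size_t left = 0; left < values.size();)
    {
        size_t right = left;
        while (right < values.size() && values[right] == values[left])
            ++right;
        double t = static_cast<double>(right - left);
        term += std::pow(t, 3) - t;
        left = right;
    }
    return term;
}

int main()
{
    std::cout << tieCorrectionTerm({1, 2, 2, 2, 3}) << '\n';  // 3^3 - 3 = 24
}
```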
@@ -1 +1,3 @@
-add_subdirectory(tests)
+if (ENABLE_EXAMPLES)
+    add_subdirectory(examples)
+endif()
@@ -1,3 +1,3 @@
-if (ENABLE_TESTS)
-    add_subdirectory (tests)
+if (ENABLE_EXAMPLES)
+    add_subdirectory (examples)
endif ()
@@ -3,6 +3,6 @@ add_subdirectory(StringUtils)
#add_subdirectory(ZooKeeper)
#add_subdirectory(ConfigProcessor)

-if (ENABLE_TESTS)
-    add_subdirectory (tests)
-endif ()
+if (ENABLE_EXAMPLES)
+    add_subdirectory(examples)
+endif()
99 src/Common/JSONBuilder.cpp Normal file
@@ -0,0 +1,99 @@
#include <Common/JSONBuilder.h>
#include <IO/WriteHelpers.h>
#include <Common/typeid_cast.h>

namespace DB::JSONBuilder
{

static bool isArrayOrMap(const IItem & item)
{
    return typeid_cast<const JSONArray *>(&item) || typeid_cast<const JSONMap *>(&item);
}

static bool isSimpleArray(const std::vector<ItemPtr> & values)
{
    for (const auto & value : values)
        if (isArrayOrMap(*value))
            return false;

    return true;
}

void JSONString::format(const FormatSettings & settings, FormatContext & context)
{
    writeJSONString(value, context.out, settings.settings);
}

void JSONBool::format(const FormatSettings &, FormatContext & context)
{
    writeString(value ? "true" : "false", context.out);
}

void JSONArray::format(const FormatSettings & settings, FormatContext & context)
{
    writeChar('[', context.out);

    context.offset += settings.indent;

    bool single_row = settings.print_simple_arrays_in_single_row && isSimpleArray(values);
    bool first = true;

    for (const auto & value : values)
    {
        if (!first)
            writeChar(',', context.out);

        if (!single_row)
        {
            writeChar('\n', context.out);
            writeChar(' ', context.offset, context.out);
        }
        else if (!first)
            writeChar(' ', context.out);

        first = false;
        value->format(settings, context);
    }

    context.offset -= settings.indent;

    if (!single_row)
    {
        writeChar('\n', context.out);
        writeChar(' ', context.offset, context.out);
    }

    writeChar(']', context.out);
}

void JSONMap::format(const FormatSettings & settings, FormatContext & context)
{
    writeChar('{', context.out);

    context.offset += settings.indent;

    bool first = true;

    for (const auto & value : values)
    {
        if (!first)
            writeChar(',', context.out);
        first = false;

        writeChar('\n', context.out);
        writeChar(' ', context.offset, context.out);
        writeJSONString(value.key, context.out, settings.settings);

        writeChar(':', context.out);
        writeChar(' ', context.out);
        value.value->format(settings, context);
    }

    context.offset -= settings.indent;

    writeChar('\n', context.out);
    writeChar(' ', context.offset, context.out);
    writeChar('}', context.out);
}

}
111 src/Common/JSONBuilder.h Normal file
@@ -0,0 +1,111 @@
#pragma once
#include <type_traits>
#include <vector>
#include <IO/WriteBuffer.h>
#include <Formats/FormatSettings.h>
#include <IO/WriteHelpers.h>

namespace DB::JSONBuilder
{

struct FormatSettings
{
    const DB::FormatSettings & settings;
    size_t indent = 2;
    bool print_simple_arrays_in_single_row = true;
};

struct FormatContext
{
    WriteBuffer & out;
    size_t offset = 0;
};

class IItem
{
public:
    virtual ~IItem() = default;
    virtual void format(const FormatSettings & settings, FormatContext & context) = 0;
};

using ItemPtr = std::unique_ptr<IItem>;

class JSONString : public IItem
{
public:
    explicit JSONString(std::string value_) : value(std::move(value_)) {}
    void format(const FormatSettings & settings, FormatContext & context) override;

private:
    std::string value;
};

template <typename T>
class JSONNumber : public IItem
{
public:
    explicit JSONNumber(T value_) : value(value_)
    {
        static_assert(std::is_arithmetic_v<T>, "JSONNumber support only numeric types");
    }

    void format(const FormatSettings & settings, FormatContext & context) override
    {
        writeJSONNumber(value, context.out, settings.settings);
    }

private:
    T value;
};

class JSONBool : public IItem
{
public:
    explicit JSONBool(bool value_) : value(std::move(value_)) {}
    void format(const FormatSettings & settings, FormatContext & context) override;

private:
    bool value;
};

class JSONArray : public IItem
{
public:
    void add(ItemPtr value) { values.push_back(std::move(value)); }
    void add(std::string value) { add(std::make_unique<JSONString>(std::move(value))); }
    void add(const char * value) { add(std::make_unique<JSONString>(value)); }
    void add(bool value) { add(std::make_unique<JSONBool>(std::move(value))); }

    template <typename T, std::enable_if_t<std::is_arithmetic<T>::value, bool> = true>
    void add(T value) { add(std::make_unique<JSONNumber<T>>(value)); }

    void format(const FormatSettings & settings, FormatContext & context) override;

private:
    std::vector<ItemPtr> values;
};

class JSONMap : public IItem
{
    struct Pair
    {
        std::string key;
        ItemPtr value;
    };

public:
    void add(std::string key, ItemPtr value) { values.emplace_back(Pair{.key = std::move(key), .value = std::move(value)}); }
    void add(std::string key, std::string value) { add(std::move(key), std::make_unique<JSONString>(std::move(value))); }
    void add(std::string key, const char * value) { add(std::move(key), std::make_unique<JSONString>(value)); }
    void add(std::string key, bool value) { add(std::move(key), std::make_unique<JSONBool>(std::move(value))); }

    template <typename T, std::enable_if_t<std::is_arithmetic<T>::value, bool> = true>
    void add(std::string key, T value) { add(std::move(key), std::make_unique<JSONNumber<T>>(value)); }

    void format(const FormatSettings & settings, FormatContext & context) override;

private:
    std::vector<Pair> values;
};

}
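Putting the two JSONBuilder files together, a sketch of how a caller might assemble and render a document. This assumes a ClickHouse translation unit (`DB::FormatSettings` and `WriteBufferFromOwnString` come from the surrounding codebase) and is not taken from the commit itself:

``` cpp
#include <Common/JSONBuilder.h>
#include <IO/WriteBufferFromString.h>

using namespace DB;

std::string buildExample()
{
    auto map = std::make_unique<JSONBuilder::JSONMap>();
    map->add("Node Type", "Sorting");   // string overload -> JSONString
    map->add("Limit", 10);              // arithmetic overload -> JSONNumber

    auto columns = std::make_unique<JSONBuilder::JSONArray>();
    columns->add("a");
    columns->add("b");
    map->add("Columns", std::move(columns));  // nested item

    WriteBufferFromOwnString out;
    FormatSettings format_settings;                        // DB::FormatSettings defaults
    JSONBuilder::FormatSettings settings{format_settings}; // indent = 2 by default
    JSONBuilder::FormatContext context{out};
    map->format(settings, context);
    return out.str();
}
```

Note how simple arrays (no nested arrays or maps) are printed on a single row, per `print_simple_arrays_in_single_row`.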
@@ -1,5 +1,5 @@
#include <Common/StatusInfo.h>
-#include <Interpreters/ExternalLoader.h>
+#include <Common/ExternalLoaderStatus.h>

/// Available status. Add something here as you wish.
#define APPLY_FOR_STATUS(M) \
@@ -6,6 +6,6 @@ add_library(clickhouse_common_zookeeper ${clickhouse_common_zookeeper_headers} $

target_link_libraries (clickhouse_common_zookeeper PUBLIC clickhouse_common_io common PRIVATE string_utils)

-if (ENABLE_TESTS)
-    add_subdirectory (tests)
-endif ()
+if (ENABLE_EXAMPLES)
+    add_subdirectory(examples)
+endif()
@@ -35,9 +35,6 @@ target_include_directories(radix_sort SYSTEM PRIVATE ${PDQSORT_INCLUDE_DIR})
add_executable (arena_with_free_lists arena_with_free_lists.cpp)
target_link_libraries (arena_with_free_lists PRIVATE dbms)

-add_executable (pod_array pod_array.cpp)
-target_link_libraries (pod_array PRIVATE clickhouse_common_io)
-
add_executable (lru_hash_map_perf lru_hash_map_perf.cpp)
target_link_libraries (lru_hash_map_perf PRIVATE clickhouse_common_io)
@@ -4,6 +4,419 @@

using namespace DB;


TEST(Common, PODArrayBasicMove)
{
    using namespace DB;

    static constexpr size_t initial_bytes = 32;
    using Array = PODArray<UInt64, initial_bytes,
        AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

    {
        Array arr;
        Array arr2;
        arr2 = std::move(arr);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2 = std::move(arr);

        ASSERT_EQ(arr2.size(), 3);
        ASSERT_EQ(arr2[0], 1);
        ASSERT_EQ(arr2[1], 2);
        ASSERT_EQ(arr2[2], 3);

        arr = std::move(arr2);

        ASSERT_EQ(arr.size(), 3);
        ASSERT_EQ(arr[0], 1);
        ASSERT_EQ(arr[1], 2);
        ASSERT_EQ(arr[2], 3);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        arr.push_back(4);
        arr.push_back(5);

        Array arr2;

        arr2 = std::move(arr);

        ASSERT_EQ(arr2.size(), 5);
        ASSERT_EQ(arr2[0], 1);
        ASSERT_EQ(arr2[1], 2);
        ASSERT_EQ(arr2[2], 3);
        ASSERT_EQ(arr2[3], 4);
        ASSERT_EQ(arr2[4], 5);

        arr = std::move(arr2);

        ASSERT_EQ(arr.size(), 5);
        ASSERT_EQ(arr[0], 1);
        ASSERT_EQ(arr[1], 2);
        ASSERT_EQ(arr[2], 3);
        ASSERT_EQ(arr[3], 4);
        ASSERT_EQ(arr[4], 5);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);
        arr2.push_back(7);

        arr2 = std::move(arr);

        ASSERT_EQ(arr2.size(), 3);
        ASSERT_EQ(arr2[0], 1);
        ASSERT_EQ(arr2[1], 2);
        ASSERT_EQ(arr2[2], 3);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);
        arr2.push_back(7);
        arr2.push_back(8);

        arr = std::move(arr2);

        ASSERT_EQ(arr.size(), 5);
        ASSERT_EQ(arr[0], 4);
        ASSERT_EQ(arr[1], 5);
        ASSERT_EQ(arr[2], 6);
        ASSERT_EQ(arr[3], 7);
        ASSERT_EQ(arr[4], 8);
    }
}


TEST(Common, PODArrayBasicSwap)
{
    using namespace DB;

    static constexpr size_t initial_bytes = 32;
    using Array = PODArray<UInt64, initial_bytes,
        AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

    {
        Array arr;
        Array arr2;
        arr.swap(arr2);
        arr2.swap(arr);
    }

    {
        Array arr;

        Array arr2;

        arr2.push_back(1);
        arr2.push_back(2);
        arr2.push_back(3);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 3);
        ASSERT_TRUE(arr[0] == 1);
        ASSERT_TRUE(arr[1] == 2);
        ASSERT_TRUE(arr[2] == 3);

        ASSERT_TRUE(arr2.empty());

        arr.swap(arr2);

        ASSERT_TRUE(arr.empty());

        ASSERT_TRUE(arr2.size() == 3);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);
    }

    {
        Array arr;

        Array arr2;

        arr2.push_back(1);
        arr2.push_back(2);
        arr2.push_back(3);
        arr2.push_back(4);
        arr2.push_back(5);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 5);
        ASSERT_TRUE(arr[0] == 1);
        ASSERT_TRUE(arr[1] == 2);
        ASSERT_TRUE(arr[2] == 3);
        ASSERT_TRUE(arr[3] == 4);
        ASSERT_TRUE(arr[4] == 5);

        ASSERT_TRUE(arr2.empty());

        arr.swap(arr2);

        ASSERT_TRUE(arr.empty());

        ASSERT_TRUE(arr2.size() == 5);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);
        ASSERT_TRUE(arr2[3] == 4);
        ASSERT_TRUE(arr2[4] == 5);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 3);
        ASSERT_TRUE(arr[0] == 4);
        ASSERT_TRUE(arr[1] == 5);
        ASSERT_TRUE(arr[2] == 6);

        ASSERT_TRUE(arr2.size() == 3);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 3);
        ASSERT_TRUE(arr[0] == 1);
        ASSERT_TRUE(arr[1] == 2);
        ASSERT_TRUE(arr[2] == 3);

        ASSERT_TRUE(arr2.size() == 3);
        ASSERT_TRUE(arr2[0] == 4);
        ASSERT_TRUE(arr2[1] == 5);
        ASSERT_TRUE(arr2[2] == 6);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);

        Array arr2;

        arr2.push_back(3);
        arr2.push_back(4);
        arr2.push_back(5);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 3);
        ASSERT_TRUE(arr[0] == 3);
        ASSERT_TRUE(arr[1] == 4);
        ASSERT_TRUE(arr[2] == 5);

        ASSERT_TRUE(arr2.size() == 2);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 2);
        ASSERT_TRUE(arr[0] == 1);
        ASSERT_TRUE(arr[1] == 2);

        ASSERT_TRUE(arr2.size() == 3);
        ASSERT_TRUE(arr2[0] == 3);
        ASSERT_TRUE(arr2[1] == 4);
        ASSERT_TRUE(arr2[2] == 5);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);
        arr2.push_back(7);
        arr2.push_back(8);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 5);
        ASSERT_TRUE(arr[0] == 4);
        ASSERT_TRUE(arr[1] == 5);
        ASSERT_TRUE(arr[2] == 6);
        ASSERT_TRUE(arr[3] == 7);
        ASSERT_TRUE(arr[4] == 8);

        ASSERT_TRUE(arr2.size() == 3);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 3);
        ASSERT_TRUE(arr[0] == 1);
        ASSERT_TRUE(arr[1] == 2);
        ASSERT_TRUE(arr[2] == 3);

        ASSERT_TRUE(arr2.size() == 5);
        ASSERT_TRUE(arr2[0] == 4);
        ASSERT_TRUE(arr2[1] == 5);
        ASSERT_TRUE(arr2[2] == 6);
        ASSERT_TRUE(arr2[3] == 7);
        ASSERT_TRUE(arr2[4] == 8);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        arr.push_back(4);
        arr.push_back(5);

        Array arr2;

        arr2.push_back(6);
        arr2.push_back(7);
        arr2.push_back(8);
        arr2.push_back(9);
        arr2.push_back(10);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 5);
        ASSERT_TRUE(arr[0] == 6);
        ASSERT_TRUE(arr[1] == 7);
        ASSERT_TRUE(arr[2] == 8);
        ASSERT_TRUE(arr[3] == 9);
        ASSERT_TRUE(arr[4] == 10);

        ASSERT_TRUE(arr2.size() == 5);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);
        ASSERT_TRUE(arr2[3] == 4);
        ASSERT_TRUE(arr2[4] == 5);

        arr.swap(arr2);

        ASSERT_TRUE(arr.size() == 5);
        ASSERT_TRUE(arr[0] == 1);
        ASSERT_TRUE(arr[1] == 2);
        ASSERT_TRUE(arr[2] == 3);
        ASSERT_TRUE(arr[3] == 4);
        ASSERT_TRUE(arr[4] == 5);

        ASSERT_TRUE(arr2.size() == 5);
        ASSERT_TRUE(arr2[0] == 6);
        ASSERT_TRUE(arr2[1] == 7);
        ASSERT_TRUE(arr2[2] == 8);
        ASSERT_TRUE(arr2[3] == 9);
        ASSERT_TRUE(arr2[4] == 10);
    }
}

TEST(Common, PODArrayBasicSwapMoveConstructor)
{
    static constexpr size_t initial_bytes = 32;
    using Array = PODArray<UInt64, initial_bytes,
        AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

    {
        Array arr;
        Array arr2{std::move(arr)};
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2{std::move(arr)};

        ASSERT_TRUE(arr.empty()); // NOLINT

        ASSERT_TRUE(arr2.size() == 3);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        arr.push_back(4);
        arr.push_back(5);

        Array arr2{std::move(arr)};

        ASSERT_TRUE(arr.empty()); // NOLINT

        ASSERT_TRUE(arr2.size() == 5);
        ASSERT_TRUE(arr2[0] == 1);
        ASSERT_TRUE(arr2[1] == 2);
        ASSERT_TRUE(arr2[2] == 3);
        ASSERT_TRUE(arr2[3] == 4);
        ASSERT_TRUE(arr2[4] == 5);
    }
}

TEST(Common, PODArrayInsert)
{
    std::string str = "test_string_abacaba";
@@ -1,455 +0,0 @@
#include <Common/PODArray.h>
#include <common/types.h>
#include <iostream>

#define ASSERT_CHECK(cond, res) \
do \
{ \
    if (!(cond)) \
    { \
        std::cerr << __FILE__ << ":" << __LINE__ << ":" \
            << "Assertion " << #cond << " failed.\n"; \
        if ((res)) { (res) = false; } \
    } \
} \
while (false)

static void test1()
{
    using namespace DB;

    static constexpr size_t initial_bytes = 32;
    using Array = PODArray<UInt64, initial_bytes,
        AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

    bool res = true;

    {
        Array arr;
        Array arr2;
        arr2 = std::move(arr);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2 = std::move(arr);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);

        arr = std::move(arr2);

        ASSERT_CHECK((arr.size() == 3), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        arr.push_back(4);
        arr.push_back(5);

        Array arr2;

        arr2 = std::move(arr);

        ASSERT_CHECK((arr2.size() == 5), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
        ASSERT_CHECK((arr2[3] == 4), res);
        ASSERT_CHECK((arr2[4] == 5), res);

        arr = std::move(arr2);

        ASSERT_CHECK((arr.size() == 5), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);
        ASSERT_CHECK((arr[3] == 4), res);
        ASSERT_CHECK((arr[4] == 5), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);
        arr2.push_back(7);

        arr2 = std::move(arr);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);
        arr2.push_back(7);
        arr2.push_back(8);

        arr = std::move(arr2);

        ASSERT_CHECK((arr.size() == 5), res);
        ASSERT_CHECK((arr[0] == 4), res);
        ASSERT_CHECK((arr[1] == 5), res);
        ASSERT_CHECK((arr[2] == 6), res);
        ASSERT_CHECK((arr[3] == 7), res);
        ASSERT_CHECK((arr[4] == 8), res);
    }

    if (!res)
        std::cerr << "Some errors were found in test 1\n";
}

static void test2()
{
    using namespace DB;

    static constexpr size_t initial_bytes = 32;
    using Array = PODArray<UInt64, initial_bytes,
        AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

    bool res = true;

    {
        Array arr;
        Array arr2;
        arr.swap(arr2);
        arr2.swap(arr);
    }

    {
        Array arr;

        Array arr2;

        arr2.push_back(1);
        arr2.push_back(2);
        arr2.push_back(3);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 3), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);

        ASSERT_CHECK((arr2.empty()), res);

        arr.swap(arr2);

        ASSERT_CHECK((arr.empty()), res);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
    }

    {
        Array arr;

        Array arr2;

        arr2.push_back(1);
        arr2.push_back(2);
        arr2.push_back(3);
        arr2.push_back(4);
        arr2.push_back(5);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 5), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);
        ASSERT_CHECK((arr[3] == 4), res);
        ASSERT_CHECK((arr[4] == 5), res);

        ASSERT_CHECK((arr2.empty()), res);

        arr.swap(arr2);

        ASSERT_CHECK((arr.empty()), res);

        ASSERT_CHECK((arr2.size() == 5), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
        ASSERT_CHECK((arr2[3] == 4), res);
        ASSERT_CHECK((arr2[4] == 5), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 3), res);
        ASSERT_CHECK((arr[0] == 4), res);
        ASSERT_CHECK((arr[1] == 5), res);
        ASSERT_CHECK((arr[2] == 6), res);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 3), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 4), res);
        ASSERT_CHECK((arr2[1] == 5), res);
        ASSERT_CHECK((arr2[2] == 6), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);

        Array arr2;

        arr2.push_back(3);
        arr2.push_back(4);
        arr2.push_back(5);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 3), res);
        ASSERT_CHECK((arr[0] == 3), res);
        ASSERT_CHECK((arr[1] == 4), res);
        ASSERT_CHECK((arr[2] == 5), res);

        ASSERT_CHECK((arr2.size() == 2), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 2), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 3), res);
        ASSERT_CHECK((arr2[1] == 4), res);
        ASSERT_CHECK((arr2[2] == 5), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2;

        arr2.push_back(4);
        arr2.push_back(5);
        arr2.push_back(6);
        arr2.push_back(7);
        arr2.push_back(8);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 5), res);
        ASSERT_CHECK((arr[0] == 4), res);
        ASSERT_CHECK((arr[1] == 5), res);
        ASSERT_CHECK((arr[2] == 6), res);
        ASSERT_CHECK((arr[3] == 7), res);
        ASSERT_CHECK((arr[4] == 8), res);

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 3), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);

        ASSERT_CHECK((arr2.size() == 5), res);
        ASSERT_CHECK((arr2[0] == 4), res);
        ASSERT_CHECK((arr2[1] == 5), res);
        ASSERT_CHECK((arr2[2] == 6), res);
        ASSERT_CHECK((arr2[3] == 7), res);
        ASSERT_CHECK((arr2[4] == 8), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        arr.push_back(4);
        arr.push_back(5);

        Array arr2;

        arr2.push_back(6);
        arr2.push_back(7);
        arr2.push_back(8);
        arr2.push_back(9);
        arr2.push_back(10);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 5), res);
        ASSERT_CHECK((arr[0] == 6), res);
        ASSERT_CHECK((arr[1] == 7), res);
        ASSERT_CHECK((arr[2] == 8), res);
        ASSERT_CHECK((arr[3] == 9), res);
        ASSERT_CHECK((arr[4] == 10), res);

        ASSERT_CHECK((arr2.size() == 5), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
        ASSERT_CHECK((arr2[3] == 4), res);
        ASSERT_CHECK((arr2[4] == 5), res);

        arr.swap(arr2);

        ASSERT_CHECK((arr.size() == 5), res);
        ASSERT_CHECK((arr[0] == 1), res);
        ASSERT_CHECK((arr[1] == 2), res);
        ASSERT_CHECK((arr[2] == 3), res);
        ASSERT_CHECK((arr[3] == 4), res);
        ASSERT_CHECK((arr[4] == 5), res);

        ASSERT_CHECK((arr2.size() == 5), res);
        ASSERT_CHECK((arr2[0] == 6), res);
        ASSERT_CHECK((arr2[1] == 7), res);
        ASSERT_CHECK((arr2[2] == 8), res);
        ASSERT_CHECK((arr2[3] == 9), res);
        ASSERT_CHECK((arr2[4] == 10), res);
    }

    if (!res)
        std::cerr << "Some errors were found in test 2\n";
}

static void test3()
{
    using namespace DB;

    static constexpr size_t initial_bytes = 32;
    using Array = PODArray<UInt64, initial_bytes,
        AllocatorWithStackMemory<Allocator<false>, initial_bytes>>;

    bool res = true;

    {
        Array arr;
        Array arr2{std::move(arr)};
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);

        Array arr2{std::move(arr)};

        ASSERT_CHECK((arr.empty()), res); // NOLINT

        ASSERT_CHECK((arr2.size() == 3), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
    }

    {
        Array arr;

        arr.push_back(1);
        arr.push_back(2);
        arr.push_back(3);
        arr.push_back(4);
        arr.push_back(5);

        Array arr2{std::move(arr)};

        ASSERT_CHECK((arr.empty()), res); // NOLINT

        ASSERT_CHECK((arr2.size() == 5), res);
        ASSERT_CHECK((arr2[0] == 1), res);
        ASSERT_CHECK((arr2[1] == 2), res);
        ASSERT_CHECK((arr2[2] == 3), res);
        ASSERT_CHECK((arr2[3] == 4), res);
        ASSERT_CHECK((arr2[4] == 5), res);
    }

    if (!res)
        std::cerr << "Some errors were found in test 3\n";
}

int main()
{
    std::cout << "test 1\n";
    test1();
    std::cout << "test 2\n";
    test2();
    std::cout << "test 3\n";
    test3();

    return 0;
}
@@ -47,6 +47,7 @@ SRCS(
    FileChecker.cpp
    IPv6ToBinary.cpp
    IntervalKind.cpp
+    JSONBuilder.cpp
    Macros.cpp
    MemoryStatisticsOS.cpp
    MemoryTracker.cpp
@@ -1,3 +1,3 @@
-if(ENABLE_TESTS)
-    add_subdirectory(tests)
+if(ENABLE_EXAMPLES)
+    add_subdirectory(examples)
endif()
@@ -1,3 +1,3 @@
-if (ENABLE_TESTS)
-    add_subdirectory (tests)
+if (ENABLE_EXAMPLES)
+    add_subdirectory(examples)
endif ()
@@ -435,7 +435,7 @@ class IColumn;
    M(Bool, allow_experimental_map_type, false, "Allow data type Map", 0) \
    M(Bool, allow_experimental_window_functions, false, "Allow experimental window functions", 0) \
    M(Bool, use_antlr_parser, false, "Parse incoming queries using ANTLR-generated experimental parser", 0) \
-    M(Bool, async_socket_for_remote, false, "Asynchronously read from socket executing remote query", 0) \
+    M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
    M(Bool, insert_null_as_default, true, "Insert DEFAULT values instead of NULL in INSERT SELECT (UNION ALL)", 0) \
    \
    M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) function countIf() function when logically equivalent", 0) \
@@ -1,6 +1,7 @@
#include <Core/SortDescription.h>
#include <Core/Block.h>
#include <IO/Operators.h>
+#include <Common/JSONBuilder.h>

namespace DB
{

@@ -37,6 +38,22 @@ void dumpSortDescription(const SortDescription & description, const Block & head
    }
}

+void SortColumnDescription::explain(JSONBuilder::JSONMap & map, const Block & header) const
+{
+    if (!column_name.empty())
+        map.add("Column", column_name);
+    else
+    {
+        if (column_number < header.columns())
+            map.add("Column", header.getByPosition(column_number).name);
+
+        map.add("Position", column_number);
+    }
+
+    map.add("Ascending", direction > 0);
+    map.add("With Fill", with_fill);
+}
+
std::string dumpSortDescription(const SortDescription & description)
{
    WriteBufferFromOwnString wb;

@@ -44,5 +61,17 @@ std::string dumpSortDescription(const SortDescription & description)
    return wb.str();
}

+JSONBuilder::ItemPtr explainSortDescription(const SortDescription & description, const Block & header)
+{
+    auto json_array = std::make_unique<JSONBuilder::JSONArray>();
+    for (const auto & descr : description)
+    {
+        auto json_map = std::make_unique<JSONBuilder::JSONMap>();
+        descr.explain(*json_map, header);
+        json_array->add(std::move(json_map));
+    }
+
+    return json_array;
+}
+
}
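A hypothetical caller-side sketch of the new `explainSortDescription` hook, again assuming a ClickHouse translation unit; the keys in the rendered entries follow from the `explain()` implementation above:

``` cpp
#include <Core/SortDescription.h>
#include <Common/JSONBuilder.h>
#include <IO/WriteBufferFromString.h>

std::string explainToJSON(const DB::SortDescription & description, const DB::Block & header)
{
    auto item = DB::explainSortDescription(description, header);

    DB::WriteBufferFromOwnString out;
    DB::FormatSettings format_settings;
    DB::JSONBuilder::FormatSettings settings{format_settings};
    DB::JSONBuilder::FormatContext context{out};
    item->format(settings, context);
    // For a descending sort on a named column this renders entries like
    //   {"Column": "price", "Ascending": false, "With Fill": false}
    return out.str();
}
```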
@@ -12,6 +12,15 @@ class Collator;
namespace DB
{

+namespace JSONBuilder
+{
+    class JSONMap;
+    class IItem;
+    using ItemPtr = std::unique_ptr<IItem>;
+}
+
+class Block;
+
struct FillColumnDescription
{
    /// All missed values in range [FROM, TO) will be filled

@@ -62,16 +71,18 @@ struct SortColumnDescription
    {
        return fmt::format("{}:{}:dir {}nulls ", column_name, column_number, direction, nulls_direction);
    }

+    void explain(JSONBuilder::JSONMap & map, const Block & header) const;
};

/// Description of the sorting rule for several columns.
using SortDescription = std::vector<SortColumnDescription>;

-class Block;
-
/// Outputs user-readable description into `out`.
void dumpSortDescription(const SortDescription & description, const Block & header, WriteBuffer & out);

std::string dumpSortDescription(const SortDescription & description);

+JSONBuilder::ItemPtr explainSortDescription(const SortDescription & description, const Block & header);
+
}
@@ -1,3 +1,3 @@
-if (ENABLE_TESTS)
-    add_subdirectory (tests)
+if (ENABLE_EXAMPLES)
+    add_subdirectory(examples)
endif ()
@@ -1,3 +1,3 @@
-if (ENABLE_TESTS)
-    add_subdirectory (tests)
+if (ENABLE_EXAMPLES)
+    add_subdirectory(examples)
endif ()
@@ -33,55 +33,11 @@ void SerializationNumber<T>::deserializeText(IColumn & column, ReadBuffer & istr
    assert_cast<ColumnVector<T> &>(column).getData().push_back(x);
}

-template <typename T>
-static inline void writeDenormalNumber(T x, WriteBuffer & ostr)
-{
-    if constexpr (std::is_floating_point_v<T>)
-    {
-        if (std::signbit(x))
-        {
-            if (isNaN(x))
-                writeCString("-nan", ostr);
-            else
-                writeCString("-inf", ostr);
-        }
-        else
-        {
-            if (isNaN(x))
-                writeCString("nan", ostr);
-            else
-                writeCString("inf", ostr);
-        }
-    }
-    else
-    {
-        /// This function is not called for non floating point numbers.
-        (void)x;
-    }
-}
-
-
template <typename T>
void SerializationNumber<T>::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
    auto x = assert_cast<const ColumnVector<T> &>(column).getData()[row_num];
-    bool is_finite = isFinite(x);
-
-    const bool need_quote = (is_integer_v<T> && (sizeof(T) >= 8) && settings.json.quote_64bit_integers)
-        || (settings.json.quote_denormals && !is_finite);
-
-    if (need_quote)
-        writeChar('"', ostr);
-
-    if (is_finite)
-        writeText(x, ostr);
-    else if (!settings.json.quote_denormals)
-        writeCString("null", ostr);
-    else
-        writeDenormalNumber(x, ostr);
-
-    if (need_quote)
-        writeChar('"', ostr);
+    writeJSONNumber(x, ostr, settings);
}

template <typename T>
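The removed block above spelled out the JSON quoting rules inline; `writeJSONNumber` presumably centralizes them (which is also what lets `JSONNumber::format` in JSONBuilder reuse the same path). A self-contained sketch of those rules for doubles, as a hypothetical helper rather than the ClickHouse function itself:

``` cpp
#include <cmath>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>

// Finite values print as-is; JSON has no literal for nan/inf, so denormals
// become either null or a quoted string depending on quote_denormals.
std::string toJSONNumber(double x, bool quote_denormals)
{
    if (std::isfinite(x))
    {
        std::ostringstream out;
        out << x;
        return out.str();
    }
    if (!quote_denormals)
        return "null";
    if (std::isnan(x))
        return std::signbit(x) ? "\"-nan\"" : "\"nan\"";
    return std::signbit(x) ? "\"-inf\"" : "\"inf\"";
}

int main()
{
    std::cout << toJSONNumber(1.5, true) << '\n';                                      // 1.5
    std::cout << toJSONNumber(std::numeric_limits<double>::infinity(), true) << '\n';  // "inf"
    std::cout << toJSONNumber(std::nan(""), false) << '\n';                            // null
}
```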
@ -91,7 +91,7 @@ void DatabaseAtomic::attachTable(const String & name, const StoragePtr & table,
|
||||
not_in_use = cleanupDetachedTables();
|
||||
auto table_id = table->getStorageID();
|
||||
assertDetachedTableNotInUse(table_id.uuid);
|
||||
DatabaseWithDictionaries::attachTableUnlocked(name, table, lock);
|
||||
DatabaseOrdinary::attachTableUnlocked(name, table, lock);
|
||||
table_name_to_path.emplace(std::make_pair(name, relative_table_path));
|
||||
}
|
||||
|
||||
@ -99,7 +99,7 @@ StoragePtr DatabaseAtomic::detachTable(const String & name)
|
||||
{
|
||||
DetachedTables not_in_use;
|
||||
std::unique_lock lock(mutex);
|
||||
auto table = DatabaseWithDictionaries::detachTableUnlocked(name, lock);
|
||||
auto table = DatabaseOrdinary::detachTableUnlocked(name, lock);
|
||||
table_name_to_path.erase(name);
|
||||
detached_tables.emplace(table->getStorageID().uuid, table);
|
||||
not_in_use = cleanupDetachedTables();
|
||||
@ -133,9 +133,10 @@ void DatabaseAtomic::dropTable(ContextPtr local_context, const String & table_na
|
||||
/// TODO better detection and recovery
|
||||
|
||||
Poco::File(table_metadata_path).renameTo(table_metadata_path_drop); /// Mark table as dropped
|
||||
DatabaseWithDictionaries::detachTableUnlocked(table_name, lock); /// Should never throw
|
||||
DatabaseOrdinary::detachTableUnlocked(table_name, lock); /// Should never throw
|
||||
table_name_to_path.erase(table_name);
|
||||
}
|
||||
|
||||
if (table->storesDataOnDisk())
|
||||
tryRemoveSymlink(table_name);
|
||||
|
||||
@ -156,8 +157,6 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
|
||||
return;
|
||||
}
|
||||
|
||||
if (exchange && dictionary)
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot exchange dictionaries");
|
||||
if (exchange && !supportsRenameat2())
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported");
|
||||
|
||||
@ -174,7 +173,7 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
|
||||
/// Path can be not set for DDL dictionaries, but it does not matter for StorageDictionary.
|
||||
if (it != db.table_name_to_path.end())
|
||||
table_data_path_saved = it->second;
|
||||
assert(!table_data_path_saved.empty() || db.dictionaries.find(table_name_) != db.dictionaries.end());
|
||||
assert(!table_data_path_saved.empty());
|
||||
db.tables.erase(table_name_);
|
||||
db.table_name_to_path.erase(table_name_);
|
||||
if (has_symlink)
|
||||
@ -222,21 +221,21 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
|
||||
db_lock = std::unique_lock{mutex};
|
||||
}
|
||||
|
||||
bool is_dictionary = dictionaries.find(table_name) != dictionaries.end();
|
||||
if (exchange && other_db.dictionaries.find(to_table_name) != other_db.dictionaries.end())
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot exchange dictionaries");
|
||||
|
||||
if (dictionary != is_dictionary)
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY,
|
||||
"Use RENAME DICTIONARY for dictionaries and RENAME TABLE for tables.");
|
||||
|
||||
if (is_dictionary && !inside_database)
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot move dictionary to other database");
|
||||
|
||||
if (!exchange)
|
||||
other_db.checkMetadataFilenameAvailabilityUnlocked(to_table_name, inside_database ? db_lock : other_db_lock);
|
||||
|
||||
StoragePtr table = getTableUnlocked(table_name, db_lock);
|
||||
|
||||
if (table->isDictionary() && !dictionary)
|
||||
{
|
||||
if (exchange)
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY,
|
||||
"Use EXCHANGE DICTIONARIES for dictionaries and EXCHANGE TABLES for tables.");
|
||||
else
|
||||
throw Exception(ErrorCodes::INCORRECT_QUERY,
|
||||
"Use RENAME DICTIONARY for dictionaries and RENAME TABLE for tables.");
|
||||
}
|
||||
|
||||
table->checkTableCanBeRenamed();
|
||||
assert_can_move_mat_view(table);
|
||||
StoragePtr other_table;
|
||||
@ -281,12 +280,6 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_
|
||||
attach(other_db, to_table_name, table_data_path, table);
|
||||
if (exchange)
|
||||
attach(*this, table_name, other_table_data_path, other_table);
|
||||
|
||||
if (is_dictionary)
|
||||
{
|
||||
auto new_table_id = StorageID(other_db.database_name, to_table_name, old_table_id.uuid);
|
||||
renameDictionaryInMemoryUnlocked(old_table_id, new_table_id);
|
||||
}
|
||||
}
|
||||
|
||||
void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const StoragePtr & table,
|
||||
@ -528,14 +521,6 @@ void DatabaseAtomic::renameDatabase(const String & new_name)
table.second->renameInMemory(table_id);
}

for (auto & dict : dictionaries)
{
auto old_name = StorageID(dict.second.create_query);
auto name = old_name;
name.database_name = database_name;
renameDictionaryInMemoryUnlocked(old_name, name);
}

path_to_metadata_symlink = getContext()->getPath() + "metadata/" + new_name_escaped;
old_path_to_table_symlinks = path_to_table_symlinks;
path_to_table_symlinks = getContext()->getPath() + "data/" + new_name_escaped + "/";
@ -545,32 +530,6 @@ void DatabaseAtomic::renameDatabase(const String & new_name)
tryCreateMetadataSymlink();
}

void DatabaseAtomic::renameDictionaryInMemoryUnlocked(const StorageID & old_name, const StorageID & new_name)
{
auto it = dictionaries.find(old_name.table_name);
assert(it != dictionaries.end());
assert(it->second.config->getString("dictionary.uuid") == toString(old_name.uuid));
assert(old_name.uuid == new_name.uuid);
it->second.config->setString("dictionary.database", new_name.database_name);
it->second.config->setString("dictionary.name", new_name.table_name);
auto & create = it->second.create_query->as<ASTCreateQuery &>();
create.database = new_name.database_name;
create.table = new_name.table_name;
assert(create.uuid == new_name.uuid);

if (old_name.table_name != new_name.table_name)
{
auto attach_info = std::move(it->second);
dictionaries.erase(it);
dictionaries.emplace(new_name.table_name, std::move(attach_info));
}

auto result = external_loader.getLoadResult(toString(old_name.uuid));
if (!result.object)
return;
const auto & dict = dynamic_cast<const IDictionary &>(*result.object);
dict.updateDictionaryName(new_name);
}
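renameDictionaryInMemoryUnlocked above rekeys the `dictionaries` map by moving the attach info out, erasing the old key and emplacing it under the new one. Since C++17 the same rekeying can be expressed with node extraction, which never copies or moves the mapped value; a self-contained sketch (the types are illustrative):

    #include <string>
    #include <unordered_map>

    struct Info { std::string payload; };

    /// Rekey an entry in place using C++17 node extraction; only the key
    /// changes, the mapped value stays where it is. Assumes new_key is absent.
    void rekey(std::unordered_map<std::string, Info> & map,
               const std::string & old_key, const std::string & new_key)
    {
        auto node = map.extract(old_key); /// Empty node if old_key is absent.
        if (node.empty())
            return;
        node.key() = new_key;
        map.insert(std::move(node));
    }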
void DatabaseAtomic::waitDetachedTableNotInUse(const UUID & uuid)
{
/// Table is in use while its shared_ptr counter is greater than 1.

@ -72,8 +72,6 @@ protected:

void tryCreateMetadataSymlink();

void renameDictionaryInMemoryUnlocked(const StorageID & old_name, const StorageID & new_name);

//TODO store path in DatabaseWithOwnTables::tables
using NameToPathMap = std::unordered_map<String, String>;
NameToPathMap table_name_to_path;

@ -21,18 +21,20 @@ namespace ErrorCodes

namespace
{
StoragePtr createStorageDictionary(const String & database_name, const ExternalLoader::LoadResult & load_result)
StoragePtr createStorageDictionary(const String & database_name, const ExternalLoader::LoadResult & load_result, ContextPtr context)
{
try
{
if (!load_result.config)
return nullptr;

DictionaryStructure dictionary_structure = ExternalDictionariesLoader::getDictionaryStructure(*load_result.config);
return StorageDictionary::create(
StorageID(database_name, load_result.name),
load_result.name,
dictionary_structure,
StorageDictionary::Location::DictionaryDatabase);
StorageDictionary::Location::DictionaryDatabase,
context);
}
catch (Exception & e)
{
@ -57,7 +59,7 @@ Tables DatabaseDictionary::listTables(const FilterByNameFunction & filter_by_nam
String db_name = getDatabaseName();
for (auto & load_result : load_results)
{
auto storage = createStorageDictionary(db_name, load_result);
auto storage = createStorageDictionary(db_name, load_result, getContext());
if (storage)
tables.emplace(storage->getStorageID().table_name, storage);
}
@ -72,7 +74,7 @@ bool DatabaseDictionary::isTableExist(const String & table_name, ContextPtr) con
StoragePtr DatabaseDictionary::tryGetTable(const String & table_name, ContextPtr) const
{
auto load_result = getContext()->getExternalDictionariesLoader().getLoadResult(table_name);
return createStorageDictionary(getDatabaseName(), load_result);
return createStorageDictionary(getDatabaseName(), load_result, getContext());
}

DatabaseTablesIteratorPtr DatabaseDictionary::getTablesIterator(ContextPtr, const FilterByNameFunction & filter_by_table_name)

@ -37,7 +37,6 @@ namespace ErrorCodes
extern const int INCORRECT_FILE_NAME;
extern const int SYNTAX_ERROR;
extern const int TABLE_ALREADY_EXISTS;
extern const int DICTIONARY_ALREADY_EXISTS;
extern const int EMPTY_LIST_OF_COLUMNS_PASSED;
}

@ -63,14 +62,21 @@ std::pair<String, StoragePtr> createTableFromAST(
storage->renameInMemory(ast_create_query);
return {ast_create_query.table, storage};
}
/// We do not directly use `InterpreterCreateQuery::execute`, because
/// - the database has not been loaded yet;
/// - the code is simpler, since the query is already brought to a suitable form.
if (!ast_create_query.columns_list || !ast_create_query.columns_list->columns)
throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);

ColumnsDescription columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true);
ConstraintsDescription constraints = InterpreterCreateQuery::getConstraintsDescription(ast_create_query.columns_list->constraints);
ColumnsDescription columns;
ConstraintsDescription constraints;

if (!ast_create_query.is_dictionary)
{
/// We do not directly use `InterpreterCreateQuery::execute`, because
/// - the database has not been loaded yet;
/// - the code is simpler, since the query is already brought to a suitable form.
if (!ast_create_query.columns_list || !ast_create_query.columns_list->columns)
throw Exception("Missing definition of columns.", ErrorCodes::EMPTY_LIST_OF_COLUMNS_PASSED);

columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true);
constraints = InterpreterCreateQuery::getConstraintsDescription(ast_create_query.columns_list->constraints);
}

return
{
@ -220,10 +226,6 @@ void DatabaseOnDisk::createTable(
/// A race condition would be possible if a table with the same name is simultaneously created using CREATE and using ATTACH.
/// But there is protection from it - see using DDLGuard in InterpreterCreateQuery.

if (isDictionaryExist(table_name))
throw Exception(
ErrorCodes::DICTIONARY_ALREADY_EXISTS, "Dictionary {}.{} already exists", backQuote(getDatabaseName()), backQuote(table_name));

if (isTableExist(table_name, getContext()))
throw Exception(
ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists", backQuote(getDatabaseName()), backQuote(table_name));
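The existence checks above are only safe because of the DDLGuard mentioned in the comment: concurrent CREATE and ATTACH for the same name serialize on a per-name lock, so the check and the actual creation run as one unit. A minimal sketch of that idea (a simplification; the real DDLGuard lives in DatabaseCatalog and is used by InterpreterCreateQuery):

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    /// Simplified per-name lock. Member order matters: `lock` is destroyed
    /// first, so it releases before the shared mutex can be dropped.
    struct NameLock
    {
        std::shared_ptr<std::mutex> mutex;
        std::unique_lock<std::mutex> lock;
    };

    class NameLocks
    {
        std::mutex registry_mutex;
        std::map<std::string, std::shared_ptr<std::mutex>> locks;

    public:
        NameLock acquire(const std::string & name)
        {
            std::shared_ptr<std::mutex> m;
            {
                std::lock_guard<std::mutex> g(registry_mutex);
                auto & slot = locks[name];
                if (!slot)
                    slot = std::make_shared<std::mutex>();
                m = slot;
            }
            return {m, std::unique_lock<std::mutex>(*m)};
        }
    };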
@ -43,13 +43,15 @@ namespace
const String & metadata_path,
bool has_force_restore_data_flag)
{
assert(!query.is_dictionary);
try
{
String table_name;
StoragePtr table;
std::tie(table_name, table)
= createTableFromAST(query, database_name, database.getTableDataPath(query), context, has_force_restore_data_flag);
auto [table_name, table] = createTableFromAST(
query,
database_name,
database.getTableDataPath(query),
context,
has_force_restore_data_flag);

database.attachTable(table_name, table, database.getTableDataPath(query));
}
catch (Exception & e)
@ -61,28 +63,6 @@ namespace
}
}


void tryAttachDictionary(const ASTPtr & query, DatabaseOrdinary & database, const String & metadata_path, ContextPtr context)
{
auto & create_query = query->as<ASTCreateQuery &>();
assert(create_query.is_dictionary);
try
{
Poco::File meta_file(metadata_path);
auto config = getDictionaryConfigurationFromAST(create_query, context, database.getDatabaseName());
time_t modification_time = meta_file.getLastModified().epochTime();
database.attachDictionary(create_query.table, DictionaryAttachInfo{query, config, modification_time});
}
catch (Exception & e)
{
e.addMessage(
"Cannot attach dictionary " + backQuote(database.getDatabaseName()) + "." + backQuote(create_query.table)
+ " from metadata file " + metadata_path + " from query " + serializeAST(*query));
throw;
}
}


void logAboutProgress(Poco::Logger * log, size_t processed, size_t total, AtomicStopwatch & watch)
{
if (processed % PRINT_MESSAGE_EACH_N_OBJECTS == 0 || watch.compareAndRestart(PRINT_MESSAGE_EACH_N_SECONDS))
@ -101,7 +81,7 @@ DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata

DatabaseOrdinary::DatabaseOrdinary(
const String & name_, const String & metadata_path_, const String & data_path_, const String & logger, ContextPtr context_)
: DatabaseWithDictionaries(name_, metadata_path_, data_path_, logger, context_)
: DatabaseOnDisk(name_, metadata_path_, data_path_, logger, context_)
{
}

@ -117,7 +97,7 @@ void DatabaseOrdinary::loadStoredObjects(ContextPtr local_context, bool has_forc

size_t total_dictionaries = 0;

auto process_metadata = [context_weak = ContextWeakPtr(local_context), &file_names, &total_dictionaries, &file_names_mutex, this](
auto process_metadata = [&file_names, &total_dictionaries, &file_names_mutex, this](
const String & file_name)
{
fs::path path(getMetadataPath());
@ -164,7 +144,6 @@ void DatabaseOrdinary::loadStoredObjects(ContextPtr local_context, bool has_forc

AtomicStopwatch watch;
std::atomic<size_t> tables_processed{0};
std::atomic<size_t> dictionaries_processed{0};

ThreadPool pool;

@ -176,23 +155,12 @@ void DatabaseOrdinary::loadStoredObjects(ContextPtr local_context, bool has_forc
/// loading of its config only, it doesn't involve loading the dictionary itself.

/// Attach dictionaries.
for (const auto & [name, query] : file_names)
{
auto create_query = query->as<const ASTCreateQuery &>();
if (create_query.is_dictionary)
{
tryAttachDictionary(query, *this, getMetadataPath() + name, local_context);

/// Messages, so that it's not boring to wait for the server to load for a long time.
logAboutProgress(log, ++dictionaries_processed, total_dictionaries, watch);
}
}

/// Attach tables.
for (const auto & name_with_query : file_names)
{
const auto & create_query = name_with_query.second->as<const ASTCreateQuery &>();
if (!create_query.is_dictionary)

if (create_query.is_dictionary)
{
pool.scheduleOrThrowOnError([&]()
{
tryAttachTable(
@ -206,6 +174,32 @@ void DatabaseOrdinary::loadStoredObjects(ContextPtr local_context, bool has_forc
/// Messages, so that it's not boring to wait for the server to load for a long time.
logAboutProgress(log, ++tables_processed, total_tables, watch);
});
}
}

pool.wait();

/// Attach tables.
for (const auto & name_with_query : file_names)
{
const auto & create_query = name_with_query.second->as<const ASTCreateQuery &>();

if (!create_query.is_dictionary)
{
pool.scheduleOrThrowOnError([&]()
{
tryAttachTable(
local_context,
create_query,
*this,
database_name,
getMetadataPath() + name_with_query.first,
has_force_restore_data_flag);

/// Messages, so that it's not boring to wait for the server to load for a long time.
logAboutProgress(log, ++tables_processed, total_tables, watch);
});
}
}

pool.wait();
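The reordered loops above give startup two phases: everything that other objects may depend on (the dictionary-backed tables) is attached first, the pool is drained, and only then are ordinary tables attached. A self-contained sketch of the same two-phase pattern (`Item` and `attachAll` are illustrative names, not from the patch):

    #include <functional>
    #include <future>
    #include <vector>

    struct Item { bool is_dictionary; std::function<void()> attach; };

    /// Hypothetical two-phase startup mirroring the loops above: the dictionary
    /// phase is attached and fully awaited before the table phase starts.
    void attachAll(const std::vector<Item> & items)
    {
        for (bool dictionaries_phase : {true, false})
        {
            std::vector<std::future<void>> tasks;
            for (const auto & item : items)
                if (item.is_dictionary == dictionaries_phase)
                    tasks.push_back(std::async(std::launch::async, item.attach));
            for (auto & t : tasks)
                t.get(); /// Wait (and rethrow errors) before the next phase.
        }
    }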
@ -1,6 +1,6 @@
#pragma once

#include <Databases/DatabaseWithDictionaries.h>
#include <Databases/DatabaseOnDisk.h>
#include <Common/ThreadPool.h>


@ -11,7 +11,7 @@ namespace DB
* It stores the list of tables in the filesystem as .sql files,
* each containing the declaration of a table as an SQL ATTACH TABLE query.
*/
class DatabaseOrdinary : public DatabaseWithDictionaries
class DatabaseOrdinary : public DatabaseOnDisk
{
public:
DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context);
@ -511,9 +511,10 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
executeQuery(query, query_context, true);
}

size_t dropped_dicts = 0;
size_t moved_tables = 0;
std::vector<UUID> dropped_tables;
size_t dropped_dictionaries = 0;

for (const auto & table_name : tables_to_detach)
{
DDLGuardPtr table_guard = DatabaseCatalog::instance().getDDLGuard(db_name, table_name);
@ -521,17 +522,13 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed, will retry");

auto table = tryGetTable(table_name, getContext());
if (isDictionaryExist(table_name))
{
/// We can safely drop any dictionaries because they do not store data
LOG_DEBUG(log, "Will DROP DICTIONARY {}", backQuoteIfNeed(table_name));
DatabaseAtomic::removeDictionary(getContext(), table_name);
++dropped_dicts;
}
else if (!table->storesDataOnDisk())

if (!table->storesDataOnDisk())
{
LOG_DEBUG(log, "Will DROP TABLE {}, because it does not store data on disk and can be safely dropped", backQuoteIfNeed(table_name));
dropped_tables.push_back(tryGetTableUUID(table_name));
dropped_dictionaries += table->isDictionary();

table->shutdown();
DatabaseAtomic::dropTable(getContext(), table_name, true);
}
@ -550,7 +547,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep

if (!tables_to_detach.empty())
LOG_WARNING(log, "Cleaned {} outdated objects: dropped {} dictionaries and {} tables, moved {} tables",
tables_to_detach.size(), dropped_dicts, dropped_tables.size(), moved_tables);
tables_to_detach.size(), dropped_dictionaries, dropped_tables.size() - dropped_dictionaries, moved_tables);

/// Now database is cleared from outdated tables, let's rename ReplicatedMergeTree tables to actual names
for (const auto & old_to_new : replicated_tables_to_rename)
@ -766,33 +763,6 @@ void DatabaseReplicated::commitAlterTable(const StorageID & table_id,
DatabaseAtomic::commitAlterTable(table_id, table_metadata_tmp_path, table_metadata_path, statement, query_context);
}

void DatabaseReplicated::createDictionary(ContextPtr local_context,
const String & dictionary_name,
const ASTPtr & query)
{
auto txn = local_context->getZooKeeperMetadataTransaction();
assert(!ddl_worker->isCurrentlyActive() || txn);
if (txn && txn->isInitialQuery())
{
String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(dictionary_name);
String statement = getObjectDefinitionFromCreateQuery(query->clone());
txn->addOp(zkutil::makeCreateRequest(metadata_zk_path, statement, zkutil::CreateMode::Persistent));
}
DatabaseAtomic::createDictionary(local_context, dictionary_name, query);
}

void DatabaseReplicated::removeDictionary(ContextPtr local_context, const String & dictionary_name)
{
auto txn = local_context->getZooKeeperMetadataTransaction();
assert(!ddl_worker->isCurrentlyActive() || txn);
if (txn && txn->isInitialQuery())
{
String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(dictionary_name);
txn->addOp(zkutil::makeRemoveRequest(metadata_zk_path, -1));
}
DatabaseAtomic::removeDictionary(local_context, dictionary_name);
}

void DatabaseReplicated::detachTablePermanently(ContextPtr local_context, const String & table_name)
{
auto txn = local_context->getZooKeeperMetadataTransaction();

@ -40,10 +40,6 @@ public:
void commitAlterTable(const StorageID & table_id,
const String & table_metadata_tmp_path, const String & table_metadata_path,
const String & statement, ContextPtr query_context) override;
void createDictionary(ContextPtr context,
const String & dictionary_name,
const ASTPtr & query) override;
void removeDictionary(ContextPtr context, const String & dictionary_name) override;
void detachTablePermanently(ContextPtr context, const String & table_name) override;
void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach) const override;
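The removed createDictionary/removeDictionary overrides above never talk to ZooKeeper directly: on the initial query they append a create or remove request for <zookeeper_path>/metadata/<name> to the current metadata transaction, and the whole batch is applied at the commit point. A generic sketch of that accumulate-then-commit pattern (the real code batches zkutil requests into a single multi-op; this reduction is illustrative only):

    #include <functional>
    #include <vector>

    /// Hypothetical reduction of the pattern above: DDL collects its side
    /// effects into a transaction object instead of executing them one by one,
    /// and everything is applied together at the commit point.
    struct MetadataTransaction
    {
        std::vector<std::function<void()>> ops;

        void addOp(std::function<void()> op) { ops.push_back(std::move(op)); }

        void commit()
        {
            for (auto & op : ops) /// In the real code: one atomic ZK multi-request.
                op();
            ops.clear();
        }
    };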
@ -1,381 +0,0 @@
#include <Databases/DatabaseWithDictionaries.h>
#include <Common/StatusInfo.h>
#include <Common/ExternalLoaderStatus.h>
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Interpreters/ExternalLoaderTempConfigRepository.h>
#include <Interpreters/ExternalLoaderDatabaseConfigRepository.h>
#include <Interpreters/DDLTask.h>
#include <Dictionaries/getDictionaryConfigurationFromAST.h>
#include <Dictionaries/DictionaryStructure.h>
#include <Parsers/ASTCreateQuery.h>
#include <Interpreters/Context.h>
#include <Storages/StorageDictionary.h>
#include <IO/WriteBufferFromFile.h>
#include <Poco/File.h>
#include <boost/smart_ptr/make_shared_object.hpp>


namespace CurrentStatusInfo
{
extern const Status DictionaryStatus;
}

namespace DB
{

namespace ErrorCodes
{
extern const int CANNOT_GET_CREATE_DICTIONARY_QUERY;
extern const int TABLE_ALREADY_EXISTS;
extern const int UNKNOWN_DICTIONARY;
extern const int DICTIONARY_ALREADY_EXISTS;
extern const int FILE_DOESNT_EXIST;
}


void DatabaseWithDictionaries::attachDictionary(const String & dictionary_name, const DictionaryAttachInfo & attach_info)
{
auto dict_id = StorageID(attach_info.create_query);
String internal_name = dict_id.getInternalDictionaryName();
assert(attach_info.create_query->as<const ASTCreateQuery &>().table == dictionary_name);
assert(!dict_id.database_name.empty());
{
std::unique_lock lock(mutex);
auto [it, inserted] = dictionaries.emplace(dictionary_name, attach_info);
if (!inserted)
throw Exception(ErrorCodes::DICTIONARY_ALREADY_EXISTS,
"Dictionary {} already exists.", dict_id.getNameForLogs());

/// Attach the dictionary as table too.
try
{
/// TODO Make StorageDictionary an owner of IDictionary objects.
/// All DDL operations with dictionaries will work with StorageDictionary table,
/// and StorageDictionary will be responsible for loading of DDL dictionaries.
/// ExternalLoaderDatabaseConfigRepository and other hacks related to ExternalLoader
/// will no longer be required.
attachTableUnlocked(
dictionary_name,
StorageDictionary::create(
dict_id,
internal_name,
ExternalDictionariesLoader::getDictionaryStructure(*attach_info.config),
StorageDictionary::Location::SameDatabaseAndNameAsDictionary),
lock);
}
catch (...)
{
dictionaries.erase(it);
throw;
}
}

CurrentStatusInfo::set(CurrentStatusInfo::DictionaryStatus, internal_name, static_cast<Int8>(ExternalLoaderStatus::NOT_LOADED));

/// We want ExternalLoader::reloadConfig() to find out that the dictionary's config
/// has been added and in case `dictionaries_lazy_load == false` to load the dictionary.
reloadDictionaryConfig(internal_name);
}

void DatabaseWithDictionaries::detachDictionary(const String & dictionary_name)
{
DictionaryAttachInfo attach_info;
detachDictionaryImpl(dictionary_name, attach_info);
}

void DatabaseWithDictionaries::detachDictionaryImpl(const String & dictionary_name, DictionaryAttachInfo & attach_info)
{
auto dict_id = StorageID::createEmpty();
String internal_name;

{
std::unique_lock lock(mutex);
auto it = dictionaries.find(dictionary_name);
if (it == dictionaries.end())
throw Exception(ErrorCodes::UNKNOWN_DICTIONARY,
"Dictionary {}.{} doesn't exist.", database_name, dictionary_name);
dict_id = StorageID(it->second.create_query);
internal_name = dict_id.getInternalDictionaryName();
assert(dict_id.table_name == dictionary_name);
assert(!dict_id.database_name.empty());

attach_info = std::move(it->second);
dictionaries.erase(it);

/// Detach the dictionary as table too.
try
{
if (!dict_id.hasUUID())
detachTableUnlocked(dictionary_name, lock);
}
catch (...)
{
dictionaries.emplace(dictionary_name, std::move(attach_info));
throw;
}
}

CurrentStatusInfo::unset(CurrentStatusInfo::DictionaryStatus, internal_name);

/// We want ExternalLoader::reloadConfig() to find out that the dictionary's config
/// has been removed and to unload the dictionary.
reloadDictionaryConfig(internal_name);

if (dict_id.hasUUID())
detachTable(dictionary_name);
}
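detachDictionaryImpl above shows the rollback discipline used throughout this file: the map entry is removed first, and if the follow-up step throws, the entry is put back before rethrowing. The same pattern in isolation (a hypothetical helper, not from this file):

    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    /// Hypothetical reduction of the rollback in detachDictionaryImpl above:
    /// erase the entry, run the follow-up step, and undo the erase on failure.
    template <typename Map, typename Step>
    typename Map::mapped_type eraseWithRollback(Map & map, const typename Map::key_type & key, Step && follow_up)
    {
        auto it = map.find(key);
        if (it == map.end())
            throw std::runtime_error("no such entry");
        auto value = std::move(it->second);
        map.erase(it);
        try
        {
            follow_up();
        }
        catch (...)
        {
            map.emplace(key, std::move(value)); /// Undo the erase, then rethrow.
            throw;
        }
        return value;
    }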
void DatabaseWithDictionaries::createDictionary(ContextPtr local_context, const String & dictionary_name, const ASTPtr & query)
{
const auto & settings = local_context->getSettingsRef();

/** The code is based on the assumption that all threads share the same order of operations:
  * - create the .sql.tmp file;
  * - add the dictionary to ExternalDictionariesLoader;
  * - load the dictionary in case dictionaries_lazy_load == false;
  * - attach the dictionary;
  * - rename .sql.tmp to .sql.
  */

auto dict_id = StorageID(query);
assert(query->as<const ASTCreateQuery &>().table == dictionary_name);
assert(!dict_id.database_name.empty());

/// A race condition would be possible if a dictionary with the same name is simultaneously created using CREATE and using ATTACH.
/// But there is protection from it - see using DDLGuard in InterpreterCreateQuery.
if (isDictionaryExist(dictionary_name))
throw Exception(ErrorCodes::DICTIONARY_ALREADY_EXISTS, "Dictionary {} already exists.", dict_id.getFullTableName());

/// A dictionary with the same full name could be defined in *.xml config files.
if (external_loader.getCurrentStatus(dict_id.getFullNameNotQuoted()) != ExternalLoader::Status::NOT_EXIST)
throw Exception(ErrorCodes::DICTIONARY_ALREADY_EXISTS,
"Dictionary {} already exists.", dict_id.getFullNameNotQuoted());

if (isTableExist(dictionary_name, getContext()))
throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {} already exists.", dict_id.getFullTableName());

String dictionary_metadata_path = getObjectMetadataPath(dictionary_name);
String dictionary_metadata_tmp_path = dictionary_metadata_path + ".tmp";
String statement = getObjectDefinitionFromCreateQuery(query);

{
/// The exclusive flags guarantee that the table is not being created right now in another thread. Otherwise, an exception will be thrown.
WriteBufferFromFile out(dictionary_metadata_tmp_path, statement.size(), O_WRONLY | O_CREAT | O_EXCL);
writeString(statement, out);
out.next();
if (settings.fsync_metadata)
out.sync();
out.close();
}

bool succeeded = false;
bool uuid_locked = false;
SCOPE_EXIT({
if (!succeeded)
{
if (uuid_locked)
DatabaseCatalog::instance().removeUUIDMappingFinally(dict_id.uuid);
Poco::File(dictionary_metadata_tmp_path).remove();
}
});

if (dict_id.uuid != UUIDHelpers::Nil)
{
DatabaseCatalog::instance().addUUIDMapping(dict_id.uuid);
uuid_locked = true;
}

/// Add a temporary repository containing the dictionary.
/// We need this temp repository to try loading the dictionary before actually attaching it to the database.
auto temp_repository = external_loader.addConfigRepository(std::make_unique<ExternalLoaderTempConfigRepository>(
getDatabaseName(), dictionary_metadata_tmp_path, getDictionaryConfigurationFromAST(query->as<const ASTCreateQuery &>(), local_context)));

bool lazy_load = local_context->getConfigRef().getBool("dictionaries_lazy_load", true);
if (!lazy_load)
{
/// load() is called here to force loading the dictionary, wait until the loading is finished,
/// and throw an exception if the loading failed.
external_loader.load(dict_id.getInternalDictionaryName());
}

auto config = getDictionaryConfigurationFromAST(query->as<const ASTCreateQuery &>(), local_context);
attachDictionary(dictionary_name, DictionaryAttachInfo{query, config, time(nullptr)});
SCOPE_EXIT({
if (!succeeded)
detachDictionary(dictionary_name);
});

auto txn = local_context->getZooKeeperMetadataTransaction();
if (txn && !local_context->isInternalSubquery())
txn->commit(); /// Commit point (a sort of) for Replicated database

/// If it was an ATTACH query and the file with dictionary metadata already exists
/// (so, ATTACH is done after DETACH), then the rename atomically replaces the old file with the new one.
Poco::File(dictionary_metadata_tmp_path).renameTo(dictionary_metadata_path);

/// ExternalDictionariesLoader doesn't know we renamed the metadata path.
/// That's why we have to call ExternalLoader::reloadConfig() here.
reloadDictionaryConfig(dict_id.getInternalDictionaryName());

/// Everything's ok.
succeeded = true;
}
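The function above implements the .sql.tmp protocol from its opening comment: the definition is written to a temporary file (opened with O_EXCL, so a concurrent writer fails), optionally fsync'ed, and renamed to the final .sql name only after every other step succeeded, so a crash can never leave a half-written metadata file behind. A minimal sketch of the write-then-rename step using only the standard library (the real code uses WriteBufferFromFile and honors the fsync_metadata setting):

    #include <filesystem>
    #include <fstream>
    #include <string>

    /// Sketch of the atomic metadata write above: NAME.sql.tmp first,
    /// then one rename. The rename is atomic on POSIX filesystems.
    void writeMetadataAtomically(const std::filesystem::path & final_path, const std::string & statement)
    {
        auto tmp_path = final_path;
        tmp_path += ".tmp";
        {
            std::ofstream out(tmp_path, std::ios::binary | std::ios::trunc);
            out << statement;
            out.flush(); /// The real code can additionally fsync here.
        }
        std::filesystem::rename(tmp_path, final_path);
    }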
void DatabaseWithDictionaries::removeDictionary(ContextPtr local_context, const String & dictionary_name)
{
DictionaryAttachInfo attach_info;
detachDictionaryImpl(dictionary_name, attach_info);

try
{
String dictionary_metadata_path = getObjectMetadataPath(dictionary_name);

auto txn = local_context->getZooKeeperMetadataTransaction();
if (txn && !local_context->isInternalSubquery())
txn->commit(); /// Commit point (a sort of) for Replicated database

Poco::File(dictionary_metadata_path).remove();
CurrentStatusInfo::unset(CurrentStatusInfo::DictionaryStatus,
StorageID(attach_info.create_query).getInternalDictionaryName());
}
catch (...)
{
/// If the remove was not possible for some reason
attachDictionary(dictionary_name, attach_info);
throw;
}

UUID dict_uuid = attach_info.create_query->as<ASTCreateQuery>()->uuid;
if (dict_uuid != UUIDHelpers::Nil)
DatabaseCatalog::instance().removeUUIDMappingFinally(dict_uuid);
}

DatabaseDictionariesIteratorPtr DatabaseWithDictionaries::getDictionariesIterator(const FilterByNameFunction & filter_by_dictionary_name)
{
std::lock_guard lock(mutex);
DictionariesWithID filtered_dictionaries;
for (const auto & dictionary : dictionaries)
{
if (filter_by_dictionary_name && !filter_by_dictionary_name(dictionary.first))
continue;
filtered_dictionaries.emplace_back();
filtered_dictionaries.back().first = dictionary.first;
filtered_dictionaries.back().second = dictionary.second.create_query->as<const ASTCreateQuery &>().uuid;
}
return std::make_unique<DatabaseDictionariesSnapshotIterator>(std::move(filtered_dictionaries), database_name);
}

bool DatabaseWithDictionaries::isDictionaryExist(const String & dictionary_name) const
{
std::lock_guard lock(mutex);
return dictionaries.find(dictionary_name) != dictionaries.end();
}

ASTPtr DatabaseWithDictionaries::getCreateDictionaryQueryImpl(
const String & dictionary_name,
bool throw_on_error) const
{
{
/// Try to get the create query for an attached dictionary.
std::lock_guard lock{mutex};
auto it = dictionaries.find(dictionary_name);
if (it != dictionaries.end())
{
ASTPtr ast = it->second.create_query->clone();
auto & create_query = ast->as<ASTCreateQuery &>();
create_query.attach = false;
create_query.database = database_name;
return ast;
}
}

/// Try to get the create query for a non-attached dictionary.
ASTPtr ast;
try
{
auto dictionary_metadata_path = getObjectMetadataPath(dictionary_name);
ast = getCreateQueryFromMetadata(dictionary_metadata_path, throw_on_error);
}
catch (const Exception & e)
{
if (throw_on_error && (e.code() != ErrorCodes::FILE_DOESNT_EXIST))
throw;
}

if (ast)
{
const auto * create_query = ast->as<const ASTCreateQuery>();
if (create_query && create_query->is_dictionary)
return ast;
}
if (throw_on_error)
throw Exception{"Dictionary " + backQuote(dictionary_name) + " doesn't exist",
ErrorCodes::CANNOT_GET_CREATE_DICTIONARY_QUERY};
return nullptr;
}

Poco::AutoPtr<Poco::Util::AbstractConfiguration> DatabaseWithDictionaries::getDictionaryConfiguration(const String & dictionary_name) const
{
std::lock_guard lock(mutex);
auto it = dictionaries.find(dictionary_name);
if (it != dictionaries.end())
return it->second.config;
throw Exception("Dictionary " + backQuote(dictionary_name) + " doesn't exist", ErrorCodes::UNKNOWN_DICTIONARY);
}

time_t DatabaseWithDictionaries::getObjectMetadataModificationTime(const String & object_name) const
{
{
std::lock_guard lock(mutex);
auto it = dictionaries.find(object_name);
if (it != dictionaries.end())
return it->second.modification_time;
}
return DatabaseOnDisk::getObjectMetadataModificationTime(object_name);
}


bool DatabaseWithDictionaries::empty() const
{
std::lock_guard lock{mutex};
return tables.empty() && dictionaries.empty();
}

void DatabaseWithDictionaries::reloadDictionaryConfig(const String & full_name)
{
/// Ensure that this database is attached to ExternalLoader as a config repository.
if (!database_as_config_repo_for_external_loader.load())
{
auto repository = std::make_unique<ExternalLoaderDatabaseConfigRepository>(*this, getContext());
auto remove_repository_callback = external_loader.addConfigRepository(std::move(repository));
database_as_config_repo_for_external_loader = boost::make_shared<ext::scope_guard>(std::move(remove_repository_callback));
}

external_loader.reloadConfig(getDatabaseName(), full_name);
}


void DatabaseWithDictionaries::shutdown()
{
{
std::lock_guard lock(mutex);
dictionaries.clear();
}

/// Invoke removing the database from ExternalLoader.
database_as_config_repo_for_external_loader = nullptr;

DatabaseOnDisk::shutdown();
}


DatabaseWithDictionaries::DatabaseWithDictionaries(
const String & name, const String & metadata_path_, const String & data_path_, const String & logger, ContextPtr context_)
: DatabaseOnDisk(name, metadata_path_, data_path_, logger, context_)
, external_loader(context_->getExternalDictionariesLoader())
{
}

DatabaseWithDictionaries::~DatabaseWithDictionaries() = default;

}
@ -1,55 +0,0 @@
#pragma once
#include <Databases/DatabaseOnDisk.h>
#include <boost/smart_ptr/atomic_shared_ptr.hpp>
#include <ext/scope_guard.h>

namespace DB
{

class Context;
class ExternalDictionariesLoader;


class DatabaseWithDictionaries : public DatabaseOnDisk
{
public:
void attachDictionary(const String & dictionary_name, const DictionaryAttachInfo & attach_info) override;

void detachDictionary(const String & dictionary_name) override;

void createDictionary(ContextPtr context,
const String & dictionary_name,
const ASTPtr & query) override;

void removeDictionary(ContextPtr context, const String & dictionary_name) override;

bool isDictionaryExist(const String & dictionary_name) const override;

DatabaseDictionariesIteratorPtr getDictionariesIterator(const FilterByNameFunction & filter_by_dictionary_name) override;

Poco::AutoPtr<Poco::Util::AbstractConfiguration> getDictionaryConfiguration(const String & /*name*/) const override;

time_t getObjectMetadataModificationTime(const String & object_name) const override;

bool empty() const override;

void shutdown() override;

~DatabaseWithDictionaries() override;

protected:
DatabaseWithDictionaries(const String & name, const String & metadata_path_, const String & data_path_, const String & logger, ContextPtr context);

ASTPtr getCreateDictionaryQueryImpl(const String & dictionary_name, bool throw_on_error) const override;

std::unordered_map<String, DictionaryAttachInfo> dictionaries;
const ExternalDictionariesLoader & external_loader;

private:
void detachDictionaryImpl(const String & dictionary_name, DictionaryAttachInfo & attach_info);
void reloadDictionaryConfig(const String & full_name);

boost::atomic_shared_ptr<ext::scope_guard> database_as_config_repo_for_external_loader;
};

}
@ -29,7 +29,6 @@ namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int CANNOT_GET_CREATE_TABLE_QUERY;
extern const int CANNOT_GET_CREATE_DICTIONARY_QUERY;
}

class IDatabaseTablesIterator
@ -95,38 +94,7 @@ public:
const StoragePtr & table() const override { return it->second; }
};

/// Copies the list of dictionaries and iterates over that snapshot.
class DatabaseDictionariesSnapshotIterator
{
private:
DictionariesWithID dictionaries;
DictionariesWithID::iterator it;
String database_name;

public:
DatabaseDictionariesSnapshotIterator() = default;
DatabaseDictionariesSnapshotIterator(DictionariesWithID & dictionaries_, const String & database_name_)
: dictionaries(dictionaries_), it(dictionaries.begin()), database_name(database_name_)
{
}
DatabaseDictionariesSnapshotIterator(DictionariesWithID && dictionaries_, const String & database_name_)
: dictionaries(dictionaries_), it(dictionaries.begin()), database_name(database_name_)
{
}

void next() { ++it; }

bool isValid() const { return !dictionaries.empty() && it != dictionaries.end(); }

const String & name() const { return it->first; }

const UUID & uuid() const { return it->second; }

const String & databaseName() const { assert(!database_name.empty()); return database_name; }
};

using DatabaseTablesIteratorPtr = std::unique_ptr<IDatabaseTablesIterator>;
using DatabaseDictionariesIteratorPtr = std::unique_ptr<DatabaseDictionariesSnapshotIterator>;

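For reference, the snapshot iterator declared above is consumed like this (an illustrative fragment; `database` stands for any IDatabase). Because the iterator owns a copy of the list, concurrent DDL cannot invalidate the traversal:

    /// Illustrative loop over the accessors declared above.
    for (auto it = database.getDictionariesIterator(); it->isValid(); it->next())
    {
        const String & dictionary_name = it->name();
        const UUID & dictionary_uuid = it->uuid();
        /// ... use dictionary_name / dictionary_uuid ...
    }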
/** Database engine.
@ -158,12 +126,6 @@ public:
/// Check the existence of the table.
virtual bool isTableExist(const String & name, ContextPtr context) const = 0;

/// Check the existence of the dictionary
virtual bool isDictionaryExist(const String & /*name*/) const
{
return false;
}

/// Get the table for work. Return nullptr if there is no table.
virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0;

@ -175,12 +137,6 @@ public:
/// It is possible to have "hidden" tables that are not visible when passing through, but are visible if you get them by name using the functions above.
virtual DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name = {}) = 0;

/// Get an iterator to pass through all the dictionaries.
virtual DatabaseDictionariesIteratorPtr getDictionariesIterator([[maybe_unused]] const FilterByNameFunction & filter_by_dictionary_name = {})
{
return std::make_unique<DatabaseDictionariesSnapshotIterator>();
}

/// Is the database empty.
virtual bool empty() const = 0;

@ -194,15 +150,6 @@ public:
throw Exception("There is no CREATE TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Add the dictionary to the database. Record its presence in the metadata.
virtual void createDictionary(
ContextPtr /*context*/,
const String & /*dictionary_name*/,
const ASTPtr & /*query*/)
{
throw Exception("There is no CREATE DICTIONARY query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Delete the table from the database, drop table and delete the metadata.
virtual void dropTable(
ContextPtr /*context*/,
@ -212,14 +159,6 @@ public:
throw Exception("There is no DROP TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Delete the dictionary from the database. Delete the metadata.
virtual void removeDictionary(
ContextPtr /*context*/,
const String & /*dictionary_name*/)
{
throw Exception("There is no DROP DICTIONARY query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Add a table to the database, but do not add it to the metadata. The database may not support this method.
///
/// Note: ATTACH TABLE statement actually uses createTable method.
@ -228,25 +167,12 @@ public:
throw Exception("There is no ATTACH TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Add dictionary to the database, but do not add it to the metadata. The database may not support this method.
/// If dictionaries_lazy_load is false it also starts loading the dictionary asynchronously.
virtual void attachDictionary(const String & /* dictionary_name */, const DictionaryAttachInfo & /* attach_info */)
{
throw Exception("There is no ATTACH DICTIONARY query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Forget about the table without deleting it, and return it. The database may not support this method.
virtual StoragePtr detachTable(const String & /*name*/)
{
throw Exception("There is no DETACH TABLE query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Forget about the dictionary without deleting it. The database may not support this method.
virtual void detachDictionary(const String & /*name*/)
{
throw Exception("There is no DETACH DICTIONARY query for Database" + getEngineName(), ErrorCodes::NOT_IMPLEMENTED);
}

/// Forget about the table without deleting its data, but rename the metadata file to prevent reloading it
/// with the next restart. The database may not support this method.
virtual void detachTablePermanently(ContextPtr /*context*/, const String & /*name*/)
@ -295,22 +221,6 @@ public:
return getCreateTableQueryImpl(name, context, true);
}

/// Get the CREATE DICTIONARY query for the dictionary. Returns nullptr if the dictionary doesn't exist.
ASTPtr tryGetCreateDictionaryQuery(const String & name) const noexcept
{
return getCreateDictionaryQueryImpl(name, false);
}

ASTPtr getCreateDictionaryQuery(const String & name) const
{
return getCreateDictionaryQueryImpl(name, true);
}

virtual Poco::AutoPtr<Poco::Util::AbstractConfiguration> getDictionaryConfiguration(const String & /*name*/) const
{
throw Exception(getEngineName() + ": getDictionaryConfiguration() is not supported", ErrorCodes::NOT_IMPLEMENTED);
}

/// Get the CREATE DATABASE query for current database.
virtual ASTPtr getCreateDatabaseQuery() const = 0;

@ -364,13 +274,6 @@ protected:
return nullptr;
}

virtual ASTPtr getCreateDictionaryQueryImpl(const String & /*name*/, bool throw_on_error) const
{
if (throw_on_error)
throw Exception("There is no SHOW CREATE DICTIONARY query for Database" + getEngineName(), ErrorCodes::CANNOT_GET_CREATE_DICTIONARY_QUERY);
return nullptr;
}

mutable std::mutex mutex;
String database_name;
};