Merge branch 'master' into fix-netloc

Alexey Milovidov 2020-08-07 22:57:46 +03:00
commit 3c3350451e
44 changed files with 295 additions and 90 deletions

.gitignore vendored (1 changed line)

@ -79,6 +79,7 @@ configure-stamp
 *.bin
 *.mrk
 *.mrk2
+*.mrk3
 .dupload.conf


@ -10,6 +10,7 @@ ClickHouse is an open-source column-oriented database management system that all
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
 * [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
 * [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
 * [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
+* [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
 * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.


@ -1,9 +1,9 @@
 # This strings autochanged from release_lib.sh:
-SET(VERSION_REVISION 54437)
+SET(VERSION_REVISION 54438)
 SET(VERSION_MAJOR 20)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH d64e51d1a78c1b53c33915ca0f75c97b2333844f)
-SET(VERSION_DESCRIBE v20.7.1.1-prestable)
-SET(VERSION_STRING 20.7.1.1)
+SET(VERSION_GITHASH 5d60ab33a511efd149c7c3de77c0dd4b81e65b13)
+SET(VERSION_DESCRIBE v20.8.1.1-prestable)
+SET(VERSION_STRING 20.8.1.1)
 # end of autochange

debian/changelog vendored (4 changed lines)

@ -1,5 +1,5 @@
-clickhouse (20.7.1.1) unstable; urgency=low
+clickhouse (20.8.1.1) unstable; urgency=low
 
   * Modified source code
 
- -- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 13 Jul 2020 18:25:58 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru> Fri, 07 Aug 2020 21:45:46 +0300


@ -1,7 +1,7 @@
 FROM ubuntu:18.04
 
 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*
 
 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \


@ -21,7 +21,7 @@ RUN apt-get --allow-unauthenticated update -y \
 # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
 # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
 # Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/dpkg-deb
+RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb
 RUN chmod +x dpkg-deb
 RUN cp dpkg-deb /usr/bin


@ -1,7 +1,7 @@
 FROM ubuntu:20.04
 
 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*
 ARG gosu_ver=1.10
 
 RUN apt-get update \


@ -1,7 +1,7 @@
 FROM ubuntu:18.04
 
 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*
 
 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \


@ -179,10 +179,10 @@ def advanceRowAnchor():
     return currentRowAnchor()
 
-def tr(x):
-    a = advanceRowAnchor()
+def tr(x, anchor=None):
     #return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
-    return '<tr id={a}>{x}</tr>'.format(a=a, x=str(x))
+    anchor = anchor if anchor else advanceRowAnchor()
+    return f'<tr id={anchor}>{x}</tr>'
 
 def td(value, cell_attributes = ''):
     return '<td {cell_attributes}>{value}</td>'.format(
@ -192,12 +192,14 @@ def td(value, cell_attributes = ''):
 def th(x):
     return '<th>' + str(x) + '</th>'
 
-def tableRow(cell_values, cell_attributes = []):
-    return tr(''.join([td(v, a)
-        for v, a in itertools.zip_longest(
-            cell_values, cell_attributes,
-            fillvalue = '')
-        if a is not None and v is not None]))
+def tableRow(cell_values, cell_attributes = [], anchor=None):
+    return tr(
+        ''.join([td(v, a)
+            for v, a in itertools.zip_longest(
+                cell_values, cell_attributes,
+                fillvalue = '')
+            if a is not None and v is not None]),
+        anchor)
 
 def tableHeader(r):
     return tr(''.join([th(f) for f in r]))
@ -291,8 +293,8 @@ def add_errors_explained():
     if not errors_explained:
         return
 
-    text = tableStart('Error summary')
-    text += '<a name="fail1"/>'
+    text = '<a name="fail1"/>'
+    text += tableStart('Error summary')
     text += tableHeader(['Description'])
     for row in errors_explained:
         text += tableRow(row)
@ -342,19 +344,20 @@ if args.report == 'main':
     text += tableHeader(columns)
 
     attrs = ['' for c in columns]
     for row in rows:
+        anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}'
         if float(row[1]) > 0.10:
             attrs[1] = f'style="background: {color_bad}"'
             unstable_partial_queries += 1
-            errors_explained.append([f'<a href="#{nextRowAnchor()}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
+            errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
         else:
             attrs[1] = ''
 
         if float(row[0]) > allowed_single_run_time:
             attrs[0] = f'style="background: {color_bad}"'
-            errors_explained.append([f'<a href="#{nextRowAnchor()}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run} seconds"</a>'])
+            errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run} seconds"</a>'])
             slow_average_tests += 1
         else:
             attrs[0] = ''
 
-        text += tableRow(row, attrs)
+        text += tableRow(row, attrs, anchor)
 
     text += tableEnd()
     tables.append(text)
@ -385,6 +388,7 @@ if args.report == 'main':
     attrs = ['' for c in columns]
     attrs[5] = None
     for row in rows:
+        anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}'
         if int(row[5]):
             if float(row[3]) < 0.:
                 faster_queries += 1
@ -392,11 +396,11 @@ if args.report == 'main':
             else:
                 slower_queries += 1
                 attrs[2] = attrs[3] = f'style="background: {color_bad}"'
-                errors_explained.append([f'<a href="#{nextRowAnchor()}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
+                errors_explained.append([f'<a href="#{anchor}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
         else:
             attrs[2] = attrs[3] = ''
 
-        text += tableRow(row, attrs)
+        text += tableRow(row, attrs, anchor)
 
     text += tableEnd()
     tables.append(text)
@ -429,13 +433,14 @@ if args.report == 'main':
     attrs = ['' for c in columns]
     attrs[4] = None
     for r in unstable_rows:
+        anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}'
         if int(r[4]):
             very_unstable_queries += 1
             attrs[3] = f'style="background: {color_bad}"'
         else:
             attrs[3] = ''
 
-        text += tableRow(r, attrs)
+        text += tableRow(r, attrs, anchor)
 
     text += tableEnd()
     tables.append(text)
@ -477,14 +482,14 @@ if args.report == 'main':
             # FIXME should be 15s max -- investigate parallel_insert
             slow_average_tests += 1
             attrs[6] = f'style="background: {color_bad}"'
-            errors_explained.append([f'<a href="./all-queries.html#all-query-times.0">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
+            errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
         else:
             attrs[6] = ''
 
         if float(r[5]) > allowed_single_run_time * total_runs:
             slow_average_tests += 1
             attrs[5] = f'style="background: {color_bad}"'
-            errors_explained.append([f'<a href="./all-queries.html#all-query-times.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
+            errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
         else:
             attrs[5] = ''
@ -659,6 +664,7 @@ elif args.report == 'all-queries':
 
     attrs[0] = None
     attrs[1] = None
    for r in rows:
+        anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}'
         if int(r[1]):
             attrs[6] = f'style="background: {color_bad}"'
         else:
@ -679,7 +685,7 @@ elif args.report == 'all-queries':
             attrs[2] = ''
             attrs[3] = ''
 
-        text += tableRow(r, attrs)
+        text += tableRow(r, attrs, anchor)
 
     text += tableEnd()
     tables.append(text)


@ -277,8 +277,4 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed
 
-``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
-```
-
 [Original article](https://clickhouse.tech/docs/en/query_language/system/) <!--hide-->


@ -3,4 +3,16 @@ toc_folder_title: Integrations
 toc_priority: 30
 ---
 
+# Table Engines for Integration {#table-engines-for-integrations}
+
+For integration with external systems, ClickHouse provides a variety of means, including table engines. Integration engines are configured with `CREATE TABLE` or `ALTER TABLE` queries, just like other table engines. From the user's point of view, a configured integration looks like a normal table, but queries to it are proxied to the external system. This transparent querying is one of the key advantages of this approach over alternative integration methods such as external dictionaries or table functions, which require custom access methods on each use.
+
+The list of supported integrations:
+
+- [ODBC](../../../engines/table-engines/integrations/odbc.md)
+- [JDBC](../../../engines/table-engines/integrations/jdbc.md)
+- [MySQL](../../../engines/table-engines/integrations/mysql.md)
+- [HDFS](../../../engines/table-engines/integrations/hdfs.md)
+- [Kafka](../../../engines/table-engines/integrations/kafka.md)
+
+[Original article](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/) <!--hide-->


@ -1,6 +1,6 @@
 # ReplacingMergeTree {#replacingmergetree}
 
-The engine differs from [MergeTree](mergetree.md#table_engines-mergetree) in that it removes duplicate entries that have the same primary key value (more precisely, the same [sorting key](mergetree.md) value).
+The engine differs from [MergeTree](mergetree.md#table_engines-mergetree) in that it removes duplicate entries that have the same [sorting key](mergetree.md) value.
 
 Data deduplication occurs only during merges. Merges happen in the background at an unknown point in time that you cannot rely on. Some of the data may remain unprocessed. Although you can trigger an unscheduled merge with the `OPTIMIZE` query, do not count on it, because `OPTIMIZE` reads and writes a large amount of data.
@ -27,7 +27,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 - `ver` — the column with the version; its type is `UInt*`, `Date`, or `DateTime`. Optional parameter.
 
-During a merge, out of all rows with the same primary key value, `ReplacingMergeTree` keeps only one:
+During a merge, out of all rows with the same sorting key value, `ReplacingMergeTree` keeps only one:
 
 - The last one in the selection, if `ver` is not set.
 - The one with the maximum version, if `ver` is set.
@ -40,7 +40,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 <summary>Deprecated method for creating a table</summary>
 
-!!! attention "Attention"
+!!! attention "Внимание"
     Do not use this method in new projects and, if possible, switch old projects to the method described above.
 
 ``` sql


@ -3,4 +3,14 @@ toc_folder_title: Special
 toc_priority: 31
 ---
 
+# Special Table Engines {#special-table-engines}
+
+There are three main categories of table engines:
+
+- The [MergeTree family](../../../engines/table-engines/mergetree-family/index.md) for the main production use.
+- The [Log family](../../../engines/table-engines/log-family/index.md) for small temporary data.
+- [Table engines for integration](../../../engines/table-engines/integrations/index.md).
+
+The remaining table engines are unique in their purpose and are not yet grouped into families, which is why they are placed in this special category.
+
+[Original article](https://clickhouse.tech/docs/ru/engines/table-engines/special/) <!--hide-->


@ -7,7 +7,7 @@ ClickHouse can accept (`INSERT`) and return (`SELECT
 
 | Format                                                          | INSERT | SELECT |
 |-----------------------------------------------------------------|--------|--------|
 | [TabSeparated](#tabseparated)                                   | ✔      | ✔      |
-| [TabSeparatedRaw](#tabseparatedraw)                             | ✗      | ✔      |
+| [TabSeparatedRaw](#tabseparatedraw)                             | ✔      | ✔      |
 | [TabSeparatedWithNames](#tabseparatedwithnames)                 | ✔      | ✔      |
 | [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔      | ✔      |
 | [Template](#format-template)                                    | ✔      | ✔      |
@ -132,7 +132,7 @@ SELECT * FROM nestedt FORMAT TSV
 ## TabSeparatedRaw {#tabseparatedraw}
 
 Differs from the `TabSeparated` format in that rows are written without escaping.
-This format is only suitable for outputting a query result, not for parsing (retrieving data to insert into a table).
+When using this format, make sure that fields do not contain tab or line-break characters.
 
 This format is also available under the name `TSVRaw`.


@ -4,4 +4,16 @@ toc_folder_title: "\u041E\u0442 \u0441\u0442\u043E\u0440\u043E\u043D\u043D\u0438
 toc_priority: 24
 ---
 
+# Third-Party Interfaces {#third-party-interfaces}
+
+This section contains a list of third-party interfaces for ClickHouse. These can be visual interfaces, command-line interfaces, or APIs:
+
+- [Client libraries](../../interfaces/third-party/client-libraries.md)
+- [Integrations](../../interfaces/third-party/integrations.md)
+- [GUI](../../interfaces/third-party/gui.md)
+- [Proxies](../../interfaces/third-party/proxy.md)
+
+!!! note "Note"
+    Universal tools that support a common API, such as [ODBC](../../interfaces/odbc.md) or [JDBC](../../interfaces/jdbc.md), also work with ClickHouse.
+
+[Original article](https://clickhouse.tech/docs/ru/interfaces/third-party/) <!--hide-->


@ -235,14 +235,10 @@ SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
 Initializes the replication queue from ZooKeeper data in the same way as when attaching a table. For a short time the table will be unavailable for any operations.
 
 ``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
+SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 ```
 
 ### RESTART REPLICAS {#query_language-system-restart-replicas}
 
 Reinitializes the ZooKeeper session state for all `ReplicatedMergeTree` tables, compares the current state against ZooKeeper as the source of truth, and adds tasks to the ZooKeeper queue if needed.
 
-``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
-```
-
 [Original article](https://clickhouse.tech/docs/ru/query_language/system/) <!--hide-->


@ -280,8 +280,4 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 Resets the ZooKeeper session state for all `ReplicatedMergeTree` tables. The operation compares the current state against ZooKeeper as the reference and, if needed, adds tasks to the ZooKeeper queue.
 
-``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
-```
-
 [Original article](https://clickhouse.tech/docs/en/query_language/system/) <!--hide-->

programs/server/.gitignore vendored (new file, 11 lines)

@ -0,0 +1,11 @@
+/access
+/dictionaries_lib
+/flags
+/format_schemas
+/metadata_dropped
+/preprocessed_configs
+/tmp
+/user_files
+status


@ -1,5 +1,3 @@
-*.bin
-*.mrk
 *.txt
 *.dat
 *.idx


@ -1,2 +0,0 @@
-*.bin
-*.mrk

programs/server/metadata/.gitignore vendored (new file, 1 line)

@ -0,0 +1 @@
+*.sql


@ -1,6 +1,7 @@
 #pragma once
 
 #include <cstddef>
+#include <cassert>
 #include <type_traits>
 
 #include <common/defines.h>
@ -11,6 +12,7 @@
  */
 inline unsigned int bitScanReverse(unsigned int x)
 {
+    assert(x != 0);
     return sizeof(unsigned int) * 8 - 1 - __builtin_clz(x);
 }
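For context: `__builtin_clz` is undefined when its argument is zero, which is what the added `assert(x != 0)` guards against. A minimal standalone sketch of the same function (assuming a GCC/Clang-style compiler; not part of the diff):

``` cpp
#include <cassert>

/// Standalone copy of the function above; __builtin_clz(0) is undefined
/// behaviour, hence the assert added by this commit.
inline unsigned int bitScanReverse(unsigned int x)
{
    assert(x != 0);
    return sizeof(unsigned int) * 8 - 1 - __builtin_clz(x);
}

int main()
{
    assert(bitScanReverse(1) == 0);            /// lowest set bit -> position 0
    assert(bitScanReverse(0x80000000u) == 31); /// highest set bit -> position 31
}
```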


@ -21,6 +21,15 @@
 #include <Common/PODArray_fwd.h>
 
+/** Whether we can use memcpy instead of a loop with assignment to T from U.
+  * It is Ok if types are the same. And if types are integral and of the same size,
+  * example: char, signed char, unsigned char.
+  * It's not Ok for int and float.
+  * Don't forget to apply std::decay when using this constexpr.
+  */
+template <typename T, typename U>
+constexpr bool memcpy_can_be_used_for_assignment = std::is_same_v<T, U>
+    || (std::is_integral_v<T> && std::is_integral_v<U> && sizeof(T) == sizeof(U));
+
 namespace DB
 {
@ -313,7 +322,15 @@ public:
         insert(from_begin, from_end);
     }
 
-    PODArray(std::initializer_list<T> il) : PODArray(std::begin(il), std::end(il)) {}
+    PODArray(std::initializer_list<T> il)
+    {
+        this->reserve(std::size(il));
+        for (const auto & x : il)
+        {
+            this->push_back(x);
+        }
+    }
 
     PODArray(PODArray && other)
     {
@ -428,17 +445,21 @@ public:
     void insertSmallAllowReadWriteOverflow15(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
     {
         static_assert(pad_right_ >= 15);
+        static_assert(sizeof(T) == sizeof(*from_begin));
+
         insertPrepare(from_begin, from_end, std::forward<TAllocatorParams>(allocator_params)...);
         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
         memcpySmallAllowReadWriteOverflow15(this->c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end += bytes_to_copy;
     }
 
     /// Do not insert into the array a piece of itself. Because with the resize, the iterators on themselves can be invalidated.
     template <typename It1, typename It2>
     void insert(iterator it, It1 from_begin, It2 from_end)
     {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
+
         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
-        size_t bytes_to_move = (end() - it) * sizeof(T);
+        size_t bytes_to_move = this->byte_size(end() - it);
 
         insertPrepare(from_begin, from_end);
@ -446,12 +467,15 @@ public:
             memcpy(this->c_end + bytes_to_copy - bytes_to_move, this->c_end - bytes_to_move, bytes_to_move);
 
         memcpy(this->c_end - bytes_to_move, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end += bytes_to_copy;
     }
 
     template <typename It1, typename It2>
     void insert_assume_reserved(It1 from_begin, It2 from_end)
     {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
+
         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
         memcpy(this->c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end += bytes_to_copy;
@ -584,12 +608,15 @@ public:
     template <typename It1, typename It2, typename... TAllocatorParams>
     void assign(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
     {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
+
         size_t required_capacity = from_end - from_begin;
         if (required_capacity > this->capacity())
             this->reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward<TAllocatorParams>(allocator_params)...);
 
         size_t bytes_to_copy = this->byte_size(required_capacity);
         memcpy(this->c_start, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end = this->c_start + bytes_to_copy;
     }
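The `memcpy_can_be_used_for_assignment` trait introduced above can be exercised in isolation. A minimal standalone sketch (the `static_assert` cases are illustrative, not taken from the commit):

``` cpp
#include <type_traits>

/// Same-size integral types share a representation for this purpose, so
/// memcpy can stand in for element-wise assignment; int/float cannot.
template <typename T, typename U>
constexpr bool memcpy_can_be_used_for_assignment = std::is_same_v<T, U>
    || (std::is_integral_v<T> && std::is_integral_v<U> && sizeof(T) == sizeof(U));

static_assert(memcpy_can_be_used_for_assignment<char, unsigned char>);
static_assert(!memcpy_can_be_used_for_assignment<int, float>);
/// Apply std::decay first, as the comment above advises: iterators often
/// dereference to references.
static_assert(memcpy_can_be_used_for_assignment<std::decay_t<const int &>, int>);

int main() {}
```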


@ -13,6 +13,7 @@
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeNullable.h>
+#include <DataTypes/DataTypeNothing.h>
 #include <DataTypes/DataTypeDateTime.h>
 #include <DataTypes/DataTypeDateTime64.h>
 #include <DataTypes/DataTypeDate.h>
@ -884,11 +885,18 @@ private:
         if (tuple_size != typeid_cast<const DataTypeTuple &>(*c1.type).getElements().size())
             throw Exception("Cannot compare tuples of different sizes.", ErrorCodes::BAD_ARGUMENTS);
 
         auto & res = block.getByPosition(result);
+        if (res.type->onlyNull())
+        {
+            res.column = res.type->createColumnConstWithDefaultValue(input_rows_count);
+            return;
+        }
+
         ColumnsWithTypeAndName x(tuple_size);
         ColumnsWithTypeAndName y(tuple_size);
 
-        auto x_const = checkAndGetColumnConst<ColumnTuple>(c0.column.get());
-        auto y_const = checkAndGetColumnConst<ColumnTuple>(c1.column.get());
+        const auto * x_const = checkAndGetColumnConst<ColumnTuple>(c0.column.get());
+        const auto * y_const = checkAndGetColumnConst<ColumnTuple>(c1.column.get());
 
         Columns x_columns;
         Columns y_columns;
@ -1135,17 +1143,22 @@ public:
                 FunctionComparison<Op, Name>::create(context)));
 
         bool has_nullable = false;
+        bool has_null = false;
 
         size_t size = left_tuple->getElements().size();
         for (size_t i = 0; i < size; ++i)
         {
             ColumnsWithTypeAndName args = {{nullptr, left_tuple->getElements()[i], ""},
                                            {nullptr, right_tuple->getElements()[i], ""}};
-            has_nullable = has_nullable || adaptor.build(args)->getReturnType()->isNullable();
+            auto element_type = adaptor.build(args)->getReturnType();
+            has_nullable = has_nullable || element_type->isNullable();
+            has_null = has_null || element_type->onlyNull();
         }
 
         /// If any element comparison is nullable, return type will also be nullable.
         /// We useDefaultImplementationForNulls, but it doesn't work for tuples.
+        if (has_null)
+            return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
+
         if (has_nullable)
             return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeUInt8>());
     }
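The return-type rule added above (any always-NULL element comparison makes the whole tuple comparison `Nullable(Nothing)`) can be modeled without the DataType machinery. A simplified sketch with a hypothetical `Type` enum standing in for ClickHouse's data types:

``` cpp
#include <cassert>
#include <vector>

enum class Type { UInt8, NullableUInt8, NullableNothing };

/// Scan the per-element comparison result types: any always-NULL element
/// forces Nullable(Nothing); otherwise one nullable element forces
/// Nullable(UInt8).
static Type tupleComparisonReturnType(const std::vector<Type> & element_types)
{
    bool has_nullable = false;
    bool has_null = false;
    for (auto t : element_types)
    {
        has_nullable = has_nullable || t == Type::NullableUInt8 || t == Type::NullableNothing;
        has_null = has_null || t == Type::NullableNothing;
    }
    if (has_null)
        return Type::NullableNothing; /// e.g. tuple(NULL) < tuple(1) is always NULL
    if (has_nullable)
        return Type::NullableUInt8;
    return Type::UInt8;
}

int main()
{
    assert(tupleComparisonReturnType({Type::UInt8, Type::UInt8}) == Type::UInt8);
    assert(tupleComparisonReturnType({Type::UInt8, Type::NullableNothing}) == Type::NullableNothing);
}
```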


@ -104,6 +104,7 @@ ReturnType parseDateTimeBestEffortImpl(
         return false;
     };
 
+    res = 0;
     UInt16 year = 0;
     UInt8 month = 0;
     UInt8 day_of_month = 0;


@ -1,9 +1,29 @@
 #pragma once
 
+#include <limits>
+
 #include <IO/ReadHelpers.h>
 #include <Common/intExp.h>
 
+/// This is only needed for non-official, "unbundled" build.
+/// https://stackoverflow.com/questions/41198673/uint128-t-not-working-with-clang-and-libstdc
+#if !defined(_LIBCPP_LIMITS) && !defined(__GLIBCXX_BITSIZE_INT_N_0) && defined(__SIZEOF_INT128__)
+namespace std
+{
+    template <>
+    struct numeric_limits<__int128_t>
+    {
+        static constexpr bool is_specialized = true;
+        static constexpr bool is_signed = true;
+        static constexpr bool is_integer = true;
+        static constexpr int radix = 2;
+        static constexpr int digits = 127;
+        static constexpr int digits10 = 38;
+    };
+}
+#endif
+
 namespace DB
 {
@ -160,12 +180,24 @@ inline void readDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_
     if (static_cast<int32_t>(scale) + exponent < 0)
     {
-        /// Too many digits after point. Just cut off excessive digits.
-        auto divisor = intExp10OfSize<T>(-exponent - static_cast<int32_t>(scale));
-        assert(divisor > 0); /// This is for Clang Static Analyzer. It is not smart enough to infer it automatically.
-        x.value /= divisor;
-        scale = 0;
-        return;
+        auto divisor_exp = -exponent - static_cast<int32_t>(scale);
+
+        if (divisor_exp >= std::numeric_limits<typename T::NativeType>::digits10)
+        {
+            /// Too big negative exponent
+            x.value = 0;
+            scale = 0;
+            return;
+        }
+        else
+        {
+            /// Too many digits after point. Just cut off excessive digits.
+            auto divisor = intExp10OfSize<T>(divisor_exp);
+            assert(divisor > 0); /// This is for Clang Static Analyzer. It is not smart enough to infer it automatically.
+            x.value /= divisor;
+            scale = 0;
+            return;
+        }
     }
 
     scale += exponent;
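The effect of the new `divisor_exp` guard can be sketched standalone: once the number of digits to cut reaches the native type's `digits10`, the value is zeroed instead of computing an overflowing power of ten. A sketch with `Int64` standing in for the Decimal native type and a plain loop standing in for `intExp10OfSize<T>` (the helper name `cut_excessive_digits` is hypothetical):

``` cpp
#include <cassert>
#include <cstdint>
#include <limits>

/// Cut off divisor_exp decimal digits from value, or zero it out entirely
/// when the divisor 10^divisor_exp would not fit in the type.
static int64_t cut_excessive_digits(int64_t value, int32_t divisor_exp)
{
    if (divisor_exp >= std::numeric_limits<int64_t>::digits10)
        return 0; /// Too big negative exponent: nothing survives the division.

    int64_t divisor = 1;
    for (int32_t i = 0; i < divisor_exp; ++i) /// stand-in for intExp10OfSize<T>
        divisor *= 10;

    assert(divisor > 0);
    return value / divisor;
}

int main()
{
    assert(cut_excessive_digits(123456789, 2) == 1234567);
    assert(cut_excessive_digits(1, 19) == 0); /// 10^19 does not fit in Int64.
}
```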


@ -43,13 +43,17 @@ void ArrowBlockOutputFormat::consume(Chunk chunk)
 void ArrowBlockOutputFormat::finalize()
 {
-    if (writer)
+    if (!writer)
     {
-        auto status = writer->Close();
-        if (!status.ok())
-            throw Exception(ErrorCodes::UNKNOWN_EXCEPTION,
-                "Error while closing a table: {}", status.ToString());
+        const Block & header = getPort(PortKind::Main).getHeader();
+        consume(Chunk(header.getColumns(), 0));
     }
+
+    auto status = writer->Close();
+    if (!status.ok())
+        throw Exception(ErrorCodes::UNKNOWN_EXCEPTION,
+            "Error while closing a table: {}", status.ToString());
 }
 
 void ArrowBlockOutputFormat::prepareWriter(const std::shared_ptr<arrow::Schema> & schema)


@ -66,12 +66,16 @@ void ParquetBlockOutputFormat::consume(Chunk chunk)
 void ParquetBlockOutputFormat::finalize()
 {
-    if (file_writer)
+    if (!file_writer)
     {
-        auto status = file_writer->Close();
-        if (!status.ok())
-            throw Exception{"Error while closing a table: " + status.ToString(), ErrorCodes::UNKNOWN_EXCEPTION};
+        const Block & header = getPort(PortKind::Main).getHeader();
+        consume(Chunk(header.getColumns(), 0));
     }
+
+    auto status = file_writer->Close();
+    if (!status.ok())
+        throw Exception{"Error while closing a table: " + status.ToString(), ErrorCodes::UNKNOWN_EXCEPTION};
 }
 
 void registerOutputFormatProcessorParquet(FormatFactory & factory)
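Both format fixes above share one pattern: the Arrow/Parquet writer is created lazily on the first `consume`, so a zero-row insert previously reached `finalize` with no writer and produced no valid file; feeding an empty chunk built from the header forces writer creation and yields a schema-only file. A minimal sketch of that pattern with stand-in types (not the real ClickHouse classes):

``` cpp
#include <memory>
#include <vector>

struct Chunk { std::vector<int> columns; size_t rows = 0; };

struct Writer { void write(const Chunk &) {} void close() {} };

struct OutputFormat
{
    std::unique_ptr<Writer> writer;

    void consume(Chunk chunk)
    {
        if (!writer)
            writer = std::make_unique<Writer>(); /// created on the first chunk only
        writer->write(chunk);
    }

    void finalize()
    {
        if (!writer)
            consume(Chunk{}); /// zero-row chunk carrying just the header schema

        writer->close(); /// now always safe to close
    }
};

int main() { OutputFormat f; f.finalize(); }
```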


@ -25,23 +25,16 @@ FinishSortingTransform::FinishSortingTransform(
     const SortDescription & description_to_sort_,
     size_t max_merged_block_size_, UInt64 limit_)
     : SortingTransform(header, description_to_sort_, max_merged_block_size_, limit_)
-    , description_sorted(description_sorted_)
 {
-    const auto & sample = inputs.front().getHeader();
-
-    /// Replace column names to column position in description_sorted.
-    for (auto & column_description : description_sorted)
-    {
-        if (!column_description.column_name.empty())
-        {
-            column_description.column_number = sample.getPositionByName(column_description.column_name);
-            column_description.column_name.clear();
-        }
-    }
-
-    if (!isPrefix(description_sorted, description))
-        throw Exception("Can`t finish sorting. SortDescription of already sorted stream is not prefix of "
+    /// Check for sanity non-modified descriptions
+    if (!isPrefix(description_sorted_, description_to_sort_))
+        throw Exception("Can't finish sorting. SortDescription of already sorted stream is not prefix of "
             "SortDescription needed to sort", ErrorCodes::LOGICAL_ERROR);
+
+    /// The target description is modified in SortingTransform constructor.
+    /// To avoid doing the same actions with description_sorted just copy it from prefix of target description.
+    size_t prefix_size = description_sorted_.size();
+    description_sorted.assign(description.begin(), description.begin() + prefix_size);
 }
 
 static bool less(const Columns & lhs, const Columns & rhs, size_t i, size_t j, const SortDescription & descr)
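A standalone sketch of the new constructor logic: verify the pre-sorted description is a prefix of the target one, then copy that prefix from the (already normalized) target instead of re-resolving column names. `std::vector<std::string>` stands in for `SortDescription` here (the real type carries column numbers and sort directions):

``` cpp
#include <cassert>
#include <string>
#include <vector>

using SortDescription = std::vector<std::string>; /// stand-in for DB::SortDescription

static bool isPrefix(const SortDescription & pre, const SortDescription & full)
{
    if (pre.size() > full.size())
        return false;
    for (size_t i = 0; i < pre.size(); ++i)
        if (pre[i] != full[i])
            return false;
    return true;
}

int main()
{
    SortDescription description_to_sort {"date", "user", "event"};
    SortDescription description_sorted_ {"date", "user"};

    assert(isPrefix(description_sorted_, description_to_sort));

    /// Copy the prefix of the normalized target description instead of
    /// normalizing the pre-sorted one a second time.
    SortDescription description_sorted(
        description_to_sort.begin(),
        description_to_sort.begin() + description_sorted_.size());
    assert(description_sorted == description_sorted_);
}
```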


@ -34,6 +34,7 @@ namespace std
     static constexpr bool is_integer = true;
     static constexpr int radix = 2;
     static constexpr int digits = 128;
+    static constexpr int digits10 = 38;
     static constexpr __uint128_t min () { return 0; }  // used in boost 1.65.1+
     static constexpr __uint128_t max () { return __uint128_t(0) - 1; }  // used in boost 1.68.0+
 };


@ -52,6 +52,7 @@ const char * auto_contributors[] {
     "Aliaksandr Shylau",
     "Amos Bird",
     "Amy Krishnevsky",
+    "AnaUvarova",
     "Anastasiya Rodigina",
     "Anastasiya Tsarkova",
     "Anatoly Pugachev",
@ -72,8 +73,10 @@ const char * auto_contributors[] {
     "Andrey Skobtsov",
     "Andrey Urusov",
     "Andy Yang",
+    "Anna",
     "Anton Ivashkin",
     "Anton Kobzev",
+    "Anton Kvasha",
     "Anton Okhitin",
     "Anton Okulov",
     "Anton Patsev",
@ -90,6 +93,7 @@ const char * auto_contributors[] {
     "Artem Zuikov",
     "Artemeey",
     "Artemkin Pavel",
+    "Arthur Petukhovsky",
     "Arthur Tokarchuk",
     "Artur Beglaryan",
     "AsiaKorushkina",
@ -104,6 +108,7 @@ const char * auto_contributors[] {
     "BayoNet",
     "Bharat Nallan",
     "Big Elephant",
+    "Bill",
     "BlahGeek",
     "Bogdan",
     "Bogdan Voronin",
@ -130,6 +135,7 @@ const char * auto_contributors[] {
     "Darío",
     "Denis Burlaka",
     "Denis Glazachev",
+    "Denis Krivak",
     "Denis Zhuravlev",
     "Derek Perkins",
     "Ding Xiang Fei",
@ -152,6 +158,7 @@ const char * auto_contributors[] {
     "Eldar Zaitov",
     "Elena Baskakova",
     "Elghazal Ahmed",
+    "Elizaveta Mironyuk",
     "Emmanuel Donin de Rosière",
     "Eric",
     "Ernest Poletaev",
@ -259,6 +266,7 @@ const char * auto_contributors[] {
     "Marek Vavrusa",
     "Marek Vavruša",
     "Marek Vavruša",
+    "Mark Papadakis",
     "Maroun Maroun",
     "Marsel Arduanov",
     "Marti Raudsepp",
@ -307,6 +315,7 @@ const char * auto_contributors[] {
     "Mohammad Hossein Sekhavat",
     "MovElb",
     "Murat Kabilov",
+    "MyroTk",
     "NIKITA MIKHAILOV",
     "Narek Galstyan",
     "NeZeD [Mac Pro]",
@ -346,6 +355,7 @@ const char * auto_contributors[] {
     "Pavel Yakunin",
     "Pavlo Bashynskiy",
     "Pawel Rog",
+    "Peng Jian",
     "Persiyanov Dmitriy Andreevich",
     "Pervakov Grigorii",
     "Pervakov Grigory",
@ -359,6 +369,7 @@ const char * auto_contributors[] {
     "Reilee",
     "Reto Kromer",
     "Ri",
+    "Roman Bug",
     "Roman Lipovsky",
     "Roman Nikolaev",
     "Roman Nozdrin",
@ -466,6 +477,7 @@ const char * auto_contributors[] {
     "Yurii Vlasenko",
     "Yuriy",
     "Yuriy Baranov",
+    "Yuriy Chernyshov",
     "Yury Karpovich",
     "Yury Stankevich",
     "Zhichang Yu",
@ -483,6 +495,7 @@ const char * auto_contributors[] {
     "alex.lvxin",
     "alexander kozhikhov",
     "alexey-milovidov",
+    "amoschen",
     "amudong",
     "andrei-karpliuk",
     "andrewsg",
@ -556,6 +569,7 @@ const char * auto_contributors[] {
     "imgbot[bot]",
     "ivan-kush",
     "ivanzhukov",
+    "jakalletti",
     "javartisan",
     "javi",
     "javi santana",
@ -610,6 +624,7 @@ const char * auto_contributors[] {
     "objatie_groba",
     "ogorbacheva",
     "olegkv",
+    "olgarev",
     "orantius",
     "palasonicq",
     "peshkurov",
@ -621,6 +636,7 @@ const char * auto_contributors[] {
     "qianlixiang",
     "quid",
     "rainbowsysu",
+    "ritaank",
     "robot-clickhouse",
     "robot-metrika-test",
     "root",
@ -652,6 +668,7 @@ const char * auto_contributors[] {
     "vinity",
     "vitstn",
     "vivarum",
+    "vladimir golovchenko",
     "vxider",
     "vzakaznikov",
     "wangchao",
@ -679,6 +696,7 @@ const char * auto_contributors[] {
     "张风啸",
     "极客青年",
     "谢磊",
+    "贾顺名(Jarvis)",
     "黄朝晖",
     "黄璞",
     "박현우",


@ -13,3 +13,4 @@
 1 0 0 0 1 1
 0 1 1 0 1 0
 0 1 0 1 0 1
+\N \N \N \N \N


@ -103,3 +103,9 @@ SELECT
     tuple(2) > tuple(1),
     tuple(2) <= tuple(1),
     tuple(2) >= tuple(1);
+SELECT
+    tuple(NULL) < tuple(1),
+    tuple(NULL) = tuple(1),
+    tuple(NULL) <= tuple(1),
+    tuple(1, NULL) = tuple(2, 1),
+    tuple(1, NULL) < tuple(2, 1);


@ -0,0 +1,6 @@
+1E-9 0
+1E-8 0
+1E-7 0
+1e-7 0
+1E-9 0.000000001
+1E-10 0.000000000


@ -0,0 +1,10 @@
+SELECT '-1E9-1E9-1E9-1E9' AS x, toDecimal32(x, 0); -- { serverError 6 }
+SELECT '-1E9' AS x, toDecimal32(x, 0); -- { serverError 69 }
+SELECT '1E-9' AS x, toDecimal32(x, 0);
+SELECT '1E-8' AS x, toDecimal32(x, 0);
+SELECT '1E-7' AS x, toDecimal32(x, 0);
+SELECT '1e-7' AS x, toDecimal32(x, 0);
+SELECT '1E-9' AS x, toDecimal32(x, 9);
+SELECT '1E-9' AS x, toDecimal32(x, 10); -- { serverError 69 }
+SELECT '1E-10' AS x, toDecimal32(x, 10); -- { serverError 69 }
+SELECT '1E-10' AS x, toDecimal32(x, 9);


@ -0,0 +1,6 @@
+1
+1
+2
+2
+3
+3


@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+set -e
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. "$CUR_DIR"/../shell_config.sh
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_empty_data"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_empty_data (x Int8) ENGINE = Memory"
+(echo "INSERT INTO test_empty_data FORMAT Arrow" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x FORMAT Arrow") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT Arrow" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x LIMIT 0 FORMAT Arrow") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT ArrowStream" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x FORMAT ArrowStream") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT ArrowStream" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x LIMIT 0 FORMAT ArrowStream") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT Parquet" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x FORMAT Parquet") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT Parquet" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x LIMIT 0 FORMAT Parquet") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"


@ -0,0 +1,3 @@
+1 2020-05-05 01:00:00 0
+1 2020-05-05 01:00:00 1
+1 2020-05-05 01:00:00 2


@ -0,0 +1,7 @@
+DROP TABLE IF EXISTS pk_func;
+CREATE TABLE pk_func (`d` DateTime, `ui` UInt32 ) ENGINE = MergeTree ORDER BY toDate(d);
+INSERT INTO pk_func SELECT '2020-05-05 01:00:00', number FROM numbers(1000);
+SELECT 1, * FROM pk_func ORDER BY toDate(d) ASC, ui ASC LIMIT 3;
+DROP TABLE IF EXISTS pk_func;


@ -0,0 +1,3 @@
+2020-08-07 01:29:00
+1973-03-03 12:46:40
+2020-08-07 00:00:00


@ -0,0 +1,3 @@
+SELECT parseDateTimeBestEffort('1596752940', 'Europe/Moscow');
+SELECT parseDateTimeBestEffort('100000000', 'Europe/Moscow');
+SELECT parseDateTimeBestEffort('20200807', 'Europe/Moscow');


@ -0,0 +1,2 @@
+0.123
+1


@ -0,0 +1,4 @@
+CREATE TEMPORARY TABLE t (x Float64);
+INSERT INTO t VALUES (0x1.f7ced916872b0p-4);
+SELECT * FROM t;
+SELECT x = 0x1.f7ced916872b0p-4 FROM t;
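For context on the test above: `0x1.f7ced916872b0p-4` is a C99/C++17-style hexadecimal floating-point literal, i.e. (1 + 0xf7ced916872b0 / 2^52) * 2^-4, approximately 0.123, which matches the reference output. The same literal is valid in C++17:

``` cpp
#include <cassert>
#include <cstdio>

int main()
{
    double x = 0x1.f7ced916872b0p-4; /// hexadecimal floating-point literal
    assert(x > 0.1229 && x < 0.1231);
    std::printf("%a = %.3f\n", x, x); /// prints the hex form and 0.123
}
```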


@ -49,9 +49,7 @@ $(document).ready(function () {
     $('#sidebar .nav-link.active').parents('.collapse').each(function() {
         var current = $(this);
         if (current.attr('id') !== 'sidebar') {
-            current.css('transition-duration', '0s');
             current.collapse('show');
-            current.css('transition-duration', '0.4s');
         }
     });
     $(window).resize(onResize);