Merge branch 'master' into fix-netloc

commit 3c3350451e
Author: Alexey Milovidov
Date:   2020-08-07 22:57:46 +03:00

44 changed files with 295 additions and 90 deletions

.gitignore vendored

@@ -79,6 +79,7 @@ configure-stamp
 *.bin
 *.mrk
 *.mrk2
+*.mrk3
 .dupload.conf

@@ -10,6 +10,7 @@ ClickHouse is an open-source column-oriented database management system that all
 * [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
 * [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
 * [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
+* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
 * [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
 * [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
 * You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.

@@ -1,9 +1,9 @@
 # This strings autochanged from release_lib.sh:
-SET(VERSION_REVISION 54437)
+SET(VERSION_REVISION 54438)
 SET(VERSION_MAJOR 20)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH d64e51d1a78c1b53c33915ca0f75c97b2333844f)
+SET(VERSION_GITHASH 5d60ab33a511efd149c7c3de77c0dd4b81e65b13)
-SET(VERSION_DESCRIBE v20.7.1.1-prestable)
+SET(VERSION_DESCRIBE v20.8.1.1-prestable)
-SET(VERSION_STRING 20.7.1.1)
+SET(VERSION_STRING 20.8.1.1)
 # end of autochange

debian/changelog vendored

@@ -1,5 +1,5 @@
-clickhouse (20.7.1.1) unstable; urgency=low
+clickhouse (20.8.1.1) unstable; urgency=low

   * Modified source code

-- -- clickhouse-release <clickhouse-release@yandex-team.ru>  Mon, 13 Jul 2020 18:25:58 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru>  Fri, 07 Aug 2020 21:45:46 +0300

@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*

 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \

@@ -21,7 +21,7 @@ RUN apt-get --allow-unauthenticated update -y \

 # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
 # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
 # Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/dpkg-deb
+RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb
 RUN chmod +x dpkg-deb
 RUN cp dpkg-deb /usr/bin

@@ -1,7 +1,7 @@
 FROM ubuntu:20.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*
 ARG gosu_ver=1.10

 RUN apt-get update \

@@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*

 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \

@@ -179,10 +179,10 @@ def advanceRowAnchor():
     return currentRowAnchor()

-def tr(x):
-    a = advanceRowAnchor()
+def tr(x, anchor=None):
     #return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
-    return '<tr id={a}>{x}</tr>'.format(a=a, x=str(x))
+    anchor = anchor if anchor else advanceRowAnchor()
+    return f'<tr id={anchor}>{x}</tr>'

 def td(value, cell_attributes = ''):
     return '<td {cell_attributes}>{value}</td>'.format(

@@ -192,12 +192,14 @@ def td(value, cell_attributes = ''):
 def th(x):
     return '<th>' + str(x) + '</th>'

-def tableRow(cell_values, cell_attributes = []):
-    return tr(''.join([td(v, a)
-        for v, a in itertools.zip_longest(
-            cell_values, cell_attributes,
-            fillvalue = '')
-        if a is not None and v is not None]))
+def tableRow(cell_values, cell_attributes = [], anchor=None):
+    return tr(
+        ''.join([td(v, a)
+            for v, a in itertools.zip_longest(
+                cell_values, cell_attributes,
+                fillvalue = '')
+            if a is not None and v is not None]),
+        anchor)

 def tableHeader(r):
     return tr(''.join([th(f) for f in r]))

@@ -291,8 +293,8 @@ def add_errors_explained():
     if not errors_explained:
         return

-    text = tableStart('Error summary')
-    text += '<a name="fail1"/>'
+    text = '<a name="fail1"/>'
+    text += tableStart('Error summary')
     text += tableHeader(['Description'])
     for row in errors_explained:
         text += tableRow(row)

@@ -342,19 +344,20 @@ if args.report == 'main':
     text += tableHeader(columns)

     attrs = ['' for c in columns]
     for row in rows:
+        anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}'
         if float(row[1]) > 0.10:
             attrs[1] = f'style="background: {color_bad}"'
             unstable_partial_queries += 1
-            errors_explained.append([f'<a href="#{nextRowAnchor()}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
+            errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
         else:
             attrs[1] = ''
         if float(row[0]) > allowed_single_run_time:
             attrs[0] = f'style="background: {color_bad}"'
-            errors_explained.append([f'<a href="#{nextRowAnchor()}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run} seconds"</a>'])
+            errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run} seconds"</a>'])
             slow_average_tests += 1
         else:
             attrs[0] = ''
-        text += tableRow(row, attrs)
+        text += tableRow(row, attrs, anchor)
     text += tableEnd()
     tables.append(text)

@@ -385,6 +388,7 @@ if args.report == 'main':
     attrs = ['' for c in columns]
     attrs[5] = None
     for row in rows:
+        anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}'
         if int(row[5]):
             if float(row[3]) < 0.:
                 faster_queries += 1

@@ -392,11 +396,11 @@ if args.report == 'main':
             else:
                 slower_queries += 1
                 attrs[2] = attrs[3] = f'style="background: {color_bad}"'
-                errors_explained.append([f'<a href="#{nextRowAnchor()}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
+                errors_explained.append([f'<a href="#{anchor}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
         else:
             attrs[2] = attrs[3] = ''

-        text += tableRow(row, attrs)
+        text += tableRow(row, attrs, anchor)

     text += tableEnd()
     tables.append(text)

@@ -429,13 +433,14 @@ if args.report == 'main':
     attrs = ['' for c in columns]
     attrs[4] = None
     for r in unstable_rows:
+        anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}'
         if int(r[4]):
             very_unstable_queries += 1
             attrs[3] = f'style="background: {color_bad}"'
         else:
             attrs[3] = ''

-        text += tableRow(r, attrs)
+        text += tableRow(r, attrs, anchor)

     text += tableEnd()
     tables.append(text)

@@ -477,14 +482,14 @@ if args.report == 'main':
             # FIXME should be 15s max -- investigate parallel_insert
             slow_average_tests += 1
             attrs[6] = f'style="background: {color_bad}"'
-            errors_explained.append([f'<a href="./all-queries.html#all-query-times.0">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
+            errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
         else:
             attrs[6] = ''

         if float(r[5]) > allowed_single_run_time * total_runs:
             slow_average_tests += 1
             attrs[5] = f'style="background: {color_bad}"'
-            errors_explained.append([f'<a href="./all-queries.html#all-query-times.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
+            errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
         else:
             attrs[5] = ''

@@ -659,6 +664,7 @@ elif args.report == 'all-queries':
     attrs[0] = None
     attrs[1] = None
     for r in rows:
+        anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}'
         if int(r[1]):
             attrs[6] = f'style="background: {color_bad}"'
         else:

@@ -679,7 +685,7 @@ elif args.report == 'all-queries':
             attrs[2] = ''
             attrs[3] = ''

-        text += tableRow(r, attrs)
+        text += tableRow(r, attrs, anchor)

     text += tableEnd()
     tables.append(text)

@@ -277,8 +277,4 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 Provides possibility to reinitialize Zookeeper sessions state for all `ReplicatedMergeTree` tables, will compare current state with Zookeeper as source of true and add tasks to Zookeeper queue if needed

-``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
-```
-
 [Original article](https://clickhouse.tech/docs/en/query_language/system/) <!--hide-->

@@ -3,4 +3,16 @@ toc_folder_title: Integrations
 toc_priority: 30
 ---

+# Движки таблиц для интеграции {#table-engines-for-integrations}
+
+Для интеграции с внешними системами ClickHouse предоставляет различные средства, включая движки таблиц. Конфигурирование интеграционных движков осуществляется с помощью запросов `CREATE TABLE` или `ALTER TABLE`, как и для других табличных движков. С точки зрения пользователя, настроенная интеграция выглядит как обычная таблица, но запросы к ней передаются через прокси во внешнюю систему. Этот прозрачный запрос является одним из ключевых преимуществ этого подхода по сравнению с альтернативными методами интеграции, такими как внешние словари или табличные функции, которые требуют использования пользовательских методов запроса при каждом использовании.
+
+Список поддерживаемых интеграций:
+
+- [ODBC](../../../engines/table-engines/integrations/odbc.md)
+- [JDBC](../../../engines/table-engines/integrations/jdbc.md)
+- [MySQL](../../../engines/table-engines/integrations/mysql.md)
+- [HDFS](../../../engines/table-engines/integrations/hdfs.md)
+- [Kafka](../../../engines/table-engines/integrations/kafka.md)
+
 [Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/integrations/) <!--hide-->

@@ -1,6 +1,6 @@
 # ReplacingMergeTree {#replacingmergetree}

-Движок отличается от [MergeTree](mergetree.md#table_engines-mergetree) тем, что выполняет удаление дублирующихся записей с одинаковым значением первичного ключа (точнее, с одинаковым значением [ключа сортировки](mergetree.md)).
+Движок отличается от [MergeTree](mergetree.md#table_engines-mergetree) тем, что выполняет удаление дублирующихся записей с одинаковым значением [ключа сортировки](mergetree.md)).
 Дедупликация данных производится лишь во время слияний. Слияние происходят в фоне в неизвестный момент времени, на который вы не можете ориентироваться. Некоторая часть данных может остаться необработанной. Хотя вы можете вызвать внеочередное слияние с помощью запроса `OPTIMIZE`, на это не стоит рассчитывать, так как запрос `OPTIMIZE` приводит к чтению и записи большого объёма данных.

@@ -27,7 +27,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 - `ver` — столбец с версией, тип `UInt*`, `Date` или `DateTime`. Необязательный параметр.

-    При слиянии, из всех строк с одинаковым значением первичного ключа `ReplacingMergeTree` оставляет только одну:
+    При слиянии, из всех строк с одинаковым значением ключа сортировки `ReplacingMergeTree` оставляет только одну:

     - Последнюю в выборке, если `ver` не задан.
     - С максимальной версией, если `ver` задан.

@@ -40,7 +40,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
 <summary>Устаревший способ создания таблицы</summary>

-!!! attention "Attention"
+!!! attention "Внимание"
     Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.

 ``` sql

@@ -3,4 +3,14 @@ toc_folder_title: Special
 toc_priority: 31
 ---

+# Специальные движки таблиц {#special-table-engines}
+
+Существует три основные категории движков таблиц:
+
+- [Семейство MergeTree](../../../engines/table-engines/mergetree-family/index.md) для основного использования.
+- [Семейство Log](../../../engines/table-engines/log-family/index.md) для небольших временных данных.
+- [Движки таблиц для интеграции](../../../engines/table-engines/integrations/index.md).
+
+Остальные движки таблиц уникальны по своему назначению и еще не сгруппированы в семейства, поэтому они помещены в эту специальную категорию.
+
 [Оригинальная статья](https://clickhouse.tech/docs/ru/engines/table-engines/special/) <!--hide-->

@@ -7,7 +7,7 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT
 | Формат                                                          | INSERT | SELECT |
 |-----------------------------------------------------------------|--------|--------|
 | [TabSeparated](#tabseparated)                                   | ✔      | ✔      |
 | [TabSeparatedRaw](#tabseparatedraw)                             | ✗      | ✔      |
 | [TabSeparatedWithNames](#tabseparatedwithnames)                 | ✔      | ✔      |
 | [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔      | ✔      |
 | [Template](#format-template)                                    | ✔      | ✔      |

@@ -132,7 +132,7 @@ SELECT * FROM nestedt FORMAT TSV
 ## TabSeparatedRaw {#tabseparatedraw}

 Отличается от формата `TabSeparated` тем, что строки выводятся без экранирования.
+Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). Используя этот формат, следите, чтобы в полях не было символов табуляции или разрыва строки.

 Этот формат также доступен под именем `TSVRaw`.

@@ -4,4 +4,16 @@ toc_folder_title: "\u041E\u0442 \u0441\u0442\u043E\u0440\u043E\u043D\u043D\u0438
 toc_priority: 24
 ---

+# Сторонние интерфейсы {#third-party-interfaces}
+
+Раздел содержит список сторонних интерфейсов для ClickHouse. Это может быть визуальный интерфейс, интерфейс командной строки, либо API:
+
+- [Client libraries](../../interfaces/third-party/client-libraries.md)
+- [Integrations](../../interfaces/third-party/integrations.md)
+- [GUI](../../interfaces/third-party/gui.md)
+- [Proxies](../../interfaces/third-party/proxy.md)
+
+!!! note "Примечание"
+    С ClickHouse работают также универсальные инструменты, поддерживающие общий API, такие как [ODBC](../../interfaces/odbc.md) или [JDBC](../../interfaces/jdbc.md).
+
 [Оригинальная статья](https://clickhouse.tech/docs/ru/interfaces/third-party/) <!--hide-->

@@ -235,14 +235,10 @@ SYSTEM SYNC REPLICA [db.]replicated_merge_tree_family_table_name
 Инициализация очереди репликации на основе данных ZooKeeper, происходит так же как при attach table. На короткое время таблица станет недоступной для любых операций.

 ``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
+SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 ```

 ### RESTART REPLICAS {#query_language-system-restart-replicas}

 Реинициализация состояния Zookeeper сессий для всех `ReplicatedMergeTree` таблиц, сравнивает текущее состояние с тем что хранится в Zookeeper как источник правды и добавляет задачи Zookeeper очередь если необходимо

-``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
-```
-
 [Оригинальная статья](https://clickhouse.tech/docs/ru/query_language/system/) <!--hide-->

@@ -280,8 +280,4 @@ SYSTEM RESTART REPLICA [db.]replicated_merge_tree_family_table_name
 重置所有 `ReplicatedMergeTree`表的ZooKeeper会话状态。该操作会以Zookeeper为参照对比当前状态有需要的情况下将任务添加到ZooKeeper队列。

-``` sql
-SYSTEM RESTART QUEUES [db.]replicated_merge_tree_family_table_name
-```
-
 [原始文档](https://clickhouse.tech/docs/en/query_language/system/) <!--hide-->

programs/server/.gitignore vendored (new file)

@@ -0,0 +1,11 @@
+/access
+/dictionaries_lib
+/flags
+/format_schemas
+/metadata_dropped
+/preprocessed_configs
+/tmp
+/user_files
+status

@@ -1,5 +1,3 @@
-*.bin
-*.mrk
 *.txt
 *.dat
 *.idx

@@ -1,2 +0,0 @@
-*.bin
-*.mrk

programs/server/metadata/.gitignore vendored (new file)

@@ -0,0 +1 @@
+*.sql

@@ -1,6 +1,7 @@
 #pragma once

 #include <cstddef>
+#include <cassert>
 #include <type_traits>

 #include <common/defines.h>

@@ -11,6 +12,7 @@
  */
 inline unsigned int bitScanReverse(unsigned int x)
 {
+    assert(x != 0);
     return sizeof(unsigned int) * 8 - 1 - __builtin_clz(x);
 }
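
The added assert makes the precondition explicit: __builtin_clz(0) is undefined behaviour, so bitScanReverse must never see zero. A minimal standalone check of that contract (a sketch, assuming a GCC/Clang toolchain for __builtin_clz; the main() harness is ours, not part of the commit):

#include <cassert>

// Copy of the patched function, kept self-contained for the check.
inline unsigned int bitScanReverse(unsigned int x)
{
    assert(x != 0); // __builtin_clz(0) is undefined behaviour
    return sizeof(unsigned int) * 8 - 1 - __builtin_clz(x);
}

int main()
{
    assert(bitScanReverse(1) == 0);            // lowest bit set -> index 0
    assert(bitScanReverse(0x80000000u) == 31); // highest bit set -> index 31
    return 0;
}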

@@ -21,6 +21,15 @@
 #include <Common/PODArray_fwd.h>

+/** Whether we can use memcpy instead of a loop with assignment to T from U.
+  * It is Ok if types are the same. And if types are integral and of the same size,
+  * example: char, signed char, unsigned char.
+  * It's not Ok for int and float.
+  * Don't forget to apply std::decay when using this constexpr.
+  */
+template <typename T, typename U>
+constexpr bool memcpy_can_be_used_for_assignment = std::is_same_v<T, U>
+    || (std::is_integral_v<T> && std::is_integral_v<U> && sizeof(T) == sizeof(U));

 namespace DB
 {

@@ -313,7 +322,15 @@
         insert(from_begin, from_end);
     }

-    PODArray(std::initializer_list<T> il) : PODArray(std::begin(il), std::end(il)) {}
+    PODArray(std::initializer_list<T> il)
+    {
+        this->reserve(std::size(il));
+
+        for (const auto & x : il)
+        {
+            this->push_back(x);
+        }
+    }

     PODArray(PODArray && other)
     {

@@ -428,17 +445,21 @@
     void insertSmallAllowReadWriteOverflow15(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
     {
         static_assert(pad_right_ >= 15);
+        static_assert(sizeof(T) == sizeof(*from_begin));
         insertPrepare(from_begin, from_end, std::forward<TAllocatorParams>(allocator_params)...);
         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
         memcpySmallAllowReadWriteOverflow15(this->c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end += bytes_to_copy;
     }

+    /// Do not insert into the array a piece of itself. Because with the resize, the iterators on themselves can be invalidated.
     template <typename It1, typename It2>
     void insert(iterator it, It1 from_begin, It2 from_end)
     {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
-        size_t bytes_to_move = (end() - it) * sizeof(T);
+        size_t bytes_to_move = this->byte_size(end() - it);

         insertPrepare(from_begin, from_end);

@@ -446,12 +467,15 @@
         memcpy(this->c_end + bytes_to_copy - bytes_to_move, this->c_end - bytes_to_move, bytes_to_move);

         memcpy(this->c_end - bytes_to_move, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end += bytes_to_copy;
     }

     template <typename It1, typename It2>
     void insert_assume_reserved(It1 from_begin, It2 from_end)
     {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
         size_t bytes_to_copy = this->byte_size(from_end - from_begin);
         memcpy(this->c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end += bytes_to_copy;

@@ -584,12 +608,15 @@
     template <typename It1, typename It2, typename... TAllocatorParams>
     void assign(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
     {
+        static_assert(memcpy_can_be_used_for_assignment<std::decay_t<T>, std::decay_t<decltype(*from_begin)>>);
         size_t required_capacity = from_end - from_begin;
         if (required_capacity > this->capacity())
             this->reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward<TAllocatorParams>(allocator_params)...);

         size_t bytes_to_copy = this->byte_size(required_capacity);
         memcpy(this->c_start, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
         this->c_end = this->c_start + bytes_to_copy;
     }
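
The new static_asserts all lean on the memcpy_can_be_used_for_assignment trait introduced at the top of the file. A minimal sketch of how it evaluates for a few type pairs (only the trait definition comes from the commit; the static_asserts and main() are illustrative):

#include <type_traits>

template <typename T, typename U>
constexpr bool memcpy_can_be_used_for_assignment = std::is_same_v<T, U>
    || (std::is_integral_v<T> && std::is_integral_v<U> && sizeof(T) == sizeof(U));

static_assert(memcpy_can_be_used_for_assignment<char, signed char>);  // same-size integrals: OK
static_assert(memcpy_can_be_used_for_assignment<int, unsigned int>);  // same-size integrals: OK
static_assert(!memcpy_can_be_used_for_assignment<int, float>);        // integral vs float: not OK
static_assert(!memcpy_can_be_used_for_assignment<short, int>);        // different sizes: not OK

int main() { return 0; }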

@@ -13,6 +13,7 @@
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeNullable.h>
+#include <DataTypes/DataTypeNothing.h>
 #include <DataTypes/DataTypeDateTime.h>
 #include <DataTypes/DataTypeDateTime64.h>
 #include <DataTypes/DataTypeDate.h>

@@ -884,11 +885,18 @@ private:
         if (tuple_size != typeid_cast<const DataTypeTuple &>(*c1.type).getElements().size())
             throw Exception("Cannot compare tuples of different sizes.", ErrorCodes::BAD_ARGUMENTS);

+        auto & res = block.getByPosition(result);
+        if (res.type->onlyNull())
+        {
+            res.column = res.type->createColumnConstWithDefaultValue(input_rows_count);
+            return;
+        }
+
         ColumnsWithTypeAndName x(tuple_size);
         ColumnsWithTypeAndName y(tuple_size);

-        auto x_const = checkAndGetColumnConst<ColumnTuple>(c0.column.get());
-        auto y_const = checkAndGetColumnConst<ColumnTuple>(c1.column.get());
+        const auto * x_const = checkAndGetColumnConst<ColumnTuple>(c0.column.get());
+        const auto * y_const = checkAndGetColumnConst<ColumnTuple>(c1.column.get());

         Columns x_columns;
         Columns y_columns;

@@ -1135,17 +1143,22 @@ public:
                 FunctionComparison<Op, Name>::create(context)));

         bool has_nullable = false;
+        bool has_null = false;

         size_t size = left_tuple->getElements().size();
         for (size_t i = 0; i < size; ++i)
         {
             ColumnsWithTypeAndName args = {{nullptr, left_tuple->getElements()[i], ""},
                                            {nullptr, right_tuple->getElements()[i], ""}};
-            has_nullable = has_nullable || adaptor.build(args)->getReturnType()->isNullable();
+            auto element_type = adaptor.build(args)->getReturnType();
+            has_nullable = has_nullable || element_type->isNullable();
+            has_null = has_null || element_type->onlyNull();
         }

         /// If any element comparison is nullable, return type will also be nullable.
         /// We useDefaultImplementationForNulls, but it doesn't work for tuples.
+        if (has_null)
+            return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeNothing>());
         if (has_nullable)
             return std::make_shared<DataTypeNullable>(std::make_shared<DataTypeUInt8>());
     }

@@ -104,6 +104,7 @@ ReturnType parseDateTimeBestEffortImpl(
         return false;
     };

+    res = 0;
     UInt16 year = 0;
     UInt8 month = 0;
     UInt8 day_of_month = 0;

@@ -1,9 +1,29 @@
 #pragma once

+#include <limits>
 #include <IO/ReadHelpers.h>
 #include <Common/intExp.h>

+/// This is only needed for non-official, "unbundled" build.
+/// https://stackoverflow.com/questions/41198673/uint128-t-not-working-with-clang-and-libstdc
+#if !defined(_LIBCPP_LIMITS) && !defined(__GLIBCXX_BITSIZE_INT_N_0) && defined(__SIZEOF_INT128__)
+namespace std
+{
+    template <>
+    struct numeric_limits<__int128_t>
+    {
+        static constexpr bool is_specialized = true;
+        static constexpr bool is_signed = true;
+        static constexpr bool is_integer = true;
+        static constexpr int radix = 2;
+        static constexpr int digits = 127;
+        static constexpr int digits10 = 38;
+    };
+}
+#endif

 namespace DB
 {

@@ -160,12 +180,24 @@ inline void readDecimalText(ReadBuffer & buf, T & x, uint32_t precision, uint32_
     if (static_cast<int32_t>(scale) + exponent < 0)
     {
-        /// Too many digits after point. Just cut off excessive digits.
-        auto divisor = intExp10OfSize<T>(-exponent - static_cast<int32_t>(scale));
-        assert(divisor > 0); /// This is for Clang Static Analyzer. It is not smart enough to infer it automatically.
-        x.value /= divisor;
-        scale = 0;
-        return;
+        auto divisor_exp = -exponent - static_cast<int32_t>(scale);
+
+        if (divisor_exp >= std::numeric_limits<typename T::NativeType>::digits10)
+        {
+            /// Too big negative exponent
+            x.value = 0;
+            scale = 0;
+            return;
+        }
+        else
+        {
+            /// Too many digits after point. Just cut off excessive digits.
+            auto divisor = intExp10OfSize<T>(divisor_exp);
+            assert(divisor > 0); /// This is for Clang Static Analyzer. It is not smart enough to infer it automatically.
+            x.value /= divisor;
+            scale = 0;
+            return;
+        }
     }

     scale += exponent;
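
The new branch guards intExp10OfSize<T>, which would overflow once the requested power of ten no longer fits in T::NativeType; exponents that far below the scale are now cut straight to zero. A standalone sketch of the arithmetic for an Int32-backed decimal (digits10 == 9); the helper and harness here are illustrative, not ClickHouse code:

#include <cassert>
#include <cstdint>
#include <limits>

// Mirrors the cutoff rule for a Decimal32-like type backed by Int32.
int32_t cut_excessive_digits(int32_t value, int32_t scale, int32_t exponent)
{
    assert(scale + exponent < 0);
    int32_t divisor_exp = -exponent - scale;
    if (divisor_exp >= std::numeric_limits<int32_t>::digits10)
        return 0; // too big negative exponent: 10^divisor_exp would overflow Int32
    int32_t divisor = 1;
    for (int32_t i = 0; i < divisor_exp; ++i)
        divisor *= 10; // stand-in for intExp10OfSize<T>
    return value / divisor;
}

int main()
{
    assert(cut_excessive_digits(1, 9, -10) == 0); // '1E-10' at scale 9: one excess digit divided away
    assert(cut_excessive_digits(1, 0, -20) == 0); // divisor_exp = 20 >= 9: zero without overflow
    return 0;
}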

@@ -43,13 +43,17 @@ void ArrowBlockOutputFormat::consume(Chunk chunk)

 void ArrowBlockOutputFormat::finalize()
 {
-    if (writer)
+    if (!writer)
     {
-        auto status = writer->Close();
-        if (!status.ok())
-            throw Exception(ErrorCodes::UNKNOWN_EXCEPTION,
-                "Error while closing a table: {}", status.ToString());
+        const Block & header = getPort(PortKind::Main).getHeader();
+
+        consume(Chunk(header.getColumns(), 0));
     }
+
+    auto status = writer->Close();
+    if (!status.ok())
+        throw Exception(ErrorCodes::UNKNOWN_EXCEPTION,
+            "Error while closing a table: {}", status.ToString());
 }

@@ -66,12 +66,16 @@ void ParquetBlockOutputFormat::consume(Chunk chunk)

 void ParquetBlockOutputFormat::finalize()
 {
-    if (file_writer)
+    if (!file_writer)
     {
-        auto status = file_writer->Close();
-        if (!status.ok())
-            throw Exception{"Error while closing a table: " + status.ToString(), ErrorCodes::UNKNOWN_EXCEPTION};
+        const Block & header = getPort(PortKind::Main).getHeader();
+
+        consume(Chunk(header.getColumns(), 0));
     }
+
+    auto status = file_writer->Close();
+    if (!status.ok())
+        throw Exception{"Error while closing a table: " + status.ToString(), ErrorCodes::UNKNOWN_EXCEPTION};
 }

 void registerOutputFormatProcessorParquet(FormatFactory & factory)
void registerOutputFormatProcessorParquet(FormatFactory & factory) void registerOutputFormatProcessorParquet(FormatFactory & factory)

@@ -25,23 +25,16 @@ FinishSortingTransform::FinishSortingTransform(
     const SortDescription & description_to_sort_,
     size_t max_merged_block_size_, UInt64 limit_)
     : SortingTransform(header, description_to_sort_, max_merged_block_size_, limit_)
-    , description_sorted(description_sorted_)
 {
-    const auto & sample = inputs.front().getHeader();
-
-    /// Replace column names to column position in description_sorted.
-    for (auto & column_description : description_sorted)
-    {
-        if (!column_description.column_name.empty())
-        {
-            column_description.column_number = sample.getPositionByName(column_description.column_name);
-            column_description.column_name.clear();
-        }
-    }
-
-    if (!isPrefix(description_sorted, description))
-        throw Exception("Can`t finish sorting. SortDescription of already sorted stream is not prefix of "
+    /// Check for sanity non-modified descriptions
+    if (!isPrefix(description_sorted_, description_to_sort_))
+        throw Exception("Can't finish sorting. SortDescription of already sorted stream is not prefix of "
             "SortDescription needed to sort", ErrorCodes::LOGICAL_ERROR);
+
+    /// The target description is modified in SortingTransform constructor.
+    /// To avoid doing the same actions with description_sorted just copy it from prefix of target description.
+    size_t prefix_size = description_sorted_.size();
+    description_sorted.assign(description.begin(), description.begin() + prefix_size);
 }

 static bool less(const Columns & lhs, const Columns & rhs, size_t i, size_t j, const SortDescription & descr)
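
The rewritten constructor relies on two invariants: the pre-sorted description must be a prefix of the target one, and SortingTransform has already resolved column names in `description`, so the prefix can simply be copied instead of being re-resolved by hand. A toy sketch of that prefix logic (std::string stands in for SortColumnDescription, and this isPrefix is a simplified stand-in, not the ClickHouse one):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

using SortDescription = std::vector<std::string>; // stand-in for DB::SortDescription

static bool isPrefix(const SortDescription & pref, const SortDescription & full)
{
    return pref.size() <= full.size()
        && std::equal(pref.begin(), pref.end(), full.begin());
}

int main()
{
    SortDescription description_sorted_ = {"date"};  // the stream is already sorted by this
    SortDescription description = {"date", "id"};    // target description, already name-resolved

    assert(isPrefix(description_sorted_, description));

    // Copy the prefix of the (already modified) target description.
    SortDescription description_sorted(description.begin(),
                                       description.begin() + description_sorted_.size());
    assert(description_sorted == SortDescription{"date"});
    return 0;
}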

@@ -34,6 +34,7 @@ namespace std
     static constexpr bool is_integer = true;
     static constexpr int radix = 2;
     static constexpr int digits = 128;
+    static constexpr int digits10 = 38;
     static constexpr __uint128_t min () { return 0; }  // used in boost 1.65.1+
     static constexpr __uint128_t max () { return __uint128_t(0) - 1; } // used in boost 1.68.0+
 };

@@ -52,6 +52,7 @@ const char * auto_contributors[] {
 "Aliaksandr Shylau",
 "Amos Bird",
 "Amy Krishnevsky",
+"AnaUvarova",
 "Anastasiya Rodigina",
 "Anastasiya Tsarkova",
 "Anatoly Pugachev",

@@ -72,8 +73,10 @@ const char * auto_contributors[] {
 "Andrey Skobtsov",
 "Andrey Urusov",
 "Andy Yang",
+"Anna",
 "Anton Ivashkin",
 "Anton Kobzev",
+"Anton Kvasha",
 "Anton Okhitin",
 "Anton Okulov",
 "Anton Patsev",

@@ -90,6 +93,7 @@ const char * auto_contributors[] {
 "Artem Zuikov",
 "Artemeey",
 "Artemkin Pavel",
+"Arthur Petukhovsky",
 "Arthur Tokarchuk",
 "Artur Beglaryan",
 "AsiaKorushkina",

@@ -104,6 +108,7 @@ const char * auto_contributors[] {
 "BayoNet",
 "Bharat Nallan",
 "Big Elephant",
+"Bill",
 "BlahGeek",
 "Bogdan",
 "Bogdan Voronin",

@@ -130,6 +135,7 @@ const char * auto_contributors[] {
 "Darío",
 "Denis Burlaka",
 "Denis Glazachev",
+"Denis Krivak",
 "Denis Zhuravlev",
 "Derek Perkins",
 "Ding Xiang Fei",

@@ -152,6 +158,7 @@ const char * auto_contributors[] {
 "Eldar Zaitov",
 "Elena Baskakova",
 "Elghazal Ahmed",
+"Elizaveta Mironyuk",
 "Emmanuel Donin de Rosière",
 "Eric",
 "Ernest Poletaev",

@@ -259,6 +266,7 @@ const char * auto_contributors[] {
 "Marek Vavrusa",
 "Marek Vavruša",
 "Marek Vavruša",
+"Mark Papadakis",
 "Maroun Maroun",
 "Marsel Arduanov",
 "Marti Raudsepp",

@@ -307,6 +315,7 @@ const char * auto_contributors[] {
 "Mohammad Hossein Sekhavat",
 "MovElb",
 "Murat Kabilov",
+"MyroTk",
 "NIKITA MIKHAILOV",
 "Narek Galstyan",
 "NeZeD [Mac Pro]",

@@ -346,6 +355,7 @@ const char * auto_contributors[] {
 "Pavel Yakunin",
 "Pavlo Bashynskiy",
 "Pawel Rog",
+"Peng Jian",
 "Persiyanov Dmitriy Andreevich",
 "Pervakov Grigorii",
 "Pervakov Grigory",

@@ -359,6 +369,7 @@ const char * auto_contributors[] {
 "Reilee",
 "Reto Kromer",
 "Ri",
+"Roman Bug",
 "Roman Lipovsky",
 "Roman Nikolaev",
 "Roman Nozdrin",

@@ -466,6 +477,7 @@ const char * auto_contributors[] {
 "Yurii Vlasenko",
 "Yuriy",
 "Yuriy Baranov",
+"Yuriy Chernyshov",
 "Yury Karpovich",
 "Yury Stankevich",
 "Zhichang Yu",

@@ -483,6 +495,7 @@ const char * auto_contributors[] {
 "alex.lvxin",
 "alexander kozhikhov",
 "alexey-milovidov",
+"amoschen",
 "amudong",
 "andrei-karpliuk",
 "andrewsg",

@@ -556,6 +569,7 @@ const char * auto_contributors[] {
 "imgbot[bot]",
 "ivan-kush",
 "ivanzhukov",
+"jakalletti",
 "javartisan",
 "javi",
 "javi santana",

@@ -610,6 +624,7 @@ const char * auto_contributors[] {
 "objatie_groba",
 "ogorbacheva",
 "olegkv",
+"olgarev",
 "orantius",
 "palasonicq",
 "peshkurov",

@@ -621,6 +636,7 @@ const char * auto_contributors[] {
 "qianlixiang",
 "quid",
 "rainbowsysu",
+"ritaank",
 "robot-clickhouse",
 "robot-metrika-test",
 "root",

@@ -652,6 +668,7 @@ const char * auto_contributors[] {
 "vinity",
 "vitstn",
 "vivarum",
+"vladimir golovchenko",
 "vxider",
 "vzakaznikov",
 "wangchao",

@@ -679,6 +696,7 @@ const char * auto_contributors[] {
 "张风啸",
 "极客青年",
 "谢磊",
+"贾顺名(Jarvis)",
 "黄朝晖",
 "黄璞",
 "박현우",

@@ -13,3 +13,4 @@
 1 0 0 0 1 1
 0 1 1 0 1 0
 0 1 0 1 0 1
+\N \N \N \N \N

@@ -103,3 +103,9 @@ SELECT
     tuple(2) > tuple(1),
     tuple(2) <= tuple(1),
     tuple(2) >= tuple(1);
+SELECT
+    tuple(NULL) < tuple(1),
+    tuple(NULL) = tuple(1),
+    tuple(NULL) <= tuple(1),
+    tuple(1, NULL) = tuple(2, 1),
+    tuple(1, NULL) < tuple(2, 1);

@@ -0,0 +1,6 @@
+1E-9 0
+1E-8 0
+1E-7 0
+1e-7 0
+1E-9 0.000000001
+1E-10 0.000000000

@@ -0,0 +1,10 @@
+SELECT '-1E9-1E9-1E9-1E9' AS x, toDecimal32(x, 0); -- { serverError 6 }
+SELECT '-1E9' AS x, toDecimal32(x, 0); -- { serverError 69 }
+SELECT '1E-9' AS x, toDecimal32(x, 0);
+SELECT '1E-8' AS x, toDecimal32(x, 0);
+SELECT '1E-7' AS x, toDecimal32(x, 0);
+SELECT '1e-7' AS x, toDecimal32(x, 0);
+SELECT '1E-9' AS x, toDecimal32(x, 9);
+SELECT '1E-9' AS x, toDecimal32(x, 10); -- { serverError 69 }
+SELECT '1E-10' AS x, toDecimal32(x, 10); -- { serverError 69 }
+SELECT '1E-10' AS x, toDecimal32(x, 9);

@@ -0,0 +1,6 @@
+1
+1
+2
+2
+3
+3

@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+set -e
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. "$CUR_DIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test_empty_data"
+${CLICKHOUSE_CLIENT} --query="CREATE TABLE test_empty_data (x Int8) ENGINE = Memory"
+
+(echo "INSERT INTO test_empty_data FORMAT Arrow" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x FORMAT Arrow") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT Arrow" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x LIMIT 0 FORMAT Arrow") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+
+(echo "INSERT INTO test_empty_data FORMAT ArrowStream" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x FORMAT ArrowStream") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT ArrowStream" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x LIMIT 0 FORMAT ArrowStream") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+
+(echo "INSERT INTO test_empty_data FORMAT Parquet" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x FORMAT Parquet") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"
+(echo "INSERT INTO test_empty_data FORMAT Parquet" && ${CLICKHOUSE_CLIENT} --query="SELECT 1 AS x LIMIT 0 FORMAT Parquet") | ${CLICKHOUSE_CLIENT}
+${CLICKHOUSE_CLIENT} --query="SELECT count() FROM test_empty_data"

@@ -0,0 +1,3 @@
+1 2020-05-05 01:00:00 0
+1 2020-05-05 01:00:00 1
+1 2020-05-05 01:00:00 2

@@ -0,0 +1,7 @@
+DROP TABLE IF EXISTS pk_func;
+
+CREATE TABLE pk_func (`d` DateTime, `ui` UInt32 ) ENGINE = MergeTree ORDER BY toDate(d);
+INSERT INTO pk_func SELECT '2020-05-05 01:00:00', number FROM numbers(1000);
+
+SELECT 1, * FROM pk_func ORDER BY toDate(d) ASC, ui ASC LIMIT 3;
+DROP TABLE IF EXISTS pk_func;

@@ -0,0 +1,3 @@
+2020-08-07 01:29:00
+1973-03-03 12:46:40
+2020-08-07 00:00:00

@@ -0,0 +1,3 @@
+SELECT parseDateTimeBestEffort('1596752940', 'Europe/Moscow');
+SELECT parseDateTimeBestEffort('100000000', 'Europe/Moscow');
+SELECT parseDateTimeBestEffort('20200807', 'Europe/Moscow');

@@ -0,0 +1,2 @@
+0.123
+1

@@ -0,0 +1,4 @@
+CREATE TEMPORARY TABLE t (x Float64);
+INSERT INTO t VALUES (0x1.f7ced916872b0p-4);
+SELECT * FROM t;
+SELECT x = 0x1.f7ced916872b0p-4 FROM t;

@@ -49,9 +49,7 @@ $(document).ready(function () {
     $('#sidebar .nav-link.active').parents('.collapse').each(function() {
         var current = $(this);

         if (current.attr('id') !== 'sidebar') {
-            current.css('transition-duration', '0s');
             current.collapse('show');
-            current.css('transition-duration', '0.4s');
         }
     });

     $(window).resize(onResize);