Merged with master.

Nikolai Kochetov 2018-05-21 14:38:50 +03:00
commit 937cb2db29
674 changed files with 14928 additions and 1007125 deletions


@ -1,24 +1,21 @@
---
BasedOnStyle: WebKit
Language: Cpp
BasedOnStyle: WebKit
Language: Cpp
AlignAfterOpenBracket: false
BreakBeforeBraces: Custom
BraceWrapping: {
AfterClass: 'true'
AfterControlStatement: 'true'
AfterEnum : 'true'
AfterFunction : 'true'
AfterNamespace : 'true'
AfterStruct : 'true'
AfterUnion : 'true'
BeforeCatch : 'true'
BeforeElse : 'true'
IndentBraces : 'false'
}
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: true
AfterFunction: true
AfterNamespace: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
BreakConstructorInitializersBeforeComma: false
Cpp11BracedListStyle: true
ColumnLimit: 140
ColumnLimit: 140
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ExperimentalAutoDetectBinPacking: true
UseTab: Never
@ -28,36 +25,35 @@ Standard: Cpp11
PointerAlignment: Middle
MaxEmptyLinesToKeep: 2
KeepEmptyLinesAtTheStartOfBlocks: false
#AllowShortFunctionsOnASingleLine: Inline
AllowShortFunctionsOnASingleLine: Empty
AlwaysBreakTemplateDeclarations: true
IndentCaseLabels: true
#SpaceAfterTemplateKeyword: true
SpaceAfterTemplateKeyword: true
SortIncludes: true
IncludeCategories:
- Regex: '^<[a-z_]+>'
Priority: 1
- Regex: '^<[a-z_]+.h>'
Priority: 2
- Regex: '^["<](common|ext|mysqlxx|daemon|zkutil)/'
Priority: 90
- Regex: '^["<](DB)/'
Priority: 100
- Regex: '^["<](Poco)/'
Priority: 50
- Regex: '^"'
Priority: 110
- Regex: '/'
Priority: 30
- Regex: '.*'
Priority: 40
- Regex: '^<[a-z_]+>'
Priority: 1
- Regex: '^<[a-z_]+.h>'
Priority: 2
- Regex: '^["<](common|ext|mysqlxx|daemon|zkutil)/'
Priority: 90
- Regex: '^["<](DB)/'
Priority: 100
- Regex: '^["<](Poco)/'
Priority: 50
- Regex: '^"'
Priority: 110
- Regex: '/'
Priority: 30
- Regex: '.*'
Priority: 40
ReflowComments: false
AlignEscapedNewlinesLeft: true
# Not changed:
AccessModifierOffset: -4
AlignConsecutiveAssignments: false
AlignOperands: false
AlignOperands: false
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
@ -70,16 +66,15 @@ BinPackArguments: false
BinPackParameters: false
BreakBeforeBinaryOperators: All
BreakBeforeTernaryOperators: true
CommentPragmas: '^ IWYU pragma:'
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
DerivePointerAlignment: false
DisableFormat: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IndentWidth: 4
DisableFormat: false
IndentWidth: 4
IndentWrappedFunctionNames: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MacroBlockEnd: ''
NamespaceIndentation: Inner
ObjCBlockIndentWidth: 4
ObjCSpaceAfterProperty: true
@ -99,5 +94,3 @@ SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
...
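With `SortIncludes: true` and the `IncludeCategories` priorities above, clang-format puts C++ standard headers first, C headers next, and third-party and `DB/` headers after them. A minimal sketch of applying the repository style from the source tree root; the file path is hypothetical and a clang-format 5+ binary is assumed:

```
# Reformat one file in place; -style=file picks up the nearest .clang-format.
clang-format -style=file -i dbms/src/Server/Server.cpp

# Preview the changes (including reordered includes) without modifying the file.
clang-format -style=file dbms/src/Server/Server.cpp | diff -u dbms/src/Server/Server.cpp -
```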

.gitignore vendored

@ -9,7 +9,7 @@
# auto generated files
*.logrt
build
/build
/docs/en_single_page/
/docs/ru_single_page/
/docs/venv/
@ -43,6 +43,7 @@ cmake-build-*
# Python cache
*.pyc
__pycache__
*.pytest_cache
# ignore generated files
*-metrika-yandex

.gitmodules vendored

@ -34,3 +34,6 @@
[submodule "contrib/boost"]
path = contrib/boost
url = https://github.com/ClickHouse-Extras/boost.git
[submodule "contrib/llvm"]
path = contrib/llvm
url = https://github.com/ClickHouse-Extras/llvm


@ -11,9 +11,10 @@ matrix:
#
# addons:
# apt:
# update: true
# sources:
# - ubuntu-toolchain-r-test
# packages: [ g++-7, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libzookeeper-mt-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl ]
# packages: [ g++-7, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl ]
#
# env:
# - MATRIX_EVAL="export CC=gcc-7 && export CXX=g++-7"
@ -33,10 +34,11 @@ matrix:
addons:
apt:
update: true
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty-5.0
packages: [ g++-7, clang-5.0, lld-5.0, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libzookeeper-mt-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl]
packages: [ g++-7, clang-5.0, lld-5.0, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl]
env:
- MATRIX_EVAL="export CC=clang-5.0 && export CXX=clang++-5.0"
@ -77,6 +79,7 @@ matrix:
addons:
apt:
update: true
packages: [ pbuilder, fakeroot, debhelper ]
script:
@ -94,6 +97,7 @@ matrix:
#
# addons:
# apt:
# update: true
# packages: [ pbuilder, fakeroot, debhelper ]
#
# env:
@ -115,6 +119,7 @@ matrix:
#
# addons:
# apt:
# update: true
# packages: [ pbuilder, fakeroot, debhelper ]
#
# env:
@ -137,7 +142,7 @@ matrix:
# - brew link --overwrite gcc || true
#
# env:
# - MATRIX_EVAL="export CC=gcc-7 && export CXX=g++-7"
# - MATRIX_EVAL="export CC=gcc-8 && export CXX=g++-8"
#
# script:
# - env CMAKE_FLAGS="-DUSE_INTERNAL_BOOST_LIBRARY=1" utils/travis/normal.sh


@ -1,3 +1,81 @@
# ClickHouse release 1.1.54381, 2018-05-14
## Bug fixes:
* Fixed a node leak in ZooKeeper that occurred when ClickHouse lost its connection to the ZooKeeper server.
# ClickHouse release 1.1.54380, 2018-04-21
## New features:
* Added table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random` `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
## Improvements:
* Subqueries could be wrapped by `()` braces (to enhance queries readability). For example, `(SELECT 1) UNION ALL (SELECT 1)`.
* Simple `SELECT` queries from table `system.processes` are not counted in `max_concurrent_queries` limit.
## Bug fixes:
* Fixed incorrect behaviour of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
* Fixed incorrect filtering by the partition index in expressions like `WHERE partition_key_column IN (...)`.
* Fixed inability to execute an `OPTIMIZE` query on a non-leader replica if the table was `RENAME`d.
* Fixed an authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
* Fixed freezing of `KILL QUERY` queries.
* Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and a slowdown of the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
## Backward incompatible changes:
* Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)` instead). In previous releases, these expressions led to non-deterministic data filtering or caused errors.
# ClickHouse release 1.1.54378, 2018-04-16
## New features:
* Logging level can be changed without restarting the server.
* Added the `SHOW CREATE DATABASE` query.
* The `query_id` can be passed to `clickhouse-client` (elBroom).
* New setting: `max_network_bandwidth_for_all_users`.
* Added support for `ALTER TABLE ... PARTITION ... ` for `MATERIALIZED VIEW`.
* Added information about the size of data parts in uncompressed form to system tables.
* Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
* Added a table-level setting for the `ReplicatedMergeTree` family to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
* Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed; it's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was cancelled` exception instead of an incomplete response.
## Improvements:
* `ALTER TABLE ... DROP/DETACH PARTITION` queries are run at the front of the replication queue.
* `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table has a single data part.
* A `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
* The `lengthUTF8` function runs faster (zhang2014).
* Improved performance of synchronous inserts in `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
* The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them when connecting to the client (they are applied in reverse order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
* More robust crash recovery for asynchronous insertion into `Distributed` tables.
* The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
## Bug fixes:
* Fixed an error with `IN` when the left side of the expression is `Nullable`.
* Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
* The `max_execution_time` limit now works correctly with distributed queries.
* Fixed errors when calculating the size of composite columns in the `system.columns` table.
* Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS`.
* Fixed errors in `StorageKafka` (#2075)
* Fixed server crashes from invalid arguments of certain aggregate functions.
* Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
* The `Too many parts` state is less likely to occur when inserting into aggregating materialized views (#2084).
* Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
* Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
* `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
* Fixed the possibility of a race condition when choosing the leader for `ReplicatedMergeTree` tables.
## Build changes:
* The build supports `ninja` instead of `make` and uses it by default for building releases.
* Renamed packages: `clickhouse-server-base` is now `clickhouse-common-static`; `clickhouse-server-common` is now `clickhouse-server`; `clickhouse-common-dbg` is now `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names are still uploaded to the repositories for backward compatibility.
## Backward-incompatible changes:
* Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
* Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for `listen`; use just `::`, which allows listening for connections over both IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior of previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
# ClickHouse release 1.1.54370, 2018-03-16
## New features:


@ -1,3 +1,83 @@
# ClickHouse release 1.1.54381, 2018-05-14
## Bug fixes:
* Fixed a bug that caused metadata to "leak" in ZooKeeper when the connection to the ZooKeeper server was lost.
# ClickHouse release 1.1.54380, 2018-04-21
## New features:
* Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random` `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
## Improvements:
* Subqueries can be wrapped in `()` braces to improve query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
* Simple `SELECT` queries from the `system.processes` table are not counted toward the `max_concurrent_queries` limit.
## Bug fixes:
* Fixed incorrect behaviour of the `IN` operator in `MATERIALIZED VIEW`.
* Fixed incorrect filtering by the partition key index in expressions like `partition_key_column IN (...)`.
* Fixed inability to execute an `OPTIMIZE` query on the leader replica after a table `RENAME`.
* Fixed authorization errors that occurred when executing `OPTIMIZE` and `ALTER` queries on a non-leader replica.
* Fixed freezing of `KILL QUERY` queries.
* Fixed a bug in the ZooKeeper client library which, when a non-empty `chroot` prefix was used in the configuration, led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue.
## Backward incompatible changes:
* Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)` instead). Previously, such queries could lead to non-deterministic filtering in `WHERE`.
# ClickHouse release 1.1.54378, 2018-04-16
## New features:
* The logging level can be changed without restarting the server.
* Added the `SHOW CREATE DATABASE` query.
* The `query_id` can be passed to `clickhouse-client` (elBroom).
* Added the `max_network_bandwidth_for_all_users` setting.
* Added support for `ALTER TABLE ... PARTITION ... ` for `MATERIALIZED VIEW`.
* Added information about the size of data parts in uncompressed form to system tables.
* Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica configuration in `<remote_servers>`).
* Added a table-level setting for the `ReplicatedMergeTree` family to reduce the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
* The `clickhouse-client` prompt is configurable. By default, the server name is now shown in the prompt. The server's display name can be changed; it is also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was cancelled` exception instead of an incomplete result.
## Improvements:
* `ALTER TABLE ... DROP/DETACH PARTITION` queries are executed at the front of the replication queue.
* `SELECT ... FINAL` and `OPTIMIZE ... FINAL` can be used even when the table's data consists of a single part.
* The `query_log` table is recreated on the fly if it was deleted manually (Kirill Shvakov).
* The `lengthUTF8` function runs faster (zhang2014).
* Improved performance of synchronous inserts into `Distributed` tables (`insert_distributed_sync = 1`) when there is a very large number of shards.
* The server accepts the `send_timeout` and `receive_timeout` settings from the client and applies them on its side for the connection to the client (in swapped order: the server socket's `send_timeout` is set to the `receive_timeout` value received from the client, and vice versa).
* More robust crash recovery for asynchronous inserts into `Distributed` tables.
* The return type of the `countEqual` function changed from `UInt32` to `UInt64` (谢磊).
## Bug fixes:
* Fixed a bug with `IN` when the left side of the expression is `Nullable`.
* Fixed an incorrect result when using tuples with `IN` when some of the tuple components are present in the table index.
* Fixed the `max_execution_time` limit for distributed queries.
* Fixed errors when calculating the sizes of composite columns in the `system.columns` table.
* Fixed an error when creating a temporary table with `CREATE TEMPORARY TABLE IF NOT EXISTS`.
* Fixed errors in `StorageKafka` (#2075).
* Fixed server crashes caused by invalid arguments of certain aggregate functions.
* Fixed a bug where the `DETACH DATABASE` query might not stop background tasks of `ReplicatedMergeTree` tables.
* Fixed the `Too many parts` problem in aggregating materialized views (#2084).
* Fixed recursive handling of substitutions in the config when one substitution requires another substitution on the same level.
* Fixed incorrect syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
* Fixed `SummingMergeTree` for summation of nested data structures with a composite key.
* Fixed a possible race condition when choosing the leader for `ReplicatedMergeTree` tables.
## Build changes:
* The build supports `ninja` instead of `make`; `ninja` is used by default for building releases.
* Renamed packages: `clickhouse-server-base` is now `clickhouse-common-static`; `clickhouse-server-common` is now `clickhouse-server`; `clickhouse-common-dbg` is now `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names are still uploaded to the repositories for compatibility.
## Backward incompatible changes:
* Removed the special interpretation of an `IN` expression when an array is specified on the left side. Previously, an expression like `arr IN (set)` was interpreted as "at least one element of `arr` belongs to the set `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`.
* Disabled the erroneous use of the `SO_REUSEPORT` socket option (which was mistakenly enabled by default in the Poco library). Note that on Linux it no longer makes sense to specify the addresses `::` and `0.0.0.0` simultaneously for `listen`; use just `::`, which (with the default kernel settings) allows listening for connections over both IPv4 and IPv6. You can also revert to the behavior of older versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
# ClickHouse release 1.1.54370, 2018-03-16
## New features:


@ -17,7 +17,6 @@ else ()
message (WARNING "You are using an unsupported compiler! Compilation has only been tested with Clang 5+ and GCC 7+.")
endif ()
# Write compile_commands.json
set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
@ -247,6 +246,7 @@ include (cmake/find_boost.cmake)
include (cmake/find_zlib.cmake)
include (cmake/find_zstd.cmake)
include (cmake/find_ltdl.cmake) # for odbc
include (cmake/find_termcap.cmake)
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/poco/cmake/FindODBC.cmake)
include (${CMAKE_CURRENT_SOURCE_DIR}/contrib/poco/cmake/FindODBC.cmake) # for poco
else ()


@ -1,8 +1,6 @@
# ClickHouse
ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real time.
🎤🥂 **ClickHouse Meetup in [Sunnyvale](https://www.meetup.com/San-Francisco-Bay-Area-ClickHouse-Meetup/events/248898966/) & [San Francisco](https://www.meetup.com/San-Francisco-Bay-Area-ClickHouse-Meetup/events/249162518/), April 23-27** 🍰🔥🐻
Learn more about ClickHouse at [https://clickhouse.yandex/](https://clickhouse.yandex/)
[![Build Status](https://travis-ci.org/yandex/ClickHouse.svg?branch=master)](https://travis-ci.org/yandex/ClickHouse)

ci/README.md Normal file

@ -0,0 +1,142 @@
## Build and test ClickHouse on various platforms
Quick and dirty scripts.
Usage example:
```
./run-with-docker.sh ubuntu:bionic jobs/quick-build/run.sh
```
Another example, checking the build on ARM 64:
```
./prepare-docker-image-ubuntu.sh
./run-with-docker.sh multiarch/ubuntu-core:arm64-bionic jobs/quick-build/run.sh
```
Another example, checking the build on FreeBSD:
```
./prepare-vagrant-image-freebsd.sh
./run-with-vagrant.sh freebsd jobs/quick-build/run.sh
```
Look at `default-config` and `jobs/quick-build/run.sh`.
Below are the various possible options; we are not going to automate testing for all of them.
#### CPU architectures:
- x86_64;
- AArch64.
x86_64 is the main CPU architecture. We also have minimal support for AArch64.
#### Operating systems:
- Linux;
- FreeBSD.
We also target Mac OS X, but it's more difficult to test.
Linux is the main target OS; FreeBSD is also supported as a production OS.
Mac OS is intended only for development and has minimal support: the client should work, and the server should at least start.
#### Linux distributions:
For build:
- Ubuntu Bionic;
- Ubuntu Trusty.
For running:
- Ubuntu Hardy;
- CentOS 5.
We should support almost any Linux distribution for running ClickHouse; that's why we also test on old distributions.
#### How to obtain sources:
- use sources from a local working copy;
- clone sources from GitHub;
- download a source tarball.
#### Compilers:
- gcc-7;
- gcc-8;
- clang-6;
- clang-svn.
#### Compiler installation:
- from OS packages;
- build from sources.
#### C++ standard library implementation:
- libc++;
- libstdc++ with C++11 ABI;
- libstdc++ with old ABI.
When building with clang, libc++ is used. When building with gcc, we choose libstdc++ with C++11 ABI.
#### Linkers:
- lld;
- gold.
When building with clang on x86_64, lld is used; otherwise we use gold.
#### Build types:
- RelWithDebInfo;
- Debug;
- ASan;
- TSan.
#### Build types, extra:
- -g0 for quick build;
- enable test coverage;
- debug tcmalloc.
#### What to build:
- only `clickhouse` target;
- all targets;
- debian packages.
We also intend to build RPM and simple tgz packages.
#### Where to get third-party libraries:
- from the contrib directory (submodules);
- from OS packages.
The only production option is to use libraries from the contrib directory.
Using libraries from OS packages is discouraged, but we also support this option.
#### Linkage types:
- static;
- shared.
Static linking is the only option for production usage.
We also support shared linking, but it is intended only for developers.
#### Make tools:
- make;
- ninja.
#### Installation options:
- run the built `clickhouse` binary directly;
- install from packages.
#### How to obtain packages:
- build them;
- download from repository.
#### Sanity checks:
- check that the clickhouse binary has no dependencies on unexpected shared libraries (see the sketch below);
- check that the source code has no style violations.
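A minimal sketch of the shared-library check, assuming the binary produced by `build-normal.sh`; the whitelist of expected system libraries is illustrative, not the actual CI list:

```
# Anything ldd reports beyond the usual glibc/loader set counts as unexpected.
UNEXPECTED=$(ldd workspace/build/dbms/src/Server/clickhouse \
    | grep -vE 'linux-vdso|ld-linux|libc\.so|libm\.so|libdl\.so|librt\.so|libpthread\.so')
if [ -n "$UNEXPECTED" ]; then
    echo "Unexpected shared library dependencies:"
    echo "$UNEXPECTED"
    exit 1
fi
```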
#### Tests:
- Functional tests;
- Integration tests;
- Unit tests;
- Simple sh/reference tests;
- Performance tests (note that they require predictable computing power);
- Tests for external dictionaries (should be moved to integration tests);
- Jepsen-like tests for quorum inserts (not yet available in open source).
#### Tests extra:
- Run functional tests with Valgrind.
#### Static analyzers:
- CppCheck;
- clang-tidy;
- Coverity.

ci/build-clang-from-sources.sh Executable file

@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -e -x
source default-config
# TODO Non debian systems
./install-os-packages.sh svn
./install-os-packages.sh cmake
mkdir "${WORKSPACE}/llvm"
svn co "http://llvm.org/svn/llvm-project/llvm/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm"
svn co "http://llvm.org/svn/llvm-project/cfe/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/tools/clang"
svn co "http://llvm.org/svn/llvm-project/lld/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/tools/lld"
svn co "http://llvm.org/svn/llvm-project/polly/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/tools/polly"
svn co "http://llvm.org/svn/llvm-project/clang-tools-extra/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/tools/clang/tools/extra"
svn co "http://llvm.org/svn/llvm-project/compiler-rt/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/projects/compiler-rt"
svn co "http://llvm.org/svn/llvm-project/libcxx/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/projects/libcxx"
svn co "http://llvm.org/svn/llvm-project/libcxxabi/${CLANG_SOURCES_BRANCH}" "${WORKSPACE}/llvm/llvm/projects/libcxxabi"
mkdir "${WORKSPACE}/llvm/build"
cd "${WORKSPACE}/llvm/build"
# NOTE You must build LLVM with the same ABI as ClickHouse.
# For example, if you compile ClickHouse with libc++, you must add
# -DLLVM_ENABLE_LIBCXX=1
# to the line below.
cmake -DCMAKE_BUILD_TYPE:STRING=Release ../llvm
make -j $THREADS
$SUDO make install
hash clang
cd ../../..
export CC=clang
export CXX=clang++

ci/build-debian-packages.sh Executable file

@ -0,0 +1,8 @@
#!/usr/bin/env bash
set -e -x
source default-config
[[ -d "${WORKSPACE}/sources" ]] || die "Run get-sources.sh first"
"${WORKSPACE}/sources/release"

ci/build-gcc-from-sources.sh Executable file

@ -0,0 +1,47 @@
#!/usr/bin/env bash
set -e -x
source default-config
./install-os-packages.sh curl
if [[ "${GCC_SOURCES_VERSION}" == "latest" ]]; then
GCC_SOURCES_VERSION=$(curl -sSL https://ftpmirror.gnu.org/gcc/ | grep -oE 'gcc-[0-9]+(\.[0-9]+)+' | sort -Vr | head -n1)
fi
GCC_VERSION_SHORT=$(echo "$GCC_SOURCES_VERSION" | grep -oE '[0-9]' | head -n1)
echo "Will download ${GCC_SOURCES_VERSION} (short version: $GCC_VERSION_SHORT)."
THREADS=$(grep -c ^processor /proc/cpuinfo)
mkdir "${WORKSPACE}/gcc"
pushd "${WORKSPACE}/gcc"
wget https://ftpmirror.gnu.org/gcc/${GCC_SOURCES_VERSION}/${GCC_SOURCES_VERSION}.tar.xz
tar xf ${GCC_SOURCES_VERSION}.tar.xz
pushd ${GCC_SOURCES_VERSION}
./contrib/download_prerequisites
popd
mkdir gcc-build
pushd gcc-build
../${GCC_SOURCES_VERSION}/configure --enable-languages=c,c++ --disable-multilib
make -j $THREADS
$SUDO make install
popd
popd
$SUDO ln -sf /usr/local/bin/gcc /usr/local/bin/gcc-${GCC_VERSION_SHORT}
$SUDO ln -sf /usr/local/bin/g++ /usr/local/bin/g++-${GCC_VERSION_SHORT}
$SUDO ln -sf /usr/local/bin/gcc /usr/local/bin/cc
$SUDO ln -sf /usr/local/bin/g++ /usr/local/bin/c++
echo '/usr/local/lib64' | $SUDO tee /etc/ld.so.conf.d/10_local-lib64.conf
$SUDO ldconfig
hash gcc g++
gcc --version
export CC=gcc
export CXX=g++

ci/build-normal.sh Executable file

@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e -x
source default-config
[[ -d "${WORKSPACE}/sources" ]] || die "Run get-sources.sh first"
mkdir -p "${WORKSPACE}/build"
pushd "${WORKSPACE}/build"
if [[ "${ENABLE_EMBEDDED_COMPILER}" == 1 ]]; then
[[ "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 0 ]] && CMAKE_FLAGS="$CMAKE_FLAGS -DUSE_INTERNAL_LLVM_LIBRARY=1"
[[ "$USE_LLVM_LIBRARIES_FROM_SYSTEM" != 0 ]] && CMAKE_FLAGS="$CMAKE_FLAGS -DUSE_INTERNAL_LLVM_LIBRARY=0"
fi
cmake -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DENABLE_EMBEDDED_COMPILER=${ENABLE_EMBEDDED_COMPILER} $CMAKE_FLAGS ../sources
[[ "$BUILD_TARGETS" != 'all' ]] && BUILD_TARGETS_STRING="--target $BUILD_TARGETS"
cmake --build . $BUILD_TARGETS_STRING -- -j $THREADS
popd

ci/check-docker.sh Executable file

@ -0,0 +1,7 @@
#!/usr/bin/env bash
set -e -x
source default-config
command -v docker > /dev/null || die "You need to install Docker"
docker ps > /dev/null || die "You need to have access to Docker: run '$SUDO usermod -aG docker $USER' and relogin"

ci/check-syntax.sh Executable file

@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -e -x
source default-config
./install-os-packages.sh jq
[[ -d "${WORKSPACE}/sources" ]] || die "Run get-sources.sh first"
mkdir -p "${WORKSPACE}/build"
pushd "${WORKSPACE}/build"
cmake -DCMAKE_BUILD_TYPE=Debug $CMAKE_FLAGS ../sources
make -j $THREADS re2_st # Generated headers
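# The next command rewrites every compile command from compile_commands.json (skipping
# contrib/ sources) into a syntax-only check by replacing "-o <object>" with -fsyntax-only.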
jq --raw-output '.[] | .command' compile_commands.json | grep -v -P -- '-c .+/contrib/' | sed -r -e 's/-o\s+\S+/-fsyntax-only/' > syntax-commands
xargs --arg-file=syntax-commands --max-procs=$THREADS --replace /bin/sh -c "{}"
popd

ci/create-sources-tarball.sh Executable file

@ -0,0 +1,10 @@
#!/usr/bin/env bash
set -e -x
source default-config
if [[ -d "${WORKSPACE}/sources" ]]; then
tar -c -z -f "${WORKSPACE}/sources.tar.gz" --directory "${WORKSPACE}/sources" .
else
die "Run get-sources first"
fi

ci/default-config Normal file

@ -0,0 +1,65 @@
#!/usr/bin/env bash
set -e -x
if [[ -z "$INITIALIZED" ]]; then
INITIALIZED=1
SCRIPTPATH=$(pwd)
WORKSPACE=${SCRIPTPATH}/workspace
PROJECT_ROOT=$(cd $SCRIPTPATH/.. && pwd)
# Almost all scripts take no arguments. Arguments should be in config.
# get-sources
SOURCES_METHOD=local # clone, local, tarball
SOURCES_CLONE_URL="https://github.com/yandex/ClickHouse.git"
SOURCES_BRANCH="master"
SOURCES_COMMIT=HEAD # check out this commit after cloning
# prepare-toolchain
COMPILER=gcc # gcc, clang
COMPILER_INSTALL_METHOD=packages # packages, sources
COMPILER_PACKAGE_VERSION=7 # or 6.0 for clang
# install-compiler-from-sources
CLANG_SOURCES_BRANCH=trunk # or tags/RELEASE_600/final
GCC_SOURCES_VERSION=latest # or gcc-7.1.0
# install-libraries
USE_LLVM_LIBRARIES_FROM_SYSTEM=0 # 0 or 1
ENABLE_EMBEDDED_COMPILER=1
# build
BUILD_METHOD=normal # normal, debian
BUILD_TARGETS=clickhouse # target name, or all; only used with BUILD_METHOD=normal
BUILD_TYPE=RelWithDebInfo # RelWithDebInfo, Debug, ASan, TSan
CMAKE_FLAGS=""
# prepare-docker-image-ubuntu
DOCKER_UBUNTU_VERSION=bionic
DOCKER_UBUNTU_ARCH=arm64 # How the architecture is named in a tarball at https://partner-images.canonical.com/core/
DOCKER_UBUNTU_QUEMU_ARCH=aarch64 # How the architecture is named in QEMU
DOCKER_UBUNTU_TAG_ARCH=arm64 # How the architecture is named in Docker
DOCKER_UBUNTU_QEMU_VER=v2.9.1
DOCKER_UBUNTU_REPO=multiarch/ubuntu-core
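# Portable CPU count: /proc/cpuinfo on Linux, then nproc, then sysctl hw.ncpu on BSD/macOS.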
THREADS=$(grep -c ^processor /proc/cpuinfo || nproc || sysctl -a | grep -F 'hw.ncpu' | grep -oE '[0-9]+')
# All scripts should return 0 in case of success, 1 in case of permanent error,
# 2 in case of temporary error; any other code is treated as a permanent error.
function die {
echo ${1:-Error}
exit ${2:-1}
}
[[ $EUID -ne 0 ]] && SUDO=sudo
./install-os-packages.sh prepare
# Configuration parameters may be overridden with the CONFIG environment variable pointing to a config file.
[[ -n "$CONFIG" ]] && source $CONFIG
mkdir -p $WORKSPACE
fi
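Because `default-config` sources `$CONFIG` after assigning the defaults, any of the variables above can be overridden from a file. A minimal sketch; the file name `my-config` is hypothetical:

```
cat > my-config <<'EOF'
BUILD_TYPE=Debug
BUILD_TARGETS=all
THREADS=4
EOF

CONFIG=$(pwd)/my-config ./build-normal.sh
```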


@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2016 Multiarch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,53 @@
Source: https://github.com/multiarch/ubuntu-core
Commit: 3972a7794b40a965615abd710759d3ed439c9a55
# :earth_africa: ubuntu-core
![](https://raw.githubusercontent.com/multiarch/dockerfile/master/logo.jpg)
Multiarch Ubuntu images for Docker.
Based on https://github.com/tianon/docker-brew-ubuntu-core/
* `multiarch/ubuntu-core` on [Docker Hub](https://hub.docker.com/r/multiarch/ubuntu-core/)
* [Available tags](https://hub.docker.com/r/multiarch/ubuntu-core/tags/)
## Usage
First, you need to configure binfmt-support on your Docker host.
This works locally or remotely (i.e. using boot2docker or swarm).
```console
# configure binfmt-support on the Docker host (works locally or remotely, i.e: using boot2docker)
$ docker run --rm --privileged multiarch/qemu-user-static:register --reset
```
Then you can run an `armhf` image from your `x86_64` Docker host.
```console
$ docker run -it --rm multiarch/ubuntu-core:armhf-wily
root@a0818570f614:/# uname -a
Linux a0818570f614 4.1.13-boot2docker #1 SMP Fri Nov 20 19:05:50 UTC 2015 armv7l armv7l armv7l GNU/Linux
root@a0818570f614:/# exit
```
Or an `x86_64` image from your `x86_64` Docker host, directly, without qemu emulation.
```console
$ docker run -it --rm multiarch/ubuntu-core:amd64-wily
root@27fe384370c9:/# uname -a
Linux 27fe384370c9 4.1.13-boot2docker #1 SMP Fri Nov 20 19:05:50 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux
root@27fe384370c9:/#
```
It also works for `arm64`
```console
$ docker run -it --rm multiarch/ubuntu-core:arm64-wily
root@723fb9f184fa:/# uname -a
Linux 723fb9f184fa 4.1.13-boot2docker #1 SMP Fri Nov 20 19:05:50 UTC 2015 aarch64 aarch64 aarch64 GNU/Linux
```
## License
MIT

ci/docker-multiarch/update.sh Executable file

@ -0,0 +1,96 @@
#!/usr/bin/env bash
set -e -x
# A POSIX variable
OPTIND=1 # Reset in case getopts has been used previously in the shell.
while getopts "a:v:q:u:d:t:" opt; do
case "$opt" in
a) ARCH=$OPTARG
;;
v) VERSION=$OPTARG
;;
q) QEMU_ARCH=$OPTARG
;;
u) QEMU_VER=$OPTARG
;;
d) DOCKER_REPO=$OPTARG
;;
t) TAG_ARCH=$OPTARG
;;
esac
done
thisTarBase="ubuntu-$VERSION-core-cloudimg-$ARCH"
thisTar="$thisTarBase-root.tar.gz"
baseUrl="https://partner-images.canonical.com/core/$VERSION"
# install qemu-user-static
if [ -n "${QEMU_ARCH}" ]; then
if [ ! -f x86_64_qemu-${QEMU_ARCH}-static.tar.gz ]; then
wget -N https://github.com/multiarch/qemu-user-static/releases/download/${QEMU_VER}/x86_64_qemu-${QEMU_ARCH}-static.tar.gz
fi
tar -xvf x86_64_qemu-${QEMU_ARCH}-static.tar.gz -C $ROOTFS/usr/bin/
fi
# get the image
if \
wget -q --spider "$baseUrl/current" \
&& wget -q --spider "$baseUrl/current/$thisTar" \
; then
baseUrl+='/current'
fi
wget -qN "$baseUrl/"{{MD5,SHA{1,256}}SUMS{,.gpg},"$thisTarBase.manifest",'unpacked/build-info.txt'} || true
wget -N "$baseUrl/$thisTar"
# check checksum
if [ -f SHA256SUMS ]; then
sha256sum="$(sha256sum "$thisTar" | cut -d' ' -f1)"
if ! grep -q "$sha256sum" SHA256SUMS; then
echo >&2 "error: '$thisTar' has invalid SHA256"
exit 1
fi
fi
cat > Dockerfile <<-EOF
FROM scratch
ADD $thisTar /
ENV ARCH=${ARCH} UBUNTU_SUITE=${VERSION} DOCKER_REPO=${DOCKER_REPO}
EOF
# add qemu-user-static binary
if [ -n "${QEMU_ARCH}" ]; then
cat >> Dockerfile <<EOF
# Add qemu-user-static binary for amd64 builders
ADD x86_64_qemu-${QEMU_ARCH}-static.tar.gz /usr/bin
EOF
fi
cat >> Dockerfile <<-EOF
# a few minor docker-specific tweaks
# see https://github.com/docker/docker/blob/master/contrib/mkimage/debootstrap
RUN echo '#!/bin/sh' > /usr/sbin/policy-rc.d \\
&& echo 'exit 101' >> /usr/sbin/policy-rc.d \\
&& chmod +x /usr/sbin/policy-rc.d \\
&& dpkg-divert --local --rename --add /sbin/initctl \\
&& cp -a /usr/sbin/policy-rc.d /sbin/initctl \\
&& sed -i 's/^exit.*/exit 0/' /sbin/initctl \\
&& echo 'force-unsafe-io' > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup \\
&& echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' > /etc/apt/apt.conf.d/docker-clean \\
&& echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' >> /etc/apt/apt.conf.d/docker-clean \\
&& echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' >> /etc/apt/apt.conf.d/docker-clean \\
&& echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/docker-no-languages \\
&& echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' > /etc/apt/apt.conf.d/docker-gzip-indexes
# enable the universe
RUN sed -i 's/^#\s*\(deb.*universe\)$/\1/g' /etc/apt/sources.list
# overwrite this with 'CMD []' in a dependent Dockerfile
CMD ["/bin/bash"]
EOF
docker build -t "${DOCKER_REPO}:${TAG_ARCH}-${VERSION}" .
docker run --rm "${DOCKER_REPO}:${TAG_ARCH}-${VERSION}" /bin/bash -ec "echo Hello from Ubuntu!"

ci/get-sources.sh Executable file

@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -e -x
source default-config
if [[ "$SOURCES_METHOD" == "clone" ]]; then
./install-os-packages.sh git
SOURCES_DIR="${WORKSPACE}/sources"
mkdir -p "${SOURCES_DIR}"
git clone --recursive --branch "$SOURCES_BRANCH" "$SOURCES_CLONE_URL" "${SOURCES_DIR}"
pushd "${SOURCES_DIR}"
git checkout --recurse-submodules "$SOURCES_COMMIT"
popd
elif [[ "$SOURCES_METHOD" == "local" ]]; then
ln -f -s "${PROJECT_ROOT}" "${WORKSPACE}/sources"
else
die "Unknown SOURCES_METHOD"
fi


@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -e -x
source default-config
# TODO Install from PPA on older Ubuntu
./install-os-packages.sh ${COMPILER}-${COMPILER_PACKAGE_VERSION}
if [[ "$COMPILER" == "gcc" ]]; then
if command -v gcc-${COMPILER_PACKAGE_VERSION}; then export CC=gcc-${COMPILER_PACKAGE_VERSION} CXX=g++-${COMPILER_PACKAGE_VERSION};
elif command -v gcc${COMPILER_PACKAGE_VERSION}; then export CC=gcc${COMPILER_PACKAGE_VERSION} CXX=g++${COMPILER_PACKAGE_VERSION};
elif command -v gcc; then export CC=gcc CXX=g++;
fi
elif [[ "$COMPILER" == "clang" ]]; then
if command -v clang-${COMPILER_PACKAGE_VERSION}; then export CC=clang-${COMPILER_PACKAGE_VERSION} CXX=clang++-${COMPILER_PACKAGE_VERSION};
elif command -v clang${COMPILER_PACKAGE_VERSION}; then export CC=clang${COMPILER_PACKAGE_VERSION} CXX=clang++${COMPILER_PACKAGE_VERSION};
elif command -v clang; then export CC=clang CXX=clang++;
fi
else
die "Unknown compiler specified"
fi


@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -e -x
source default-config
if [[ "$COMPILER" == "gcc" ]]; then
. build-gcc-from-sources.sh
elif [[ "$COMPILER" == "clang" ]]; then
. build-clang-from-sources.sh
else
die "Unknown COMPILER"
fi

ci/install-libraries.sh Executable file

@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e -x
source default-config
./install-os-packages.sh libssl-dev
./install-os-packages.sh libicu-dev
./install-os-packages.sh libreadline-dev
./install-os-packages.sh libmariadbclient-dev
./install-os-packages.sh libunixodbc-dev
if [[ "$ENABLE_EMBEDDED_COMPILER" == 1 && "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 1 ]]; then
./install-os-packages.sh llvm-libs-5.0
fi

ci/install-os-packages.sh Executable file

@ -0,0 +1,120 @@
#!/usr/bin/env bash
set -e -x
# Dispatches package installation on various OSes and distributions
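# Usage: ./install-os-packages.sh <package>, e.g.: ./install-os-packages.sh cmake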
WHAT=$1
[[ $EUID -ne 0 ]] && SUDO=sudo
command -v apt-get && PACKAGE_MANAGER=apt
command -v yum && PACKAGE_MANAGER=yum
command -v pkg && PACKAGE_MANAGER=pkg
case $PACKAGE_MANAGER in
apt)
case $WHAT in
prepare)
$SUDO apt-get update
;;
svn)
$SUDO apt-get install -y subversion
;;
gcc*)
$SUDO apt-get install -y $WHAT ${WHAT/cc/++}
;;
clang*)
$SUDO apt-get install -y $WHAT libc++-dev libc++abi-dev
[[ $(uname -m) == "x86_64" ]] && $SUDO apt-get install -y ${WHAT/clang/lld} || true
;;
git)
$SUDO apt-get install -y git
;;
cmake)
$SUDO apt-get install -y cmake3 || $SUDO apt-get install -y cmake
;;
curl)
$SUDO apt-get install -y curl
;;
jq)
$SUDO apt-get install -y jq
;;
libssl-dev)
$SUDO apt-get install -y libssl-dev
;;
libicu-dev)
$SUDO apt-get install -y libicu-dev
;;
libreadline-dev)
$SUDO apt-get install -y libreadline-dev
;;
libunixodbc-dev)
$SUDO apt-get install -y unixodbc-dev
;;
libmariadbclient-dev)
$SUDO apt-get install -y libmariadbclient-dev
;;
llvm-libs*)
$SUDO apt-get install -y ${WHAT/llvm-libs/liblld}-dev ${WHAT/llvm-libs/libclang}-dev
;;
qemu-user-static)
$SUDO apt-get install -y qemu-user-static
;;
vagrant-virtualbox)
$SUDO apt-get install -y vagrant virtualbox
;;
*)
echo "Unknown package"; exit 1;
;;
esac
;;
pkg)
case $WHAT in
prepare)
;;
svn)
$SUDO pkg install -y subversion
;;
gcc*)
$SUDO pkg install -y ${WHAT/-/}
;;
clang*)
$SUDO pkg install -y clang-devel
;;
git)
$SUDO pkg install -y git
;;
cmake)
$SUDO pkg install -y cmake
;;
curl)
$SUDO pkg install -y curl
;;
jq)
$SUDO pkg install -y jq
;;
libssl-dev)
$SUDO pkg install -y openssl
;;
libicu-dev)
$SUDO pkg install -y icu
;;
libreadline-dev)
$SUDO pkg install -y readline
;;
libunixodbc-dev)
$SUDO pkg install -y unixODBC libltdl
;;
libmariadbclient-dev)
$SUDO pkg install -y mariadb102-client
;;
*)
echo "Unknown package"; exit 1;
;;
esac
;;
*)
echo "Unknown distributive"; exit 1;
;;
esac


@ -0,0 +1,5 @@
## Build in debug mode and without many libraries
This job is intended as a first check that the build is not broken on a wide variety of platforms.
The results of this build are not intended for production usage.

ci/jobs/quick-build/run.sh Executable file

@ -0,0 +1,31 @@
#!/usr/bin/env bash
set -e -x
# How to run:
# From "ci" directory:
# jobs/quick-build/run.sh
# or:
# ./run-with-docker.sh ubuntu:bionic jobs/quick-build/run.sh
cd "$(dirname $0)"/../..
. default-config
SOURCES_METHOD=local
COMPILER=clang
COMPILER_INSTALL_METHOD=packages
COMPILER_PACKAGE_VERSION=6.0
USE_LLVM_LIBRARIES_FROM_SYSTEM=0
BUILD_METHOD=normal
BUILD_TARGETS=clickhouse
BUILD_TYPE=Debug
ENABLE_EMBEDDED_COMPILER=0
CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_TCMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_POCO_ODBC=0 -D ENABLE_MYSQL=0"
[[ $(uname) == "FreeBSD" ]] && COMPILER_PACKAGE_VERSION=devel && export COMPILER_PATH=/usr/local/bin
. get-sources.sh
. prepare-toolchain.sh
. install-libraries.sh
. build-normal.sh


@ -0,0 +1,23 @@
#!/usr/bin/env bash
set -e -x
source default-config
./check-docker.sh
# http://fl47l1n3.net/2015/12/24/binfmt/
./install-os-packages.sh qemu-user-static
pushd docker-multiarch
$SUDO ./update.sh \
-a "$DOCKER_UBUNTU_ARCH" \
-v "$DOCKER_UBUNTU_VERSION" \
-q "$DOCKER_UBUNTU_QUEMU_ARCH" \
-u "$DOCKER_UBUNTU_QEMU_VER" \
-d "$DOCKER_UBUNTU_REPO" \
-t "$DOCKER_UBUNTU_TAG_ARCH"
docker run --rm --privileged multiarch/qemu-user-static:register
popd

ci/prepare-toolchain.sh Executable file

@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e -x
source default-config
./install-os-packages.sh cmake
if [[ "$COMPILER_INSTALL_METHOD" == "packages" ]]; then
. install-compiler-from-packages.sh
elif [[ "$COMPILER_INSTALL_METHOD" == "sources" ]]; then
. install-compiler-from-sources.sh
else
die "Unknown COMPILER_INSTALL_METHOD"
fi


@ -0,0 +1,12 @@
#!/usr/bin/env bash
set -e -x
source default-config
./install-os-packages.sh vagrant-virtualbox
pushd "vagrant-freebsd"
vagrant up
vagrant ssh-config > vagrant-ssh
ssh -F vagrant-ssh default 'uname -a'
popd


@ -0,0 +1,18 @@
#!/usr/bin/env bash
set -e -x
# Usage example:
# ./run-with-docker.sh centos:centos6 ./run-clickhouse-from-binaries.sh
source default-config
SERVER_BIN="${WORKSPACE}/build/dbms/src/Server/clickhouse"
SERVER_CONF="${WORKSPACE}/sources/dbms/src/Server/config.xml"
SERVER_DATADIR="${WORKSPACE}/clickhouse"
[[ -x "$SERVER_BIN" ]] || die "Run build-normal.sh first"
[[ -r "$SERVER_CONF" ]] || die "Run get-sources.sh first"
mkdir -p "${SERVER_DATADIR}"
$SERVER_BIN server --config-file "$SERVER_CONF" --pid-file="${WORKSPACE}/clickhouse.pid" -- --path "$SERVER_DATADIR"

ci/run-with-docker.sh Executable file

@ -0,0 +1,9 @@
#!/usr/bin/env bash
set -e -x
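# Share the host ccache directory with the container so that repeated builds are faster.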
mkdir -p /var/cache/ccache
DOCKER_ENV+=" --mount=type=bind,source=/var/cache/ccache,destination=/ccache -e CCACHE_DIR=/ccache "
PROJECT_ROOT="$(cd "$(dirname "$0")/.."; pwd -P)"
[[ -n "$CONFIG" ]] && DOCKER_ENV="--env=CONFIG"
docker run -t --network=host --mount=type=bind,source=${PROJECT_ROOT},destination=/ClickHouse --workdir=/ClickHouse/ci $DOCKER_ENV "$1" "$2"

ci/run-with-vagrant.sh Executable file

@ -0,0 +1,14 @@
#!/usr/bin/env bash
set -e -x
[[ -r "vagrant-${1}/vagrant-ssh" ]] || die "Run prepare-vagrant-image-... first."
pushd vagrant-$1
shopt -s extglob
vagrant ssh -c "mkdir -p ClickHouse"
scp -q -F vagrant-ssh -r ../../!(*build*) default:~/ClickHouse
vagrant ssh -c "cd ClickHouse/ci; $2"
popd

ci/vagrant-freebsd/.gitignore vendored Normal file

@ -0,0 +1 @@
.vagrant

ci/vagrant-freebsd/Vagrantfile vendored Normal file

@ -0,0 +1,3 @@
Vagrant.configure("2") do |config|
config.vm.box = "generic/freebsd11"
end


@ -22,6 +22,10 @@ if (NOT MSVC)
set (NOT_MSVC 1)
endif ()
if (NOT APPLE)
set (NOT_APPLE 1)
endif ()
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (COMPILER_GCC 1)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")


@ -1,13 +1,10 @@
function(generate_function_register FUNCTION_AREA)
foreach(FUNCTION IN LISTS ARGN)
configure_file (registerFunction.h.in register${FUNCTION}.h)
configure_file (registerFunction.cpp.in register${FUNCTION}.cpp)
set(REGISTER_HEADERS "${REGISTER_HEADERS} #include \"register${FUNCTION}.h\"\n")
set(REGISTER_FUNCTIONS "${REGISTER_FUNCTIONS} register${FUNCTION}(factory);\n")
foreach(FUNCTION IN LISTS ARGN)
configure_file (registerFunction.h.in register${FUNCTION}.h)
configure_file (registerFunction.cpp.in register${FUNCTION}.cpp)
set(REGISTER_HEADERS "${REGISTER_HEADERS}#include \"register${FUNCTION}.h\"\n")
set(REGISTER_FUNCTIONS "${REGISTER_FUNCTIONS} register${FUNCTION}(factory);\n")
endforeach()
configure_file (registerFunctions_area.cpp.in registerFunctions${FUNCTION_AREA}.cpp)
endfunction()


@ -1,6 +1,9 @@
if (ARCH_FREEBSD)
find_library (EXECINFO_LIBRARY execinfo)
find_library (ELF_LIBRARY elf)
message (STATUS "Using execinfo: ${EXECINFO_LIBRARY}")
message (STATUS "Using elf: ${ELF_LIBRARY}")
else ()
set (EXECINFO_LIBRARY "")
set (ELF_LIBRARY "")
endif ()


@ -1,107 +1,57 @@
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" FALSE)
option (ENABLE_EMBEDDED_COMPILER "Set to TRUE to enable support for 'compile' option for query execution" 1)
option (USE_INTERNAL_LLVM_LIBRARY "Use bundled or system LLVM library. Default: system library for quicker developer builds." ${APPLE})
if (ENABLE_EMBEDDED_COMPILER)
# Based on source code of YT.
# Authors: Ivan Puzyrevskiy, Alexey Lukyanchikov, Ruslan Savchenko.
if (USE_INTERNAL_LLVM_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/CMakeLists.txt")
message (WARNING "submodule contrib/llvm is missing. to fix try run: \n git submodule update --init --recursive")
set (USE_INTERNAL_LLVM_LIBRARY 0)
endif ()
# Find LLVM includes and libraries.
#
# LLVM_VERSION - LLVM version.
# LLVM_INCLUDE_DIRS - Directory containing LLVM headers.
# LLVM_LIBRARY_DIRS - Directory containing LLVM libraries.
# LLVM_CXXFLAGS - C++ compiler flags for files that include LLVM headers.
# LLVM_FOUND - True if LLVM was found.
if (NOT USE_INTERNAL_LLVM_LIBRARY)
set (LLVM_PATHS "/usr/local/lib/llvm")
# llvm_map_components_to_libraries - Maps LLVM used components to required libraries.
# Usage: llvm_map_components_to_libraries(REQUIRED_LLVM_LIBRARIES core jit interpreter native ...)
if (LLVM_VERSION)
find_package(LLVM ${LLVM_VERSION} CONFIG PATHS ${LLVM_PATHS})
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
find_package(LLVM ${CMAKE_CXX_COMPILER_VERSION} CONFIG PATHS ${LLVM_PATHS})
else ()
find_package (LLVM 6 CONFIG PATHS ${LLVM_PATHS})
if (NOT LLVM_FOUND)
find_package (LLVM 5 CONFIG PATHS ${LLVM_PATHS})
endif ()
if (NOT LLVM_FOUND)
find_package (LLVM 7 CONFIG PATHS ${LLVM_PATHS})
endif ()
endif ()
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(LLVM_VERSION_POSTFIX "${COMPILER_POSTFIX}" CACHE STRING "")
else()
if (ARCH_FREEBSD)
set(LLVM_VERSION_POSTFIX "50" CACHE STRING "")
else()
set(LLVM_VERSION_POSTFIX "-5.0" CACHE STRING "")
if (LLVM_FOUND)
# Remove dynamically-linked zlib and libedit from LLVM's dependencies:
set_target_properties(LLVMSupport PROPERTIES INTERFACE_LINK_LIBRARIES "-lpthread;LLVMDemangle")
set_target_properties(LLVMLineEditor PROPERTIES INTERFACE_LINK_LIBRARIES "LLVMSupport")
option(LLVM_HAS_RTTI "Enable if LLVM was built with RTTI enabled" ON)
set (USE_EMBEDDED_COMPILER 1)
endif()
else()
set (LLVM_FOUND 1)
set (USE_EMBEDDED_COMPILER 1)
set (LLVM_VERSION "7.0.0bundled")
set (LLVM_INCLUDE_DIRS
${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/include
${ClickHouse_SOURCE_DIR}/contrib/llvm/clang/include
${ClickHouse_BINARY_DIR}/contrib/llvm/clang/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/clang/include
${ClickHouse_SOURCE_DIR}/contrib/llvm/lld/include
${ClickHouse_BINARY_DIR}/contrib/llvm/lld/include
${ClickHouse_BINARY_DIR}/contrib/llvm/llvm/tools/lld/include)
set (LLVM_LIBRARY_DIRS ${ClickHouse_BINARY_DIR}/contrib/llvm/llvm)
endif()
find_program(LLVM_CONFIG_EXECUTABLE
NAMES llvm-config${LLVM_VERSION_POSTFIX} llvm-config llvm-config-devel
PATHS $ENV{LLVM_ROOT}/bin)
mark_as_advanced(LLVM_CONFIG_EXECUTABLE)
if(NOT LLVM_CONFIG_EXECUTABLE)
message(FATAL_ERROR "Cannot find LLVM (looking for `llvm-config${LLVM_VERSION_POSTFIX}`, `llvm-config`, `llvm-config-devel`). Please, provide LLVM_ROOT environment variable.")
else()
set(LLVM_FOUND TRUE)
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} --version
OUTPUT_VARIABLE LLVM_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(LLVM_VERSION VERSION_LESS "5")
message(FATAL_ERROR "LLVM 5+ is required. You have ${LLVM_VERSION} (${LLVM_CONFIG_EXECUTABLE})")
endif()
message(STATUS "LLVM config: ${LLVM_CONFIG_EXECUTABLE}; version: ${LLVM_VERSION}")
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} --includedir
OUTPUT_VARIABLE LLVM_INCLUDE_DIRS
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} --libdir
OUTPUT_VARIABLE LLVM_LIBRARY_DIRS
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} --cxxflags
OUTPUT_VARIABLE LLVM_CXXFLAGS
OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} --targets-built
OUTPUT_VARIABLE LLVM_TARGETS_BUILT
OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REPLACE " " ";" LLVM_TARGETS_BUILT "${LLVM_TARGETS_BUILT}")
if (USE_STATIC_LIBRARIES)
set (LLVM_CONFIG_ADD "--link-static")
endif()
# Get the link libs we need.
function(llvm_map_components_to_libraries RESULT)
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} ${LLVM_CONFIG_ADD} --libs ${ARGN}
OUTPUT_VARIABLE _tmp
OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REPLACE " " ";" _libs_module "${_tmp}")
#message(STATUS "LLVM Libraries for '${ARGN}': ${_libs_module}")
execute_process(
COMMAND ${LLVM_CONFIG_EXECUTABLE} --system-libs ${ARGN}
OUTPUT_VARIABLE _libs_system
OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REPLACE "\n" " " _libs_system "${_libs_system}")
string(REPLACE " " " " _libs_system "${_libs_system}")
string(REPLACE " " ";" _libs_system "${_libs_system}")
set(${RESULT} ${_libs_module} ${_libs_system} PARENT_SCOPE)
endfunction(llvm_map_components_to_libraries)
message(STATUS "LLVM Include Directory: ${LLVM_INCLUDE_DIRS}")
message(STATUS "LLVM Library Directory: ${LLVM_LIBRARY_DIRS}")
message(STATUS "LLVM C++ Compiler: ${LLVM_CXXFLAGS}")
endif()
if (LLVM_FOUND AND LLVM_INCLUDE_DIRS AND LLVM_LIBRARY_DIRS)
set(USE_EMBEDDED_COMPILER TRUE)
if (LLVM_FOUND)
message(STATUS "LLVM version: ${LLVM_PACKAGE_VERSION}")
message(STATUS "LLVM include Directory: ${LLVM_INCLUDE_DIRS}")
message(STATUS "LLVM library Directory: ${LLVM_LIBRARY_DIRS}")
message(STATUS "LLVM C++ compiler flags: ${LLVM_CXXFLAGS}")
endif()
endif()


@ -8,8 +8,21 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/CMakeLists.txt")
set (MISSING_INTERNAL_POCO_LIBRARY 1)
endif ()
set (POCO_COMPONENTS Net XML SQL Data)
if (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)
list (APPEND POCO_COMPONENTS Crypto NetSSL)
endif ()
if (NOT DEFINED ENABLE_POCO_MONGODB OR ENABLE_POCO_MONGODB)
list (APPEND POCO_COMPONENTS MongoDB)
endif ()
# TODO: after new poco release with SQL library rename ENABLE_POCO_ODBC -> ENABLE_POCO_SQLODBC
if (NOT DEFINED ENABLE_POCO_ODBC OR ENABLE_POCO_ODBC)
list (APPEND POCO_COMPONENTS DataODBC)
#list (APPEND POCO_COMPONENTS SQLODBC) # future
endif ()
if (NOT USE_INTERNAL_POCO_LIBRARY)
find_package (Poco COMPONENTS Net NetSSL XML SQL Data Crypto DataODBC MongoDB)
find_package (Poco COMPONENTS ${POCO_COMPONENTS})
endif ()
if (Poco_INCLUDE_DIRS AND Poco_Foundation_LIBRARY)
@ -46,13 +59,11 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
"${ClickHouse_SOURCE_DIR}/contrib/poco/Util/include/"
)
if (NOT DEFINED POCO_ENABLE_MONGODB OR POCO_ENABLE_MONGODB)
set (Poco_MongoDB_FOUND 1)
if (NOT DEFINED ENABLE_POCO_MONGODB OR ENABLE_POCO_MONGODB)
set (Poco_MongoDB_LIBRARY PocoMongoDB)
set (Poco_MongoDB_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/MongoDB/include/")
endif ()
if (EXISTS "${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/")
set (Poco_SQL_FOUND 1)
set (Poco_SQL_LIBRARY PocoSQL)
@ -60,8 +71,7 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
"${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/include"
"${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include"
)
if (ODBC_FOUND)
set (Poco_SQLODBC_FOUND 1)
if ((NOT DEFINED ENABLE_POCO_ODBC OR ENABLE_POCO_ODBC) AND ODBC_FOUND)
set (Poco_SQLODBC_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/poco/SQL/ODBC/include/"
"${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/"
@ -73,8 +83,8 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
set (Poco_Data_FOUND 1)
set (Poco_Data_INCLUDE_DIRS "${ClickHouse_SOURCE_DIR}/contrib/poco/Data/include")
set (Poco_Data_LIBRARY PocoData)
if (ODBC_FOUND)
set (Poco_DataODBC_FOUND 1)
if ((NOT DEFINED ENABLE_POCO_ODBC OR ENABLE_POCO_ODBC) AND ODBC_FOUND)
set (USE_POCO_DATAODBC 1)
set (Poco_DataODBC_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/poco/Data/ODBC/include/"
${ODBC_INCLUDE_DIRECTORIES}
@ -84,8 +94,7 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
endif ()
# TODO! fix internal ssl
if (OPENSSL_FOUND AND NOT USE_INTERNAL_SSL_LIBRARY)
set (Poco_NetSSL_FOUND 1)
if (OPENSSL_FOUND AND NOT USE_INTERNAL_SSL_LIBRARY AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL))
set (Poco_NetSSL_LIBRARY PocoNetSSL)
set (Poco_Crypto_LIBRARY PocoCrypto)
endif ()
@ -103,7 +112,20 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY)
set (Poco_XML_LIBRARY PocoXML)
endif ()
message(STATUS "Using Poco: ${Poco_INCLUDE_DIRS} : ${Poco_Foundation_LIBRARY},${Poco_Util_LIBRARY},${Poco_Net_LIBRARY},${Poco_NetSSL_LIBRARY},${Poco_XML_LIBRARY},${Poco_Data_LIBRARY},${Poco_DataODBC_LIBRARY},${Poco_MongoDB_LIBRARY}; MongoDB=${Poco_MongoDB_FOUND}, DataODBC=${Poco_DataODBC_FOUND}, NetSSL=${Poco_NetSSL_FOUND}")
if (Poco_NetSSL_LIBRARY AND Poco_Crypto_LIBRARY)
set (USE_POCO_NETSSL 1)
endif ()
if (Poco_MongoDB_LIBRARY)
set (USE_POCO_MONGODB 1)
endif ()
if (Poco_DataODBC_LIBRARY)
set (USE_POCO_DATAODBC 1)
endif ()
if (Poco_SQLODBC_LIBRARY)
set (USE_POCO_SQLODBC 1)
endif ()
message(STATUS "Using Poco: ${Poco_INCLUDE_DIRS} : ${Poco_Foundation_LIBRARY},${Poco_Util_LIBRARY},${Poco_Net_LIBRARY},${Poco_NetSSL_LIBRARY},${Poco_Crypto_LIBRARY},${Poco_XML_LIBRARY},${Poco_Data_LIBRARY},${Poco_DataODBC_LIBRARY},${Poco_SQL_LIBRARY},${Poco_SQLODBC_LIBRARY},${Poco_MongoDB_LIBRARY}; MongoDB=${USE_POCO_MONGODB}, DataODBC=${USE_POCO_DATAODBC}, NetSSL=${USE_POCO_NETSSL}")
# How to make a suitable poco:
# use branch:


@ -24,6 +24,9 @@ if (RDKAFKA_LIB AND RDKAFKA_INCLUDE_DIR)
if (SASL2_LIBRARY)
list (APPEND RDKAFKA_LIBRARY ${SASL2_LIBRARY})
endif ()
if (LZ4_LIBRARY)
list (APPEND RDKAFKA_LIBRARY ${LZ4_LIBRARY})
endif ()
elseif (NOT MISSING_INTERNAL_RDKAFKA_LIBRARY)
set (USE_INTERNAL_RDKAFKA_LIBRARY 1)
set (RDKAFKA_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/librdkafka/src")


@ -1,15 +1,10 @@
if (APPLE)
# lib from libs/libcommon
set (RT_LIBRARY "apple_rt")
else ()
elseif (ARCH_FREEBSD)
find_library (RT_LIBRARY rt)
else ()
set (RT_LIBRARY "")
endif ()
message(STATUS "Using rt: ${RT_LIBRARY}")
function (target_link_rt_by_force TARGET)
if (NOT APPLE)
set (FLAGS "-Wl,-no-as-needed -lrt -Wl,-as-needed")
set_property (TARGET ${TARGET} APPEND PROPERTY LINK_FLAGS "${FLAGS}")
endif ()
endfunction ()

cmake/find_termcap.cmake Normal file

@ -0,0 +1,5 @@
find_library (TERMCAP_LIBRARY termcap)
if (NOT TERMCAP_LIBRARY)
find_library (TERMCAP_LIBRARY tinfo)
endif()
message (STATUS "Using termcap: ${TERMCAP_LIBRARY}")


@ -21,7 +21,7 @@ if (USE_INTERNAL_RE2_LIBRARY)
endif ()
if (USE_INTERNAL_DOUBLE_CONVERSION_LIBRARY)
set (BUILD_TESTING ${ENABLE_TESTS} CACHE INTERNAL "")
set (BUILD_TESTING 0 CACHE INTERNAL "")
add_subdirectory (double-conversion)
endif ()
@ -113,11 +113,7 @@ if (USE_INTERNAL_RDKAFKA_LIBRARY)
endif ()
if (USE_INTERNAL_CAPNP_LIBRARY)
if (APPLE) # tests never end
set (BUILD_TESTING 0 CACHE INTERNAL "")
else ()
set (BUILD_TESTING ${ENABLE_TESTS} CACHE INTERNAL "")
endif ()
set (BUILD_TESTING 0 CACHE INTERNAL "")
set (_save ${CMAKE_CXX_EXTENSIONS})
set (CMAKE_CXX_EXTENSIONS)
add_subdirectory (capnproto/c++)
@ -132,7 +128,7 @@ if (USE_INTERNAL_POCO_LIBRARY)
set (_save ${ENABLE_TESTS})
set (ENABLE_TESTS 0)
set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1)
if (USE_INTERNAL_SSL_LIBRARY)
if (USE_INTERNAL_SSL_LIBRARY OR (DEFINED ENABLE_POCO_NETSSL AND NOT ENABLE_POCO_NETSSL))
set (DISABLE_INTERNAL_OPENSSL 1 CACHE INTERNAL "")
set (ENABLE_NETSSL 0 CACHE INTERNAL "") # TODO!
set (ENABLE_CRYPTO 0 CACHE INTERNAL "") # TODO!
@ -145,8 +141,13 @@ if (USE_INTERNAL_POCO_LIBRARY)
set (ENABLE_TESTS ${_save})
set (CMAKE_CXX_FLAGS ${save_CMAKE_CXX_FLAGS})
set (CMAKE_C_FLAGS ${save_CMAKE_C_FLAGS})
if (OPENSSL_FOUND AND TARGET Crypto)
if (OPENSSL_FOUND AND TARGET Crypto AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL))
# Bug in poco https://github.com/pocoproject/poco/pull/2100 found on macos
target_include_directories(Crypto PUBLIC ${OPENSSL_INCLUDE_DIR})
endif ()
endif ()
if (USE_INTERNAL_LLVM_LIBRARY)
add_subdirectory (llvm/llvm)
endif ()

2
contrib/boost vendored

@ -1 +1 @@
Subproject commit 5121cc9d0375c7b81b24b6087a51684e6cd62ded
Subproject commit 2d5cb2c86f61126f4e1efe9ab97332efd44e7dea

2
contrib/capnproto vendored

@ -1 +1 @@
Subproject commit c949a18da5f041a36cc218c5c4b79c7705999b4f
Subproject commit 7173ab638fdf144032411dc69fb1082cd473e08f

2
contrib/librdkafka vendored

@ -1 +1 @@
Subproject commit c3d50eb613704fb9c8ab3bce95a88275cb5875b7
Subproject commit 7478b5ef16aadd6543fe38bc6a2deb895c70da98

View File

@ -1088,7 +1088,7 @@ class sparsegroup {
// This is equivalent to memmove(), but faster on my Intel P4,
// at least with gcc4.1 -O2 / glibc 2.3.6.
for (size_type i = settings.num_buckets; i > offset; --i)
memcpy(group + i, group + i-1, sizeof(*group));
memcpy(static_cast<void*>(group + i), group + i-1, sizeof(*group));
}
// Create space at group[offset], without special assumptions about value_type
@ -1154,7 +1154,7 @@ class sparsegroup {
// at least with gcc4.1 -O2 / glibc 2.3.6.
assert(settings.num_buckets > 0);
for (size_type i = offset; i < settings.num_buckets-1; ++i)
memcpy(group + i, group + i+1, sizeof(*group)); // hopefully inlined!
memcpy(static_cast<void*>(group + i), group + i+1, sizeof(*group)); // hopefully inlined!
group = settings.realloc_or_die(group, settings.num_buckets-1);
}

View File

@ -1,4 +1,3 @@
message (STATUS "Building: tcmalloc_minimal_internal")
add_library (tcmalloc_minimal_internal

1
contrib/llvm vendored Submodule

@ -0,0 +1 @@
Subproject commit 163def217817c90fb982a6daf384744d8472b92b

2
contrib/poco vendored

@ -1 +1 @@
Subproject commit 930a7ec1154f4f9711edfb4b4a39f9fff2a5bbb5
Subproject commit 3a2d0a833a22ef5e1164a9ada54e3253cb038904

View File

@ -99,6 +99,17 @@ else ()
install (TARGETS dbms LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
endif ()
if (USE_EMBEDDED_COMPILER)
llvm_map_components_to_libnames(REQUIRED_LLVM_LIBRARIES all)
if (TERMCAP_LIBRARY)
list(APPEND REQUIRED_LLVM_LIBRARIES ${TERMCAP_LIBRARY})
endif ()
list(APPEND REQUIRED_LLVM_LIBRARIES ${CMAKE_DL_LIBS})
target_link_libraries (dbms ${REQUIRED_LLVM_LIBRARIES})
target_include_directories (dbms BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
endif ()
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
# Won't generate debug info for files with heavy template instantiation to achieve faster linking and lower size.
@ -136,6 +147,7 @@ target_link_libraries (clickhouse_common_io
${Poco_Data_LIBRARY}
${ZLIB_LIBRARIES}
${EXECINFO_LIBRARY}
${ELF_LIBRARY}
${Boost_SYSTEM_LIBRARY}
${CMAKE_DL_LIBS}
)
@ -159,7 +171,7 @@ if (NOT USE_INTERNAL_BOOST_LIBRARY)
target_include_directories (clickhouse_common_io BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
endif ()
if (Poco_SQLODBC_FOUND)
if (USE_POCO_SQLODBC)
target_link_libraries (clickhouse_common_io ${Poco_SQL_LIBRARY})
target_link_libraries (dbms ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY})
if (NOT USE_INTERNAL_POCO_LIBRARY)
@ -173,7 +185,7 @@ if (Poco_Data_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories (dbms PRIVATE ${Poco_Data_INCLUDE_DIRS})
endif()
if (Poco_DataODBC_FOUND)
if (USE_POCO_DATAODBC)
target_link_libraries (clickhouse_common_io ${Poco_Data_LIBRARY})
target_link_libraries (dbms ${Poco_DataODBC_LIBRARY})
if (NOT USE_INTERNAL_POCO_LIBRARY)
@ -181,13 +193,12 @@ if (Poco_DataODBC_FOUND)
endif()
endif()
if (Poco_MongoDB_FOUND)
if (USE_POCO_MONGODB)
target_link_libraries (dbms ${Poco_MongoDB_LIBRARY})
endif()
if (Poco_NetSSL_FOUND)
target_link_libraries (clickhouse_common_io ${Poco_NetSSL_LIBRARY})
if (USE_POCO_NETSSL)
target_link_libraries (clickhouse_common_io ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
endif()
target_link_libraries (dbms ${Poco_Foundation_LIBRARY})

View File

@ -1,7 +1,7 @@
# These strings are autochanged from release_lib.sh:
set(VERSION_DESCRIBE v1.1.54378-testing)
set(VERSION_REVISION 54378)
set(VERSION_GITHASH 5b19d89133a5ff7c72e40cc8c0226cb00466ba10)
set(VERSION_DESCRIBE v1.1.54382-testing)
set(VERSION_REVISION 54382)
set(VERSION_GITHASH fd9938cc463456513eba5dce06a0e75b43997d69)
# end of autochange
set (VERSION_MAJOR 1)

View File

@ -242,9 +242,7 @@ private:
auto throw_exception = [&](const std::string & msg)
{
throw Exception{
msg + " '" + std::string(pos, end) + "' at position " + toString(pos - begin),
ErrorCodes::SYNTAX_ERROR};
throw Exception{msg + " '" + std::string(pos, end) + "' at position " + toString(pos - begin), ErrorCodes::SYNTAX_ERROR};
};
auto match = [&pos, end](const char * str) mutable
@ -286,9 +284,7 @@ private:
if (actions.back().type != PatternActionType::SpecificEvent &&
actions.back().type != PatternActionType::AnyEvent &&
actions.back().type != PatternActionType::KleeneStar)
throw Exception{
"Temporal condition should be preceded by an event condition",
ErrorCodes::BAD_ARGUMENTS};
throw Exception{"Temporal condition should be preceded by an event condition", ErrorCodes::BAD_ARGUMENTS};
actions.emplace_back(type, duration);
}
@ -301,9 +297,7 @@ private:
throw_exception("Could not parse number");
if (event_number > arg_count - 1)
throw Exception{
"Event number " + toString(event_number) + " is out of range",
ErrorCodes::BAD_ARGUMENTS};
throw Exception{"Event number " + toString(event_number) + " is out of range", ErrorCodes::BAD_ARGUMENTS};
actions.emplace_back(PatternActionType::SpecificEvent, event_number - 1);
}
@ -428,13 +422,10 @@ protected:
break;
}
else
throw Exception{
"Unknown PatternActionType",
ErrorCodes::LOGICAL_ERROR};
throw Exception{"Unknown PatternActionType", ErrorCodes::LOGICAL_ERROR};
if (++i > sequence_match_max_iterations)
throw Exception{
"Pattern application proves too difficult, exceeding max iterations (" + toString(sequence_match_max_iterations) + ")",
throw Exception{"Pattern application proves too difficult, exceeding max iterations (" + toString(sequence_match_max_iterations) + ")",
ErrorCodes::TOO_SLOW};
}

View File

@ -0,0 +1,34 @@
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/AggregateFunctionWindowFunnel.h>
#include <AggregateFunctions/Helpers.h>
#include <AggregateFunctions/FactoryHelpers.h>
namespace DB
{
namespace
{
AggregateFunctionPtr createAggregateFunctionWindowFunnel(const std::string & name, const DataTypes & arguments, const Array & params)
{
if (params.size() != 1)
throw Exception{"Aggregate function " + name + " requires exactly one parameter.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH};
if (arguments.size() < 2)
throw Exception("Aggregate function " + name + " requires one timestamp argument and at least one event condition.", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
if (arguments.size() > AggregateFunctionWindowFunnelData::max_events + 1)
throw Exception("Too many event arguments for aggregate function " + name, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
return std::make_shared<AggregateFunctionWindowFunnel>(arguments, params);
}
}
void registerAggregateFunctionWindowFunnel(AggregateFunctionFactory & factory)
{
factory.registerFunction("windowFunnel", createAggregateFunctionWindowFunnel, AggregateFunctionFactory::CaseInsensitive);
}
}

View File

@ -0,0 +1,260 @@
#pragma once
#include <iostream>
#include <sstream>
#include <unordered_set>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypesNumber.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Common/ArenaAllocator.h>
#include <Common/typeid_cast.h>
#include <ext/range.h>
#include <AggregateFunctions/IAggregateFunction.h>
namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
struct ComparePairFirst final
{
template <typename T1, typename T2>
bool operator()(const std::pair<T1, T2> & lhs, const std::pair<T1, T2> & rhs) const
{
return lhs.first < rhs.first;
}
};
struct AggregateFunctionWindowFunnelData
{
static constexpr auto max_events = 32;
using TimestampEvent = std::pair<UInt32, UInt8>;
static constexpr size_t bytes_on_stack = 64;
using TimestampEvents = PODArray<TimestampEvent, bytes_on_stack, AllocatorWithStackMemory<Allocator<false>, bytes_on_stack>>;
using Comparator = ComparePairFirst;
bool sorted = true;
TimestampEvents events_list;
size_t size() const
{
return events_list.size();
}
void add(UInt32 timestamp, UInt8 event)
{
// Most events already arrive sorted by timestamp, so only mark the list unsorted when needed.
if (sorted && events_list.size() > 0 && events_list.back().first > timestamp)
sorted = false;
events_list.emplace_back(timestamp, event);
}
void merge(const AggregateFunctionWindowFunnelData & other)
{
const auto size = events_list.size();
events_list.insert(std::begin(other.events_list), std::end(other.events_list));
/// Either sort the whole container, or sort only the unsorted parts and merge the ranges afterwards.
if (!sorted && !other.sorted)
std::sort(std::begin(events_list), std::end(events_list), Comparator{});
else
{
const auto begin = std::begin(events_list);
const auto middle = std::next(begin, size);
const auto end = std::end(events_list);
if (!sorted)
std::sort(begin, middle, Comparator{});
if (!other.sorted)
std::sort(middle, end, Comparator{});
std::inplace_merge(begin, middle, end, Comparator{});
}
sorted = true;
}
void sort()
{
if (!sorted)
{
std::sort(std::begin(events_list), std::end(events_list), Comparator{});
sorted = true;
}
}
void serialize(WriteBuffer & buf) const
{
writeBinary(sorted, buf);
writeBinary(events_list.size(), buf);
for (const auto & events : events_list)
{
writeBinary(events.first, buf);
writeBinary(events.second, buf);
}
}
void deserialize(ReadBuffer & buf)
{
readBinary(sorted, buf);
size_t size;
readBinary(size, buf);
/// TODO Protection against huge size
events_list.clear();
events_list.reserve(size); /// reserve, not resize: elements are appended below
UInt32 timestamp;
UInt8 event;
for (size_t i = 0; i < size; ++i)
{
readBinary(timestamp, buf);
readBinary(event, buf);
events_list.emplace_back(timestamp, event);
}
}
};
/** Calculates the max event level in a sliding window.
* The maximum number of events is 32; that's enough for funnel analytics.
*
* Usage:
* - windowFunnel(window)(timestamp, cond1, cond2, cond3, ....)
*/
class AggregateFunctionWindowFunnel final
: public IAggregateFunctionDataHelper<AggregateFunctionWindowFunnelData, AggregateFunctionWindowFunnel>
{
private:
UInt32 window;
UInt8 events_size;
// Loop through the entire events_list, update the event timestamp value
// The level path must be 1---2---3---...---events_size; find the max event level that satisfies the path in the sliding window.
// If found, returns the max event level, else return 0.
// The Algorithm complexity is O(n).
UInt8 getEventLevel(const AggregateFunctionWindowFunnelData & data) const
{
if (data.size() == 0)
return 0;
if (events_size == 1)
return 1;
const_cast<AggregateFunctionWindowFunnelData &>(data).sort();
// events_timestamp stores, for each level, the chain-start timestamp of the latest i-th level event that happened within the time window after the previous level's event.
// Timestamps default to -1, a value no unsigned timestamp can ever equal.
std::vector<Int32> events_timestamp(events_size, -1);
for (const auto & pair : data.events_list)
{
const auto & timestamp = pair.first;
const auto & event_idx = pair.second - 1;
if (event_idx == 0)
events_timestamp[0] = timestamp;
else if (events_timestamp[event_idx - 1] >= 0 && timestamp <= events_timestamp[event_idx - 1] + window)
{
events_timestamp[event_idx] = events_timestamp[event_idx - 1];
if (event_idx + 1 == events_size)
return events_size;
}
}
for (size_t event = events_timestamp.size(); event > 0; --event)
{
if (events_timestamp[event - 1] >= 0)
return event;
}
return 0;
}
public:
String getName() const override
{
return "windowFunnel";
}
AggregateFunctionWindowFunnel(const DataTypes & arguments, const Array & params)
{
const auto time_arg = arguments.front().get();
if (!typeid_cast<const DataTypeDateTime *>(time_arg) && !typeid_cast<const DataTypeUInt32 *>(time_arg))
throw Exception{"Illegal type " + time_arg->getName() + " of first argument of aggregate function " + getName()
+ ", must be DateTime or UInt32"};
for (const auto i : ext::range(1, arguments.size()))
{
auto cond_arg = arguments[i].get();
if (!typeid_cast<const DataTypeUInt8 *>(cond_arg))
throw Exception{"Illegal type " + cond_arg->getName() + " of argument " + toString(i + 1) + " of aggregate function "
+ getName() + ", must be UInt8",
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
}
events_size = arguments.size() - 1;
window = params.at(0).safeGet<UInt64>();
}
DataTypePtr getReturnType() const override
{
return std::make_shared<DataTypeUInt8>();
}
void add(AggregateDataPtr place, const IColumn ** columns, const size_t row_num, Arena *) const override
{
UInt8 event_level = 0;
for (const auto i : ext::range(1, events_size + 1))
{
auto event = static_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
if (event)
{
event_level = i;
break;
}
}
if (event_level)
{
this->data(place).add(static_cast<const ColumnVector<UInt32> *>(columns[0])->getData()[row_num], event_level);
}
}
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override
{
this->data(place).merge(this->data(rhs));
}
void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override
{
this->data(place).serialize(buf);
}
void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override
{
this->data(place).deserialize(buf);
}
void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const override
{
static_cast<ColumnUInt8 &>(to).getData().push_back(getEventLevel(this->data(place)));
}
const char * getHeaderFilePath() const override
{
return __FILE__;
}
};
}
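Illustrative aside: the sliding-window pass implemented by getEventLevel above can be read as the following standalone sketch. The helper name, inputs, and sample numbers are hypothetical (not part of the commit), and the input pairs are assumed already sorted by timestamp.

#include <cstdint>
#include <utility>
#include <vector>

/// Events are (timestamp, 1-based level) pairs sorted by timestamp; returns the
/// deepest funnel level whose whole chain 1 -> 2 -> ... fits within `window` seconds.
static uint8_t funnelLevel(const std::vector<std::pair<uint32_t, uint8_t>> & events, uint32_t window, uint8_t levels)
{
    std::vector<int64_t> level_ts(levels, -1); /// chain-start timestamp per level; -1 = not reached
    for (const auto & [ts, level] : events)
    {
        const size_t idx = level - 1;
        if (idx == 0)
            level_ts[0] = ts; /// a level-1 event (re)starts a chain
        else if (level_ts[idx - 1] >= 0 && ts <= level_ts[idx - 1] + window)
        {
            level_ts[idx] = level_ts[idx - 1]; /// propagate the chain-start timestamp
            if (idx + 1 == levels)
                return levels;
        }
    }
    for (size_t l = levels; l > 0; --l)
        if (level_ts[l - 1] >= 0)
            return l;
    return 0;
}

/// funnelLevel({{100, 1}, {150, 2}, {190, 3}}, /* window = */ 100, /* levels = */ 3) == 3,
/// because every step happened within 100 seconds of the chain start at t = 100.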

View File

@ -179,7 +179,7 @@ namespace detail
? level * elems.size()
: (elems.size() - 1);
/// Sorting an array will not be considered a violation of constancy.
auto & array = const_cast<Array &>(elems);
std::nth_element(array.begin(), array.begin() + n, array.end());
quantile = array[n];

View File

@ -15,10 +15,10 @@
#include <Poco/Exception.h>
/// Implementation of the Reservoir Sampling algorithm. Incrementally selects a random subset of `sample_count` size from the added objects.
/// Can approximately get quantiles.
/// The `quantile` call takes O(sample_count log sample_count) if there was at least one `insert` call since the previous `quantile` call; otherwise it takes O(1).
/// That is, it makes sense to first add everything, then get quantiles without adding.
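As a point of reference, the classic Algorithm R the comment above alludes to can be sketched in a few lines. This is an illustrative toy (names are invented), not the DB implementation:

#include <cstddef>
#include <random>
#include <vector>

/// Classic reservoir sampling (Algorithm R): maintains a uniform random subset
/// of at most `limit` elements from a stream of unknown length.
template <typename T>
class ToyReservoir
{
public:
    explicit ToyReservoir(size_t sample_count) : limit(sample_count) {}

    void insert(const T & x)
    {
        ++seen;
        if (sample.size() < limit)
            sample.push_back(x);
        else
        {
            /// Keep the new element with probability limit / seen.
            std::uniform_int_distribution<size_t> dist(0, seen - 1);
            if (dist(rng) < limit)
                sample[dist(rng) % limit] = x;
        }
    }

    const std::vector<T> & data() const { return sample; }

private:
    const size_t limit;
    size_t seen = 0;
    std::vector<T> sample;
    std::mt19937 rng{std::random_device{}()};
};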
namespace DB

View File

@ -14,6 +14,7 @@ void registerAggregateFunctionGroupUniqArray(AggregateFunctionFactory &);
void registerAggregateFunctionGroupArrayInsertAt(AggregateFunctionFactory &);
void registerAggregateFunctionsQuantile(AggregateFunctionFactory &);
void registerAggregateFunctionsSequenceMatch(AggregateFunctionFactory &);
void registerAggregateFunctionWindowFunnel(AggregateFunctionFactory &);
void registerAggregateFunctionsMinMaxAny(AggregateFunctionFactory &);
void registerAggregateFunctionsStatisticsStable(AggregateFunctionFactory &);
void registerAggregateFunctionsStatisticsSimple(AggregateFunctionFactory &);
@ -45,6 +46,7 @@ void registerAggregateFunctions()
registerAggregateFunctionGroupArrayInsertAt(factory);
registerAggregateFunctionsQuantile(factory);
registerAggregateFunctionsSequenceMatch(factory);
registerAggregateFunctionWindowFunnel(factory);
registerAggregateFunctionsMinMaxAny(factory);
registerAggregateFunctionsStatisticsStable(factory);
registerAggregateFunctionsStatisticsSimple(factory);

View File

@ -26,6 +26,11 @@
#include <Functions/IFunction.h>
#include <Common/typeid_cast.h>
#include <DataTypes/DataTypeFunction.h>
#include <algorithm>
#include <sstream>
#include <unordered_map>
#include <vector>
#include <memory>
namespace DB
@ -251,7 +256,7 @@ void processFunction(const String & column_name, ASTPtr & ast, TypeAndConstantIn
size_t result_position = argument_numbers.size();
block_with_constants.insert({nullptr, expression_info.data_type, column_name});
function_ptr->execute(block_with_constants, argument_numbers, result_position);
function_ptr->execute(block_with_constants, argument_numbers, result_position, 1);
const auto & result_column = block_with_constants.getByPosition(result_position).column;
if (result_column->isColumnConst())

View File

@ -17,10 +17,11 @@
#include <Common/Exception.h>
#include <Common/NetException.h>
#include <Common/CurrentMetrics.h>
#include <Common/DNSResolver.h>
#include <Interpreters/ClientInfo.h>
#include <Common/config.h>
#if Poco_NetSSL_FOUND
#if USE_POCO_NETSSL
#include <Poco/Net/SecureStreamSocket.h>
#endif
@ -56,7 +57,7 @@ void Connection::connect()
if (static_cast<bool>(secure))
{
#if Poco_NetSSL_FOUND
#if USE_POCO_NETSSL
socket = std::make_unique<Poco::Net::SecureStreamSocket>();
#else
throw Exception{"tcp_secure protocol is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED};
@ -66,7 +67,10 @@ void Connection::connect()
{
socket = std::make_unique<Poco::Net::StreamSocket>();
}
socket->connect(resolved_address, timeouts.connection_timeout);
current_resolved_address = DNSResolver::instance().resolveAddress(host, port);
socket->connect(current_resolved_address, timeouts.connection_timeout);
socket->setReceiveTimeout(timeouts.receive_timeout);
socket->setSendTimeout(timeouts.send_timeout);
socket->setNoDelay(true);
@ -462,6 +466,14 @@ void Connection::sendExternalTablesData(ExternalTablesData & data)
LOG_DEBUG(log_wrapper.get(), msg.rdbuf());
}
Poco::Net::SocketAddress Connection::getResolvedAddress() const
{
if (connected)
return current_resolved_address;
return DNSResolver::instance().resolveAddress(host, port);
}
bool Connection::poll(size_t timeout_microseconds)
{
@ -571,6 +583,7 @@ void Connection::initBlockInput()
void Connection::setDescription()
{
auto resolved_address = getResolvedAddress();
description = host + ":" + toString(resolved_address.port());
auto ip_address = resolved_address.host().toString();
@ -610,7 +623,7 @@ void Connection::fillBlockExtraInfo(BlockExtraInfo & info) const
{
info.is_valid = true;
info.host = host;
info.resolved_address = resolved_address.toString();
info.resolved_address = getResolvedAddress().toString();
info.port = port;
info.user = user;
}

View File

@ -53,32 +53,7 @@ class Connection : private boost::noncopyable
friend class MultiplexedConnections;
public:
Connection(const String & host_, UInt16 port_, const String & default_database_,
const String & user_, const String & password_,
const ConnectionTimeouts & timeouts_,
const String & client_name_ = "client",
Protocol::Compression compression_ = Protocol::Compression::Enable,
Protocol::Secure secure_ = Protocol::Secure::Disable,
Poco::Timespan sync_request_timeout_ = Poco::Timespan(DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC, 0))
:
host(host_), port(port_), default_database(default_database_),
user(user_), password(password_), resolved_address(host, port),
client_name(client_name_),
compression(compression_),
secure(secure_),
timeouts(timeouts_),
sync_request_timeout(sync_request_timeout_),
log_wrapper(*this)
{
/// Don't connect immediately, only on first need.
if (user.empty())
user = "default";
setDescription();
}
Connection(const String & host_, UInt16 port_, const Poco::Net::SocketAddress & resolved_address_,
Connection(const String & host_, UInt16 port_,
const String & default_database_,
const String & user_, const String & password_,
const ConnectionTimeouts & timeouts_,
@ -87,10 +62,8 @@ public:
Protocol::Secure secure_ = Protocol::Secure::Disable,
Poco::Timespan sync_request_timeout_ = Poco::Timespan(DBMS_DEFAULT_SYNC_REQUEST_TIMEOUT_SEC, 0))
:
host(host_), port(port_),
default_database(default_database_),
user(user_), password(password_),
resolved_address(resolved_address_),
host(host_), port(port_), default_database(default_database_),
user(user_), password(password_), current_resolved_address(host, port),
client_name(client_name_),
compression(compression_),
secure(secure_),
@ -189,6 +162,9 @@ public:
size_t outBytesCount() const { return out ? out->count() : 0; }
size_t inBytesCount() const { return in ? in->count() : 0; }
/// Returns initially resolved address
Poco::Net::SocketAddress getResolvedAddress() const;
private:
String host;
UInt16 port;
@ -196,10 +172,9 @@ private:
String user;
String password;
/** Address could be resolved beforehand and passed to constructor. Then 'host' and 'port' fields are used just for logging.
* Otherwise address is resolved in constructor. Thus, DNS based load balancing is not supported.
*/
Poco::Net::SocketAddress resolved_address;
/// Address is resolved during the first connection (or the following reconnects)
/// Use it only for logging purposes
Poco::Net::SocketAddress current_resolved_address;
/// For messages in log and in exceptions.
String description;

View File

@ -54,24 +54,7 @@ public:
Protocol::Secure secure_ = Protocol::Secure::Disable)
: Base(max_connections_, &Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
host(host_), port(port_), default_database(default_database_),
user(user_), password(password_), resolved_address(host_, port_),
client_name(client_name_), compression(compression_),
secure{secure_},
timeouts(timeouts)
{
}
ConnectionPool(unsigned max_connections_,
const String & host_, UInt16 port_, const Poco::Net::SocketAddress & resolved_address_,
const String & default_database_,
const String & user_, const String & password_,
const ConnectionTimeouts & timeouts,
const String & client_name_ = "client",
Protocol::Compression compression_ = Protocol::Compression::Enable,
Protocol::Secure secure_ = Protocol::Secure::Disable)
: Base(max_connections_, &Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
host(host_), port(port_), default_database(default_database_),
user(user_), password(password_), resolved_address(resolved_address_),
user(user_), password(password_),
client_name(client_name_), compression(compression_),
secure{secure_},
timeouts(timeouts)
@ -102,7 +85,7 @@ protected:
ConnectionPtr allocObject() override
{
return std::make_shared<Connection>(
host, port, resolved_address,
host, port,
default_database, user, password, timeouts,
client_name, compression, secure);
}
@ -114,11 +97,6 @@ private:
String user;
String password;
/** The address can be resolved in advance and passed to the constructor. Then `host` and `port` fields are meaningful only for logging.
* Otherwise, address is resolved in constructor. That is, DNS balancing is not supported.
*/
Poco::Net::SocketAddress resolved_address;
String client_name;
Protocol::Compression compression; /// Whether to compress data when interacting with the server.
Protocol::Secure secure; /// Whether to encrypt data when interacting with the server.

View File

@ -91,6 +91,11 @@ public:
return data->getInt(0);
}
bool getBool(size_t) const override
{
return data->getBool(0);
}
bool isNullAt(size_t) const override
{
return data->isNullAt(0);
@ -189,6 +194,7 @@ public:
bool isFixedAndContiguous() const override { return data->isFixedAndContiguous(); }
bool valuesHaveFixedSize() const override { return data->valuesHaveFixedSize(); }
size_t sizeOfValueIfFixed() const override { return data->sizeOfValueIfFixed(); }
StringRef getRawData() const override { return data->getRawData(); }
/// Not part of the common interface.

View File

@ -134,7 +134,7 @@ public:
bool isFixedAndContiguous() const override { return true; }
size_t sizeOfValueIfFixed() const override { return n; }
StringRef getRawData() const override { return StringRef(chars.data(), chars.size()); }
/// Specialized part of interface, not from IColumn.

View File

@ -203,7 +203,7 @@ ColumnWithTypeAndName ColumnFunction::reduce() const
for (size_t i = 0; i < captured_columns.size(); ++i)
arguments[i] = i;
function->execute(block, arguments, captured_columns.size());
function->execute(block, arguments, captured_columns.size(), size_);
return block.getByPosition(captured_columns.size());
}

View File

@ -46,6 +46,7 @@ public:
bool isNullAt(size_t n) const override { return static_cast<const ColumnUInt8 &>(*null_map).getData()[n] != 0;}
Field operator[](size_t n) const override;
void get(size_t n, Field & res) const override;
bool getBool(size_t n) const override { return isNullAt(n) ? 0 : nested_column->getBool(n); }
UInt64 get64(size_t n) const override { return nested_column->get64(n); }
StringRef getDataAt(size_t n) const override;
void insertData(const char * pos, size_t length) override;

View File

@ -74,7 +74,7 @@ void ColumnTuple::get(size_t n, Field & res) const
{
const size_t tuple_size = columns.size();
res = Tuple(TupleBackend(tuple_size));
TupleBackend & res_arr = DB::get<Tuple &>(res).t;
TupleBackend & res_arr = DB::get<Tuple &>(res).toUnderType();
for (const auto i : ext::range(0, tuple_size))
columns[i]->get(n, res_arr[i]);
}
@ -91,7 +91,7 @@ void ColumnTuple::insertData(const char *, size_t)
void ColumnTuple::insert(const Field & x)
{
const TupleBackend & tuple = DB::get<const Tuple &>(x).t;
const TupleBackend & tuple = DB::get<const Tuple &>(x).toUnderType();
const size_t tuple_size = columns.size();
if (tuple.size() != tuple_size)
@ -320,8 +320,8 @@ void ColumnTuple::getExtremes(Field & min, Field & max) const
min = Tuple(TupleBackend(tuple_size));
max = Tuple(TupleBackend(tuple_size));
auto & min_backend = min.get<Tuple &>().t;
auto & max_backend = max.get<Tuple &>().t;
auto & min_backend = min.get<Tuple &>().toUnderType();
auto & max_backend = max.get<Tuple &>().toUnderType();
for (const auto i : ext::range(0, tuple_size))
columns[i]->getExtremes(min_backend[i], max_backend[i]);

View File

@ -231,6 +231,11 @@ public:
return UInt64(data[n]);
}
bool getBool(size_t n) const override
{
return bool(data[n]);
}
Int64 getInt(size_t n) const override
{
return Int64(data[n]);
@ -268,7 +273,7 @@ public:
bool isFixedAndContiguous() const override { return true; }
size_t sizeOfValueIfFixed() const override { return sizeof(T); }
StringRef getRawData() const override { return StringRef(reinterpret_cast<const char*>(data.data()), data.size()); }
/** More efficient methods of manipulation - to manipulate with data directly. */
Container & getData()

View File

@ -4,6 +4,7 @@
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnConst.h>
#include <Core/ColumnWithTypeAndName.h>
namespace DB
@ -81,4 +82,18 @@ FilterDescription::FilterDescription(const IColumn & column)
ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER);
}
void checkColumnCanBeUsedAsFilter(const ColumnWithTypeAndName & column_elem)
{
ConstantFilterDescription const_filter;
if (column_elem.column)
const_filter = ConstantFilterDescription(*column_elem.column);
if (!const_filter.always_false && !const_filter.always_true)
{
auto column = column_elem.column ? column_elem.column : column_elem.type->createColumn();
FilterDescription filter(*column);
}
}
}

View File

@ -29,4 +29,10 @@ struct FilterDescription
explicit FilterDescription(const IColumn & column);
};
struct ColumnWithTypeAndName;
/// Will throw an exception if column_elem cannot be used as a filter column.
void checkColumnCanBeUsedAsFilter(const ColumnWithTypeAndName & column_elem);
}

View File

@ -88,6 +88,7 @@ public:
}
/** If column is numeric, return the value of the n-th element, cast to UInt64.
* For NULL values of a Nullable column it is allowed to return an arbitrary value.
* Otherwise throw an exception.
*/
virtual UInt64 getUInt(size_t /*n*/) const
@ -102,6 +103,15 @@ public:
virtual bool isNullAt(size_t /*n*/) const { return false; }
/** If column is numeric, return the value of the n-th element, cast to bool.
* For NULL values of a Nullable column returns false.
* Otherwise throw an exception.
*/
virtual bool getBool(size_t /*n*/) const
{
throw Exception("Method getBool is not supported for " + getName(), ErrorCodes::NOT_IMPLEMENTED);
}
/// Removes all elements outside of specified range.
/// Is used in LIMIT operation, for example.
virtual Ptr cut(size_t start, size_t length) const
@ -302,6 +312,9 @@ public:
/// Values in column are represented as continuous memory segment of fixed size. Implies valuesHaveFixedSize.
virtual bool isFixedAndContiguous() const { return false; }
/// If isFixedAndContiguous, returns the underlying data array, otherwise throws an exception.
virtual StringRef getRawData() const { throw Exception("Column " + getName() + " is not a contiguous block of memory", ErrorCodes::NOT_IMPLEMENTED); }
/// If valuesHaveFixedSize, returns size of value, otherwise throw an exception.
virtual size_t sizeOfValueIfFixed() const { throw Exception("Values of column " + getName() + " are not fixed size.", ErrorCodes::CANNOT_GET_SIZE_OF_FIELD); }

View File

@ -15,6 +15,7 @@
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
/** Small wrappers for asynchronous I/O.

View File

@ -0,0 +1,300 @@
#include <Common/BackgroundSchedulePool.h>
#include <Common/MemoryTracker.h>
#include <Common/CurrentMetrics.h>
#include <Common/Exception.h>
#include <Common/setThreadName.h>
#include <Common/Stopwatch.h>
#include <common/logger_useful.h>
#include <chrono>
namespace CurrentMetrics
{
extern const Metric BackgroundSchedulePoolTask;
extern const Metric MemoryTrackingInBackgroundSchedulePool;
}
namespace DB
{
// TaskNotification
class TaskNotification final : public Poco::Notification
{
public:
explicit TaskNotification(const BackgroundSchedulePool::TaskHandle & task) : task(task) {}
void execute() { task->execute(); }
private:
BackgroundSchedulePool::TaskHandle task;
};
// BackgroundSchedulePool::TaskInfo
BackgroundSchedulePool::TaskInfo::TaskInfo(BackgroundSchedulePool & pool, const std::string & name, const Task & function):
name(name),
pool(pool),
function(function)
{
}
bool BackgroundSchedulePool::TaskInfo::schedule()
{
std::lock_guard lock(schedule_mutex);
if (deactivated || scheduled)
return false;
scheduled = true;
if (!executing)
{
if (delayed)
pool.cancelDelayedTask(shared_from_this(), lock);
pool.queue.enqueueNotification(new TaskNotification(shared_from_this()));
}
return true;
}
bool BackgroundSchedulePool::TaskInfo::scheduleAfter(size_t ms)
{
std::lock_guard lock(schedule_mutex);
if (deactivated || scheduled)
return false;
pool.scheduleDelayedTask(shared_from_this(), ms, lock);
return true;
}
void BackgroundSchedulePool::TaskInfo::deactivate()
{
std::lock_guard lock_exec(exec_mutex);
std::lock_guard lock_schedule(schedule_mutex);
if (deactivated)
return;
deactivated = true;
scheduled = false;
if (delayed)
pool.cancelDelayedTask(shared_from_this(), lock_schedule);
}
void BackgroundSchedulePool::TaskInfo::activate()
{
std::lock_guard lock(schedule_mutex);
deactivated = false;
}
void BackgroundSchedulePool::TaskInfo::execute()
{
std::lock_guard lock_exec(exec_mutex);
{
std::lock_guard lock_schedule(schedule_mutex);
if (deactivated)
return;
scheduled = false;
executing = true;
}
CurrentMetrics::Increment metric_increment{CurrentMetrics::BackgroundSchedulePoolTask};
Stopwatch watch;
function();
UInt64 milliseconds = watch.elapsedMilliseconds();
/// If the task executes for longer than the specified time, it will be logged.
static const int32_t slow_execution_threshold_ms = 50;
if (milliseconds >= slow_execution_threshold_ms)
LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Executing " << name << " took " << milliseconds << " ms.");
{
std::lock_guard lock_schedule(schedule_mutex);
executing = false;
/// In case the task was scheduled while executing (including a scheduleAfter which expired), we schedule it
/// on the queue. We don't call the function again here because this way all tasks
/// will have their chance to execute.
if (scheduled)
pool.queue.enqueueNotification(new TaskNotification(shared_from_this()));
}
}
zkutil::WatchCallback BackgroundSchedulePool::TaskInfo::getWatchCallback()
{
return [t=shared_from_this()](const ZooKeeperImpl::ZooKeeper::WatchResponse &) {
t->schedule();
};
}
// BackgroundSchedulePool
BackgroundSchedulePool::BackgroundSchedulePool(size_t size)
: size(size)
{
LOG_INFO(&Logger::get("BackgroundSchedulePool"), "Create BackgroundSchedulePool with " << size << " threads");
threads.resize(size);
for (auto & thread : threads)
thread = std::thread([this] { threadFunction(); });
delayed_thread = std::thread([this] { delayExecutionThreadFunction(); });
}
BackgroundSchedulePool::~BackgroundSchedulePool()
{
try
{
{
std::unique_lock lock(delayed_tasks_lock);
shutdown = true;
wakeup_cond.notify_all();
}
queue.wakeUpAll();
delayed_thread.join();
LOG_TRACE(&Logger::get("BackgroundSchedulePool"), "Waiting for threads to finish.");
for (std::thread & thread : threads)
thread.join();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
BackgroundSchedulePool::TaskHandle BackgroundSchedulePool::addTask(const std::string & name, const Task & task)
{
return std::make_shared<TaskInfo>(*this, name, task);
}
void BackgroundSchedulePool::removeTask(const TaskHandle & task)
{
task->deactivate();
}
void BackgroundSchedulePool::scheduleDelayedTask(const TaskHandle & task, size_t ms, std::lock_guard<std::mutex> & /* schedule_mutex_lock */)
{
Poco::Timestamp current_time;
{
std::lock_guard lock(delayed_tasks_lock);
if (task->delayed)
delayed_tasks.erase(task->iterator);
task->iterator = delayed_tasks.emplace(current_time + (ms * 1000), task);
task->delayed = true;
}
wakeup_cond.notify_all();
}
void BackgroundSchedulePool::cancelDelayedTask(const TaskHandle & task, std::lock_guard<std::mutex> & /* schedule_mutex_lock */)
{
{
std::lock_guard lock(delayed_tasks_lock);
delayed_tasks.erase(task->iterator);
task->delayed = false;
}
wakeup_cond.notify_all();
}
void BackgroundSchedulePool::threadFunction()
{
setThreadName("BackgrSchedPool");
MemoryTracker memory_tracker;
memory_tracker.setMetric(CurrentMetrics::MemoryTrackingInBackgroundSchedulePool);
current_memory_tracker = &memory_tracker;
while (!shutdown)
{
if (Poco::AutoPtr<Poco::Notification> notification = queue.waitDequeueNotification())
{
TaskNotification & task_notification = static_cast<TaskNotification &>(*notification);
task_notification.execute();
}
}
current_memory_tracker = nullptr;
}
void BackgroundSchedulePool::delayExecutionThreadFunction()
{
setThreadName("BckSchPoolDelay");
while (!shutdown)
{
TaskHandle task;
bool found = false;
{
std::unique_lock lock(delayed_tasks_lock);
while (!shutdown)
{
Poco::Timestamp min_time;
if (!delayed_tasks.empty())
{
auto t = delayed_tasks.begin();
min_time = t->first;
task = t->second;
}
if (!task)
{
wakeup_cond.wait(lock);
continue;
}
Poco::Timestamp current_time;
if (min_time > current_time)
{
wakeup_cond.wait_for(lock, std::chrono::microseconds(min_time - current_time));
continue;
}
else
{
/// We have a task ready for execution
found = true;
break;
}
}
}
if (found)
task->schedule();
}
}
}

View File

@ -0,0 +1,120 @@
#pragma once
#include <Poco/Notification.h>
#include <Poco/NotificationQueue.h>
#include <Poco/Timestamp.h>
#include <thread>
#include <atomic>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <map>
#include <functional>
#include <boost/noncopyable.hpp>
#include <Common/ZooKeeper/Types.h>
namespace DB
{
class TaskNotification;
/** Executes functions scheduled at a specific point in time.
* Basically, all tasks are added to a queue and processed by worker threads.
*
* The most important difference between this and BackgroundProcessingPool
* is the guarantee that the same function is never executed by several workers at the same time.
*
* The usage scenario: instead of starting a separate thread for each task,
* register a task in BackgroundSchedulePool and, when you need to run the task,
* call the schedule or scheduleAfter(duration) method.
*/
class BackgroundSchedulePool
{
public:
class TaskInfo;
using TaskHandle = std::shared_ptr<TaskInfo>;
using Tasks = std::multimap<Poco::Timestamp, TaskHandle>;
using Task = std::function<void()>;
class TaskInfo : public std::enable_shared_from_this<TaskInfo>, private boost::noncopyable
{
public:
TaskInfo(BackgroundSchedulePool & pool, const std::string & name, const Task & function);
/// All these methods wait for the current execution of the task to finish.
/// Schedule for execution as soon as possible (if not already scheduled).
/// If the task was already scheduled with a delay, the delay will be ignored.
bool schedule();
/// Schedule for execution after the specified delay.
bool scheduleAfter(size_t ms);
/// Further attempts to schedule become no-ops.
void deactivate();
void activate();
/// Get the zkutil::WatchCallback needed for ZooKeeper watch callbacks.
zkutil::WatchCallback getWatchCallback();
private:
friend class TaskNotification;
friend class BackgroundSchedulePool;
void execute();
std::mutex schedule_mutex;
std::mutex exec_mutex;
std::string name;
bool deactivated = false;
bool scheduled = false;
bool delayed = false;
bool executing = false;
BackgroundSchedulePool & pool;
Task function;
/// If the task is scheduled with a delay, points to the element of delayed_tasks.
Tasks::iterator iterator;
};
BackgroundSchedulePool(size_t size);
~BackgroundSchedulePool();
TaskHandle addTask(const std::string & name, const Task & task);
void removeTask(const TaskHandle & task);
size_t getNumberOfThreads() const { return size; }
private:
using Threads = std::vector<std::thread>;
void threadFunction();
void delayExecutionThreadFunction();
/// Schedule a task for execution after the specified delay from now.
void scheduleDelayedTask(const TaskHandle & task, size_t ms, std::lock_guard<std::mutex> &);
/// Remove a task that was scheduled with a delay from the schedule.
void cancelDelayedTask(const TaskHandle & task, std::lock_guard<std::mutex> &);
/// Number of worker threads.
const size_t size;
std::atomic<bool> shutdown {false};
Threads threads;
Poco::NotificationQueue queue;
/// Delayed notifications.
std::condition_variable wakeup_cond;
std::mutex delayed_tasks_lock;
/// Thread waiting for next delayed task.
std::thread delayed_thread;
/// Tasks ordered by scheduled time.
Tasks delayed_tasks;
};
using BackgroundSchedulePoolPtr = std::shared_ptr<BackgroundSchedulePool>;
}
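A minimal usage sketch of the scenario described in the class comment above; the task name, body, and delay are illustrative only.

#include <Common/BackgroundSchedulePool.h>

void exampleUsage()
{
    DB::BackgroundSchedulePool pool(4); /// 4 worker threads

    auto task = pool.addTask("ExampleTask", []
    {
        /// Do periodic work here; the pool guarantees this body
        /// never runs in two workers at the same time.
    });

    task->schedule();          /// run as soon as a worker is free
    task->scheduleAfter(5000); /// or re-arm to run ~5 seconds from now

    pool.removeTask(task);     /// deactivate; further schedule() calls become no-ops
}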

View File

@ -369,14 +369,17 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string &
Files files;
Poco::Path merge_dir_path(config_path);
merge_dir_path.setExtension("d");
std::set<std::string> merge_dirs;
std::vector<std::string> merge_dirs;
merge_dirs.push_back(merge_dir_path.toString());
if (merge_dir_path.getBaseName() != "conf") {
merge_dir_path.setBaseName("conf");
merge_dirs.push_back(merge_dir_path.toString());
}
/// Add path_to_config/config_name.d dir
merge_dir_path.setExtension("d");
merge_dirs.insert(merge_dir_path.toString());
/// Add path_to_config/conf.d dir
merge_dir_path.setBaseName("conf");
merge_dirs.insert(merge_dir_path.toString());
/// Add path_to_config/config.d dir
merge_dir_path.setBaseName("config");
merge_dirs.insert(merge_dir_path.toString());
for (const std::string & merge_dir_name : merge_dirs)
{

View File

@ -9,6 +9,7 @@
M(ReplicatedSend) \
M(ReplicatedChecks) \
M(BackgroundPoolTask) \
M(BackgroundSchedulePoolTask) \
M(DiskSpaceReservedForMerge) \
M(DistributedSend) \
M(QueryPreempted) \
@ -25,6 +26,7 @@
M(LeaderReplica) \
M(MemoryTracking) \
M(MemoryTrackingInBackgroundProcessingPool) \
M(MemoryTrackingInBackgroundSchedulePool) \
M(MemoryTrackingForMerges) \
M(LeaderElection) \
M(EphemeralNode) \

View File

@ -1,40 +0,0 @@
#pragma once
#include <Poco/Net/IPAddress.h>
#include <Poco/Net/SocketAddress.h>
#include <memory>
#include <ext/singleton.h>
namespace DB
{
/// A singleton implementing global and permanent DNS cache
/// It could be updated only manually via drop() method
class DNSCache : public ext::singleton<DNSCache>
{
public:
DNSCache(const DNSCache &) = delete;
/// Accepts host names like 'example.com' or '127.0.0.1' or '::1' and resolve its IP
Poco::Net::IPAddress resolveHost(const std::string & host);
/// Accepts host names like 'example.com:port' or '127.0.0.1:port' or '[::1]:port' and resolve its IP and port
Poco::Net::SocketAddress resolveHostAndPort(const std::string & host_and_port);
/// Drops all caches
void drop();
~DNSCache();
protected:
DNSCache();
friend class ext::singleton<DNSCache>;
struct Impl;
std::unique_ptr<Impl> impl;
};
}

View File

@ -1,4 +1,4 @@
#include "DNSCache.h"
#include "DNSResolver.h"
#include <Common/SimpleCache.h>
#include <Common/Exception.h>
#include <Core/Types.h>
@ -6,6 +6,7 @@
#include <Poco/Net/NetException.h>
#include <Poco/NumberParser.h>
#include <arpa/inet.h>
#include <atomic>
namespace DB
@ -74,34 +75,47 @@ static Poco::Net::IPAddress resolveIPAddressImpl(const std::string & host)
}
struct DNSCache::Impl
struct DNSResolver::Impl
{
SimpleCache<decltype(resolveIPAddressImpl), &resolveIPAddressImpl> cache_host;
/// If disabled, will not make cache lookups, will resolve addresses manually on each call
std::atomic<bool> disable_cache{false};
};
DNSCache::DNSCache() : impl(std::make_unique<DNSCache::Impl>()) {}
DNSResolver::DNSResolver() : impl(std::make_unique<DNSResolver::Impl>()) {}
Poco::Net::IPAddress DNSCache::resolveHost(const std::string & host)
Poco::Net::IPAddress DNSResolver::resolveHost(const std::string & host)
{
return impl->cache_host(host);
return !impl->disable_cache ? impl->cache_host(host) : resolveIPAddressImpl(host);
}
Poco::Net::SocketAddress DNSCache::resolveHostAndPort(const std::string & host_and_port)
Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host_and_port)
{
String host;
UInt16 port;
splitHostAndPort(host_and_port, host, port);
return Poco::Net::SocketAddress(impl->cache_host(host), port);
return !impl->disable_cache ? Poco::Net::SocketAddress(impl->cache_host(host), port) : Poco::Net::SocketAddress(host_and_port);
}
void DNSCache::drop()
Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, UInt16 port)
{
return !impl->disable_cache ? Poco::Net::SocketAddress(impl->cache_host(host), port) : Poco::Net::SocketAddress(host, port);
}
void DNSResolver::dropCache()
{
impl->cache_host.drop();
}
DNSCache::~DNSCache() = default;
void DNSResolver::setDisableCacheFlag(bool is_disabled)
{
impl->disable_cache = is_disabled;
}
DNSResolver::~DNSResolver() = default;
}

View File

@ -0,0 +1,46 @@
#pragma once
#include <Poco/Net/IPAddress.h>
#include <Poco/Net/SocketAddress.h>
#include <memory>
#include <ext/singleton.h>
#include <Core/Types.h>
namespace DB
{
/// A singleton implementing DNS name resolution with an optional permanent DNS cache.
/// The cache can be updated only manually, via the dropCache() method.
class DNSResolver : public ext::singleton<DNSResolver>
{
public:
DNSResolver(const DNSResolver &) = delete;
/// Accepts host names like 'example.com' or '127.0.0.1' or '::1' and resolves them to an IP
Poco::Net::IPAddress resolveHost(const std::string & host);
/// Accepts host names like 'example.com:port' or '127.0.0.1:port' or '[::1]:port' and resolves them to an IP and port
Poco::Net::SocketAddress resolveAddress(const std::string & host_and_port);
Poco::Net::SocketAddress resolveAddress(const std::string & host, UInt16 port);
/// Disables caching
void setDisableCacheFlag(bool is_disabled = true);
/// Drops all caches
void dropCache();
~DNSResolver();
protected:
DNSResolver();
friend class ext::singleton<DNSResolver>;
struct Impl;
std::unique_ptr<Impl> impl;
};
}
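A short illustrative sketch of how the singleton above is meant to be called; host names and the port are placeholders.

#include <Common/DNSResolver.h>

void resolveExamples()
{
    auto & resolver = DB::DNSResolver::instance(); /// ext::singleton access

    Poco::Net::IPAddress ip = resolver.resolveHost("localhost");
    Poco::Net::SocketAddress addr1 = resolver.resolveAddress("127.0.0.1:9000");
    Poco::Net::SocketAddress addr2 = resolver.resolveAddress("localhost", 9000);

    resolver.setDisableCacheFlag(); /// resolve on every call from now on
    resolver.dropCache();           /// forget everything cached so far
}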

View File

@ -378,6 +378,9 @@ namespace ErrorCodes
extern const int POCO_EXCEPTION = 1000;
extern const int STD_EXCEPTION = 1001;
extern const int UNKNOWN_EXCEPTION = 1002;
extern const int CONDITIONAL_TREE_PARENT_NOT_FOUND = 2001;
extern const int ILLEGAL_PROJECTION_MANIPULATOR = 2002;
}
}

View File

@ -1,4 +1,3 @@
#include <errno.h>
#include <string.h>
#include <cxxabi.h>

View File

@ -165,7 +165,7 @@ public:
/// Parsing of external table used when sending tables via http
/// The `handlePart` function will be called for each table passed,
/// so it's also necessary to call `clean` at the end of the `handlePart`.
class ExternalTablesHandler : public Poco::Net::PartHandler, BaseExternalTable
{
public:

View File

@ -61,7 +61,7 @@ String FieldVisitorDump::operator() (const Array & x) const
String FieldVisitorDump::operator() (const Tuple & x_def) const
{
auto & x = x_def.t;
auto & x = x_def.toUnderType();
WriteBufferFromOwnString wb;
wb.write("Tuple_(", 7);
@ -124,7 +124,7 @@ String FieldVisitorToString::operator() (const Array & x) const
String FieldVisitorToString::operator() (const Tuple & x_def) const
{
auto & x = x_def.t;
auto & x = x_def.toUnderType();
WriteBufferFromOwnString wb;
writeChar('(', wb);

View File

@ -408,7 +408,7 @@ protected:
/// Copy to a new location and zero the old one.
x.setHash(hash_value);
memcpy(&buf[place_value], &x, sizeof(x));
memcpy(static_cast<void*>(&buf[place_value]), &x, sizeof(x));
x.setZero();
/// Then the elements that previously were in collision with this can move to the old place.
@ -726,7 +726,7 @@ public:
{
size_t place_value = findEmptyCell(grower.place(hash_value));
memcpy(&buf[place_value], cell, sizeof(*cell));
memcpy(static_cast<void*>(&buf[place_value]), cell, sizeof(*cell));
++m_size;
if (unlikely(grower.overflow(m_size)))
@ -897,7 +897,7 @@ public:
this->clearHasZero();
m_size = 0;
memset(buf, 0, grower.bufSize() * sizeof(*buf));
memset(static_cast<void*>(buf), 0, grower.bufSize() * sizeof(*buf));
}
/// After executing this function, the table can only be destroyed,

View File

@ -69,4 +69,14 @@ String Macros::expand(const String & s, size_t level) const
return expand(res, level + 1);
}
Names Macros::expand(const Names & source_names, size_t level) const
{
Names result_names;
result_names.reserve(source_names.size());
for (const String & name : source_names)
result_names.push_back(expand(name, level));
return result_names;
}
}

View File

@ -1,9 +1,11 @@
#pragma once
#include <Core/Types.h>
#include <Core/Names.h>
#include <map>
namespace Poco
{
namespace Util
@ -29,6 +31,10 @@ public:
*/
String expand(const String & s, size_t level = 0) const;
/** Apply expand to each element of the list.
*/
Names expand(const Names & source_names, size_t level = 0) const;
using MacroMap = std::map<String, String>;
const MacroMap getMacroMap() const { return macros; }

View File

@ -79,6 +79,8 @@
M(CompileAttempt) \
M(CompileSuccess) \
\
M(CompileFunction) \
\
M(ExternalSortWritePart) \
M(ExternalSortMerge) \
M(ExternalAggregationWritePart) \
@ -138,7 +140,9 @@
M(RWLockAcquiredReadLocks) \
M(RWLockAcquiredWriteLocks) \
M(RWLockReadersWaitMilliseconds) \
M(RWLockWritersWaitMilliseconds)
M(RWLockWritersWaitMilliseconds) \
\
M(NetworkErrors)
namespace ProfileEvents
{

View File

@ -86,8 +86,8 @@ public:
if (*needle < 0x80u)
{
first_needle_symbol_is_ascii = true;
l = static_cast<const UInt8>(std::tolower(*needle));
u = static_cast<const UInt8>(std::toupper(*needle));
l = std::tolower(*needle);
u = std::toupper(*needle);
}
else
{
@ -121,7 +121,7 @@ public:
continue;
}
const auto src_len = DB::UTF8::seqLength(*needle_pos);
const auto src_len = UTF8::seqLength(*needle_pos);
const auto c_u32 = utf8.convert(needle_pos);
const auto c_l_u32 = Poco::Unicode::toLower(c_u32);
@ -132,9 +132,7 @@ public:
/// @note Unicode standard states it is a rare but possible occasion
if (!(dst_l_len == dst_u_len && dst_u_len == src_len))
throw DB::Exception{
"UTF8 sequences with different lowercase and uppercase lengths are not supported",
DB::ErrorCodes::UNSUPPORTED_PARAMETER};
throw Exception{"UTF8 sequences with different lowercase and uppercase lengths are not supported", ErrorCodes::UNSUPPORTED_PARAMETER};
cache_actual_len += src_len;
if (cache_actual_len < n)
@ -183,7 +181,7 @@ public:
Poco::Unicode::toLower(utf8.convert(needle_pos)))
{
/// @note assuming sequences for lowercase and uppercase have exact same length
const auto len = DB::UTF8::seqLength(*pos);
const auto len = UTF8::seqLength(*pos);
pos += len, needle_pos += len;
}
@ -207,7 +205,7 @@ public:
Poco::Unicode::toLower(utf8.convert(pos)) ==
Poco::Unicode::toLower(utf8.convert(needle_pos)))
{
const auto len = DB::UTF8::seqLength(*pos);
const auto len = UTF8::seqLength(*pos);
pos += len, needle_pos += len;
}
@ -240,7 +238,7 @@ public:
if (mask == 0)
{
haystack += n;
DB::UTF8::syncForward(haystack, haystack_end);
UTF8::syncForward(haystack, haystack_end);
continue;
}
@ -267,7 +265,7 @@ public:
Poco::Unicode::toLower(utf8.convert(needle_pos)))
{
/// @note assuming sequences for lowercase and uppercase have exact same length
const auto len = DB::UTF8::seqLength(*haystack_pos);
const auto len = UTF8::seqLength(*haystack_pos);
haystack_pos += len, needle_pos += len;
}
@ -279,7 +277,7 @@ public:
return haystack;
/// first octet was ok, but not the first 16, move to start of next sequence and reapply
haystack += DB::UTF8::seqLength(*haystack);
haystack += UTF8::seqLength(*haystack);
continue;
}
}
@ -297,7 +295,7 @@ public:
Poco::Unicode::toLower(utf8.convert(haystack_pos)) ==
Poco::Unicode::toLower(utf8.convert(needle_pos)))
{
const auto len = DB::UTF8::seqLength(*haystack_pos);
const auto len = UTF8::seqLength(*haystack_pos);
haystack_pos += len, needle_pos += len;
}
@ -306,7 +304,7 @@ public:
}
/// advance to the start of the next sequence
haystack += DB::UTF8::seqLength(*haystack);
haystack += UTF8::seqLength(*haystack);
}
return haystack_end;

View File

@ -3,6 +3,7 @@
#include <string>
#include <cstring>
#include <cstddef>
#include <type_traits>
namespace detail

View File

@ -7,6 +7,7 @@
#include <emmintrin.h>
#endif
namespace DB
{
@ -55,27 +56,18 @@ inline size_t countCodePoints(const UInt8 * data, size_t size)
const auto end = data + size;
#if __SSE2__
const auto bytes_sse = sizeof(__m128i);
const auto src_end_sse = (data + size) - (size % bytes_sse);
constexpr auto bytes_sse = sizeof(__m128i);
const auto src_end_sse = data + size / bytes_sse * bytes_sse;
const auto align_sse = _mm_set1_epi8(0x40);
const auto upper_bound = _mm_set1_epi8(0xBF);
const auto threshold = _mm_set1_epi8(0xBF);
for (; data < src_end_sse; data += bytes_sse)
{
const auto chars = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data));
/// Align to zero to handle both cases
const auto align_res = _mm_adds_epu8(chars, align_sse);
const auto less_than_and_equals = _mm_cmpeq_epi8(_mm_min_epu8(align_res, upper_bound), align_res);
res += __builtin_popcount(_mm_movemask_epi8(less_than_and_equals));
}
res += __builtin_popcount(_mm_movemask_epi8(
_mm_cmpgt_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i *>(data)), threshold)));
#endif
for (; data < end; ++data) /// Skip UTF-8 continuation bytes.
res += (*data <= 0x7F || *data >= 0xC0);
res += static_cast<Int8>(*data) > static_cast<Int8>(0xBF);
return res;
}
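The new branchless version works because UTF-8 continuation bytes are exactly 0x80..0xBF, which reinterpret as Int8 values -128..-65; every byte strictly greater than (Int8)0xBF therefore starts a code point. A scalar sanity check (illustrative only, not part of the commit):

#include <cstdint>
#include <cstring>

/// Counts UTF-8 code points by counting non-continuation bytes.
static size_t countCodePointsScalar(const char * s)
{
    size_t res = 0;
    for (size_t i = 0, n = std::strlen(s); i < n; ++i)
        res += static_cast<int8_t>(s[i]) > static_cast<int8_t>(0xBF);
    return res;
}

/// "h\xC3\xA9llo" ("héllo") is 6 bytes but 5 code points:
/// 0xC3 (-61) counts as a start byte, its continuation byte 0xA9 (-87) does not.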

View File

@ -1,10 +1,12 @@
#pragma once
#include "ZooKeeper.h"
#include "KeeperException.h"
#include <functional>
#include <memory>
#include <common/logger_useful.h>
#include <Common/CurrentMetrics.h>
#include <Common/BackgroundSchedulePool.h>
namespace ProfileEvents
@ -35,9 +37,10 @@ public:
* It means that different participants of leader election have different identifiers
* and the existence of more than one ephemeral node with the same identifier indicates an error.
*/
LeaderElection(const std::string & path_, ZooKeeper & zookeeper_, LeadershipHandler handler_, const std::string & identifier_ = "")
: path(path_), zookeeper(zookeeper_), handler(handler_), identifier(identifier_)
LeaderElection(DB::BackgroundSchedulePool & pool_, const std::string & path_, ZooKeeper & zookeeper_, LeadershipHandler handler_, const std::string & identifier_ = "")
: pool(pool_), path(path_), zookeeper(zookeeper_), handler(handler_), identifier(identifier_)
{
task_handle = pool.addTask("LeaderElection", [this] { threadFunction(); });
createNode();
}
@ -47,17 +50,18 @@ public:
return;
shutdown_called = true;
event->set();
if (thread.joinable())
thread.join();
task_handle->deactivate();
}
~LeaderElection()
{
releaseNode();
pool.removeTask(task_handle);
}
private:
DB::BackgroundSchedulePool & pool;
DB::BackgroundSchedulePool::TaskHandle task_handle;
std::string path;
ZooKeeper & zookeeper;
LeadershipHandler handler;
@ -66,9 +70,7 @@ private:
EphemeralNodeHolderPtr node;
std::string node_name;
std::thread thread;
std::atomic<bool> shutdown_called {false};
zkutil::EventPtr event = std::make_shared<Poco::Event>();
CurrentMetrics::Increment metric_increment{CurrentMetrics::LeaderElection};
@ -80,7 +82,8 @@ private:
std::string node_path = node->getPath();
node_name = node_path.substr(node_path.find_last_of('/') + 1);
thread = std::thread(&LeaderElection::threadFunction, this);
task_handle->activate();
task_handle->schedule();
}
void releaseNode()
@ -91,38 +94,42 @@ private:
void threadFunction()
{
while (!shutdown_called)
bool success = false;
try
{
bool success = false;
Strings children = zookeeper.getChildren(path);
std::sort(children.begin(), children.end());
auto it = std::lower_bound(children.begin(), children.end(), node_name);
if (it == children.end() || *it != node_name)
throw Poco::Exception("Assertion failed in LeaderElection");
try
if (it == children.begin())
{
Strings children = zookeeper.getChildren(path);
std::sort(children.begin(), children.end());
auto it = std::lower_bound(children.begin(), children.end(), node_name);
if (it == children.end() || *it != node_name)
throw Poco::Exception("Assertion failed in LeaderElection");
if (it == children.begin())
{
ProfileEvents::increment(ProfileEvents::LeaderElectionAcquiredLeadership);
handler();
return;
}
if (zookeeper.exists(path + "/" + *(it - 1), nullptr, event))
event->wait();
success = true;
}
catch (...)
{
DB::tryLogCurrentException("LeaderElection");
ProfileEvents::increment(ProfileEvents::LeaderElectionAcquiredLeadership);
handler();
return;
}
if (!success)
event->tryWait(10 * 1000);
if (!zookeeper.existsWatch(path + "/" + *(it - 1), nullptr, task_handle->getWatchCallback()))
task_handle->schedule();
success = true;
}
catch (const KeeperException & e)
{
DB::tryLogCurrentException("LeaderElection");
if (e.code == ZooKeeperImpl::ZooKeeper::ZSESSIONEXPIRED)
return;
}
catch (...)
{
DB::tryLogCurrentException("LeaderElection");
}
if (!success)
task_handle->scheduleAfter(10 * 1000);
}
};
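A hedged usage sketch of the reworked class: the path, identifier, and handler below are illustrative, and the election itself is the standard "smallest ephemeral sequential node wins; the others watch a predecessor" scheme.

#include <Common/ZooKeeper/LeaderElection.h>
#include <memory>

void electExample(DB::BackgroundSchedulePool & pool, zkutil::ZooKeeper & zookeeper)
{
    auto election = std::make_unique<zkutil::LeaderElection>(
        pool, "/clickhouse/tables/example/leader_election", zookeeper,
        [] { /* became the leader: start leader-only work */ },
        "replica_1"); /// identifier stored in the ephemeral node

    /// ... later, on shutdown:
    election->shutdown();
}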

View File

@ -367,6 +367,16 @@ std::string ZooKeeper::get(const std::string & path, Stat * stat, const EventPtr
throw KeeperException("Can't get data for node " + path + ": node doesn't exist", code);
}
std::string ZooKeeper::getWatch(const std::string & path, Stat * stat, WatchCallback watch_callback)
{
int32_t code = 0;
std::string res;
if (tryGetWatch(path, res, stat, watch_callback, &code))
return res;
else
throw KeeperException("Can't get data for node " + path + ": node doesn't exist", code);
}
bool ZooKeeper::tryGet(const std::string & path, std::string & res, Stat * stat, const EventPtr & watch, int * return_code)
{
return tryGetWatch(path, res, stat, callbackForEvent(watch), return_code);
@ -739,6 +749,18 @@ std::future<ZooKeeperImpl::ZooKeeper::MultiResponse> ZooKeeper::asyncMulti(const
return future;
}
int32_t ZooKeeper::tryMultiNoThrow(const Requests & requests, Responses & responses)
{
try
{
return multiImpl(requests, responses);
}
catch (const ZooKeeperImpl::Exception & e)
{
return e.code;
}
}
size_t KeeperMultiException::getFailedOpIndex(int32_t code, const Responses & responses) const
{
@ -759,7 +781,7 @@ size_t KeeperMultiException::getFailedOpIndex(int32_t code, const Responses & re
KeeperMultiException::KeeperMultiException(int32_t code, const Requests & requests, const Responses & responses)
: KeeperException("Transaction failed at op #" + std::to_string(getFailedOpIndex(code, responses)), code),
requests(requests), responses(responses)
requests(requests), responses(responses), failed_op_index(getFailedOpIndex(code, responses))
{
}
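With failed_op_index stored on the exception, callers no longer have to parse the "Transaction failed at op #N" message. A hedged sketch, with namespace and member names as they appear in the hunks above:

try
{
    zookeeper.multi(ops);
}
catch (const zkutil::KeeperMultiException & e)
{
    /// Index of the first failed operation within `ops`.
    std::cerr << "failed at op #" << e.failed_op_index << ": " << e.what() << "\n";
}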


@@ -113,6 +113,7 @@ public:
bool existsWatch(const std::string & path, Stat * stat, WatchCallback watch_callback);
std::string get(const std::string & path, Stat * stat = nullptr, const EventPtr & watch = nullptr);
std::string getWatch(const std::string & path, Stat * stat, WatchCallback watch_callback);
/// Doesn't throw in the following cases:
/// * The node doesn't exist. Returns false in this case.
@@ -148,11 +149,8 @@ public:
/// Throws only if some operation has returned an "unexpected" error
/// - an error that would cause the corresponding try- method to throw.
int32_t tryMulti(const Requests & requests, Responses & responses);
/// Throws nothing, just alias of multiImpl
int32_t tryMultiNoThrow(const Requests & requests, Responses & responses)
{
return multiImpl(requests, responses);
}
/// Throws nothing (even session expired errors)
int32_t tryMultiNoThrow(const Requests & requests, Responses & responses);
Int64 getClientID();
@@ -184,26 +182,31 @@ public:
///
/// Future should not be destroyed before the result is gotten.
std::future<ZooKeeperImpl::ZooKeeper::GetResponse> asyncGet(const std::string & path);
using FutureGet = std::future<ZooKeeperImpl::ZooKeeper::GetResponse>;
FutureGet asyncGet(const std::string & path);
std::future<ZooKeeperImpl::ZooKeeper::GetResponse> asyncTryGet(const std::string & path);
FutureGet asyncTryGet(const std::string & path);
std::future<ZooKeeperImpl::ZooKeeper::ExistsResponse> asyncExists(const std::string & path);
using FutureExists = std::future<ZooKeeperImpl::ZooKeeper::ExistsResponse>;
FutureExists asyncExists(const std::string & path);
std::future<ZooKeeperImpl::ZooKeeper::ListResponse> asyncGetChildren(const std::string & path);
using FutureGetChildren = std::future<ZooKeeperImpl::ZooKeeper::ListResponse>;
FutureGetChildren asyncGetChildren(const std::string & path);
std::future<ZooKeeperImpl::ZooKeeper::RemoveResponse> asyncRemove(const std::string & path, int32_t version = -1);
using FutureRemove = std::future<ZooKeeperImpl::ZooKeeper::RemoveResponse>;
FutureRemove asyncRemove(const std::string & path, int32_t version = -1);
/// Doesn't throw in the following cases:
/// * The node doesn't exist
/// * The versions do not match
/// * The node has children
std::future<ZooKeeperImpl::ZooKeeper::RemoveResponse> asyncTryRemove(const std::string & path, int32_t version = -1);
FutureRemove asyncTryRemove(const std::string & path, int32_t version = -1);
std::future<ZooKeeperImpl::ZooKeeper::MultiResponse> asyncMulti(const Requests & ops);
using FutureMulti = std::future<ZooKeeperImpl::ZooKeeper::MultiResponse>;
FutureMulti asyncMulti(const Requests & ops);
/// Like the previous one, but doesn't throw any exceptions on future.get()
std::future<ZooKeeperImpl::ZooKeeper::MultiResponse> tryAsyncMulti(const Requests & ops);
FutureMulti tryAsyncMulti(const Requests & ops);
static std::string error2string(int32_t code);
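The new FutureXXX aliases spare call sites from spelling out the ZooKeeperImpl response types. A hedged sketch with a placeholder path; note the caveat above that the future must not be destroyed before the result is gotten:

zkutil::ZooKeeper zookeeper("localhost:2181");
zkutil::ZooKeeper::FutureGet future = zookeeper.asyncTryGet("/some/node");
auto response = future.get();  /// keep `future` alive until this point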


@@ -432,6 +432,35 @@ void ZooKeeper::read(T & x)
}
void addRootPath(String & path, const String & root_path)
{
if (path.empty())
throw Exception("Path cannot be empty", ZooKeeper::ZBADARGUMENTS);
if (path[0] != '/')
throw Exception("Path must begin with /", ZooKeeper::ZBADARGUMENTS);
if (root_path.empty())
return;
if (path.size() == 1) /// "/"
path = root_path;
else
path = root_path + path;
}
void removeRootPath(String & path, const String & root_path)
{
if (root_path.empty())
return;
if (path.size() <= root_path.size())
throw Exception("Received path is not longer than root_path", ZooKeeper::ZDATAINCONSISTENCY);
path = path.substr(root_path.size());
}
static constexpr int32_t protocol_version = 0;
static constexpr ZooKeeper::XID watch_xid = -1;
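A standalone mirror of the two helpers above (plain std::string and standard exceptions in place of String and the ZooKeeper error codes), showing the chroot mapping in both directions:

#include <iostream>
#include <stdexcept>
#include <string>

static void addRootPath(std::string & path, const std::string & root_path)
{
    if (path.empty() || path[0] != '/')
        throw std::invalid_argument("bad path");
    if (root_path.empty())
        return;
    path = (path.size() == 1) ? root_path : root_path + path;  /// "/" maps to the chroot itself
}

static void removeRootPath(std::string & path, const std::string & root_path)
{
    if (root_path.empty())
        return;
    if (path.size() <= root_path.size())
        throw std::logic_error("received path is not longer than root_path");
    path = path.substr(root_path.size());
}

int main()
{
    std::string path = "/queue";
    addRootPath(path, "/clickhouse_test");
    std::cout << path << "\n";  /// /clickhouse_test/queue
    removeRootPath(path, "/clickhouse_test");
    std::cout << path << "\n";  /// /queue
}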
@@ -735,6 +764,7 @@ void ZooKeeper::sendThread()
if (expired)
break;
info.request->addRootPath(root_path);
info.request->write(*out);
if (info.request->xid == close_xid)
@@ -844,35 +874,6 @@ ZooKeeper::ResponsePtr ZooKeeper::MultiRequest::makeResponse() const { return st
ZooKeeper::ResponsePtr ZooKeeper::CloseRequest::makeResponse() const { return std::make_shared<CloseResponse>(); }
void addRootPath(String & path, const String & root_path)
{
if (path.empty())
throw Exception("Path cannot be empty", ZooKeeper::ZBADARGUMENTS);
if (path[0] != '/')
throw Exception("Path must begin with /", ZooKeeper::ZBADARGUMENTS);
if (root_path.empty())
return;
if (path.size() == 1) /// "/"
path = root_path;
else
path = root_path + path;
}
void removeRootPath(String & path, const String & root_path)
{
if (root_path.empty())
return;
if (path.size() <= root_path.size())
throw Exception("Received path is not longer than root_path", ZooKeeper::ZDATAINCONSISTENCY);
path = path.substr(root_path.size());
}
void ZooKeeper::CreateRequest::addRootPath(const String & root_path) { ZooKeeperImpl::addRootPath(path, root_path); }
void ZooKeeper::RemoveRequest::addRootPath(const String & root_path) { ZooKeeperImpl::addRootPath(path, root_path); }
void ZooKeeper::ExistsRequest::addRootPath(const String & root_path) { ZooKeeperImpl::addRootPath(path, root_path); }
@@ -1108,7 +1109,6 @@ void ZooKeeper::finalize(bool error_send, bool error_receive)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
if (info.watch)
{
@@ -1335,8 +1335,6 @@ void ZooKeeper::pushRequest(RequestInfo && info)
{
try
{
info.request->addRootPath(root_path);
info.time = clock::now();
if (!info.request->xid)


@@ -596,7 +596,7 @@ private:
std::mutex operations_mutex;
using WatchCallbacks = std::vector<WatchCallback>;
using Watches = std::map<String /* path */, WatchCallbacks>;
using Watches = std::map<String /* path, relative to root_path */, WatchCallbacks>;
Watches watches;
std::mutex watches_mutex;


@@ -61,7 +61,7 @@ TEST(zkutil, multi_nice_exception_msg)
String msg = getCurrentExceptionMessage(false);
bool msg_has_reqired_patterns = msg.find("/clickhouse_test/zkutil_multi/a") != std::string::npos && msg.find("#2") != std::string::npos;
bool msg_has_reqired_patterns = msg.find("#2") != std::string::npos;
EXPECT_TRUE(msg_has_reqired_patterns) << msg;
}
}
@@ -129,40 +129,54 @@ TEST(zkutil, multi_async)
}
}
/// Run this test under sudo
TEST(zkutil, multi_async_libzookeeper_segfault)
TEST(zkutil, watch_get_children_with_chroot)
{
auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181", "", 1000);
zkutil::Requests ops;
try
{
const String zk_server = "localhost:2181";
const String prefix = "/clickhouse_test/zkutil/watch_get_children_with_chroot";
ops.emplace_back(zkutil::makeCheckRequest("/clickhouse_test/zkutil_multi", 0));
/// Create the chroot node first
auto zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server);
zookeeper->createAncestors(prefix + "/");
zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server, "", zkutil::DEFAULT_SESSION_TIMEOUT, prefix);
/// Uncomment to test
//auto cmd = ShellCommand::execute("sudo service zookeeper restart");
//cmd->wait();
String queue_path = "/queue";
zookeeper->tryRemoveRecursive(queue_path);
zookeeper->createAncestors(queue_path + "/");
auto future = zookeeper->asyncMulti(ops);
auto res = future.get();
EXPECT_TRUE(zkutil::isHardwareError(res.error));
zkutil::EventPtr event = std::make_shared<Poco::Event>();
zookeeper->getChildren(queue_path, nullptr, event);
{
auto zookeeper2 = std::make_unique<zkutil::ZooKeeper>(zk_server, "", zkutil::DEFAULT_SESSION_TIMEOUT, prefix);
zookeeper2->create(queue_path + "/children-", "", zkutil::CreateMode::PersistentSequential);
}
event->wait();
}
catch (...)
{
std::cerr << getCurrentExceptionMessage(true);
throw;
}
}
TEST(zkutil, multi_create_sequential)
{
try
{
const String zk_server = "localhost:2181";
const String prefix = "/clickhouse_test/zkutil";
/// Create the chroot node first
auto zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181");
zookeeper->createAncestors("/clickhouse_test/");
auto zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server);
zookeeper->createAncestors(prefix + "/");
zookeeper = std::make_unique<zkutil::ZooKeeper>(zk_server, "", zkutil::DEFAULT_SESSION_TIMEOUT, "/clickhouse_test");
zookeeper = std::make_unique<zkutil::ZooKeeper>("localhost:2181", "", zkutil::DEFAULT_SESSION_TIMEOUT, "/clickhouse_test");
zkutil::Requests ops;
String base_path = "/zkutil/multi_create_sequential";
String base_path = "/multi_create_sequential";
zookeeper->tryRemoveRecursive(base_path);
zookeeper->createAncestors(base_path + "/");
zkutil::Requests ops;
String sequential_node_prefix = base_path + "/queue-";
ops.emplace_back(zkutil::makeCreateRequest(sequential_node_prefix, "", zkutil::CreateMode::EphemeralSequential));
auto results = zookeeper->multi(ops);
@@ -180,3 +194,4 @@ TEST(zkutil, multi_create_sequential)
}


@@ -9,7 +9,8 @@
#cmakedefine01 USE_RDKAFKA
#cmakedefine01 USE_CAPNP
#cmakedefine01 USE_EMBEDDED_COMPILER
#cmakedefine01 Poco_SQLODBC_FOUND
#cmakedefine01 Poco_DataODBC_FOUND
#cmakedefine01 Poco_MongoDB_FOUND
#cmakedefine01 Poco_NetSSL_FOUND
#cmakedefine01 LLVM_HAS_RTTI
#cmakedefine01 USE_POCO_SQLODBC
#cmakedefine01 USE_POCO_DATAODBC
#cmakedefine01 USE_POCO_MONGODB
#cmakedefine01 USE_POCO_NETSSL
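#cmakedefine01 always defines the macro to 0 or 1 (unlike plain #cmakedefine, which leaves it undefined when the option is off), so the flags above are meant for #if rather than #ifdef. A consumer-side sketch, with the include path assumed:

#include <Common/config.h>

#if USE_POCO_NETSSL
/// NetSSL-dependent code goes here.
#endif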


@@ -35,10 +35,10 @@ const char * auto_config_build[]
"USE_VECTORCLASS", "@USE_VECTORCLASS@",
"USE_RDKAFKA", "@USE_RDKAFKA@",
"USE_CAPNP", "@USE_CAPNP@",
"USE_Poco_SQLODBC", "@Poco_SQLODBC_FOUND@",
"USE_Poco_DataODBC", "@Poco_DataODBC_FOUND@",
"USE_Poco_MongoDB", "@Poco_MongoDB_FOUND@",
"USE_Poco_NetSSL", "@Poco_NetSSL_FOUND@",
"USE_POCO_SQLODBC", "@USE_POCO_SQLODBC@",
"USE_POCO_DATAODBC", "@USE_POCO_DATAODBC@",
"USE_POCO_MONGODB", "@USE_POCO_MONGODB@",
"USE_POCO_NETSSL", "@USE_POCO_NETSSL@",
nullptr, nullptr
};


@@ -1,5 +1,6 @@
#include <Common/getNumberOfPhysicalCPUCores.h>
#include <thread>
#include <fstream>
#if defined(__x86_64__)
@@ -13,8 +14,26 @@
unsigned getNumberOfPhysicalCPUCores()
{
#if defined(__x86_64__)
#if defined(__linux__)
/// On Linux we look at the cgroups CPU quota limit if it is available.
std::ifstream cgroup_read_in("/sys/fs/cgroup/cpu/cpu.cfs_quota_us");
if (cgroup_read_in.is_open())
{
std::string allocated_cpus_share_str{ std::istreambuf_iterator<char>(cgroup_read_in), std::istreambuf_iterator<char>() };
int allocated_cpus_share_int = std::stoi(allocated_cpus_share_str);
cgroup_read_in.close();
// If a valid value is present
if (allocated_cpus_share_int > 0)
{
unsigned allocated_cpus = (allocated_cpus_share_int + 999) / 1000;
return allocated_cpus;
}
}
#endif
#if defined(__x86_64__)
cpu_raw_data_t raw_data;
if (0 != cpuid_get_raw_data(&raw_data))
throw DB::Exception("Cannot cpuid_get_raw_data: " + std::string(cpuid_error()), DB::ErrorCodes::CPUID_ERROR);
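The quota handling above is a ceiling division by 1000: the value read from cpu.cfs_quota_us is treated as thousandths of a CPU and rounded up, while -1 (meaning "no limit") and 0 are filtered out by the `> 0` check. A standalone check of the arithmetic, with quota values invented for illustration:

#include <iostream>

/// Mirrors the rounding above: ceil(quota / 1000) for positive quotas.
static unsigned allocatedCpus(int quota)
{
    return static_cast<unsigned>((quota + 999) / 1000);
}

int main()
{
    for (int quota : {1000, 1500, 2500})
        std::cout << quota << " -> " << allocatedCpus(quota) << "\n";  /// yields 1, 2, 3
}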

Some files were not shown because too many files have changed in this diff.