diff --git a/.gitignore b/.gitignore index 8d1a188befb..6c0865d1959 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,8 @@ /docs/tools/venv/ /docs/en/development/build/ /docs/ru/development/build/ +/docs/en/single.md +/docs/ru/single.md # callgrind files callgrind.out.* @@ -176,7 +178,6 @@ utils/zookeeper-create-entry-to-download-part/zookeeper-create-entry-to-download utils/zookeeper-dump-tree/zookeeper-dump-tree utils/zookeeper-remove-by-list/zookeeper-remove-by-list dbms/src/Storages/tests/remove_symlink_directory -dbms/tests/queries/1_stateful debian/control debian/copyright debian/tmp/ @@ -239,5 +240,6 @@ node_modules public website/docs website/presentations +website/package-lock.json .DS_Store */.DS_Store diff --git a/.gitmodules b/.gitmodules index 1f392b73c83..742e4616276 100644 --- a/.gitmodules +++ b/.gitmodules @@ -37,3 +37,12 @@ [submodule "contrib/llvm"] path = contrib/llvm url = https://github.com/ClickHouse-Extras/llvm +[submodule "contrib/mariadb-connector-c"] + path = contrib/mariadb-connector-c + url = https://github.com/MariaDB/mariadb-connector-c.git +[submodule "contrib/jemalloc"] + path = contrib/jemalloc + url = https://github.com/jemalloc/jemalloc.git +[submodule "contrib/unixodbc"] + path = contrib/unixodbc + url = https://github.com/ClickHouse-Extras/UnixODBC.git diff --git a/.travis.yml b/.travis.yml index 705b6977114..d658b8d285c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,26 +3,6 @@ language: generic matrix: fast_finish: true include: -# - os: linux -# -# cache: -# ccache: true -# timeout: 1000 -# -# addons: -# apt: -# update: true -# sources: -# - ubuntu-toolchain-r-test -# packages: [ g++-7, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl ] -# -# env: -# - MATRIX_EVAL="export CC=gcc-7 && export CXX=g++-7" -# -# script: -# - env TEST_RUN= utils/travis/normal.sh - - # We need to have gcc7 headers to compile c++17 code on clang - os: linux @@ -41,33 +21,11 @@ matrix: packages: [ ninja-build, g++-7, clang-5.0, lld-5.0, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl] env: - - MATRIX_EVAL="export CC=clang-5.0 && export CXX=clang++-5.0" + - MATRIX_EVAL="export CC=clang-5.0 CXX=clang++-5.0" script: - utils/travis/normal.sh - -# TODO: fix internal compiler -# - os: linux -# -# sudo: required -# -# cache: -# timeout: 1000 -# directories: -# - /var/cache/pbuilder/ccache -# -# addons: -# apt: -# packages: [ pbuilder, fakeroot, debhelper ] -# -# env: -# - MATRIX_EVAL="export DEB_CC=clang-5.0 && export DEB_CXX=clang++-5.0" -# -# script: -# - utils/travis/pbuilder.sh - - - os: linux sudo: required @@ -85,69 +43,6 @@ matrix: script: - utils/travis/pbuilder.sh - -# - os: linux -# -# sudo: required -# -# cache: -# timeout: 1000 -# directories: -# - /var/cache/pbuilder/ccache -# -# addons: -# apt: -# update: true -# packages: [ pbuilder, fakeroot, debhelper ] -# -# env: -# - MATRIX_EVAL="export ARCH=i386" -# -# script: -# - env PBUILDER_TIMEOUT=40m TEST_TRUE=true TEST_RUN= utils/travis/pbuilder.sh - - -# TODO: Can't bootstrap bionic on trusty host 
-# - os: linux
-#
-#   sudo: required
-#
-#   cache:
-#     timeout: 1000
-#     directories:
-#       - /var/cache/pbuilder/ccache
-#
-#   addons:
-#     apt:
-#       update: true
-#       packages: [ pbuilder, fakeroot, debhelper ]
-#
-#   env:
-#     - MATRIX_EVAL="export DEB_CC=clang-6.0 && export DEB_CXX=clang++-6.0 && export DIST=bionic && export EXTRAPACKAGES='clang-6.0 lld-6.0'"
-#
-#   script:
-#     - utils/travis/pbuilder.sh
-
-
-# Cant fit to time limit (48min)
-# - os: osx
-#   osx_image: xcode9.2
-#
-#   cache:
-#     ccache: true
-#     timeout: 1000
-#
-#   before_install:
-#     - brew install unixodbc gcc ccache libtool gettext zlib readline double-conversion gperftools google-sparsehash lz4 zstd || true
-#     - brew link --overwrite gcc || true
-#
-#   env:
-#     - MATRIX_EVAL="export CC=gcc-8 && export CXX=g++-8"
-#
-#   script:
-#     - env CMAKE_FLAGS="-DUSE_INTERNAL_BOOST_LIBRARY=1" utils/travis/normal.sh
-
-
 allow_failures:
   - os: osx
diff --git a/CHANGELOG.draft.md b/CHANGELOG.draft.md
index 5ed63c620ba..93c681b0336 100644
--- a/CHANGELOG.draft.md
+++ b/CHANGELOG.draft.md
@@ -1,15 +1 @@
-## en:
-
-### Improvements:
-* Added Nullable support for runningDifference function. [#2590](https://github.com/yandex/ClickHouse/issues/2590)
-
-### Bug fiexs:
-* Fixed switching to default databases in case of client reconnection. [#2580](https://github.com/yandex/ClickHouse/issues/2580)
-
-## ru:
-
-### Улучшения:
-* Добавлена поддержка Nullable для функции runningDifference. [#2590](https://github.com/yandex/ClickHouse/issues/2590)
-
-### Исправление ошибок:
-* Исправлено переключение на дефолтную базу данных при переподключении клиента. [#2580](https://github.com/yandex/ClickHouse/issues/2580)
+## RU
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cde357611d4..4f26f565e8e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,122 @@
+## ClickHouse release 18.5.1, 2018-07-31
+
+### New features:
+
+* Added the hash function `murmurHash2_32` [#2756](https://github.com/yandex/ClickHouse/pull/2756).
+
+### Improvements:
+
+* Now you can use the `from_env` attribute to set values in config files from environment variables [#2741](https://github.com/yandex/ClickHouse/pull/2741).
+* Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [#2752](https://github.com/yandex/ClickHouse/pull/2752).
+
+### Bug fixes:
+
+* Fixed a possible bug when starting a replica [#2759](https://github.com/yandex/ClickHouse/pull/2759).
+
+## ClickHouse release 18.4.0, 2018-07-28
+
+### New features:
+
+* Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721).
+* Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [#2708](https://github.com/yandex/ClickHouse/pull/2708).
+* Support for `HTTP Basic` authentication in the replication protocol [#2727](https://github.com/yandex/ClickHouse/pull/2727).
+* The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699).
+* Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701).
+
+### Improvements:
+
+* The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition (see the sketch below) [#2694](https://github.com/yandex/ClickHouse/pull/2694).
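A usage sketch of the mutation described in the item above; the table and column names here are hypothetical, not taken from the PR:

```sql
-- Only data parts that contain rows matching the condition are rewritten;
-- parts untouched by the WHERE clause are left as-is.
ALTER TABLE hits DELETE WHERE EventDate < '2018-01-01';
```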
+* The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed. +* Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689). + +### Bug fixes: + +* Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +* Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735). +* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)) +* Fixed server crash when using the `countArray()` aggregate function. + +## ClickHouse release 18.1.0, 2018-07-23 + +### New features: + +* Support for the `ALTER TABLE t DELETE WHERE` query for non-replicated MergeTree tables ([#2634](https://github.com/yandex/ClickHouse/pull/2634)). +* Support for arbitrary types for the `uniq*` family of aggregate functions ([#2010](https://github.com/yandex/ClickHouse/issues/2010)). +* Support for arbitrary types in comparison operators ([#2026](https://github.com/yandex/ClickHouse/issues/2026)). +* The `users.xml` file allows setting a subnet mask in the format `10.0.0.1/255.255.255.0`. This is necessary for using masks for IPv6 networks with zeros in the middle ([#2637](https://github.com/yandex/ClickHouse/pull/2637)). +* Added the `arrayDistinct` function ([#2670](https://github.com/yandex/ClickHouse/pull/2670)). +* The SummingMergeTree engine can now work with AggregateFunction type columns ([Constantin S. Pan](https://github.com/yandex/ClickHouse/pull/2566)). + +### Improvements: + +* Changed the numbering scheme for release versions. Now the first part contains the year of release (A.D., Moscow timezone, minus 2000), the second part contains the number for major changes (increases for most releases), and the third part is the patch version. Releases are still backwards compatible, unless otherwise stated in the changelog. +* Faster conversions of floating-point numbers to a string ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2664)). +* If some rows were skipped during an insert due to parsing errors (this is possible with the `input_allow_errors_num` and `input_allow_errors_ratio` settings enabled), the number of skipped rows is now written to the server log ([Leonardo Cecchi](https://github.com/yandex/ClickHouse/pull/2669)). + +### Bug fixes: + +* Fixed the TRUNCATE command for temporary tables ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2624)). +* Fixed a rare deadlock in the ZooKeeper client library that occurred when there was a network error while reading the response ([c315200](https://github.com/yandex/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). +* Fixed an error during a CAST to Nullable types ([#1322](https://github.com/yandex/ClickHouse/issues/1322)). +* Fixed the incorrect result of the `maxIntersection()` function when the boundaries of intervals coincided ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2657)). +* Fixed incorrect transformation of the OR expression chain in a function argument ([chenxing-xc](https://github.com/yandex/ClickHouse/pull/2663)). 
+* Fixed performance degradation for queries containing `IN (subquery)` expressions inside another subquery ([#2571](https://github.com/yandex/ClickHouse/issues/2571)). +* Fixed incompatibility between servers with different versions in distributed queries that use a `CAST` function that isn't in uppercase letters ([fe8c4d6](https://github.com/yandex/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). +* Added missing quoting of identifiers for queries to an external DBMS ([#2635](https://github.com/yandex/ClickHouse/issues/2635)). + +### Backward incompatible changes: + +* Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `0` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`. + + +## ClickHouse release 1.1.54394, 2018-07-12 + +### New features: + +* Added the `histogram` aggregate function ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2521)). +* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying partitions for `ReplicatedMergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2600)). + +### Bug fixes: + +* Fixed a problem with a very small timeout for sockets (one second) for reading and writing when sending and downloading replicated data, which made it impossible to download larger parts if there is a load on the network or disk (it resulted in cyclical attempts to download parts). This error occurred in version 1.1.54388. +* Fixed issues when using chroot in ZooKeeper if you inserted duplicate data blocks in the table. +* The `has` function now works correctly for an array with Nullable elements ([#2115](https://github.com/yandex/ClickHouse/issues/2115)). +* The `system.tables` table now works correctly when used in distributed queries. The `metadata_modification_time` and `engine_full` columns are now non-virtual. Fixed an error that occurred if only these columns were requested from the table. +* Fixed how an empty `TinyLog` table works after inserting an empty data block ([#2563](https://github.com/yandex/ClickHouse/issues/2563)). +* The `system.zookeeper` table works if the value of the node in ZooKeeper is NULL. + +## ClickHouse release 1.1.54390, 2018-07-06 + +### New features: + +* Queries can be sent in `multipart/form-data` format (in the `query` field), which is useful if external data is also sent for query processing ([Olga Hvostikova](https://github.com/yandex/ClickHouse/pull/2490)). +* Added the ability to enable or disable processing single or double quotes when reading data in CSV format. You can configure this in the `format_csv_allow_single_quotes` and `format_csv_allow_double_quotes` settings ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2574)). +* Now `OPTIMIZE TABLE ... FINAL` can be used without specifying the partition for non-replicated variants of `MergeTree` ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2599)). + +### Improvements: + +* Improved performance, reduced memory consumption, and correct tracking of memory consumption with use of the IN operator when a table index could be used ([#2584](https://github.com/yandex/ClickHouse/pull/2584)). +* Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2. 
+* Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([#2573](https://github.com/yandex/ClickHouse/pull/2573)). +* Added `Nullable` support for the `runningDifference` function ([#2594](https://github.com/yandex/ClickHouse/pull/2594)). +* Improved query analysis performance when there is a very large number of expressions ([#2572](https://github.com/yandex/ClickHouse/pull/2572)). +* Faster selection of data parts for merging in `ReplicatedMergeTree` tables. Faster recovery of the ZooKeeper session ([#2597](https://github.com/yandex/ClickHouse/pull/2597)). +* The `format_version.txt` file for `MergeTree` tables is re-created if it is missing, which makes sense if ClickHouse is launched after copying the directory structure without files ([Ciprian Hacman](https://github.com/yandex/ClickHouse/pull/2593)). + +### Bug fixes: + +* Fixed a bug when working with ZooKeeper that could make it impossible to recover the session and readonly states of tables before restarting the server. +* Fixed a bug when working with ZooKeeper that could result in old nodes not being deleted if the session is interrupted. +* Fixed an error in the `quantileTDigest` function for Float arguments (this bug was introduced in version 1.1.54388) ([Mikhail Surin](https://github.com/yandex/ClickHouse/pull/2553)). +* Fixed a bug in the index for MergeTree tables if the primary key column is located inside the function for converting types between signed and unsigned integers of the same size ([#2603](https://github.com/yandex/ClickHouse/pull/2603)). +* Fixed segfault if `macros` are used but they aren't in the config file ([#2570](https://github.com/yandex/ClickHouse/pull/2570)). +* Fixed switching to the default database when reconnecting the client ([#2583](https://github.com/yandex/ClickHouse/pull/2583)). +* Fixed a bug that occurred when the `use_index_for_in_with_subqueries` setting was disabled. + +### Security fix: + +* Sending files is no longer possible when connected to MySQL (`LOAD DATA LOCAL INFILE`). + ## ClickHouse release 1.1.54388, 2018-06-28 ### New features: diff --git a/CHANGELOG_RU.md b/CHANGELOG_RU.md index 5282e10a556..494f76379c2 100644 --- a/CHANGELOG_RU.md +++ b/CHANGELOG_RU.md @@ -1,3 +1,78 @@ +## ClickHouse release 18.6.0, 2018-08-02 + +### Новые возможности: +* Добавлена поддержка ON выражений для JOIN ON синтаксиса: +`JOIN ON Expr([table.]column, ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]` +Выражение должно представлять из себя цепочку равенств, объединенных оператором AND. Каждая часть равенства может являться произвольным выражением над столбцами одной из таблиц. Поддержана возможность использования fully qualified имен столбцов (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) для правой таблицы. [#2742](https://github.com/yandex/ClickHouse/pull/2742) +* Добавлена возможность включить HTTPS для репликации. [#2760](https://github.com/yandex/ClickHouse/pull/2760) + +### Улучшения: +* Сервер передаёт на клиент также patch-компонент своей версии. Данные о patch компоненте версии добавлены в `system.processes` и `query_log`. [#2646](https://github.com/yandex/ClickHouse/pull/2646) + + +## ClickHouse release 18.5.1, 2018-07-31 + +### Новые возможности: +* Добавлена функция хеширования `murmurHash2_32` [#2756](https://github.com/yandex/ClickHouse/pull/2756). 
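A minimal sketch of the new hash function named in the entry above; the input literal is arbitrary:

```sql
SELECT murmurHash2_32('ClickHouse') AS hash; -- returns a UInt32 value
```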
+ +### Улучшения: +* Добавлена возможность указывать значения в конфигурационных файлах из переменных окружения с помощью атрибута `from_env` [#2741](https://github.com/yandex/ClickHouse/pull/2741). +* Добавлены регистронезависимые версии функций `coalesce`, `ifNull`, `nullIf` [#2752](https://github.com/yandex/ClickHouse/pull/2752). + +### Исправление ошибок: +* Исправлена возможная ошибка при старте реплики [#2759](https://github.com/yandex/ClickHouse/pull/2759). + + +## ClickHouse release 18.4.0, 2018-07-28 + +### Новые возможности: +* Добавлены системные таблицы `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721). +* Добавлена возможность использования табличной функции вместо таблицы в качестве аргумента табличной функции `remote` и `cluster` [#2708](https://github.com/yandex/ClickHouse/pull/2708). +* Поддержка `HTTP Basic` аутентификации в протоколе репликации [#2727](https://github.com/yandex/ClickHouse/pull/2727). +* В функции `has` добавлена возможность поиска в массиве значений типа `Enum` по числовому значению [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699). +* Поддержка добавления произвольных разделителей сообщений в процессе чтения из `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701). + +### Улучшения: +* Запрос `ALTER TABLE t DELETE WHERE` не перезаписывает куски данных, которые не были затронуты условием WHERE [#2694](https://github.com/yandex/ClickHouse/pull/2694). +* Настройка `use_minimalistic_checksums_in_zookeeper` таблиц семейства `ReplicatedMergeTree` включена по-умолчанию. Эта настройка была добавлена в версии 1.1.54378, 2018-04-16. Установка версий, более старых, чем 1.1.54378, становится невозможной. +* Поддерживается запуск запросов `KILL` и `OPTIMIZE` с указанием `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689). + +### Исправление ошибок: +* Исправлена ошибка `Column ... is not under aggregate function and not in GROUP BY` в случае агрегации по выражению с оператором IN. Ошибка появилась в версии 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2)) +* Исправлена ошибка в агрегатной функции `windowFunnel` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735). +* Исправлена ошибка в агрегатной функции `anyHeavy` ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee)) +* Исправлено падение сервера при использовании функции `countArray()`. + + +## ClickHouse release 18.1.0, 2018-07-23 + +### Новые возможности: +* Поддержка запроса `ALTER TABLE t DELETE WHERE` для нереплицированных MergeTree-таблиц ([#2634](https://github.com/yandex/ClickHouse/pull/2634)). +* Поддержка произвольных типов для семейства агрегатных функций `uniq*` ([#2010](https://github.com/yandex/ClickHouse/issues/2010)). +* Поддержка произвольных типов в операторах сравнения ([#2026](https://github.com/yandex/ClickHouse/issues/2026)). +* Возможность в `users.xml` указывать маску подсети в формате `10.0.0.1/255.255.255.0`. Это необходимо для использования "дырявых" масок IPv6 сетей ([#2637](https://github.com/yandex/ClickHouse/pull/2637)). +* Добавлена функция `arrayDistinct` ([#2670](https://github.com/yandex/ClickHouse/pull/2670)). +* Движок SummingMergeTree теперь может работать со столбцами типа AggregateFunction ([Constantin S. Pan](https://github.com/yandex/ClickHouse/pull/2566)). 
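A sketch of the `SummingMergeTree` capability from the last item above, under assumed table and column names and the old-style engine parameters of this release line: plain numeric columns are summed on merge, while `AggregateFunction` columns are combined with the aggregate function's own merge logic.

```sql
CREATE TABLE daily_stats
(
    date Date,
    site_id UInt32,
    hits UInt64,                               -- summed on merge
    uniq_users AggregateFunction(uniq, UInt64) -- merged as a uniq state
) ENGINE = SummingMergeTree(date, (date, site_id), 8192);
```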
+ +### Улучшения: +* Изменена схема версионирования релизов. Теперь первый компонент содержит год релиза (A.D.; по московскому времени; из номера вычитается 2000), второй - номер крупных изменений (увеличивается для большинства релизов), третий - патч-версия. Релизы по-прежнему обратно совместимы, если другое не указано в changelog. +* Ускорено преобразование чисел с плавающей точкой в строку ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2664)). +* Теперь, если при вставке из-за ошибок парсинга пропущено некоторое количество строк (такое возможно про включённых настройках `input_allow_errors_num`, `input_allow_errors_ratio`), это количество пишется в лог сервера ([Leonardo Cecchi](https://github.com/yandex/ClickHouse/pull/2669)). + +### Исправление ошибок: +* Исправлена работа команды TRUNCATE для временных таблиц ([Amos Bird](https://github.com/yandex/ClickHouse/pull/2624)). +* Исправлен редкий deadlock в клиентской библиотеке ZooKeeper, который возникал при сетевой ошибке во время вычитывания ответа ([c315200](https://github.com/yandex/ClickHouse/commit/c315200e64b87e44bdf740707fc857d1fdf7e947)). +* Исправлена ошибка при CAST в Nullable типы ([#1322](https://github.com/yandex/ClickHouse/issues/1322)). +* Исправлен неправильный результат функции `maxIntersection()` в случае совпадения границ отрезков ([Michael Furmur](https://github.com/yandex/ClickHouse/pull/2657)). +* Исправлено неверное преобразование цепочки OR-выражений в аргументе функции ([chenxing-xc](https://github.com/yandex/ClickHouse/pull/2663)). +* Исправлена деградация производительности запросов, содержащих выражение `IN (подзапрос)` внутри другого подзапроса ([#2571](https://github.com/yandex/ClickHouse/issues/2571)). +* Исправлена несовместимость серверов разных версий при распределённых запросах, использующих функцию `CAST` не в верхнем регистре ([fe8c4d6](https://github.com/yandex/ClickHouse/commit/fe8c4d64e434cacd4ceef34faa9005129f2190a5)). +* Добавлено недостающее квотирование идентификаторов при запросах к внешним СУБД ([#2635](https://github.com/yandex/ClickHouse/issues/2635)). + +### Обратно несовместимые изменения: +* Не работает преобразование строки, содержащей число ноль, в DateTime. Пример: `SELECT toDateTime('0')`. По той же причине не работает `DateTime DEFAULT '0'` в таблицах, а также `0` в словарях. Решение: заменить `0` на `0000-00-00 00:00:00`. 
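The backward-incompatible change above, expressed as queries; the replacement literal comes straight from the entry:

```sql
SELECT toDateTime('0');                   -- no longer works in this release
SELECT toDateTime('0000-00-00 00:00:00'); -- use the explicit zero value instead
```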
+ + ## ClickHouse release 1.1.54394, 2018-07-12 ### Новые возможности: diff --git a/CMakeLists.txt b/CMakeLists.txt index 3c53d3245a7..f5aee27ddab 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,10 +34,9 @@ endif () string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) message (STATUS "CMAKE_BUILD_TYPE: " ${CMAKE_BUILD_TYPE} ) -# ASan - build type with address sanitizer -# UBSan - build type with undefined behaviour sanitizer -# TSan is not supported due to false positive errors in libstdc++ and necessity to rebuild libstdc++ with TSan -set (CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel;ASan;UBSan" CACHE STRING "" FORCE) +set (CMAKE_CONFIGURATION_TYPES "RelWithDebInfo;Debug;Release;MinSizeRel" CACHE STRING "" FORCE) + +include (cmake/sanitize.cmake) include (cmake/arch.cmake) @@ -61,10 +60,6 @@ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (COMMON_WARNING_FLAGS "${COMMON_WARNING_FLAGS} -Wno-unused-command-line-argument") endif () -if (ARCH_LINUX) - set (CXX11_ABI "ENABLE" CACHE STRING "Use C++11 ABI: DEFAULT, ENABLE, DISABLE") -endif () - option (TEST_COVERAGE "Enables flags for test coverage" OFF) option (ENABLE_TESTS "Enables tests" ${NOT_MSVC}) @@ -86,7 +81,7 @@ endif () if (CMAKE_LIBRARY_ARCHITECTURE MATCHES "amd64.*|x86_64.*|AMD64.*") option (USE_INTERNAL_MEMCPY "Use internal implementation of 'memcpy' function instead of provided by libc. Only for x86_64." ON) - if (ARCH_LINUX) + if (OS_LINUX) option (GLIBC_COMPATIBILITY "Set to TRUE to enable compatibility with older glibc libraries. Only for x86_64, Linux. Implies USE_INTERNAL_MEMCPY." ON) endif() endif () @@ -95,15 +90,7 @@ if (GLIBC_COMPATIBILITY) set (USE_INTERNAL_MEMCPY ON) endif () -if (CXX11_ABI STREQUAL ENABLE) - set (CXX11_ABI_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=1") -elseif (CXX11_ABI STREQUAL DISABLE) - set (CXX11_ABI_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=0") -else () - set (CXX11_ABI_FLAGS "") -endif () - -set (COMPILER_FLAGS "${COMPILER_FLAGS} ${CXX11_ABI_FLAGS}") +set (COMPILER_FLAGS "${COMPILER_FLAGS}") string(REGEX MATCH "-?[0-9]+(.[0-9]+)?$" COMPILER_POSTFIX ${CMAKE_CXX_COMPILER}) @@ -150,26 +137,29 @@ else () endif () set (CMAKE_BUILD_COLOR_MAKEFILE ON) -set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS} ${GLIBC_COMPATIBILITY_COMPILE_FLAGS}") +set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${PLATFORM_EXTRA_CXX_FLAG} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CXX_WARNING_FLAGS}") #set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_CXX_FLAGS_ADD}") -set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${GLIBC_COMPATIBILITY_COMPILE_FLAGS} ${CMAKE_C_FLAGS_ADD}") +set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} -fno-omit-frame-pointer ${COMMON_WARNING_FLAGS} ${CMAKE_C_FLAGS_ADD}") #set (CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O0 -g3 -ggdb3 -fno-inline ${CMAKE_C_FLAGS_ADD}") -if (MAKE_STATIC_LIBRARIES AND NOT APPLE AND NOT (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND ARCH_FREEBSD)) +if (MAKE_STATIC_LIBRARIES AND NOT APPLE AND NOT 
(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND OS_FREEBSD)) set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -static-libstdc++") + + # Along with executables, we also build example of shared library for "library dictionary source"; and it also should be self-contained. + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-libgcc -static-libstdc++") endif () set(THREADS_PREFER_PTHREAD_FLAG ON) include (cmake/test_compiler.cmake) -if (ARCH_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${GLIBC_COMPATIBILITY_LINK_FLAGS} ${CXX11_ABI_FLAGS}") +if (OS_LINUX AND CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}") option (USE_LIBCXX "Use libc++ and libc++abi instead of libstdc++ (only make sense on Linux with Clang)" ${HAVE_LIBCXX}) set (LIBCXX_PATH "" CACHE STRING "Use custom path for libc++. It should be used for MSan.") @@ -199,8 +189,6 @@ if (NOT MAKE_STATIC_LIBRARIES) set(CMAKE_POSITION_INDEPENDENT_CODE ON) endif () -include (cmake/sanitize.cmake) - # Using "include-what-you-use" tool. option (USE_INCLUDE_WHAT_YOU_USE "Use 'include-what-you-use' tool" OFF) if (USE_INCLUDE_WHAT_YOU_USE) @@ -237,7 +225,7 @@ else () set(NOT_UNBUNDLED 1) endif () # Using system libs can cause lot of warnings in includes. -if (UNBUNDLED OR NOT (ARCH_LINUX OR APPLE) OR ARCH_32) +if (UNBUNDLED OR NOT (OS_LINUX OR APPLE) OR ARCH_32) option (NO_WERROR "Disable -Werror compiler option" ON) endif () @@ -246,24 +234,15 @@ message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE include(GNUInstallDirs) include (cmake/find_ssl.cmake) -if (NOT OPENSSL_FOUND) - message (FATAL_ERROR "Need openssl for build. debian tip: sudo apt install libssl-dev") -endif () - include (cmake/lib_name.cmake) include (cmake/find_icu4c.cmake) include (cmake/find_boost.cmake) -# openssl, zlib before poco include (cmake/find_zlib.cmake) include (cmake/find_zstd.cmake) include (cmake/find_ltdl.cmake) # for odbc include (cmake/find_termcap.cmake) -if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/poco/cmake/FindODBC.cmake) - include (${CMAKE_CURRENT_SOURCE_DIR}/contrib/poco/cmake/FindODBC.cmake) # for poco -else () - include (cmake/find_odbc.cmake) -endif () -message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}") +include (cmake/find_odbc.cmake) +# openssl, zlib, odbc before poco include (cmake/find_poco.cmake) include (cmake/find_lz4.cmake) include (cmake/find_sparsehash.cmake) @@ -275,6 +254,9 @@ include (cmake/find_rdkafka.cmake) include (cmake/find_capnp.cmake) include (cmake/find_llvm.cmake) include (cmake/find_cpuid.cmake) +if (ENABLE_TESTS) + include (cmake/find_gtest.cmake) +endif () include (cmake/find_contrib_lib.cmake) find_contrib_lib(cityhash) diff --git a/README.md b/README.md index 905e6e5ba90..8cb9fa3379e 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ ClickHouse is an open-source column-oriented database management system that allows generating analytical data reports in real time. +[![Build Status](https://travis-ci.org/yandex/ClickHouse.svg?branch=master)](https://travis-ci.org/yandex/ClickHouse) + ## Useful links * [Official website](https://clickhouse.yandex/) has quick high-level overview of ClickHouse on main page. @@ -9,5 +11,3 @@ ClickHouse is an open-source column-oriented database management system that all * [Documentation](https://clickhouse.yandex/docs/en/) provides more in-depth information. 
* [Contacts](https://clickhouse.yandex/#contacts) can help to get your questions answered if there are any. - -[![Build Status](https://travis-ci.org/yandex/ClickHouse.svg?branch=master)](https://travis-ci.org/yandex/ClickHouse) diff --git a/ci/install-libraries.sh b/ci/install-libraries.sh index 4868221b342..d7fb856dbed 100755 --- a/ci/install-libraries.sh +++ b/ci/install-libraries.sh @@ -3,11 +3,8 @@ set -e -x source default-config -./install-os-packages.sh libssl-dev ./install-os-packages.sh libicu-dev ./install-os-packages.sh libreadline-dev -./install-os-packages.sh libmariadbclient-dev -./install-os-packages.sh libunixodbc-dev if [[ "$ENABLE_EMBEDDED_COMPILER" == 1 && "$USE_LLVM_LIBRARIES_FROM_SYSTEM" == 1 ]]; then ./install-os-packages.sh llvm-libs-5.0 diff --git a/ci/install-os-packages.sh b/ci/install-os-packages.sh index 4aae6268aa1..fe5b4f84833 100755 --- a/ci/install-os-packages.sh +++ b/ci/install-os-packages.sh @@ -43,21 +43,12 @@ case $PACKAGE_MANAGER in jq) $SUDO apt-get install -y jq ;; - libssl-dev) - $SUDO apt-get install -y libssl-dev - ;; libicu-dev) $SUDO apt-get install -y libicu-dev ;; libreadline-dev) $SUDO apt-get install -y libreadline-dev ;; - libunixodbc-dev) - $SUDO apt-get install -y unixodbc-dev - ;; - libmariadbclient-dev) - $SUDO apt-get install -y libmariadbclient-dev - ;; llvm-libs*) $SUDO apt-get install -y ${WHAT/llvm-libs/liblld}-dev ${WHAT/llvm-libs/libclang}-dev ;; @@ -97,22 +88,12 @@ case $PACKAGE_MANAGER in jq) $SUDO yum install -y jq ;; - libssl-dev) - $SUDO yum install -y openssl-devel - ;; libicu-dev) $SUDO yum install -y libicu-devel ;; libreadline-dev) $SUDO yum install -y readline-devel ;; - libunixodbc-dev) - $SUDO yum install -y unixODBC-devel libtool-ltdl-devel - ;; - libmariadbclient-dev) - echo "There is no package with static mysqlclient library"; echo 1; - #$SUDO yum install -y mariadb-connector-c-devel - ;; *) echo "Unknown package"; exit 1; ;; @@ -146,21 +127,12 @@ case $PACKAGE_MANAGER in jq) $SUDO pkg install -y jq ;; - libssl-dev) - $SUDO pkg install -y openssl - ;; libicu-dev) $SUDO pkg install -y icu ;; libreadline-dev) $SUDO pkg install -y readline ;; - libunixodbc-dev) - $SUDO pkg install -y unixODBC libltdl - ;; - libmariadbclient-dev) - $SUDO pkg install -y mariadb102-client - ;; *) echo "Unknown package"; exit 1; ;; diff --git a/ci/jobs/quick-build/run.sh b/ci/jobs/quick-build/run.sh index 5fe57457645..6a948c560ee 100755 --- a/ci/jobs/quick-build/run.sh +++ b/ci/jobs/quick-build/run.sh @@ -21,7 +21,7 @@ BUILD_TARGETS=clickhouse BUILD_TYPE=Debug ENABLE_EMBEDDED_COMPILER=0 -CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_TCMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_POCO_ODBC=0 -D ENABLE_MYSQL=0" +CMAKE_FLAGS="-D CMAKE_C_FLAGS_ADD=-g0 -D CMAKE_CXX_FLAGS_ADD=-g0 -D ENABLE_JEMALLOC=0 -D ENABLE_CAPNP=0 -D ENABLE_RDKAFKA=0 -D ENABLE_UNWIND=0 -D ENABLE_ICU=0 -D ENABLE_POCO_MONGODB=0 -D ENABLE_POCO_NETSSL=0 -D ENABLE_POCO_ODBC=0 -D ENABLE_ODBC=0 -D ENABLE_MYSQL=0" [[ $(uname) == "FreeBSD" ]] && COMPILER_PACKAGE_VERSION=devel && export COMPILER_PATH=/usr/local/bin diff --git a/cmake/Modules/FindODBC.cmake b/cmake/Modules/FindODBC.cmake new file mode 100644 index 00000000000..66d43e93d2d --- /dev/null +++ b/cmake/Modules/FindODBC.cmake @@ -0,0 +1,88 @@ +# This file copied from contrib/poco/cmake/FindODBC.cmake to allow build without submodules + +# +# Find the ODBC driver manager includes and library. 
+# +# ODBC is an open standard for connecting to different databases in a +# semi-vendor-independent fashion. First you install the ODBC driver +# manager. Then you need a driver for each separate database you want +# to connect to (unless a generic one works). VTK includes neither +# the driver manager nor the vendor-specific drivers: you have to find +# those yourself. +# +# This module defines +# ODBC_INCLUDE_DIRECTORIES, where to find sql.h +# ODBC_LIBRARIES, the libraries to link against to use ODBC +# ODBC_FOUND. If false, you cannot build anything that requires ODBC. + +option (ENABLE_ODBC "Enable ODBC" ${OS_LINUX}) +if (OS_LINUX) + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" ${NOT_UNBUNDLED}) +else () + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" OFF) +endif () + +if (USE_INTERNAL_ODBC_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/README") + message (WARNING "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive") + set (USE_INTERNAL_ODBC_LIBRARY 0) +endif () + +if (ENABLE_ODBC) + if (USE_INTERNAL_ODBC_LIBRARY) + set (ODBC_LIBRARIES unixodbc) + set (ODBC_INCLUDE_DIRECTORIES ${CMAKE_SOURCE_DIR}/contrib/unixodbc/include) + set (ODBC_FOUND 1) + set (USE_ODBC 1) + else () + find_path(ODBC_INCLUDE_DIRECTORIES + NAMES sql.h + HINTS + /usr/include + /usr/include/iodbc + /usr/include/odbc + /usr/local/include + /usr/local/include/iodbc + /usr/local/include/odbc + /usr/local/iodbc/include + /usr/local/odbc/include + "C:/Program Files/ODBC/include" + "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" + "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" + "C:/ODBC/include" + DOC "Specify the directory containing sql.h." + ) + + find_library(ODBC_LIBRARIES + NAMES iodbc odbc iodbcinst odbcinst odbc32 + HINTS + /usr/lib + /usr/lib/iodbc + /usr/lib/odbc + /usr/local/lib + /usr/local/lib/iodbc + /usr/local/lib/odbc + /usr/local/iodbc/lib + /usr/local/odbc/lib + "C:/Program Files/ODBC/lib" + "C:/ODBC/lib/debug" + "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" + DOC "Specify the ODBC driver manager library here." 
+ ) + + # MinGW find usually fails + if(MINGW) + set(ODBC_INCLUDE_DIRECTORIES ".") + set(ODBC_LIBRARIES odbc32) + endif() + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(ODBC + DEFAULT_MSG + ODBC_INCLUDE_DIRECTORIES + ODBC_LIBRARIES) + + mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES) + endif () +endif () + +message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}") diff --git a/cmake/arch.cmake b/cmake/arch.cmake index ba446d95676..65361386035 100644 --- a/cmake/arch.cmake +++ b/cmake/arch.cmake @@ -11,19 +11,12 @@ if ( ( ARCH_ARM AND NOT ARCH_AARCH64 ) OR ARCH_I386) set (ARCH_32 1) message (WARNING "Support for 32bit platforms is highly experimental") endif () + if (CMAKE_SYSTEM MATCHES "Linux") - set (ARCH_LINUX 1) + set (OS_LINUX 1) endif () if (CMAKE_SYSTEM MATCHES "FreeBSD") - set (ARCH_FREEBSD 1) -endif () - -if (NOT MSVC) - set (NOT_MSVC 1) -endif () - -if (NOT APPLE) - set (NOT_APPLE 1) + set (OS_FREEBSD 1) endif () if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") diff --git a/cmake/find_cpuid.cmake b/cmake/find_cpuid.cmake index 6a4361dc42c..d02336021bb 100644 --- a/cmake/find_cpuid.cmake +++ b/cmake/find_cpuid.cmake @@ -2,7 +2,7 @@ # TODO: test new libcpuid - maybe already fixed if (NOT ARCH_ARM) - if (ARCH_FREEBSD) + if (OS_FREEBSD) set (DEFAULT_USE_INTERNAL_CPUID_LIBRARY 1) else () set (DEFAULT_USE_INTERNAL_CPUID_LIBRARY ${NOT_UNBUNDLED}) diff --git a/cmake/find_execinfo.cmake b/cmake/find_execinfo.cmake index 05dd72dbb3d..650d279983c 100644 --- a/cmake/find_execinfo.cmake +++ b/cmake/find_execinfo.cmake @@ -1,4 +1,4 @@ -if (ARCH_FREEBSD) +if (OS_FREEBSD) find_library (EXECINFO_LIBRARY execinfo) find_library (ELF_LIBRARY elf) message (STATUS "Using execinfo: ${EXECINFO_LIBRARY}") diff --git a/cmake/find_llvm.cmake b/cmake/find_llvm.cmake index 6e45f715552..b10a8cb87d4 100644 --- a/cmake/find_llvm.cmake +++ b/cmake/find_llvm.cmake @@ -24,6 +24,15 @@ if (ENABLE_EMBEDDED_COMPILER) endif () endif () + if (LLVM_FOUND) + find_library (LLD_LIBRARY_TEST lldCore PATHS ${LLVM_LIBRARY_DIRS}) + find_path (LLD_INCLUDE_DIR_TEST NAMES lld/Core/AbsoluteAtom.h PATHS ${LLVM_INCLUDE_DIRS}) + if (NOT LLD_LIBRARY_TEST OR NOT LLD_INCLUDE_DIR_TEST) + set (LLVM_FOUND 0) + message(WARNING "liblld (${LLD_LIBRARY_TEST}, ${LLD_INCLUDE_DIR_TEST}) not found in ${LLVM_INCLUDE_DIRS} ${LLVM_LIBRARY_DIRS}. Disabling internal compiler.") + endif () + endif () + if (LLVM_FOUND) # Remove dynamically-linked zlib and libedit from LLVM's dependencies: set_target_properties(LLVMSupport PROPERTIES INTERFACE_LINK_LIBRARIES "-lpthread;LLVMDemangle;${ZLIB_LIBRARIES}") diff --git a/cmake/find_ltdl.cmake b/cmake/find_ltdl.cmake index 935de0d4124..18003618dbd 100644 --- a/cmake/find_ltdl.cmake +++ b/cmake/find_ltdl.cmake @@ -1,3 +1,5 @@ -set (LTDL_PATHS "/usr/local/opt/libtool/lib") -find_library (LTDL_LIBRARY ltdl PATHS ${LTDL_PATHS}) -message (STATUS "Using ltdl: ${LTDL_LIBRARY}") +if (ENABLE_ODBC AND NOT USE_INTERNAL_ODBC_LIBRARY) + set (LTDL_PATHS "/usr/local/opt/libtool/lib") + find_library (LTDL_LIBRARY ltdl PATHS ${LTDL_PATHS}) + message (STATUS "Using ltdl: ${LTDL_LIBRARY}") +endif () diff --git a/cmake/find_odbc.cmake b/cmake/find_odbc.cmake index 338108910bf..95acf40b2b4 100644 --- a/cmake/find_odbc.cmake +++ b/cmake/find_odbc.cmake @@ -13,54 +13,77 @@ # This module defines # ODBC_INCLUDE_DIRECTORIES, where to find sql.h # ODBC_LIBRARIES, the libraries to link against to use ODBC -# ODBC_FOUND. If false, you cannot build anything that requires MySQL. 
+# ODBC_FOUND. If false, you cannot build anything that requires ODBC. -find_path(ODBC_INCLUDE_DIRECTORIES - NAMES sql.h - HINTS - /usr/include - /usr/include/odbc - /usr/include/iodbc - /usr/local/include - /usr/local/include/odbc - /usr/local/include/iodbc - /usr/local/odbc/include - /usr/local/iodbc/include - "C:/Program Files/ODBC/include" - "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" - "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" - "C:/ODBC/include" - DOC "Specify the directory containing sql.h." -) +option (ENABLE_ODBC "Enable ODBC" ${OS_LINUX}) +if (OS_LINUX) + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" ${NOT_UNBUNDLED}) +else () + option (USE_INTERNAL_ODBC_LIBRARY "Set to FALSE to use system odbc library instead of bundled" OFF) +endif () -find_library(ODBC_LIBRARIES - NAMES iodbc odbc iodbcinst odbcinst odbc32 - HINTS - /usr/lib - /usr/lib/odbc - /usr/lib/iodbc - /usr/local/lib - /usr/local/lib/odbc - /usr/local/lib/iodbc - /usr/local/odbc/lib - /usr/local/iodbc/lib - "C:/Program Files/ODBC/lib" - "C:/ODBC/lib/debug" - "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" - DOC "Specify the ODBC driver manager library here." -) +if (USE_INTERNAL_ODBC_LIBRARY AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/unixodbc/README") + message (WARNING "submodule contrib/unixodbc is missing. to fix try run: \n git submodule update --init --recursive") + set (USE_INTERNAL_ODBC_LIBRARY 0) +endif () -# MinGW find usually fails -if(MINGW) - set(ODBC_INCLUDE_DIRECTORIES ".") - set(ODBC_LIBRARIES odbc32) -endif() - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(ODBC - DEFAULT_MSG - ODBC_INCLUDE_DIRECTORIES - ODBC_LIBRARIES - ) +set (ODBC_INCLUDE_DIRECTORIES ) # Include directories will be either used automatically by target_include_directories or set later. -mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES) +if (ENABLE_ODBC) + if (USE_INTERNAL_ODBC_LIBRARY) + set (ODBC_LIBRARIES unixodbc) + set (ODBC_FOUND 1) + set (USE_ODBC 1) + else () + find_path(ODBC_INCLUDE_DIRECTORIES + NAMES sql.h + HINTS + /usr/include + /usr/include/iodbc + /usr/include/odbc + /usr/local/include + /usr/local/include/iodbc + /usr/local/include/odbc + /usr/local/iodbc/include + /usr/local/odbc/include + "C:/Program Files/ODBC/include" + "C:/Program Files/Microsoft SDKs/Windows/v7.0/include" + "C:/Program Files/Microsoft SDKs/Windows/v6.0a/include" + "C:/ODBC/include" + DOC "Specify the directory containing sql.h." + ) + + find_library(ODBC_LIBRARIES + NAMES iodbc odbc iodbcinst odbcinst odbc32 + HINTS + /usr/lib + /usr/lib/iodbc + /usr/lib/odbc + /usr/local/lib + /usr/local/lib/iodbc + /usr/local/lib/odbc + /usr/local/iodbc/lib + /usr/local/odbc/lib + "C:/Program Files/ODBC/lib" + "C:/ODBC/lib/debug" + "C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/Lib" + DOC "Specify the ODBC driver manager library here." 
+ ) + + # MinGW find usually fails + if(MINGW) + set(ODBC_INCLUDE_DIRECTORIES ".") + set(ODBC_LIBRARIES odbc32) + endif() + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(ODBC + DEFAULT_MSG + ODBC_INCLUDE_DIRECTORIES + ODBC_LIBRARIES) + + mark_as_advanced(ODBC_FOUND ODBC_LIBRARIES ODBC_INCLUDE_DIRECTORIES) + endif () +endif () + +message (STATUS "Using odbc: ${ODBC_INCLUDE_DIRECTORIES} : ${ODBC_LIBRARIES}") diff --git a/cmake/find_poco.cmake b/cmake/find_poco.cmake index 947d31951c9..f0bc535f614 100644 --- a/cmake/find_poco.cmake +++ b/cmake/find_poco.cmake @@ -92,8 +92,7 @@ elseif (NOT MISSING_INTERNAL_POCO_LIBRARY) endif () endif () - # TODO! fix internal ssl - if (OPENSSL_FOUND AND NOT USE_INTERNAL_SSL_LIBRARY AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) + if (OPENSSL_FOUND AND (NOT DEFINED ENABLE_POCO_NETSSL OR ENABLE_POCO_NETSSL)) set (Poco_NetSSL_LIBRARY PocoNetSSL) set (Poco_Crypto_LIBRARY PocoCrypto) endif () diff --git a/cmake/find_rdkafka.cmake b/cmake/find_rdkafka.cmake index 396be18cd1c..f05ced94707 100644 --- a/cmake/find_rdkafka.cmake +++ b/cmake/find_rdkafka.cmake @@ -13,7 +13,7 @@ endif () if (NOT USE_INTERNAL_RDKAFKA_LIBRARY) find_library (RDKAFKA_LIB rdkafka) find_path (RDKAFKA_INCLUDE_DIR NAMES librdkafka/rdkafka.h PATHS ${RDKAFKA_INCLUDE_PATHS}) - if (USE_STATIC_LIBRARIES AND NOT ARCH_FREEBSD) + if (USE_STATIC_LIBRARIES AND NOT OS_FREEBSD) find_library (SASL2_LIBRARY sasl2) endif () endif () diff --git a/cmake/find_rt.cmake b/cmake/find_rt.cmake index 82ec314d195..25614fe55eb 100644 --- a/cmake/find_rt.cmake +++ b/cmake/find_rt.cmake @@ -1,7 +1,7 @@ if (APPLE) # lib from libs/libcommon set (RT_LIBRARY "apple_rt") -elseif (ARCH_FREEBSD) +elseif (OS_FREEBSD) find_library (RT_LIBRARY rt) else () set (RT_LIBRARY "") diff --git a/cmake/find_ssl.cmake b/cmake/find_ssl.cmake index ec40e498da1..51e869f86ea 100644 --- a/cmake/find_ssl.cmake +++ b/cmake/find_ssl.cmake @@ -1,4 +1,4 @@ -option (USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${MSVC}) +option (USE_INTERNAL_SSL_LIBRARY "Set to FALSE to use system *ssl library instead of bundled" ${OS_LINUX}) set (OPENSSL_USE_STATIC_LIBS ${USE_STATIC_LIBRARIES}) diff --git a/cmake/find_zlib.cmake b/cmake/find_zlib.cmake index 17350f9fd58..0e198c9bb0f 100644 --- a/cmake/find_zlib.cmake +++ b/cmake/find_zlib.cmake @@ -17,7 +17,7 @@ if (NOT ZLIB_FOUND) set (USE_INTERNAL_ZLIB_LIBRARY 1) set (ZLIB_COMPAT 1) # for zlib-ng, also enables WITH_GZFILEOP set (WITH_NATIVE_INSTRUCTIONS ${ARCHNATIVE}) - if (ARCH_FREEBSD OR ARCH_I386) + if (OS_FREEBSD OR ARCH_I386) set (WITH_OPTIM 0 CACHE INTERNAL "") # Bug in assembler endif () if (ARCH_AARCH64) diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index bac27578663..a90533345e6 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -1,27 +1,37 @@ +option (SANITIZE "Enable sanitizer: address, memory, thread, undefined" "") + set (SAN_FLAGS "${SAN_FLAGS} -g -fno-omit-frame-pointer -DSANITIZER") -if (SAN_DEBUG) - set (SAN_FLAGS "${SAN_FLAGS} -O0") -else () - set (SAN_FLAGS "${SAN_FLAGS} -O3") -endif () -set (CMAKE_CXX_FLAGS_ASAN "${CMAKE_CXX_FLAGS_ASAN} ${SAN_FLAGS} -fsanitize=address") -set (CMAKE_C_FLAGS_ASAN "${CMAKE_C_FLAGS_ASAN} ${SAN_FLAGS} -fsanitize=address") -set (CMAKE_EXE_LINKER_FLAGS_ASAN "${CMAKE_EXE_LINKER_FLAGS_ASAN} -fsanitize=address") -set (CMAKE_CXX_FLAGS_UBSAN "${CMAKE_CXX_FLAGS_UBSAN} ${SAN_FLAGS} -fsanitize=undefined") -set (CMAKE_C_FLAGS_UBSAN "${CMAKE_C_FLAGS_UBSAN} ${SAN_FLAGS} 
-fsanitize=undefined") -set (CMAKE_EXE_LINKER_FLAGS_UBSAN "${CMAKE_EXE_LINKER_FLAGS_UBSAN} -fsanitize=undefined") -set (CMAKE_CXX_FLAGS_MSAN "${CMAKE_CXX_FLAGS_MSAN} ${SAN_FLAGS} -fsanitize=memory") -set (CMAKE_C_FLAGS_MSAN "${CMAKE_C_FLAGS_MSAN} ${SAN_FLAGS} -fsanitize=memory") -set (CMAKE_EXE_LINKER_FLAGS_MSAN "${CMAKE_EXE_LINKER_FLAGS_MSAN} -fsanitize=memory") -set (CMAKE_CXX_FLAGS_TSAN "${CMAKE_CXX_FLAGS_TSAN} ${SAN_FLAGS} -fsanitize=thread") -set (CMAKE_C_FLAGS_TSAN "${CMAKE_C_FLAGS_TSAN} ${SAN_FLAGS} -fsanitize=thread") -set (CMAKE_EXE_LINKER_FLAGS_TSAN "${CMAKE_EXE_LINKER_FLAGS_TSAN} -fsanitize=thread") - -# clang use static linking by default -if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS_ASAN "${CMAKE_EXE_LINKER_FLAGS_ASAN} -static-libasan") - set (CMAKE_EXE_LINKER_FLAGS_UBSAN "${CMAKE_EXE_LINKER_FLAGS_UBSAN} -static-libubsan") - set (CMAKE_EXE_LINKER_FLAGS_MSAN "${CMAKE_EXE_LINKER_FLAGS_MSAN} -static-libmsan") - set (CMAKE_EXE_LINKER_FLAGS_TSAN "${CMAKE_EXE_LINKER_FLAGS_TSAN} -static-libtsan") -endif () +if (SANITIZE) + if (SANITIZE STREQUAL "address") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=address") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=address") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan") + endif () + elseif (SANITIZE STREQUAL "memory") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=memory") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=memory") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan") + endif () + elseif (SANITIZE STREQUAL "thread") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=thread") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=thread") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan") + endif () + elseif (SANITIZE STREQUAL "undefined") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=undefined") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=undefined") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined") + if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan") + endif () + else () + message (FATAL_ERROR "Unknown sanitizer type: ${SANITIZE}") + endif () +endif() diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index e2e4341e1bd..3c0e284e3b9 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -1,5 +1,11 @@ -if (NOT MSVC) - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-deprecated-declarations -Wno-non-virtual-dtor -std=c++1z") +# Third-party libraries may have substandard code. 
+ +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation -std=c++1z") +elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function -Wno-unused-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-format") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-format -std=c++1z") endif () if (USE_INTERNAL_BOOST_LIBRARY) @@ -37,6 +43,8 @@ if (USE_INTERNAL_METROHASH_LIBRARY) add_subdirectory (libmetrohash) endif () +add_subdirectory (murmurhash) + if (USE_INTERNAL_BTRIE_LIBRARY) add_subdirectory (libbtrie) endif () @@ -75,6 +83,10 @@ if (ENABLE_TCMALLOC AND USE_INTERNAL_GPERFTOOLS_LIBRARY) add_subdirectory (libtcmalloc) endif () +if (ENABLE_JEMALLOC AND USE_INTERNAL_JEMALLOC_LIBRARY) + add_subdirectory (jemalloc-cmake) +endif () + if (USE_INTERNAL_CPUID_LIBRARY) add_subdirectory (libcpuid) endif () @@ -84,11 +96,18 @@ if (USE_INTERNAL_SSL_LIBRARY) set (BUILD_SHARED 1) endif () set (USE_SHARED ${USE_STATIC_LIBRARIES}) + set (LIBRESSL_SKIP_INSTALL 1) add_subdirectory (ssl) target_include_directories(${OPENSSL_CRYPTO_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR}) target_include_directories(${OPENSSL_SSL_LIBRARY} PUBLIC ${OPENSSL_INCLUDE_DIR}) endif () +if (ENABLE_MYSQL AND USE_INTERNAL_MYSQL_LIBRARY) + add_subdirectory (mariadb-connector-c-cmake) + target_include_directories(mysqlclient PRIVATE BEFORE ${ZLIB_INCLUDE_DIR}) + target_include_directories(mysqlclient PRIVATE BEFORE ${OPENSSL_INCLUDE_DIR}) +endif () + if (USE_INTERNAL_RDKAFKA_LIBRARY) set (RDKAFKA_BUILD_EXAMPLES OFF CACHE INTERNAL "") set (RDKAFKA_BUILD_TESTS OFF CACHE INTERNAL "") @@ -112,6 +131,10 @@ if (USE_INTERNAL_RDKAFKA_LIBRARY) target_include_directories(rdkafka PRIVATE BEFORE ${ZLIB_INCLUDE_DIR}) endif () +if (ENABLE_ODBC AND USE_INTERNAL_ODBC_LIBRARY) + add_subdirectory (unixodbc-cmake) +endif () + if (USE_INTERNAL_CAPNP_LIBRARY) set (BUILD_TESTING 0 CACHE INTERNAL "") set (_save ${CMAKE_CXX_EXTENSIONS}) @@ -128,11 +151,6 @@ if (USE_INTERNAL_POCO_LIBRARY) set (_save ${ENABLE_TESTS}) set (ENABLE_TESTS 0) set (CMAKE_DISABLE_FIND_PACKAGE_ZLIB 1) - if (USE_INTERNAL_SSL_LIBRARY OR (DEFINED ENABLE_POCO_NETSSL AND NOT ENABLE_POCO_NETSSL)) - set (DISABLE_INTERNAL_OPENSSL 1 CACHE INTERNAL "") - set (ENABLE_NETSSL 0 CACHE INTERNAL "") # TODO! - set (ENABLE_CRYPTO 0 CACHE INTERNAL "") # TODO! 
-    endif ()
     if (MSVC)
         set (ENABLE_DATA_ODBC 0 CACHE INTERNAL "") # TODO (build fail)
     endif ()
diff --git a/contrib/jemalloc b/contrib/jemalloc
new file mode 160000
index 00000000000..41b7372eade
--- /dev/null
+++ b/contrib/jemalloc
@@ -0,0 +1 @@
+Subproject commit 41b7372eadee941b9164751b8d4963f915d3ceae
diff --git a/contrib/jemalloc-cmake/CMakeLists.txt b/contrib/jemalloc-cmake/CMakeLists.txt
new file mode 100644
index 00000000000..d60d34604a9
--- /dev/null
+++ b/contrib/jemalloc-cmake/CMakeLists.txt
@@ -0,0 +1,52 @@
+set(JEMALLOC_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/jemalloc)
+
+set(SRCS
+${JEMALLOC_SOURCE_DIR}/src/arena.c
+${JEMALLOC_SOURCE_DIR}/src/background_thread.c
+${JEMALLOC_SOURCE_DIR}/src/base.c
+${JEMALLOC_SOURCE_DIR}/src/bin.c
+${JEMALLOC_SOURCE_DIR}/src/bitmap.c
+${JEMALLOC_SOURCE_DIR}/src/ckh.c
+${JEMALLOC_SOURCE_DIR}/src/ctl.c
+${JEMALLOC_SOURCE_DIR}/src/div.c
+${JEMALLOC_SOURCE_DIR}/src/extent.c
+${JEMALLOC_SOURCE_DIR}/src/extent_dss.c
+${JEMALLOC_SOURCE_DIR}/src/extent_mmap.c
+${JEMALLOC_SOURCE_DIR}/src/hash.c
+${JEMALLOC_SOURCE_DIR}/src/hook.c
+${JEMALLOC_SOURCE_DIR}/src/jemalloc.c
+${JEMALLOC_SOURCE_DIR}/src/jemalloc_cpp.cpp
+${JEMALLOC_SOURCE_DIR}/src/large.c
+${JEMALLOC_SOURCE_DIR}/src/log.c
+${JEMALLOC_SOURCE_DIR}/src/malloc_io.c
+${JEMALLOC_SOURCE_DIR}/src/mutex.c
+${JEMALLOC_SOURCE_DIR}/src/mutex_pool.c
+${JEMALLOC_SOURCE_DIR}/src/nstime.c
+${JEMALLOC_SOURCE_DIR}/src/pages.c
+${JEMALLOC_SOURCE_DIR}/src/prng.c
+${JEMALLOC_SOURCE_DIR}/src/prof.c
+${JEMALLOC_SOURCE_DIR}/src/rtree.c
+${JEMALLOC_SOURCE_DIR}/src/sc.c
+${JEMALLOC_SOURCE_DIR}/src/stats.c
+${JEMALLOC_SOURCE_DIR}/src/sz.c
+${JEMALLOC_SOURCE_DIR}/src/tcache.c
+${JEMALLOC_SOURCE_DIR}/src/test_hooks.c
+${JEMALLOC_SOURCE_DIR}/src/ticker.c
+${JEMALLOC_SOURCE_DIR}/src/tsd.c
+${JEMALLOC_SOURCE_DIR}/src/witness.c
+)
+
+if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
+    list(APPEND SRCS ${JEMALLOC_SOURCE_DIR}/src/zone.c)
+endif()
+
+add_library(jemalloc STATIC ${SRCS})
+
+target_include_directories(jemalloc PUBLIC
+    ${CMAKE_CURRENT_SOURCE_DIR}/include
+    ${CMAKE_CURRENT_SOURCE_DIR}/include_linux_x86_64) # jemalloc.h
+
+target_include_directories(jemalloc PRIVATE
+    ${JEMALLOC_SOURCE_DIR}/include)
+
+target_compile_definitions(jemalloc PRIVATE -DJEMALLOC_NO_PRIVATE_NAMESPACE)
diff --git a/contrib/jemalloc-cmake/README b/contrib/jemalloc-cmake/README
new file mode 100644
index 00000000000..0af9c4f0e45
--- /dev/null
+++ b/contrib/jemalloc-cmake/README
@@ -0,0 +1 @@
+This allows integrating jemalloc into a CMake project.
diff --git a/contrib/jemalloc-cmake/include/jemalloc/jemalloc.h b/contrib/jemalloc-cmake/include/jemalloc/jemalloc.h
new file mode 100644
index 00000000000..d06243c5239
--- /dev/null
+++ b/contrib/jemalloc-cmake/include/jemalloc/jemalloc.h
@@ -0,0 +1,16 @@
+#pragma once
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <jemalloc/jemalloc_defs.h>
+#include <jemalloc/jemalloc_rename.h>
+#include <jemalloc/jemalloc_macros.h>
+#include <jemalloc/jemalloc_protos.h>
+#include <jemalloc/jemalloc_typedefs.h>
+
+#ifdef __cplusplus
+}
+#endif
+
diff --git a/contrib/jemalloc-cmake/include/jemalloc/jemalloc_rename.h b/contrib/jemalloc-cmake/include/jemalloc/jemalloc_rename.h
new file mode 100644
index 00000000000..a2ea2dd3533
--- /dev/null
+++ b/contrib/jemalloc-cmake/include/jemalloc/jemalloc_rename.h
@@ -0,0 +1,29 @@
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */ +#ifndef JEMALLOC_NO_RENAME +# define je_aligned_alloc aligned_alloc +# define je_calloc calloc +# define je_dallocx dallocx +# define je_free free +# define je_mallctl mallctl +# define je_mallctlbymib mallctlbymib +# define je_mallctlnametomib mallctlnametomib +# define je_malloc malloc +# define je_malloc_conf malloc_conf +# define je_malloc_message malloc_message +# define je_malloc_stats_print malloc_stats_print +# define je_malloc_usable_size malloc_usable_size +# define je_mallocx mallocx +# define je_nallocx nallocx +# define je_posix_memalign posix_memalign +# define je_rallocx rallocx +# define je_realloc realloc +# define je_sallocx sallocx +# define je_sdallocx sdallocx +# define je_xallocx xallocx +# define je_memalign memalign +# define je_valloc valloc +#endif diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/README b/contrib/jemalloc-cmake/include_linux_x86_64/README new file mode 100644 index 00000000000..bf7663bda8d --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/README @@ -0,0 +1,7 @@ +Here are pre-generated files from jemalloc on Linux x86_64. +You can obtain these files by running ./autogen.sh inside jemalloc source directory. + +Added #define GNU_SOURCE +Added JEMALLOC_OVERRIDE___POSIX_MEMALIGN because why not. +Removed JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF because it's non standard. +Removed JEMALLOC_PURGE_MADVISE_FREE because it's available only from Linux 4.5. diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h new file mode 100644 index 00000000000..43936e8eba0 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_internal_defs.h @@ -0,0 +1,373 @@ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ + +#ifndef _GNU_SOURCE + #define _GNU_SOURCE +#endif + +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE___LIBC_CALLOC +#define JEMALLOC_OVERRIDE___LIBC_FREE +#define JEMALLOC_OVERRIDE___LIBC_MALLOC +#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#define JEMALLOC_OVERRIDE___LIBC_REALLOC +#define JEMALLOC_OVERRIDE___LIBC_VALLOC +#define JEMALLOC_OVERRIDE___POSIX_MEMALIGN + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT __asm__ volatile("pause") +/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */ +#define HAVE_CPU_SPINWAIT 1 + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#define LG_VADDR 48 + +/* Defined if C11 atomics are available. 
*/ +#define JEMALLOC_C11_ATOMICS 1 + +/* Defined if GCC __atomic atomics are available. */ +#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 + +/* Defined if GCC __sync atomics are available. */ +#define JEMALLOC_GCC_SYNC_ATOMICS 1 + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. + */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +/* #undef JEMALLOC_OSSPIN */ + +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +// Don't want dependency on newer GLIBC +//#define JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. + */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#define JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ +#define JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. 
*/ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 12 + +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. + */ +#define LG_HUGEPAGE 21 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_RETAIN + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * If defined, enable logging facilities. We make this a configure option to + * avoid taking extra branches everywhere. + */ +/* #undef JEMALLOC_LOG */ + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE + +/* + * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE + * arguments to madvise(2). + */ +#define JEMALLOC_HAVE_MADVISE_HUGE + +/* + * Methods for purging unused pages differ between operating systems. 
+ * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. + */ +//#define JEMALLOC_PURGE_MADVISE_FREE +#define JEMALLOC_PURGE_MADVISE_DONTNEED +#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS + +/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */ +/* #undef JEMALLOC_DEFINE_MADVISE_FREE */ + +/* + * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise. + */ +#define JEMALLOC_MADVISE_DONTDUMP + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. + */ +/* #undef JEMALLOC_THP */ + +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 3 + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#define JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* pthread support */ +#define JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#define JEMALLOC_HAVE_DLSYM + +/* Adaptive mutex support in pthreads. */ +#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* GNU specific sched_getcpu support */ +#define JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#define JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. + */ +#define JEMALLOC_BACKGROUND_THREAD 1 + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +/* #undef JEMALLOC_EXPORT */ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "" + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#define JEMALLOC_IS_MALLOC 1 + +/* + * Defined if strerror_r returns char * if _GNU_SOURCE is defined. 
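A hedged sketch of the MADV_DONTNEED purge path described above (Linux-specific, illustrative only, not part of the patch): after the call the kernel discards the dirty anonymous pages, and they read back zero-filled on the next touch.

#include <assert.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1 << 21;  /* one 2^LG_HUGEPAGE-sized extent */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;
    memset(p, 0xAB, len);            /* dirty the pages */
    madvise(p, len, MADV_DONTNEED);  /* purge: discard immediately */
    assert(p[0] == 0);               /* demand-zeroed on the next touch */
    munmap(p, len);
    return 0;
}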
*/ +#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE + +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_preamble.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_preamble.h new file mode 100644 index 00000000000..c150785fb4a --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/internal/jemalloc_preamble.h @@ -0,0 +1,194 @@ +#ifndef JEMALLOC_PREAMBLE_H +#define JEMALLOC_PREAMBLE_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include <sys/ktrace.h> +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# undef JEMALLOC_IS_MALLOC +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "jemalloc/jemalloc.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) je_##n +# include "jemalloc/jemalloc.h" +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include <libkern/OSAtomic.h> +#endif + +#ifdef JEMALLOC_ZONE +#include <mach/mach_error.h> +#include <mach/mach_init.h> +#include <mach/vm_map.h> +#endif + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* + * Note that the ordering matters here; the hook itself is name-mangled. We + * want the inclusion of hooks to happen early, so that we hook as much as + * possible. + */ +#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE +# ifndef JEMALLOC_JET +# include "jemalloc/internal/private_namespace.h" +# else +# include "jemalloc/internal/private_namespace_jet.h" +# endif +#endif +#include "jemalloc/internal/test_hooks.h" + +#ifdef JEMALLOC_DEFINE_MADVISE_FREE +# define JEMALLOC_MADV_FREE 8 +#endif + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool have_madvise_huge = +#ifdef JEMALLOC_HAVE_MADVISE_HUGE + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; +/* + * Undocumented, for jemalloc development use only at the moment. See the note + * in jemalloc/internal/log.h. + */ +static const bool config_log = +#ifdef JEMALLOC_LOG + true +#else + false +#endif + ; +#ifdef JEMALLOC_HAVE_SCHED_GETCPU +/* Currently percpu_arena depends on sched_getcpu.
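An aside on the idiom used throughout this header (illustrative, not part of the patch): each generated #ifdef is converted into an ordinary constant, so feature tests elsewhere in the code are plain if-statements that the compiler constant-folds. A self-contained sketch of the same pattern, with a made-up feature macro:

#include <stdbool.h>
#include <stdio.h>

/* Pretend this macro came from a generated *_defs.h header. */
#define HAVE_FANCY_FEATURE

static const bool have_fancy_feature =
#ifdef HAVE_FANCY_FEATURE
    true
#else
    false
#endif
    ;

int main(void)
{
    if (have_fancy_feature)   /* an ordinary branch, folded at compile time */
        puts("fast path");
    else
        puts("fallback");     /* eliminated as dead code when the macro is set */
    return 0;
}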
*/ +#define JEMALLOC_PERCPU_ARENA +#endif +static const bool have_percpu_arena = +#ifdef JEMALLOC_PERCPU_ARENA + true +#else + false +#endif + ; +/* + * Undocumented, and not recommended; the application should take full + * responsibility for tracking provenance. + */ +static const bool force_ivsalloc = +#ifdef JEMALLOC_FORCE_IVSALLOC + true +#else + false +#endif + ; +static const bool have_background_thread = +#ifdef JEMALLOC_BACKGROUND_THREAD + true +#else + false +#endif + ; + +#endif /* JEMALLOC_PREAMBLE_H */ diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_defs.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_defs.h new file mode 100644 index 00000000000..d1389237a77 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_defs.h @@ -0,0 +1,43 @@ +/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ +/* Defined if __attribute__((...)) syntax is supported. */ +#define JEMALLOC_HAVE_ATTR + +/* Defined if alloc_size attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE + +/* Defined if format(printf, ...) attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE_MEMALIGN +#define JEMALLOC_OVERRIDE_VALLOC + +/* + * At least Linux omits the "const" in: + * + * size_t malloc_usable_size(const void *ptr); + * + * Match the operating system's prototype. + */ +#define JEMALLOC_USABLE_SIZE_CONST + +/* + * If defined, specify throw() for the public function prototypes when compiling + * with C++. The only justification for this is to match the prototypes that + * glibc defines. + */ +#define JEMALLOC_USE_CXX_THROW + +#ifdef _MSC_VER +# ifdef _WIN64 +# define LG_SIZEOF_PTR_WIN 3 +# else +# define LG_SIZEOF_PTR_WIN 2 +# endif +#endif + +/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ +#define LG_SIZEOF_PTR 3 diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_macros.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_macros.h new file mode 100644 index 00000000000..7432f1cda53 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_macros.h @@ -0,0 +1,122 @@ +#include <stdlib.h> +#include <stdbool.h> +#include <stdint.h> +#include <limits.h> +#include <strings.h> + +#define JEMALLOC_VERSION "5.1.0-56-g41b7372eadee941b9164751b8d4963f915d3ceae" +#define JEMALLOC_VERSION_MAJOR 5 +#define JEMALLOC_VERSION_MINOR 1 +#define JEMALLOC_VERSION_BUGFIX 0 +#define JEMALLOC_VERSION_NREV 56 +#define JEMALLOC_VERSION_GID "41b7372eadee941b9164751b8d4963f915d3ceae" + +#define MALLOCX_LG_ALIGN(la) ((int)(la)) +#if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) +#else +# define MALLOCX_ALIGN(a) \ + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +#endif +#define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +/* + * Use as arena index in "arena.<i>.{purge,decay,dss}" and + * "stats.arenas.<i>.*" mallctl interfaces to select all arenas.
This + * definition is intentionally specified in raw decimal format to support + * cpp-based string concatenation, e.g. + * + * #define STRINGIFY_HELPER(x) #x + * #define STRINGIFY(x) STRINGIFY_HELPER(x) + * + * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, + * 0); + */ +#define MALLCTL_ARENAS_ALL 4096 +/* + * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select + * destroyed arenas. + */ +#define MALLCTL_ARENAS_DESTROYED 4097 + +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if defined(_MSC_VER) +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) +# else +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) +# endif +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) +# else +# define JEMALLOC_FORMAT_PRINTF(s, i) +# endif +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#endif diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_protos.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_protos.h new file mode 100644 index 00000000000..ff025e30fa7 --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_protos.h @@ -0,0 +1,66 @@ +/* + * The je_ prefix on the following public symbol declarations is an artifact + * of namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
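The MALLOCX_* encodings above are designed to be OR-ed together into the single flags argument of the *allocx entry points declared next. A hedged usage sketch (assumes linking against this jemalloc):

#include <jemalloc/jemalloc.h>

void example(void)
{
    /* 4 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
    void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
    if (p != NULL)
        dallocx(p, 0);
}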
+ */ +extern JEMALLOC_EXPORT const char *je_malloc_conf; +extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, + const char *s); + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; + +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, + int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, + void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); +#endif diff --git a/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_typedefs.h b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_typedefs.h new file mode 100644 index 00000000000..1a58874306e --- /dev/null +++ b/contrib/jemalloc-cmake/include_linux_x86_64/jemalloc/jemalloc_typedefs.h @@ -0,0 +1,77 @@ +typedef struct extent_hooks_s extent_hooks_t; + +/* + * void * + * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); + */ +typedef void 
*(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, + bool *, unsigned); + +/* + * bool + * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * void + * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * bool + * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, + size_t, unsigned); + +/* + * bool + * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + bool, unsigned); + +/* + * bool + * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, + bool, unsigned); + +struct extent_hooks_s { + extent_alloc_t *alloc; + extent_dalloc_t *dalloc; + extent_destroy_t *destroy; + extent_commit_t *commit; + extent_decommit_t *decommit; + extent_purge_t *purge_lazy; + extent_purge_t *purge_forced; + extent_split_t *split; + extent_merge_t *merge; +}; diff --git a/contrib/mariadb-connector-c b/contrib/mariadb-connector-c new file mode 160000 index 00000000000..a0fd36cc5a5 --- /dev/null +++ b/contrib/mariadb-connector-c @@ -0,0 +1 @@ +Subproject commit a0fd36cc5a5313414a5a2ebe9322577a29b4782a diff --git a/contrib/mariadb-connector-c-cmake/CMakeLists.txt b/contrib/mariadb-connector-c-cmake/CMakeLists.txt new file mode 100644 index 00000000000..4c1184b3edb --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/CMakeLists.txt @@ -0,0 +1,66 @@ +set(MARIADB_CLIENT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/contrib/mariadb-connector-c) +set(MARIADB_CLIENT_BINARY_DIR ${CMAKE_BINARY_DIR}/contrib/mariadb-connector-c) + +set(SRCS +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/bmove_upp.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/get_password.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_alloc.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_array.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_charset.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_compress.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_context.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_default.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_dtoa.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_errmsg.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_hash.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_init.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_io.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_list.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_ll2str.c 
+${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_loaddata.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_net.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_password.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_pvio.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_async.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_charset.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_dyncol.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_lib.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/mariadb_stmt.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_sha1.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_stmt_codec.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_string.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_time.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/ma_tls.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/gnutls.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/ma_schannel.c +${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/openssl.c +#${MARIADB_CLIENT_SOURCE_DIR}/libmariadb/secure/schannel.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/auth_gssapi_client.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/dialog.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/gssapi_client.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/gssapi_errmsg.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/mariadb_cleartext.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/my_auth.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/old_password.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sha256_pw.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sspi_client.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/auth/sspi_errmsg.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/connection/aurora.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/connection/replication.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/io/remote_io.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_npipe.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_shmem.c +${MARIADB_CLIENT_SOURCE_DIR}/plugins/pvio/pvio_socket.c +#${MARIADB_CLIENT_SOURCE_DIR}/plugins/trace/trace_example.c +${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libmariadb/ma_client_plugin.c +) + +add_library(mysqlclient STATIC ${SRCS}) + +target_link_libraries(mysqlclient ${OPENSSL_LIBRARIES}) + +target_include_directories(mysqlclient PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/include) +target_include_directories(mysqlclient PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/common/include) +target_include_directories(mysqlclient PUBLIC ${MARIADB_CLIENT_SOURCE_DIR}/include) + +target_compile_definitions(mysqlclient PRIVATE -D THREAD -D HAVE_OPENSSL -D HAVE_TLS) diff --git a/contrib/mariadb-connector-c-cmake/common/include/mysql/mysql.h b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysql.h new file mode 100644 index 00000000000..741c7ba03c9 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysql.h @@ -0,0 +1 @@ +#include diff --git a/contrib/mariadb-connector-c-cmake/common/include/mysql/mysqld_error.h b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysqld_error.h new file mode 100644 index 00000000000..95d26eef163 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/common/include/mysql/mysqld_error.h @@ -0,0 +1 @@ +#include diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/include/config.h b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/config.h new file mode 100644 index 00000000000..90c42c97df6 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/config.h @@ -0,0 +1,269 @@ + +/* + * Include file constants (processed in LibmysqlIncludeFiles.txt 1 + */ +#define HAVE_ALLOCA_H 1 +/* 
#undef HAVE_BIGENDIAN */ +#define HAVE_SETLOCALE 1 +#define HAVE_NL_LANGINFO 1 +#define HAVE_ARPA_INET_H 1 +#define HAVE_CRYPT_H 1 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_EXECINFO_H 1 +#define HAVE_FCNTL_H 1 +#define HAVE_FENV_H 1 +#define HAVE_FLOAT_H 1 +/* #undef HAVE_FPU_CONTROL_H */ +#define HAVE_GRP_H 1 +/* #undef HAVE_IEEEFP_H */ +#define HAVE_LIMITS_H 1 +#define HAVE_MALLOC_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_NETINET_IN_H 1 +#define HAVE_PATHS_H 1 +#define HAVE_PWD_H 1 +#define HAVE_SCHED_H 1 +/* #undef HAVE_SELECT_H */ +#define HAVE_STDDEF_H 1 +#define HAVE_STDINT_H 1 +#define HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_STRINGS_H 1 +/* #undef HAVE_SYNCH_H */ +/* #undef HAVE_SYS_FPU_H */ +#define HAVE_SYS_IOCTL_H 1 +#define HAVE_SYS_IPC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PRCTL_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SHM_H 1 +#define HAVE_SYS_SOCKET_H 1 +#define HAVE_SYS_STAT_H 1 +/* #undef HAVE_SYS_STREAM_H */ +#define HAVE_SYS_TIMEB_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_UN_H 1 +/* #undef HAVE_SYSENT_H */ +#define HAVE_TERMIO_H 1 +#define HAVE_TERMIOS_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_UTIME_H 1 +#define HAVE_UCONTEXT_H 1 + +/* + * function definitions - processed in LibmysqlFunctions.txt + */ +#define HAVE_ACCESS 1 +/* #undef HAVE_AIOWAIT */ +#define HAVE_ALARM 1 +/* #undef HAVE_ALLOCA */ +#define HAVE_BCMP 1 +/* #undef HAVE_BFILL */ +/* #undef HAVE_BMOVE */ +#define HAVE_BZERO 1 +#define HAVE_CLOCK_GETTIME 1 +/* #undef HAVE_COMPRESS */ +/* #undef HAVE_CRYPT */ +#define HAVE_DLERROR 1 +#define HAVE_DLOPEN 1 +#define HAVE_FCHMOD 1 +#define HAVE_FCNTL 1 +/* #undef HAVE_FCONVERT */ +#define HAVE_FDATASYNC 1 +#define HAVE_FESETROUND 1 +#define HAVE_FINITE 1 +#define HAVE_FSEEKO 1 +#define HAVE_FSYNC 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETCWD 1 +#define HAVE_GETHOSTBYADDR_R 1 +#define HAVE_GETHOSTBYNAME_R 1 +/* #undef HAVE_GETHRTIME */ +#define HAVE_GETNAMEINFO 1 +#define HAVE_GETPAGESIZE 1 +#define HAVE_GETPASS 1 +/* #undef HAVE_GETPASSPHRASE */ +#define HAVE_GETPWNAM 1 +#define HAVE_GETPWUID 1 +#define HAVE_GETRLIMIT 1 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETWD 1 +#define HAVE_GMTIME_R 1 +#define HAVE_INITGROUPS 1 +#define HAVE_LDIV 1 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LOG2 1 +#define HAVE_LONGJMP 1 +#define HAVE_LSTAT 1 +#define HAVE_MADVISE 1 +#define HAVE_MALLINFO 1 +#define HAVE_MEMALIGN 1 +#define HAVE_MEMCPY 1 +#define HAVE_MEMMOVE 1 +#define HAVE_MKSTEMP 1 +#define HAVE_MLOCK 1 +#define HAVE_MLOCKALL 1 +#define HAVE_MMAP 1 +#define HAVE_MMAP64 1 +#define HAVE_PERROR 1 +#define HAVE_POLL 1 +#define HAVE_PREAD 1 +/* #undef HAVE_PTHREAD_ATTR_CREATE */ +#define HAVE_PTHREAD_ATTR_GETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_ATTR_SETPRIO */ +#define HAVE_PTHREAD_ATTR_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_ATTR_SETSCOPE 1 +#define HAVE_PTHREAD_ATTR_SETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_CONDATTR_CREATE */ +/* #undef HAVE_PTHREAD_INIT */ +#define HAVE_PTHREAD_KEY_DELETE 1 +#define HAVE_PTHREAD_KILL 1 +#define HAVE_PTHREAD_RWLOCK_RDLOCK 1 +/* #undef HAVE_PTHREAD_SETPRIO_NP */ +#define HAVE_PTHREAD_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_SIGMASK 1 +/* #undef HAVE_PTHREAD_THREADMASK */ +/* #undef HAVE_PTHREAD_YIELD_NP */ +#define HAVE_READDIR_R 1 +#define HAVE_READLINK 1 +#define HAVE_REALPATH 1 +#define HAVE_RENAME 1 +#define HAVE_SCHED_YIELD 1 +#define HAVE_SELECT 1 +/* #undef HAVE_SETFD */ +/* #undef HAVE_SETFILEPOINTER */ +#define HAVE_SIGNAL 1 +#define HAVE_SIGACTION 1 +/* #undef 
HAVE_SIGTHREADMASK */ +#define HAVE_SIGWAIT 1 +#define HAVE_SLEEP 1 +#define HAVE_SNPRINTF 1 +/* #undef HAVE_SQLITE */ +#define HAVE_STPCPY 1 +#define HAVE_STRERROR 1 +/* #undef HAVE_STRLCPY */ +#define HAVE_STRNLEN 1 +#define HAVE_STRPBRK 1 +#define HAVE_STRSEP 1 +#define HAVE_STRSTR 1 +#define HAVE_STRTOK_R 1 +#define HAVE_STRTOL 1 +#define HAVE_STRTOLL 1 +#define HAVE_STRTOUL 1 +#define HAVE_STRTOULL 1 +/* #undef HAVE_TELL */ +/* #undef HAVE_THR_SETCONCURRENCY */ +/* #undef HAVE_THR_YIELD */ +#define HAVE_VASPRINTF 1 +#define HAVE_VSNPRINTF 1 + +/* + * types and sizes + */ +/* Types we may use */ +#define SIZEOF_CHAR 1 +#if defined(SIZEOF_CHAR) +# define HAVE_CHAR 1 +#endif + +#define SIZEOF_CHARP 8 +#if defined(SIZEOF_CHARP) +# define HAVE_CHARP 1 +#endif + +#define SIZEOF_SHORT 2 +#if defined(SIZEOF_SHORT) +# define HAVE_SHORT 1 +#endif + +#define SIZEOF_INT 4 +#if defined(SIZEOF_INT) +# define HAVE_INT 1 +#endif + +#define SIZEOF_LONG 8 +#if defined(SIZEOF_LONG) +# define HAVE_LONG 1 +#endif + +#define SIZEOF_LONG_LONG 8 +#if defined(SIZEOF_LONG_LONG) +# define HAVE_LONG_LONG 1 +#endif + + +#define SIZEOF_SIGSET_T 128 +#if defined(SIZEOF_SIGSET_T) +# define HAVE_SIGSET_T 1 +#endif + +#define SIZEOF_SIZE_T 8 +#if defined(SIZEOF_SIZE_T) +# define HAVE_SIZE_T 1 +#endif + +/* #undef SIZEOF_UCHAR */ +#if defined(SIZEOF_UCHAR) +# define HAVE_UCHAR 1 +#endif + +#define SIZEOF_UINT 4 +#if defined(SIZEOF_UINT) +# define HAVE_UINT 1 +#endif + +#define SIZEOF_ULONG 8 +#if defined(SIZEOF_ULONG) +# define HAVE_ULONG 1 +#endif + +/* #undef SIZEOF_INT8 */ +#if defined(SIZEOF_INT8) +# define HAVE_INT8 1 +#endif +/* #undef SIZEOF_UINT8 */ +#if defined(SIZEOF_UINT8) +# define HAVE_UINT8 1 +#endif + +/* #undef SIZEOF_INT16 */ +#if defined(SIZEOF_INT16) +# define HAVE_INT16 1 +#endif +/* #undef SIZEOF_UINT16 */ +#if defined(SIZEOF_UINT16) +# define HAVE_UINT16 1 +#endif + +/* #undef SIZEOF_INT32 */ +#if defined(SIZEOF_INT32) +# define HAVE_INT32 1 +#endif +/* #undef SIZEOF_UINT32 */ +#if defined(SIZEOF_UINT32) +# define HAVE_UINT32 1 +#endif +/* #undef SIZEOF_U_INT32_T */ +#if defined(SIZEOF_U_INT32_T) +# define HAVE_U_INT32_T 1 +#endif + +/* #undef SIZEOF_INT64 */ +#if defined(SIZEOF_INT64) +# define HAVE_INT64 1 +#endif +/* #undef SIZEOF_UINT64 */ +#if defined(SIZEOF_UINT64) +# define HAVE_UINT64 1 +#endif + +/* #undef SIZEOF_SOCKLEN_T */ +#if defined(SIZEOF_SOCKLEN_T) +# define HAVE_SOCKLEN_T 1 +#endif + +#define SOCKET_SIZE_TYPE socklen_t + +#define MARIADB_DEFAULT_CHARSET "latin1" + diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/include/ma_config.h b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/ma_config.h new file mode 100644 index 00000000000..90c42c97df6 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/ma_config.h @@ -0,0 +1,269 @@ + +/* + * Include file constants (processed in LibmysqlIncludeFiles.txt 1 + */ +#define HAVE_ALLOCA_H 1 +/* #undef HAVE_BIGENDIAN */ +#define HAVE_SETLOCALE 1 +#define HAVE_NL_LANGINFO 1 +#define HAVE_ARPA_INET_H 1 +#define HAVE_CRYPT_H 1 +#define HAVE_DIRENT_H 1 +#define HAVE_DLFCN_H 1 +#define HAVE_EXECINFO_H 1 +#define HAVE_FCNTL_H 1 +#define HAVE_FENV_H 1 +#define HAVE_FLOAT_H 1 +/* #undef HAVE_FPU_CONTROL_H */ +#define HAVE_GRP_H 1 +/* #undef HAVE_IEEEFP_H */ +#define HAVE_LIMITS_H 1 +#define HAVE_MALLOC_H 1 +#define HAVE_MEMORY_H 1 +#define HAVE_NETINET_IN_H 1 +#define HAVE_PATHS_H 1 +#define HAVE_PWD_H 1 +#define HAVE_SCHED_H 1 +/* #undef HAVE_SELECT_H */ +#define HAVE_STDDEF_H 1 +#define HAVE_STDINT_H 1 +#define 
HAVE_STDLIB_H 1 +#define HAVE_STRING_H 1 +#define HAVE_STRINGS_H 1 +/* #undef HAVE_SYNCH_H */ +/* #undef HAVE_SYS_FPU_H */ +#define HAVE_SYS_IOCTL_H 1 +#define HAVE_SYS_IPC_H 1 +#define HAVE_SYS_MMAN_H 1 +#define HAVE_SYS_PRCTL_H 1 +#define HAVE_SYS_SELECT_H 1 +#define HAVE_SYS_SHM_H 1 +#define HAVE_SYS_SOCKET_H 1 +#define HAVE_SYS_STAT_H 1 +/* #undef HAVE_SYS_STREAM_H */ +#define HAVE_SYS_TIMEB_H 1 +#define HAVE_SYS_TYPES_H 1 +#define HAVE_SYS_UN_H 1 +/* #undef HAVE_SYSENT_H */ +#define HAVE_TERMIO_H 1 +#define HAVE_TERMIOS_H 1 +#define HAVE_UNISTD_H 1 +#define HAVE_UTIME_H 1 +#define HAVE_UCONTEXT_H 1 + +/* + * function definitions - processed in LibmysqlFunctions.txt + */ +#define HAVE_ACCESS 1 +/* #undef HAVE_AIOWAIT */ +#define HAVE_ALARM 1 +/* #undef HAVE_ALLOCA */ +#define HAVE_BCMP 1 +/* #undef HAVE_BFILL */ +/* #undef HAVE_BMOVE */ +#define HAVE_BZERO 1 +#define HAVE_CLOCK_GETTIME 1 +/* #undef HAVE_COMPRESS */ +/* #undef HAVE_CRYPT */ +#define HAVE_DLERROR 1 +#define HAVE_DLOPEN 1 +#define HAVE_FCHMOD 1 +#define HAVE_FCNTL 1 +/* #undef HAVE_FCONVERT */ +#define HAVE_FDATASYNC 1 +#define HAVE_FESETROUND 1 +#define HAVE_FINITE 1 +#define HAVE_FSEEKO 1 +#define HAVE_FSYNC 1 +#define HAVE_GETADDRINFO 1 +#define HAVE_GETCWD 1 +#define HAVE_GETHOSTBYADDR_R 1 +#define HAVE_GETHOSTBYNAME_R 1 +/* #undef HAVE_GETHRTIME */ +#define HAVE_GETNAMEINFO 1 +#define HAVE_GETPAGESIZE 1 +#define HAVE_GETPASS 1 +/* #undef HAVE_GETPASSPHRASE */ +#define HAVE_GETPWNAM 1 +#define HAVE_GETPWUID 1 +#define HAVE_GETRLIMIT 1 +#define HAVE_GETRUSAGE 1 +#define HAVE_GETWD 1 +#define HAVE_GMTIME_R 1 +#define HAVE_INITGROUPS 1 +#define HAVE_LDIV 1 +#define HAVE_LOCALTIME_R 1 +#define HAVE_LOG2 1 +#define HAVE_LONGJMP 1 +#define HAVE_LSTAT 1 +#define HAVE_MADVISE 1 +#define HAVE_MALLINFO 1 +#define HAVE_MEMALIGN 1 +#define HAVE_MEMCPY 1 +#define HAVE_MEMMOVE 1 +#define HAVE_MKSTEMP 1 +#define HAVE_MLOCK 1 +#define HAVE_MLOCKALL 1 +#define HAVE_MMAP 1 +#define HAVE_MMAP64 1 +#define HAVE_PERROR 1 +#define HAVE_POLL 1 +#define HAVE_PREAD 1 +/* #undef HAVE_PTHREAD_ATTR_CREATE */ +#define HAVE_PTHREAD_ATTR_GETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_ATTR_SETPRIO */ +#define HAVE_PTHREAD_ATTR_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_ATTR_SETSCOPE 1 +#define HAVE_PTHREAD_ATTR_SETSTACKSIZE 1 +/* #undef HAVE_PTHREAD_CONDATTR_CREATE */ +/* #undef HAVE_PTHREAD_INIT */ +#define HAVE_PTHREAD_KEY_DELETE 1 +#define HAVE_PTHREAD_KILL 1 +#define HAVE_PTHREAD_RWLOCK_RDLOCK 1 +/* #undef HAVE_PTHREAD_SETPRIO_NP */ +#define HAVE_PTHREAD_SETSCHEDPARAM 1 +#define HAVE_PTHREAD_SIGMASK 1 +/* #undef HAVE_PTHREAD_THREADMASK */ +/* #undef HAVE_PTHREAD_YIELD_NP */ +#define HAVE_READDIR_R 1 +#define HAVE_READLINK 1 +#define HAVE_REALPATH 1 +#define HAVE_RENAME 1 +#define HAVE_SCHED_YIELD 1 +#define HAVE_SELECT 1 +/* #undef HAVE_SETFD */ +/* #undef HAVE_SETFILEPOINTER */ +#define HAVE_SIGNAL 1 +#define HAVE_SIGACTION 1 +/* #undef HAVE_SIGTHREADMASK */ +#define HAVE_SIGWAIT 1 +#define HAVE_SLEEP 1 +#define HAVE_SNPRINTF 1 +/* #undef HAVE_SQLITE */ +#define HAVE_STPCPY 1 +#define HAVE_STRERROR 1 +/* #undef HAVE_STRLCPY */ +#define HAVE_STRNLEN 1 +#define HAVE_STRPBRK 1 +#define HAVE_STRSEP 1 +#define HAVE_STRSTR 1 +#define HAVE_STRTOK_R 1 +#define HAVE_STRTOL 1 +#define HAVE_STRTOLL 1 +#define HAVE_STRTOUL 1 +#define HAVE_STRTOULL 1 +/* #undef HAVE_TELL */ +/* #undef HAVE_THR_SETCONCURRENCY */ +/* #undef HAVE_THR_YIELD */ +#define HAVE_VASPRINTF 1 +#define HAVE_VSNPRINTF 1 + +/* + * types and sizes + */ +/* Types we may use */ +#define SIZEOF_CHAR 1 
+#if defined(SIZEOF_CHAR) +# define HAVE_CHAR 1 +#endif + +#define SIZEOF_CHARP 8 +#if defined(SIZEOF_CHARP) +# define HAVE_CHARP 1 +#endif + +#define SIZEOF_SHORT 2 +#if defined(SIZEOF_SHORT) +# define HAVE_SHORT 1 +#endif + +#define SIZEOF_INT 4 +#if defined(SIZEOF_INT) +# define HAVE_INT 1 +#endif + +#define SIZEOF_LONG 8 +#if defined(SIZEOF_LONG) +# define HAVE_LONG 1 +#endif + +#define SIZEOF_LONG_LONG 8 +#if defined(SIZEOF_LONG_LONG) +# define HAVE_LONG_LONG 1 +#endif + + +#define SIZEOF_SIGSET_T 128 +#if defined(SIZEOF_SIGSET_T) +# define HAVE_SIGSET_T 1 +#endif + +#define SIZEOF_SIZE_T 8 +#if defined(SIZEOF_SIZE_T) +# define HAVE_SIZE_T 1 +#endif + +/* #undef SIZEOF_UCHAR */ +#if defined(SIZEOF_UCHAR) +# define HAVE_UCHAR 1 +#endif + +#define SIZEOF_UINT 4 +#if defined(SIZEOF_UINT) +# define HAVE_UINT 1 +#endif + +#define SIZEOF_ULONG 8 +#if defined(SIZEOF_ULONG) +# define HAVE_ULONG 1 +#endif + +/* #undef SIZEOF_INT8 */ +#if defined(SIZEOF_INT8) +# define HAVE_INT8 1 +#endif +/* #undef SIZEOF_UINT8 */ +#if defined(SIZEOF_UINT8) +# define HAVE_UINT8 1 +#endif + +/* #undef SIZEOF_INT16 */ +#if defined(SIZEOF_INT16) +# define HAVE_INT16 1 +#endif +/* #undef SIZEOF_UINT16 */ +#if defined(SIZEOF_UINT16) +# define HAVE_UINT16 1 +#endif + +/* #undef SIZEOF_INT32 */ +#if defined(SIZEOF_INT32) +# define HAVE_INT32 1 +#endif +/* #undef SIZEOF_UINT32 */ +#if defined(SIZEOF_UINT32) +# define HAVE_UINT32 1 +#endif +/* #undef SIZEOF_U_INT32_T */ +#if defined(SIZEOF_U_INT32_T) +# define HAVE_U_INT32_T 1 +#endif + +/* #undef SIZEOF_INT64 */ +#if defined(SIZEOF_INT64) +# define HAVE_INT64 1 +#endif +/* #undef SIZEOF_UINT64 */ +#if defined(SIZEOF_UINT64) +# define HAVE_UINT64 1 +#endif + +/* #undef SIZEOF_SOCKLEN_T */ +#if defined(SIZEOF_SOCKLEN_T) +# define HAVE_SOCKLEN_T 1 +#endif + +#define SOCKET_SIZE_TYPE socklen_t + +#define MARIADB_DEFAULT_CHARSET "latin1" + diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/include/mariadb_version.h b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/mariadb_version.h new file mode 100644 index 00000000000..821a7f8add2 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/include/mariadb_version.h @@ -0,0 +1,36 @@ +/* Copyright Abandoned 1996, 1999, 2001 MySQL AB + This file is public domain and comes with NO WARRANTY of any kind */ + +/* Version numbers for protocol & mysqld */ + +#ifndef _mariadb_version_h_ +#define _mariadb_version_h_ + +#ifdef _CUSTOMCONFIG_ +#include +#else +#define PROTOCOL_VERSION 10 +#define MARIADB_CLIENT_VERSION_STR "10.3.6" +#define MARIADB_BASE_VERSION "mariadb-10.3" +#define MARIADB_VERSION_ID 100306 +#define MYSQL_VERSION_ID 100306 +#define MARIADB_PORT 3306 +#define MARIADB_UNIX_ADDR "/var/run/mysqld/mysqld.sock" +#define MYSQL_CONFIG_NAME "my" + +#define MARIADB_PACKAGE_VERSION "3.0.6" +#define MARIADB_PACKAGE_VERSION_ID 30006 +#define MARIADB_SYSTEM_TYPE "Linux" +#define MARIADB_MACHINE_TYPE "x86_64" +#define MARIADB_PLUGINDIR "lib/mariadb/plugin" + +/* mysqld compile time options */ +#ifndef MYSQL_CHARSET +#define MYSQL_CHARSET "" +#endif +#endif + +/* Source information */ +#define CC_SOURCE_REVISION "a0fd36cc5a5313414a5a2ebe9322577a29b4782a" + +#endif /* _mariadb_version_h_ */ diff --git a/contrib/mariadb-connector-c-cmake/linux_x86_64/libmariadb/ma_client_plugin.c b/contrib/mariadb-connector-c-cmake/linux_x86_64/libmariadb/ma_client_plugin.c new file mode 100644 index 00000000000..b7fdcdbcb85 --- /dev/null +++ b/contrib/mariadb-connector-c-cmake/linux_x86_64/libmariadb/ma_client_plugin.c @@ 
-0,0 +1,499 @@ +/* Copyright (C) 2010 - 2012 Sergei Golubchik and Monty Program Ab + 2015-2016 MariaDB Corporation AB + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not see <http://www.gnu.org/licenses> + or write to the Free Software Foundation, Inc., + 51 Franklin St., Fifth Floor, Boston, MA 02110, USA */ + +/** + @file + + Support code for the client side (libmariadb) plugins + + Client plugins are somewhat different from server plugins; they are simpler. + + They do not need to be installed or in any way explicitly loaded on the + client, they are loaded automatically on demand. + One client plugin per shared object, soname *must* match the plugin name. + + There is no reference counting and no unloading either. +*/ + +#if _MSC_VER +/* Silence warnings about variable 'unused' being used. */ +#define FORCE_INIT_OF_VARS 1 +#endif + +#include <ma_global.h> +#include <ma_sys.h> +#include <ma_common.h> +#include <ma_string.h> +#include <mysql.h> + +#include "errmsg.h" +#include <mysql/client_plugin.h> + +struct st_client_plugin_int { + struct st_client_plugin_int *next; + void *dlhandle; + struct st_mysql_client_plugin *plugin; +}; + +static my_bool initialized= 0; +static MA_MEM_ROOT mem_root; + +static uint valid_plugins[][2]= { + {MYSQL_CLIENT_AUTHENTICATION_PLUGIN, MYSQL_CLIENT_AUTHENTICATION_PLUGIN_INTERFACE_VERSION}, + {MARIADB_CLIENT_PVIO_PLUGIN, MARIADB_CLIENT_PVIO_PLUGIN_INTERFACE_VERSION}, + {MARIADB_CLIENT_TRACE_PLUGIN, MARIADB_CLIENT_TRACE_PLUGIN_INTERFACE_VERSION}, + {MARIADB_CLIENT_CONNECTION_PLUGIN, MARIADB_CLIENT_CONNECTION_PLUGIN_INTERFACE_VERSION}, + {0, 0} +}; + +/* + Loaded plugins are stored in a linked list. + The list is append-only, the elements are added to the head (like in a stack). + The elements are added under a mutex, but the list can be read and traversed + without any mutex because once an element is added to the list, it stays + there. The main purpose of a mutex is to prevent two threads from + loading the same plugin twice in parallel.
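A condensed sketch of the publication order that makes those lock-free reads work (illustrative only; make_node and use are hypothetical helpers, and on weakly ordered hardware a release store would strictly be required):

/* Writer side, executed under LOCK_load_client_plugin: */
struct st_client_plugin_int *node = make_node(plugin); /* hypothetical helper */
node->next = plugin_list[nr];  /* link to the current head first */
plugin_list[nr] = node;        /* publish: readers only ever see complete nodes */

/* Reader side, no mutex: the list only grows at the head and never shrinks. */
for (struct st_client_plugin_int *p = plugin_list[nr]; p; p = p->next)
    use(p->plugin);            /* hypothetical consumer */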
+*/ + + +struct st_client_plugin_int *plugin_list[MYSQL_CLIENT_MAX_PLUGINS + MARIADB_CLIENT_MAX_PLUGINS]; +#ifdef THREAD +static pthread_mutex_t LOCK_load_client_plugin; +#endif + + extern struct st_mysql_client_plugin mysql_native_password_client_plugin; + extern struct st_mysql_client_plugin mysql_old_password_client_plugin; + extern struct st_mysql_client_plugin pvio_socket_client_plugin; + + +struct st_mysql_client_plugin *mysql_client_builtins[]= +{ + (struct st_mysql_client_plugin *)&mysql_native_password_client_plugin, + (struct st_mysql_client_plugin *)&mysql_old_password_client_plugin, + (struct st_mysql_client_plugin *)&pvio_socket_client_plugin, + + 0 +}; + + +static int is_not_initialized(MYSQL *mysql, const char *name) +{ + if (initialized) + return 0; + + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, + SQLSTATE_UNKNOWN, ER(CR_AUTH_PLUGIN_CANNOT_LOAD), + name, "not initialized"); + return 1; +} + +static int get_plugin_nr(uint type) +{ + uint i= 0; + for(; valid_plugins[i][1]; i++) + if (valid_plugins[i][0] == type) + return i; + return -1; +} + +static const char *check_plugin_version(struct st_mysql_client_plugin *plugin, unsigned int version) +{ + if (plugin->interface_version < version || + (plugin->interface_version >> 8) > (version >> 8)) + return "Incompatible client plugin interface"; + return 0; +} + +/** + finds a plugin in the list + + @param name plugin name to search for + @param type plugin type + + @note this does NOT necessarily need a mutex, take care! + + @retval a pointer to a found plugin or 0 +*/ +static struct st_mysql_client_plugin *find_plugin(const char *name, int type) +{ + struct st_client_plugin_int *p; + int plugin_nr= get_plugin_nr(type); + + DBUG_ASSERT(initialized); + if (plugin_nr == -1) + return 0; + + if (!name) + return plugin_list[plugin_nr]->plugin; + + for (p= plugin_list[plugin_nr]; p; p= p->next) + { + if (strcmp(p->plugin->name, name) == 0) + return p->plugin; + } + return NULL; +} + + +/** + verifies the plugin and adds it to the list + + @param mysql MYSQL structure (for error reporting) + @param plugin plugin to install + @param dlhandle a handle to the shared object (returned by dlopen) + or 0 if the plugin was not dynamically loaded + @param argc number of arguments in the 'va_list args' + @param args arguments passed to the plugin initialization function + + @retval a pointer to an installed plugin or 0 +*/ + +static struct st_mysql_client_plugin * +add_plugin(MYSQL *mysql, struct st_mysql_client_plugin *plugin, void *dlhandle, + int argc, va_list args) +{ + const char *errmsg; + struct st_client_plugin_int plugin_int, *p; + char errbuf[1024]; + int plugin_nr; + + DBUG_ASSERT(initialized); + + plugin_int.plugin= plugin; + plugin_int.dlhandle= dlhandle; + + if ((plugin_nr= get_plugin_nr(plugin->type)) == -1) + { + errmsg= "Unknown client plugin type"; + goto err1; + } + if ((errmsg= check_plugin_version(plugin, valid_plugins[plugin_nr][1]))) + goto err1; + + /* Call the plugin initialization function, if any */ + if (plugin->init && plugin->init(errbuf, sizeof(errbuf), argc, args)) + { + errmsg= errbuf; + goto err1; + } + + p= (struct st_client_plugin_int *) + ma_memdup_root(&mem_root, (char *)&plugin_int, sizeof(plugin_int)); + + if (!p) + { + errmsg= "Out of memory"; + goto err2; + } + +#ifdef THREAD + safe_mutex_assert_owner(&LOCK_load_client_plugin); +#endif + + p->next= plugin_list[plugin_nr]; + plugin_list[plugin_nr]= p; + + return plugin; + +err2: + if (plugin->deinit) + plugin->deinit(); +err1: + my_set_error(mysql, 
CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN, + ER(CR_AUTH_PLUGIN_CANNOT_LOAD), plugin->name, errmsg); + if (dlhandle) + (void)dlclose(dlhandle); + return NULL; +} + + +/** + Loads plugins which are specified in the environment variable + LIBMYSQL_PLUGINS. + + Multiple plugins must be separated by a semicolon. This function doesn't + return or log an error. + + The function is called by mysql_client_plugin_init. + + @todo + Support extended syntax, passing parameters to plugins, for example + LIBMYSQL_PLUGINS="plugin1(param1,param2);plugin2;..." + or + LIBMYSQL_PLUGINS="plugin1=int:param1,str:param2;plugin2;..." +*/ + +static void load_env_plugins(MYSQL *mysql) +{ + char *plugs, *free_env, *s= getenv("LIBMYSQL_PLUGINS"); + + if (ma_check_env_str(s)) + return; + + free_env= strdup(s); + plugs= s= free_env; + + do { + if ((s= strchr(plugs, ';'))) + *s= '\0'; + mysql_load_plugin(mysql, plugs, -1, 0); + plugs= s + 1; + } while (s); + + free(free_env); +} + +/********** extern functions to be used by libmariadb *********************/ + +/** + Initializes the client plugin layer. + + This function must be called before any other client plugin function. + + @retval 0 successful + @retval != 0 error occurred +*/ + +int mysql_client_plugin_init() +{ + MYSQL mysql; + struct st_mysql_client_plugin **builtin; + va_list unused; + LINT_INIT_STRUCT(unused); + + if (initialized) + return 0; + + memset(&mysql, 0, sizeof(mysql)); /* dummy mysql for set_mysql_extended_error */ + + pthread_mutex_init(&LOCK_load_client_plugin, MY_MUTEX_INIT_SLOW); + ma_init_alloc_root(&mem_root, 128, 128); + + memset(&plugin_list, 0, sizeof(plugin_list)); + + initialized= 1; + + pthread_mutex_lock(&LOCK_load_client_plugin); + for (builtin= mysql_client_builtins; *builtin; builtin++) + add_plugin(&mysql, *builtin, 0, 0, unused); + + pthread_mutex_unlock(&LOCK_load_client_plugin); + + load_env_plugins(&mysql); + + return 0; +} + + +/** + Deinitializes the client plugin layer. + + Unloads all client plugins and frees any associated resources.
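A hedged usage sketch of the lifecycle these two entry points define (the plugin names passed via LIBMYSQL_PLUGINS are examples only; the format is the semicolon-separated list documented above):

#include <stdlib.h>
#include <mysql.h>

int main(void)
{
    /* Optional: ask the init step to auto-load extra plugins by name. */
    setenv("LIBMYSQL_PLUGINS", "mariadb_cleartext;dialog", 1);

    mysql_client_plugin_init();   /* registers built-ins, then loads env plugins */
    /* ... normal libmariadb usage ... */
    mysql_client_plugin_deinit(); /* deinitializes and unloads everything */
    return 0;
}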
+*/ + +void mysql_client_plugin_deinit() +{ + int i; + struct st_client_plugin_int *p; + + if (!initialized) + return; + + for (i=0; i < MYSQL_CLIENT_MAX_PLUGINS; i++) + for (p= plugin_list[i]; p; p= p->next) + { + if (p->plugin->deinit) + p->plugin->deinit(); + if (p->dlhandle) + (void)dlclose(p->dlhandle); + } + + memset(&plugin_list, 0, sizeof(plugin_list)); + initialized= 0; + ma_free_root(&mem_root, MYF(0)); + pthread_mutex_destroy(&LOCK_load_client_plugin); +} + +/************* public facing functions, for client consumption *********/ + +/* see <mysql/client_plugin.h> for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_client_register_plugin(MYSQL *mysql, + struct st_mysql_client_plugin *plugin) +{ + va_list unused; + LINT_INIT_STRUCT(unused); + + if (is_not_initialized(mysql, plugin->name)) + return NULL; + + pthread_mutex_lock(&LOCK_load_client_plugin); + + /* make sure the plugin wasn't loaded meanwhile */ + if (find_plugin(plugin->name, plugin->type)) + { + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, + SQLSTATE_UNKNOWN, ER(CR_AUTH_PLUGIN_CANNOT_LOAD), + plugin->name, "it is already loaded"); + plugin= NULL; + } + else + plugin= add_plugin(mysql, plugin, 0, 0, unused); + + pthread_mutex_unlock(&LOCK_load_client_plugin); + return plugin; +} + + +/* see <mysql/client_plugin.h> for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_load_plugin_v(MYSQL *mysql, const char *name, int type, + int argc, va_list args) +{ + const char *errmsg; +#ifdef _WIN32 + char errbuf[1024]; +#endif + char dlpath[FN_REFLEN+1]; + void *sym, *dlhandle = NULL; + struct st_mysql_client_plugin *plugin; + char *env_plugin_dir= getenv("MARIADB_PLUGIN_DIR"); + + CLEAR_CLIENT_ERROR(mysql); + if (is_not_initialized(mysql, name)) + return NULL; + + pthread_mutex_lock(&LOCK_load_client_plugin); + + /* make sure the plugin wasn't loaded meanwhile */ + if (type >= 0 && find_plugin(name, type)) + { + errmsg= "it is already loaded"; + goto err; + } + + /* Compile dll path */ + snprintf(dlpath, sizeof(dlpath) - 1, "%s/%s%s", + mysql->options.extension && mysql->options.extension->plugin_dir ? + mysql->options.extension->plugin_dir : (env_plugin_dir) ?
env_plugin_dir : + MARIADB_PLUGINDIR, name, SO_EXT); + + /* Open new dll handle */ + if (!(dlhandle= dlopen((const char *)dlpath, RTLD_NOW))) + { +#ifdef _WIN32 + char winmsg[255]; + size_t len; + winmsg[0] = 0; + FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM, + NULL, + GetLastError(), + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + winmsg, 255, NULL); + len= strlen(winmsg); + while (len > 0 && (winmsg[len - 1] == '\n' || winmsg[len - 1] == '\r')) + len--; + if (len) + winmsg[len] = 0; + snprintf(errbuf, sizeof(errbuf), "%s Library path is '%s'", winmsg, dlpath); + errmsg= errbuf; +#else + errmsg= dlerror(); +#endif + goto err; + } + + + if (!(sym= dlsym(dlhandle, plugin_declarations_sym))) + { + errmsg= "not a plugin"; + (void)dlclose(dlhandle); + goto err; + } + + plugin= (struct st_mysql_client_plugin*)sym; + + if (type >=0 && type != plugin->type) + { + errmsg= "type mismatch"; + goto err; + } + + if (strcmp(name, plugin->name)) + { + errmsg= "name mismatch"; + goto err; + } + + if (type < 0 && find_plugin(name, plugin->type)) + { + errmsg= "it is already loaded"; + goto err; + } + + plugin= add_plugin(mysql, plugin, dlhandle, argc, args); + + pthread_mutex_unlock(&LOCK_load_client_plugin); + + return plugin; + +err: + if (dlhandle) + dlclose(dlhandle); + pthread_mutex_unlock(&LOCK_load_client_plugin); + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN, + ER(CR_AUTH_PLUGIN_CANNOT_LOAD), name, errmsg); + return NULL; +} + + +/* see <mysql/client_plugin.h> for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_load_plugin(MYSQL *mysql, const char *name, int type, int argc, ...) +{ + struct st_mysql_client_plugin *p; + va_list args; + va_start(args, argc); + p= mysql_load_plugin_v(mysql, name, type, argc, args); + va_end(args); + return p; +} + +/* see <mysql/client_plugin.h> for a full description */ +struct st_mysql_client_plugin * STDCALL +mysql_client_find_plugin(MYSQL *mysql, const char *name, int type) +{ + struct st_mysql_client_plugin *p; + int plugin_nr= get_plugin_nr(type); + + if (is_not_initialized(mysql, name)) + return NULL; + + if (plugin_nr == -1) + { + my_set_error(mysql, CR_AUTH_PLUGIN_CANNOT_LOAD, SQLSTATE_UNKNOWN, + ER(CR_AUTH_PLUGIN_CANNOT_LOAD), name, "invalid type"); + } + + if ((p= find_plugin(name, type))) + return p; + + /* not found, load it */ + return mysql_load_plugin(mysql, name, type, 0); +} + diff --git a/contrib/murmurhash/CMakeLists.txt b/contrib/murmurhash/CMakeLists.txt new file mode 100644 index 00000000000..c5e467a2d6d --- /dev/null +++ b/contrib/murmurhash/CMakeLists.txt @@ -0,0 +1,7 @@ +add_library(murmurhash + src/murmurhash2.cpp + src/murmurhash3.cpp + include/murmurhash2.h + include/murmurhash3.h) + +target_include_directories (murmurhash PUBLIC include) diff --git a/contrib/murmurhash/LICENSE b/contrib/murmurhash/LICENSE new file mode 100644 index 00000000000..f6cdede60b8 --- /dev/null +++ b/contrib/murmurhash/LICENSE @@ -0,0 +1 @@ +MurmurHash was written by Austin Appleby, and is placed in the public domain. The author hereby disclaims copyright to this source code.
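For reference, a hedged sketch of calling the 32-bit and 64-bit variants declared in the headers that follow (the seed value is arbitrary):

#include <stdio.h>
#include <string.h>
#include "murmurhash2.h"

int main(void)
{
    const char *key = "ClickHouse";
    uint32_t h32 = MurmurHash2(key, (int)strlen(key), 0);
    uint64_t h64 = MurmurHash64A(key, (int)strlen(key), 0);
    printf("%u %llu\n", (unsigned)h32, (unsigned long long)h64);
    return 0;
}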
diff --git a/contrib/murmurhash/README b/contrib/murmurhash/README
new file mode 100644
index 00000000000..5428d30b26d
--- /dev/null
+++ b/contrib/murmurhash/README
@@ -0,0 +1,6 @@
+Original URL: https://github.com/aappleby/smhasher
+
+version:
+commit 61a0530f28277f2e850bfc39600ce61d02b518de
+author aappleby@gmail.com
+date 2016-01-09T06:07:17Z
diff --git a/contrib/murmurhash/include/murmurhash2.h b/contrib/murmurhash/include/murmurhash2.h
new file mode 100644
index 00000000000..e95cf2a4d85
--- /dev/null
+++ b/contrib/murmurhash/include/murmurhash2.h
@@ -0,0 +1,35 @@
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH2_H_
+#define _MURMURHASH2_H_
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER) && (_MSC_VER < 1600)
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#include <stdint.h>
+
+#endif // !defined(_MSC_VER)
+
+uint32_t MurmurHash2        (const void * key, int len, uint32_t seed);
+uint64_t MurmurHash64A      (const void * key, int len, uint64_t seed);
+uint64_t MurmurHash64B      (const void * key, int len, uint64_t seed);
+uint32_t MurmurHash2A       (const void * key, int len, uint32_t seed);
+uint32_t MurmurHashNeutral2 (const void * key, int len, uint32_t seed);
+uint32_t MurmurHashAligned2 (const void * key, int len, uint32_t seed);
+
+#endif // _MURMURHASH2_H_
+
diff --git a/contrib/murmurhash/include/murmurhash3.h b/contrib/murmurhash/include/murmurhash3.h
new file mode 100644
index 00000000000..e1c6d34976c
--- /dev/null
+++ b/contrib/murmurhash/include/murmurhash3.h
@@ -0,0 +1,37 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER) && (_MSC_VER < 1600)
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned __int64 uint64_t;
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#include <stdint.h>
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_32  ( const void * key, int len, uint32_t seed, void * out );
+
+void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
+
+void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
+
+//-----------------------------------------------------------------------------
+
+#endif // _MURMURHASH3_H_
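For context, a minimal sketch of calling the functions declared in these headers (illustrative only, not part of the patch); the key string and the zero seed are arbitrary choices for the example:

    #include <cstdio>
    #include <cstring>
    #include "murmurhash2.h"
    #include "murmurhash3.h"

    int main()
    {
        const char key[] = "ClickHouse";
        const int len = static_cast<int>(std::strlen(key));

        // 32-bit MurmurHash2 returns the hash value directly.
        uint32_t h32 = MurmurHash2(key, len, /*seed=*/0);

        // The MurmurHash3 variants write their result through the out pointer;
        // the x64 128-bit variant fills two 64-bit words.
        uint64_t h128[2];
        MurmurHash3_x64_128(key, len, /*seed=*/0, h128);

        std::printf("%u %llu %llu\n", static_cast<unsigned>(h32),
                    static_cast<unsigned long long>(h128[0]),
                    static_cast<unsigned long long>(h128[1]));
        return 0;
    }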
diff --git a/contrib/murmurhash/src/murmurhash2.cpp b/contrib/murmurhash/src/murmurhash2.cpp
new file mode 100644
index 00000000000..8a41ba02d98
--- /dev/null
+++ b/contrib/murmurhash/src/murmurhash2.cpp
@@ -0,0 +1,421 @@
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - This code makes a few assumptions about how your machine behaves -
+
+// 1. We can read a 4-byte value from any address without crashing
+// 2. sizeof(int) == 4
+
+// And it has a few limitations -
+
+// 1. It will not work incrementally.
+// 2. It will not produce the same results on little-endian and big-endian
+//    machines.
+
+#include "murmurhash2.h"
+
+// Platform-specific functions and macros
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+
+uint32_t MurmurHash2(const void * key, int len, uint32_t seed)
+{
+    // 'm' and 'r' are mixing constants generated offline.
+    // They're not really 'magic', they just happen to work well.
+
+    const uint32_t m = 0x5bd1e995;
+    const int r = 24;
+
+    // Initialize the hash to a 'random' value
+
+    uint32_t h = seed ^ len;
+
+    // Mix 4 bytes at a time into the hash
+
+    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
+
+    while (len >= 4)
+    {
+        uint32_t k = *reinterpret_cast<const uint32_t *>(data);
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+
+        h *= m;
+        h ^= k;
+
+        data += 4;
+        len -= 4;
+    }
+
+    // Handle the last few bytes of the input array
+
+    switch (len)
+    {
+        case 3: h ^= data[2] << 16;
+        case 2: h ^= data[1] << 8;
+        case 1: h ^= data[0];
+                h *= m;
+    };
+
+    // Do a few final mixes of the hash to ensure the last few
+    // bytes are well-incorporated.
+
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+
+    return h;
+}
+
+// MurmurHash2, 64-bit versions, by Austin Appleby
+
+// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment
+// and endian-ness issues if used across multiple platforms.
+
+// 64-bit hash for 64-bit platforms
+
+uint64_t MurmurHash64A(const void * key, int len, uint64_t seed)
+{
+    const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
+    const int r = 47;
+
+    uint64_t h = seed ^ (len * m);
+
+    const uint64_t * data = reinterpret_cast<const uint64_t *>(key);
+    const uint64_t * end = data + (len/8);
+
+    while (data != end)
+    {
+        uint64_t k = *data++;
+
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+
+        h ^= k;
+        h *= m;
+    }
+
+    const unsigned char * data2 = reinterpret_cast<const unsigned char *>(data);
+
+    switch (len & 7)
+    {
+        case 7: h ^= static_cast<uint64_t>(data2[6]) << 48;
+        case 6: h ^= static_cast<uint64_t>(data2[5]) << 40;
+        case 5: h ^= static_cast<uint64_t>(data2[4]) << 32;
+        case 4: h ^= static_cast<uint64_t>(data2[3]) << 24;
+        case 3: h ^= static_cast<uint64_t>(data2[2]) << 16;
+        case 2: h ^= static_cast<uint64_t>(data2[1]) << 8;
+        case 1: h ^= static_cast<uint64_t>(data2[0]);
+                h *= m;
+    };
+
+    h ^= h >> r;
+    h *= m;
+    h ^= h >> r;
+
+    return h;
+}
+
+
+// 64-bit hash for 32-bit platforms
+
+uint64_t MurmurHash64B(const void * key, int len, uint64_t seed)
+{
+    const uint32_t m = 0x5bd1e995;
+    const int r = 24;
+
+    uint32_t h1 = static_cast<uint32_t>(seed) ^ len;
+    uint32_t h2 = static_cast<uint32_t>(seed >> 32);
+
+    const uint32_t * data = reinterpret_cast<const uint32_t *>(key);
+
+    while (len >= 8)
+    {
+        uint32_t k1 = *data++;
+        k1 *= m; k1 ^= k1 >> r; k1 *= m;
+        h1 *= m; h1 ^= k1;
+        len -= 4;
+
+        uint32_t k2 = *data++;
+        k2 *= m; k2 ^= k2 >> r; k2 *= m;
+        h2 *= m; h2 ^= k2;
+        len -= 4;
+    }
+
+    if (len >= 4)
+    {
+        uint32_t k1 = *data++;
+        k1 *= m; k1 ^= k1 >> r; k1 *= m;
+        h1 *= m; h1 ^= k1;
+        len -= 4;
+    }
+
+    switch (len)
+    {
+        case 3: h2 ^= reinterpret_cast<const unsigned char *>(data)[2] << 16;
+        case 2: h2 ^= reinterpret_cast<const unsigned char *>(data)[1] << 8;
+        case 1: h2 ^= reinterpret_cast<const unsigned char *>(data)[0];
+                h2 *= m;
+    };
+
+    h1 ^= h2 >> 18; h1 *= m;
+    h2 ^= h1 >> 22; h2 *= m;
+    h1 ^= h2 >> 17; h1 *= m;
+    h2 ^= h1 >> 19; h2 *= m;
+
+    uint64_t h = h1;
+
+    h = (h << 32) | h2;
+
+    return h;
+}
+
+// MurmurHash2A, by Austin Appleby
+
+// This is a variant of MurmurHash2 modified to use the Merkle-Damgard
+// construction. Bulk speed should be identical to Murmur2, small-key speed
+// will be 10%-20% slower due to the added overhead at the end of the hash.
+
+// This variant fixes a minor issue where null keys were more likely to
+// collide with each other than expected, and also makes the function
+// more amenable to incremental implementations.
+
+#define mmix(h,k) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
+
+uint32_t MurmurHash2A(const void * key, int len, uint32_t seed)
+{
+    const uint32_t m = 0x5bd1e995;
+    const int r = 24;
+    uint32_t l = len;
+
+    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
+
+    uint32_t h = seed;
+
+    while (len >= 4)
+    {
+        uint32_t k = *reinterpret_cast<const uint32_t *>(data);
+        mmix(h,k);
+        data += 4;
+        len -= 4;
+    }
+
+    uint32_t t = 0;
+
+    switch (len)
+    {
+        case 3: t ^= data[2] << 16;
+        case 2: t ^= data[1] << 8;
+        case 1: t ^= data[0];
+    };
+
+    mmix(h,t);
+    mmix(h,l);
+
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+
+    return h;
+}
+
+// MurmurHashNeutral2, by Austin Appleby
+
+// Same as MurmurHash2, but endian- and alignment-neutral.
+// Half the speed though, alas.
+
+uint32_t MurmurHashNeutral2(const void * key, int len, uint32_t seed)
+{
+    const uint32_t m = 0x5bd1e995;
+    const int r = 24;
+
+    uint32_t h = seed ^ len;
+
+    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
+
+    while (len >= 4)
+    {
+        uint32_t k;
+
+        k  = data[0];
+        k |= data[1] << 8;
+        k |= data[2] << 16;
+        k |= data[3] << 24;
+
+        k *= m;
+        k ^= k >> r;
+        k *= m;
+
+        h *= m;
+        h ^= k;
+
+        data += 4;
+        len -= 4;
+    }
+
+    switch (len)
+    {
+        case 3: h ^= data[2] << 16;
+        case 2: h ^= data[1] << 8;
+        case 1: h ^= data[0];
+                h *= m;
+    };
+
+    h ^= h >> 13;
+    h *= m;
+    h ^= h >> 15;
+
+    return h;
+}
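The "endian- and alignment-neutral" claim above can be made concrete: a short sketch (illustrative only, not part of the patch) of the relation between the two variants. On a little-endian host the byte-assembled word in MurmurHashNeutral2 equals the raw 4-byte load in MurmurHash2, so the two functions agree there; only the Neutral variant also gives the same answer on big-endian hosts.

    #include <cassert>
    #include "murmurhash2.h"

    // Holds on little-endian machines; on big-endian machines MurmurHash2
    // loads the bytes in the opposite order and the values diverge.
    void check_neutral_matches_little_endian(const void * key, int len, uint32_t seed)
    {
        assert(MurmurHash2(key, len, seed) == MurmurHashNeutral2(key, len, seed));
    }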
+
+//-----------------------------------------------------------------------------
+// MurmurHashAligned2, by Austin Appleby
+
+// Same algorithm as MurmurHash2, but only does aligned reads - should be safer
+// on certain platforms.
+
+// Performance will be lower than MurmurHash2
+
+#define MIX(h,k,m) { k *= m; k ^= k >> r; k *= m; h *= m; h ^= k; }
+
+
+uint32_t MurmurHashAligned2(const void * key, int len, uint32_t seed)
+{
+    const uint32_t m = 0x5bd1e995;
+    const int r = 24;
+
+    const unsigned char * data = reinterpret_cast<const unsigned char *>(key);
+
+    uint32_t h = seed ^ len;
+
+    int align = reinterpret_cast<uint64_t>(data) & 3;
+
+    if (align && (len >= 4))
+    {
+        // Pre-load the temp registers
+
+        uint32_t t = 0, d = 0;
+
+        switch (align)
+        {
+            case 1: t |= data[2] << 16;
+            case 2: t |= data[1] << 8;
+            case 3: t |= data[0];
+        }
+
+        t <<= (8 * align);
+
+        data += 4-align;
+        len -= 4-align;
+
+        int sl = 8 * (4-align);
+        int sr = 8 * align;
+
+        // Mix
+
+        while (len >= 4)
+        {
+            d = *(reinterpret_cast<const uint32_t *>(data));
+            t = (t >> sr) | (d << sl);
+
+            uint32_t k = t;
+
+            MIX(h,k,m);
+
+            t = d;
+
+            data += 4;
+            len -= 4;
+        }
+
+        // Handle leftover data in temp registers
+
+        d = 0;
+
+        if (len >= align)
+        {
+            switch (align)
+            {
+                case 3: d |= data[2] << 16;
+                case 2: d |= data[1] << 8;
+                case 1: d |= data[0];
+            }
+
+            uint32_t k = (t >> sr) | (d << sl);
+            MIX(h,k,m);
+
+            data += align;
+            len -= align;
+
+            //----------
+            // Handle tail bytes
+
+            switch (len)
+            {
+                case 3: h ^= data[2] << 16;
+                case 2: h ^= data[1] << 8;
+                case 1: h ^= data[0];
+                        h *= m;
+            };
+        }
+        else
+        {
+            switch (len)
+            {
+                case 3: d |= data[2] << 16;
+                case 2: d |= data[1] << 8;
+                case 1: d |= data[0];
+                case 0: h ^= (t >> sr) | (d << sl);
+                        h *= m;
+            }
+        }
+
+        h ^= h >> 13;
+        h *= m;
+        h ^= h >> 15;
+
+        return h;
+    }
+    else
+    {
+        while (len >= 4)
+        {
+            uint32_t k = *reinterpret_cast<const uint32_t *>(data);
+
+            MIX(h,k,m);
+
+            data += 4;
+            len -= 4;
+        }
+
+        // Handle tail bytes
+
+        switch (len)
+        {
+            case 3: h ^= data[2] << 16;
+            case 2: h ^= data[1] << 8;
+            case 1: h ^= data[0];
+                    h *= m;
+        };
+
+        h ^= h >> 13;
+        h *= m;
+        h ^= h >> 15;
+
+        return h;
+    }
+}
\ No newline at end of file
diff --git a/contrib/murmurhash/src/murmurhash3.cpp b/contrib/murmurhash/src/murmurhash3.cpp
new file mode 100644
index 00000000000..2831bf5c73b
--- /dev/null
+++ b/contrib/murmurhash/src/murmurhash3.cpp
@@ -0,0 +1,331 @@
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+#include "murmurhash3.h"
+
+//-----------------------------------------------------------------------------
+// Platform-specific functions and macros
+
+// Microsoft Visual Studio
+
+#if defined(_MSC_VER)
+
+#define FORCE_INLINE    __forceinline
+
+#include <stdlib.h>
+
+#define ROTL32(x,y)     _rotl(x,y)
+#define ROTL64(x,y)     _rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x)
+
+// Other compilers
+
+#else // defined(_MSC_VER)
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
+inline uint32_t rotl32 ( uint32_t x, int8_t r )
+{
+  return (x << r) | (x >> (32 - r));
+}
+
+inline uint64_t rotl64 ( uint64_t x, int8_t r )
+{
+  return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL32(x,y)     rotl32(x,y)
+#define ROTL64(x,y)     rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+#endif // !defined(_MSC_VER)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
+{
+  return p[i];
+}
+
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
+{
+  return p[i];
+}
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+FORCE_INLINE uint32_t fmix32 ( uint32_t h )
+{
+  h ^= h >> 16;
+  h *= 0x85ebca6b;
+  h ^= h >> 13;
+  h *= 0xc2b2ae35;
+  h ^= h >> 16;
+
+  return h;
+}
+
+//----------
+
+FORCE_INLINE uint64_t fmix64 ( uint64_t k )
+{
+  k ^= k >> 33;
+  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
+  k ^= k >> 33;
+  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
+  k ^= k >> 33;
+
+  return k;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_32 ( const void * key, int len,
+                          uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 4;
+
+  uint32_t h1 = seed;
+
+  const uint32_t c1 = 0xcc9e2d51;
+  const uint32_t c2 = 0x1b873593;
+
+  //----------
+  // body
+
+  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
+
+  for(int i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock32(blocks,i);
+
+    k1 *= c1;
+    k1 = ROTL32(k1,15);
+    k1 *= c2;
+
+    h1 ^= k1;
+    h1 = ROTL32(h1,13);
+    h1 = h1*5+0xe6546b64;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+  uint32_t k1 = 0;
+
+  switch(len & 3)
+  {
+  case 3: k1 ^= tail[2] << 16;
+  case 2: k1 ^= tail[1] << 8;
+  case 1: k1 ^= tail[0];
+          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len;
+
+  h1 = fmix32(h1);
+
+  *(uint32_t*)out = h1;
+}
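A note on the block loop above, since the idiom is easy to misread: `blocks` points one element past the last whole 4-byte block, and the negative indices -nblocks..-1 walk the blocks front to back. A plain forward formulation that reads the same words in the same order (illustrative sketch only, not part of the patch; the helper name is invented):

    #include <cstdint>

    // Equivalent body phase of MurmurHash3_x86_32, written as a forward walk;
    // h1 carries the running hash state (the seed on the first call).
    static uint32_t murmur3_body_forward(const uint8_t * data, int nblocks, uint32_t h1)
    {
        const uint32_t c1 = 0xcc9e2d51;
        const uint32_t c2 = 0x1b873593;
        const uint32_t * blocks = reinterpret_cast<const uint32_t *>(data);

        for (int i = 0; i < nblocks; i++)
        {
            uint32_t k1 = blocks[i]; // == getblock32(blocks + nblocks, i - nblocks)
            k1 *= c1;
            k1 = (k1 << 15) | (k1 >> 17); // ROTL32(k1,15)
            k1 *= c2;

            h1 ^= k1;
            h1 = (h1 << 13) | (h1 >> 19); // ROTL32(h1,13)
            h1 = h1 * 5 + 0xe6546b64;
        }
        return h1;
    }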
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x86_128 ( const void * key, const int len,
+                           uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 16;
+
+  uint32_t h1 = seed;
+  uint32_t h2 = seed;
+  uint32_t h3 = seed;
+  uint32_t h4 = seed;
+
+  const uint32_t c1 = 0x239b961b;
+  const uint32_t c2 = 0xab0e9789;
+  const uint32_t c3 = 0x38b34ae5;
+  const uint32_t c4 = 0xa1e38b93;
+
+  //----------
+  // body
+
+  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);
+
+  for(int i = -nblocks; i; i++)
+  {
+    uint32_t k1 = getblock32(blocks,i*4+0);
+    uint32_t k2 = getblock32(blocks,i*4+1);
+    uint32_t k3 = getblock32(blocks,i*4+2);
+    uint32_t k4 = getblock32(blocks,i*4+3);
+
+    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+
+    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;
+
+    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;
+
+    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;
+
+    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+  uint32_t k1 = 0;
+  uint32_t k2 = 0;
+  uint32_t k3 = 0;
+  uint32_t k4 = 0;
+
+  switch(len & 15)
+  {
+  case 15: k4 ^= tail[14] << 16;
+  case 14: k4 ^= tail[13] << 8;
+  case 13: k4 ^= tail[12] << 0;
+           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
+
+  case 12: k3 ^= tail[11] << 24;
+  case 11: k3 ^= tail[10] << 16;
+  case 10: k3 ^= tail[ 9] << 8;
+  case  9: k3 ^= tail[ 8] << 0;
+           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
+
+  case  8: k2 ^= tail[ 7] << 24;
+  case  7: k2 ^= tail[ 6] << 16;
+  case  6: k2 ^= tail[ 5] << 8;
+  case  5: k2 ^= tail[ 4] << 0;
+           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
+
+  case  4: k1 ^= tail[ 3] << 24;
+  case  3: k1 ^= tail[ 2] << 16;
+  case  2: k1 ^= tail[ 1] << 8;
+  case  1: k1 ^= tail[ 0] << 0;
+           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
+
+  h1 += h2; h1 += h3; h1 += h4;
+  h2 += h1; h3 += h1; h4 += h1;
+
+  h1 = fmix32(h1);
+  h2 = fmix32(h2);
+  h3 = fmix32(h3);
+  h4 = fmix32(h4);
+
+  h1 += h2; h1 += h3; h1 += h4;
+  h2 += h1; h3 += h1; h4 += h1;
+
+  ((uint32_t*)out)[0] = h1;
+  ((uint32_t*)out)[1] = h2;
+  ((uint32_t*)out)[2] = h3;
+  ((uint32_t*)out)[3] = h4;
+}
+
+//-----------------------------------------------------------------------------
+
+void MurmurHash3_x64_128 ( const void * key, const int len,
+                           const uint32_t seed, void * out )
+{
+  const uint8_t * data = (const uint8_t*)key;
+  const int nblocks = len / 16;
+
+  uint64_t h1 = seed;
+  uint64_t h2 = seed;
+
+  const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
+  const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);
+
+  //----------
+  // body
+
+  const uint64_t * blocks = (const uint64_t *)(data);
+
+  for(int i = 0; i < nblocks; i++)
+  {
+    uint64_t k1 = getblock64(blocks,i*2+0);
+    uint64_t k2 = getblock64(blocks,i*2+1);
+
+    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+
+    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;
+
+    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
+  }
+
+  //----------
+  // tail
+
+  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);
+
+  uint64_t k1 = 0;
+  uint64_t k2 = 0;
+
+  switch(len & 15)
+  {
+  case 15: k2 ^= ((uint64_t)tail[14]) << 48;
+  case 14: k2 ^= ((uint64_t)tail[13]) << 40;
+  case 13: k2 ^= ((uint64_t)tail[12]) << 32;
+  case 12: k2 ^= ((uint64_t)tail[11]) << 24;
+  case 11: k2 ^= ((uint64_t)tail[10]) << 16;
+  case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
+  case  9: k2 ^= ((uint64_t)tail[ 8]) << 0;
+           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
+
+  case  8: k1 ^= ((uint64_t)tail[ 7]) << 56;
+  case  7: k1 ^= ((uint64_t)tail[ 6]) << 48;
+  case  6: k1 ^= ((uint64_t)tail[ 5]) << 40;
+  case  5: k1 ^= ((uint64_t)tail[ 4]) << 32;
+  case  4: k1 ^= ((uint64_t)tail[ 3]) << 24;
+  case  3: k1 ^= ((uint64_t)tail[ 2]) << 16;
+  case  2: k1 ^= ((uint64_t)tail[ 1]) << 8;
+  case  1: k1 ^= ((uint64_t)tail[ 0]) << 0;
+           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
+  };
+
+  //----------
+  // finalization
+
+  h1 ^= len; h2 ^= len;
+
+  h1 += h2;
+  h2 += h1;
+
+ h1 = fmix64(h1); + h2 = fmix64(h2); + + h1 += h2; + h2 += h1; + + ((uint64_t*)out)[0] = h1; + ((uint64_t*)out)[1] = h2; +} diff --git a/contrib/poco b/contrib/poco index 3a2d0a833a2..4ab45bc3bb0 160000 --- a/contrib/poco +++ b/contrib/poco @@ -1 +1 @@ -Subproject commit 3a2d0a833a22ef5e1164a9ada54e3253cb038904 +Subproject commit 4ab45bc3bb0d2c476ea5385ec2d398c6bfc9f089 diff --git a/contrib/re2_st/CMakeLists.txt b/contrib/re2_st/CMakeLists.txt index cd0f97e08f3..79362f4bb56 100644 --- a/contrib/re2_st/CMakeLists.txt +++ b/contrib/re2_st/CMakeLists.txt @@ -16,22 +16,22 @@ target_include_directories (re2_st PRIVATE . PUBLIC ${CMAKE_CURRENT_BINARY_DIR} file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/re2_st) foreach (FILENAME filtered_re2.h re2.h set.h stringpiece.h) - add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" - COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/re2/${FILENAME}" - -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" - -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" - COMMENT "Creating ${FILENAME} for re2_st library.") - add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}") - add_dependencies (re2_st transform_${FILENAME}) + add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" + COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/re2/${FILENAME}" + -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}" + -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" + COMMENT "Creating ${FILENAME} for re2_st library.") + add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/re2_st/${FILENAME}") + add_dependencies (re2_st transform_${FILENAME}) endforeach () file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/util) foreach (FILENAME mutex.h) - add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" - COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/util/${FILENAME}" - -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" - -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" - COMMENT "Creating ${FILENAME} for re2_st library.") - add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}") - add_dependencies (re2_st transform_${FILENAME}) + add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" + COMMAND ${CMAKE_COMMAND} -DSOURCE_FILENAME="${RE2_SOURCE_DIR}/util/${FILENAME}" + -DTARGET_FILENAME="${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}" + -P "${CMAKE_CURRENT_SOURCE_DIR}/re2_transform.cmake" + COMMENT "Creating ${FILENAME} for re2_st library.") + add_custom_target (transform_${FILENAME} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/util/${FILENAME}") + add_dependencies (re2_st transform_${FILENAME}) endforeach () diff --git a/contrib/ssl b/contrib/ssl index 6fbe1c6f404..4f9a7b87451 160000 --- a/contrib/ssl +++ b/contrib/ssl @@ -1 +1 @@ -Subproject commit 6fbe1c6f404193989c5f6a63115d80fbe34ce2a3 +Subproject commit 4f9a7b8745184410dc0b31ba548ce21ac64edd9c diff --git a/contrib/unixodbc b/contrib/unixodbc new file mode 160000 index 00000000000..b0ad30f7f62 --- /dev/null +++ b/contrib/unixodbc @@ -0,0 +1 @@ +Subproject commit b0ad30f7f6289c12b76f04bfb9d466374bb32168 diff --git a/contrib/unixodbc-cmake/CMakeLists.txt b/contrib/unixodbc-cmake/CMakeLists.txt new file mode 100644 index 00000000000..4f9f6b41538 --- /dev/null +++ b/contrib/unixodbc-cmake/CMakeLists.txt @@ -0,0 +1,288 @@ +set(ODBC_SOURCE_DIR 
${CMAKE_SOURCE_DIR}/contrib/unixodbc) +set(ODBC_BINARY_DIR ${CMAKE_BINARY_DIR}/contrib/unixodbc) + + +set(SRCS +${ODBC_SOURCE_DIR}/libltdl/lt__alloc.c +${ODBC_SOURCE_DIR}/libltdl/lt__strl.c +${ODBC_SOURCE_DIR}/libltdl/ltdl.c +${ODBC_SOURCE_DIR}/libltdl/lt_dlloader.c +${ODBC_SOURCE_DIR}/libltdl/slist.c +${ODBC_SOURCE_DIR}/libltdl/lt_error.c +${ODBC_SOURCE_DIR}/libltdl/loaders/dlopen.c +${ODBC_SOURCE_DIR}/libltdl/loaders/preopen.c +#${ODBC_SOURCE_DIR}/libltdl/lt__dirent.c +#${ODBC_SOURCE_DIR}/libltdl/lt__argz.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/dld_link.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/load_add_on.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/shl_load.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/loadlibrary.c +#${ODBC_SOURCE_DIR}/libltdl/loaders/dyld.c + +# This file is generated by 'libtool' inside libltdl directory and then removed. +${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libltdl/libltdlcS.c +) + +add_library(ltdl STATIC ${SRCS}) + +target_include_directories(ltdl PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/libltdl) +target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl) +target_include_directories(ltdl PUBLIC ${ODBC_SOURCE_DIR}/libltdl/libltdl) + +target_compile_definitions(ltdl PRIVATE -DHAVE_CONFIG_H -DLTDL -DLTDLOPEN=libltdlc) + +target_compile_options(ltdl PRIVATE -Wno-constant-logical-operand -Wno-unknown-warning-option -O2) + + +set(SRCS +${ODBC_SOURCE_DIR}/DriverManager/__attribute.c +${ODBC_SOURCE_DIR}/DriverManager/__connection.c +${ODBC_SOURCE_DIR}/DriverManager/__handles.c +${ODBC_SOURCE_DIR}/DriverManager/__info.c +${ODBC_SOURCE_DIR}/DriverManager/__stats.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocEnv.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocHandle.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocHandleStd.c +${ODBC_SOURCE_DIR}/DriverManager/SQLAllocStmt.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBindCol.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBindParam.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBindParameter.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBrowseConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBrowseConnectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLBulkOperations.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCancel.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCancelHandle.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCloseCursor.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttribute.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributes.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColAttributeW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumnPrivileges.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumnPrivilegesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumns.c +${ODBC_SOURCE_DIR}/DriverManager/SQLColumnsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLConnectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLCopyDesc.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDataSources.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDataSourcesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeCol.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeColW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDescribeParam.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDisconnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDriverConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDriverConnectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDrivers.c +${ODBC_SOURCE_DIR}/DriverManager/SQLDriversW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLEndTran.c +${ODBC_SOURCE_DIR}/DriverManager/SQLError.c +${ODBC_SOURCE_DIR}/DriverManager/SQLErrorW.c 
+${ODBC_SOURCE_DIR}/DriverManager/SQLExecDirect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExecDirectW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExecute.c +${ODBC_SOURCE_DIR}/DriverManager/SQLExtendedFetch.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFetch.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFetchScroll.c +${ODBC_SOURCE_DIR}/DriverManager/SQLForeignKeys.c +${ODBC_SOURCE_DIR}/DriverManager/SQLForeignKeysW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeConnect.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeEnv.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeHandle.c +${ODBC_SOURCE_DIR}/DriverManager/SQLFreeStmt.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectOption.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetConnectOptionW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetCursorName.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetCursorNameW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetData.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescField.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescFieldW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescRec.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDescRecW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagField.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagFieldW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagRec.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetDiagRecW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetEnvAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetFunctions.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetInfo.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetInfoW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetStmtOption.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetTypeInfo.c +${ODBC_SOURCE_DIR}/DriverManager/SQLGetTypeInfoW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLMoreResults.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNativeSql.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNativeSqlW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNumParams.c +${ODBC_SOURCE_DIR}/DriverManager/SQLNumResultCols.c +${ODBC_SOURCE_DIR}/DriverManager/SQLParamData.c +${ODBC_SOURCE_DIR}/DriverManager/SQLParamOptions.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrepare.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrepareW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrimaryKeys.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPrimaryKeysW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProcedureColumns.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProcedureColumnsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProcedures.c +${ODBC_SOURCE_DIR}/DriverManager/SQLProceduresW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLPutData.c +${ODBC_SOURCE_DIR}/DriverManager/SQLRowCount.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectOption.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetConnectOptionW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetCursorName.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetCursorNameW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescField.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescFieldW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetDescRec.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetEnvAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetParam.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetPos.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetScrollOptions.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtAttr.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtAttrW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtOption.c 
+${ODBC_SOURCE_DIR}/DriverManager/SQLSetStmtOptionW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSpecialColumns.c +${ODBC_SOURCE_DIR}/DriverManager/SQLSpecialColumnsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLStatistics.c +${ODBC_SOURCE_DIR}/DriverManager/SQLStatisticsW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTablePrivileges.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTablePrivilegesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTables.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTablesW.c +${ODBC_SOURCE_DIR}/DriverManager/SQLTransact.c + +${ODBC_SOURCE_DIR}/odbcinst/_logging.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_ConfigModeINI.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTConstructProperties.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTDestructProperties.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_GetEntries.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_GetSections.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTSetProperty.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_SystemINI.c +${ODBC_SOURCE_DIR}/odbcinst/_odbcinst_UserINI.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTValidateProperties.c +${ODBC_SOURCE_DIR}/odbcinst/ODBCINSTValidateProperty.c +${ODBC_SOURCE_DIR}/odbcinst/SQLConfigDataSource.c +${ODBC_SOURCE_DIR}/odbcinst/SQLConfigDriver.c +${ODBC_SOURCE_DIR}/odbcinst/SQLCreateDataSource.c +${ODBC_SOURCE_DIR}/odbcinst/_SQLDriverConnectPrompt.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetAvailableDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetConfigMode.c +${ODBC_SOURCE_DIR}/odbcinst/_SQLGetInstalledDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetInstalledDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetPrivateProfileString.c +${ODBC_SOURCE_DIR}/odbcinst/SQLGetTranslator.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallDriverEx.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallDriverManager.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallerError.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallODBC.c +${ODBC_SOURCE_DIR}/odbcinst/SQLInstallTranslatorEx.c +${ODBC_SOURCE_DIR}/odbcinst/SQLManageDataSources.c +${ODBC_SOURCE_DIR}/odbcinst/SQLPostInstallerError.c +${ODBC_SOURCE_DIR}/odbcinst/SQLReadFileDSN.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDriver.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDriverManager.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveDSNFromIni.c +${ODBC_SOURCE_DIR}/odbcinst/SQLRemoveTranslator.c +${ODBC_SOURCE_DIR}/odbcinst/SQLSetConfigMode.c +${ODBC_SOURCE_DIR}/odbcinst/SQLValidDSN.c +${ODBC_SOURCE_DIR}/odbcinst/SQLWriteDSNToIni.c +${ODBC_SOURCE_DIR}/odbcinst/SQLWriteFileDSN.c +${ODBC_SOURCE_DIR}/odbcinst/_SQLWriteInstalledDrivers.c +${ODBC_SOURCE_DIR}/odbcinst/SQLWritePrivateProfileString.c + +${ODBC_SOURCE_DIR}/ini/iniAllTrim.c +${ODBC_SOURCE_DIR}/ini/iniAppend.c +${ODBC_SOURCE_DIR}/ini/iniClose.c +${ODBC_SOURCE_DIR}/ini/iniCommit.c +${ODBC_SOURCE_DIR}/ini/iniCursor.c +${ODBC_SOURCE_DIR}/ini/iniDelete.c +${ODBC_SOURCE_DIR}/ini/_iniDump.c +${ODBC_SOURCE_DIR}/ini/iniElement.c +${ODBC_SOURCE_DIR}/ini/iniElementCount.c +${ODBC_SOURCE_DIR}/ini/iniGetBookmark.c +${ODBC_SOURCE_DIR}/ini/iniGotoBookmark.c +${ODBC_SOURCE_DIR}/ini/iniObject.c +${ODBC_SOURCE_DIR}/ini/iniObjectDelete.c +${ODBC_SOURCE_DIR}/ini/iniObjectEOL.c +${ODBC_SOURCE_DIR}/ini/iniObjectFirst.c +${ODBC_SOURCE_DIR}/ini/iniObjectInsert.c +${ODBC_SOURCE_DIR}/ini/iniObjectLast.c +${ODBC_SOURCE_DIR}/ini/iniObjectNext.c +${ODBC_SOURCE_DIR}/ini/_iniObjectRead.c +${ODBC_SOURCE_DIR}/ini/iniObjectSeek.c +${ODBC_SOURCE_DIR}/ini/iniObjectSeekSure.c +${ODBC_SOURCE_DIR}/ini/iniObjectUpdate.c +${ODBC_SOURCE_DIR}/ini/iniOpen.c +${ODBC_SOURCE_DIR}/ini/iniProperty.c +${ODBC_SOURCE_DIR}/ini/iniPropertyDelete.c +${ODBC_SOURCE_DIR}/ini/iniPropertyEOL.c 
+${ODBC_SOURCE_DIR}/ini/iniPropertyFirst.c +${ODBC_SOURCE_DIR}/ini/iniPropertyInsert.c +${ODBC_SOURCE_DIR}/ini/iniPropertyLast.c +${ODBC_SOURCE_DIR}/ini/iniPropertyNext.c +${ODBC_SOURCE_DIR}/ini/_iniPropertyRead.c +${ODBC_SOURCE_DIR}/ini/iniPropertySeek.c +${ODBC_SOURCE_DIR}/ini/iniPropertySeekSure.c +${ODBC_SOURCE_DIR}/ini/iniPropertyUpdate.c +${ODBC_SOURCE_DIR}/ini/iniPropertyValue.c +${ODBC_SOURCE_DIR}/ini/_iniScanUntilObject.c +${ODBC_SOURCE_DIR}/ini/iniToUpper.c +${ODBC_SOURCE_DIR}/ini/iniValue.c + +${ODBC_SOURCE_DIR}/log/logClear.c +${ODBC_SOURCE_DIR}/log/logClose.c +${ODBC_SOURCE_DIR}/log/_logFreeMsg.c +${ODBC_SOURCE_DIR}/log/logOn.c +${ODBC_SOURCE_DIR}/log/logOpen.c +${ODBC_SOURCE_DIR}/log/logPeekMsg.c +${ODBC_SOURCE_DIR}/log/logPopMsg.c +${ODBC_SOURCE_DIR}/log/logPushMsg.c + +${ODBC_SOURCE_DIR}/lst/_lstAdjustCurrent.c +${ODBC_SOURCE_DIR}/lst/lstAppend.c +${ODBC_SOURCE_DIR}/lst/lstClose.c +${ODBC_SOURCE_DIR}/lst/lstDelete.c +${ODBC_SOURCE_DIR}/lst/_lstDump.c +${ODBC_SOURCE_DIR}/lst/lstEOL.c +${ODBC_SOURCE_DIR}/lst/lstFirst.c +${ODBC_SOURCE_DIR}/lst/_lstFreeItem.c +${ODBC_SOURCE_DIR}/lst/lstGetBookMark.c +${ODBC_SOURCE_DIR}/lst/lstGet.c +${ODBC_SOURCE_DIR}/lst/lstGotoBookMark.c +${ODBC_SOURCE_DIR}/lst/lstGoto.c +${ODBC_SOURCE_DIR}/lst/lstInsert.c +${ODBC_SOURCE_DIR}/lst/lstLast.c +${ODBC_SOURCE_DIR}/lst/lstNext.c +${ODBC_SOURCE_DIR}/lst/_lstNextValidItem.c +${ODBC_SOURCE_DIR}/lst/lstOpen.c +${ODBC_SOURCE_DIR}/lst/lstOpenCursor.c +${ODBC_SOURCE_DIR}/lst/lstPrev.c +${ODBC_SOURCE_DIR}/lst/_lstPrevValidItem.c +${ODBC_SOURCE_DIR}/lst/lstSeek.c +${ODBC_SOURCE_DIR}/lst/lstSeekItem.c +${ODBC_SOURCE_DIR}/lst/lstSet.c +${ODBC_SOURCE_DIR}/lst/lstSetFreeFunc.c +${ODBC_SOURCE_DIR}/lst/_lstVisible.c +) + +add_library(unixodbc STATIC ${SRCS}) + +target_link_libraries(unixodbc ltdl) + +# SYSTEM_FILE_PATH was changed to /etc + +target_include_directories(unixodbc PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64/private) +target_include_directories(unixodbc PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/linux_x86_64) +target_include_directories(unixodbc PUBLIC ${ODBC_SOURCE_DIR}/include) + +target_compile_definitions(unixodbc PRIVATE -DHAVE_CONFIG_H) + +target_compile_options(unixodbc PRIVATE -Wno-dangling-else -Wno-parentheses -Wno-misleading-indentation -Wno-unknown-warning-option -O2) diff --git a/contrib/unixodbc-cmake/linux_x86_64/libltdl/config.h b/contrib/unixodbc-cmake/linux_x86_64/libltdl/config.h new file mode 100644 index 00000000000..194779b2b98 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/libltdl/config.h @@ -0,0 +1,181 @@ +/* config.h. Generated from config-h.in by configure. */ +/* config-h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the `argz_add' function. */ +#define HAVE_ARGZ_ADD 1 + +/* Define to 1 if you have the `argz_append' function. */ +#define HAVE_ARGZ_APPEND 1 + +/* Define to 1 if you have the `argz_count' function. */ +#define HAVE_ARGZ_COUNT 1 + +/* Define to 1 if you have the `argz_create_sep' function. */ +#define HAVE_ARGZ_CREATE_SEP 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARGZ_H 1 + +/* Define to 1 if you have the `argz_insert' function. */ +#define HAVE_ARGZ_INSERT 1 + +/* Define to 1 if you have the `argz_next' function. */ +#define HAVE_ARGZ_NEXT 1 + +/* Define to 1 if you have the `argz_stringify' function. */ +#define HAVE_ARGZ_STRINGIFY 1 + +/* Define to 1 if you have the `closedir' function. 
*/ +#define HAVE_CLOSEDIR 1 + +/* Define to 1 if you have the declaration of `cygwin_conv_path', and to 0 if + you don't. */ +/* #undef HAVE_DECL_CYGWIN_CONV_PATH */ + +/* Define to 1 if you have the header file. */ +#define HAVE_DIRENT_H 1 + +/* Define if you have the GNU dld library. */ +/* #undef HAVE_DLD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLD_H */ + +/* Define to 1 if you have the `dlerror' function. */ +#define HAVE_DLERROR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define if you have the _dyld_func_lookup function. */ +/* #undef HAVE_DYLD */ + +/* Define to 1 if the system has the type `error_t'. */ +#define HAVE_ERROR_T 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have the libdl library or equivalent. */ +#define HAVE_LIBDL 1 + +/* Define if libdlloader will be built on this platform */ +#define HAVE_LIBDLLOADER 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MACH_O_DYLD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the `opendir' function. */ +#define HAVE_OPENDIR 1 + +/* Define if libtool can extract symbol lists from object files. */ +#define HAVE_PRELOADED_SYMBOLS 1 + +/* Define to 1 if you have the `readdir' function. */ +#define HAVE_READDIR 1 + +/* Define if you have the shl_load function. */ +/* #undef HAVE_SHL_LOAD */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strlcat' function. */ +/* #undef HAVE_STRLCAT */ + +/* Define to 1 if you have the `strlcpy' function. */ +/* #undef HAVE_STRLCPY */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DL_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* This value is set to 1 to indicate that the system argz facility works */ +#define HAVE_WORKING_ARGZ 1 + +/* Define if the OS needs help to load dependent libraries for dlopen(). */ +/* #undef LTDL_DLOPEN_DEPLIBS */ + +/* Define to the system default library search path. */ +#define LT_DLSEARCH_PATH "/lib:/usr/lib:/usr/lib/x86_64-linux-gnu/libfakeroot:/usr/local/lib:/usr/local/lib/x86_64-linux-gnu:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/lib32:/usr/lib32" + +/* The archive extension */ +#define LT_LIBEXT "a" + +/* The archive prefix */ +#define LT_LIBPREFIX "lib" + +/* Define to the extension used for runtime loadable modules, say, ".so". */ +#define LT_MODULE_EXT ".so" + +/* Define to the name of the environment variable that determines the run-time + module search path. */ +#define LT_MODULE_PATH_VAR "LD_LIBRARY_PATH" + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Define to the shared library suffix, say, ".dylib". */ +/* #undef LT_SHARED_EXT */ + +/* Define to the shared archive member specification, say "(shr.o)". */ +/* #undef LT_SHARED_LIB_MEMBER */ + +/* Define if dlsym() requires a leading underscore in symbol names. 
*/ +/* #undef NEED_USCORE */ + +/* Name of package */ +#define PACKAGE "libltdl" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "bug-libtool@gnu.org" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libltdl" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libltdl 2.4.3a" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libltdl" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "2.4.3a" + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Version number of package */ +#define VERSION "2.4.3a" + +/* Define so that glibc/gnulib argp.h does not typedef error_t. */ +/* #undef __error_t_defined */ + +/* Define to a type to use for 'error_t' if it is not otherwise available. */ +/* #undef error_t */ diff --git a/contrib/unixodbc-cmake/linux_x86_64/libltdl/libltdlcS.c b/contrib/unixodbc-cmake/linux_x86_64/libltdl/libltdlcS.c new file mode 100644 index 00000000000..ca866eb5986 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/libltdl/libltdlcS.c @@ -0,0 +1,53 @@ +/* libltdlcS.c - symbol resolution table for 'libltdlc' dlsym emulation. */ +/* Generated by libtool (GNU libtool) 2.4.6 */ + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored "-Wstrict-prototypes" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. */ +extern int dlopen_LTX_get_vtable(); + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_libltdlc_LTX_preloaded_symbols[]; +LT_DLSYM_CONST lt_dlsymlist +lt_libltdlc_LTX_preloaded_symbols[] = +{ {"libltdlc", (void *) 0}, + {"dlopen.a", (void *) 0}, + {"dlopen_LTX_get_vtable", (void *) &dlopen_LTX_get_vtable}, + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_libltdlc_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif diff --git a/contrib/unixodbc-cmake/linux_x86_64/private/config.h b/contrib/unixodbc-cmake/linux_x86_64/private/config.h new file mode 100644 index 00000000000..d80a4da4665 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/private/config.h @@ -0,0 +1,496 @@ +/* config.h. Generated from config.h.in by configure. */ +/* config.h.in. Generated from configure.ac by autoheader. */ + +/* Encoding to use for CHAR */ +#define ASCII_ENCODING "auto-search" + +/* Install bindir */ +#define BIN_PREFIX "/usr/local/bin" + +/* Use a semaphore to allow ODBCConfig to display running counts */ +/* #undef COLLECT_STATS */ + +/* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP + systems. 
This function is required for `alloca.c' support on those systems. + */ +/* #undef CRAY_STACKSEG_END */ + +/* Define to 1 if using `alloca.c'. */ +/* #undef C_ALLOCA */ + +/* Lib directory */ +#define DEFLIB_PATH "/usr/local/lib" + +/* Using ini cacheing */ +#define ENABLE_INI_CACHING /**/ + +/* Install exec_prefix */ +#define EXEC_PREFIX "/usr/local" + +/* Disable the precise but slow checking of the validity of handles */ +/* #undef FAST_HANDLE_VALIDATE */ + +/* Define to 1 if you have `alloca', as a function or macro. */ +#define HAVE_ALLOCA 1 + +/* Define to 1 if you have and it should be used (not on Ultrix). + */ +#define HAVE_ALLOCA_H 1 + +/* Define to 1 if you have the `argz_add' function. */ +#define HAVE_ARGZ_ADD 1 + +/* Define to 1 if you have the `argz_append' function. */ +#define HAVE_ARGZ_APPEND 1 + +/* Define to 1 if you have the `argz_count' function. */ +#define HAVE_ARGZ_COUNT 1 + +/* Define to 1 if you have the `argz_create_sep' function. */ +#define HAVE_ARGZ_CREATE_SEP 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_ARGZ_H 1 + +/* Define to 1 if you have the `argz_insert' function. */ +#define HAVE_ARGZ_INSERT 1 + +/* Define to 1 if you have the `argz_next' function. */ +#define HAVE_ARGZ_NEXT 1 + +/* Define to 1 if you have the `argz_stringify' function. */ +#define HAVE_ARGZ_STRINGIFY 1 + +/* Define to 1 if you have the `atoll' function. */ +#define HAVE_ATOLL 1 + +/* Define to 1 if you have the `closedir' function. */ +#define HAVE_CLOSEDIR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_CRYPT_H 1 + +/* Define to 1 if you have the declaration of `cygwin_conv_path', and to 0 if + you don't. */ +/* #undef HAVE_DECL_CYGWIN_CONV_PATH */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +#define HAVE_DIRENT_H 1 + +/* Define if you have the GNU dld library. */ +/* #undef HAVE_DLD */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLD_H */ + +/* Define to 1 if you have the `dlerror' function. */ +#define HAVE_DLERROR 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DL_H */ + +/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ +/* #undef HAVE_DOPRNT */ + +/* Define if you have the _dyld_func_lookup function. */ +/* #undef HAVE_DYLD */ + +/* Define to 1 if you have the `endpwent' function. */ +#define HAVE_ENDPWENT 1 + +/* Define to 1 if the system has the type `error_t'. */ +#define HAVE_ERROR_T 1 + +/* Define to 1 if you have the `ftime' function. */ +#define HAVE_FTIME 1 + +/* Define to 1 if you have the `ftok' function. */ +/* #undef HAVE_FTOK */ + +/* Define to 1 if you have the `getpwuid' function. */ +#define HAVE_GETPWUID 1 + +/* Define to 1 if you have the `gettimeofday' function. */ +#define HAVE_GETTIMEOFDAY 1 + +/* Define to 1 if you have the `getuid' function. */ +#define HAVE_GETUID 1 + +/* Define if you have the iconv() function. */ +#define HAVE_ICONV 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if you have and nl_langinfo(CODESET). */ +#define HAVE_LANGINFO_CODESET 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_LANGINFO_H 1 + +/* Add -lcrypt to lib list */ +#define HAVE_LIBCRYPT /**/ + +/* Define if you have the libdl library or equivalent. 
*/ +#define HAVE_LIBDL 1 + +/* Define if libdlloader will be built on this platform */ +#define HAVE_LIBDLLOADER 1 + +/* Use the -lpth thread library */ +/* #undef HAVE_LIBPTH */ + +/* Use -lpthread threading lib */ +#define HAVE_LIBPTHREAD 1 + +/* Use the -lthread threading lib */ +/* #undef HAVE_LIBTHREAD */ + +/* Define to 1 if you have the header file. */ +#define HAVE_LIMITS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_LOCALE_H 1 + +/* Use rentrant version of localtime */ +#define HAVE_LOCALTIME_R 1 + +/* Define if you have long long */ +#define HAVE_LONG_LONG 1 + +/* Define this if a modern libltdl is already installed */ +#define HAVE_LTDL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MACH_O_DYLD_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MSQL_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. */ +/* #undef HAVE_NDIR_H */ + +/* Define to 1 if you have the `nl_langinfo' function. */ +#define HAVE_NL_LANGINFO 1 + +/* Define to 1 if you have the `opendir' function. */ +#define HAVE_OPENDIR 1 + +/* Define if libtool can extract symbol lists from object files. */ +#define HAVE_PRELOADED_SYMBOLS 1 + +/* Define to 1 if the system has the type `ptrdiff_t'. */ +#define HAVE_PTRDIFF_T 1 + +/* Define to 1 if you have the `putenv' function. */ +#define HAVE_PUTENV 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PWD_H 1 + +/* Define to 1 if you have the `readdir' function. */ +#define HAVE_READDIR 1 + +/* Add readline support */ +#define HAVE_READLINE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_READLINE_HISTORY_H 1 + +/* Use the scandir lib */ +/* #undef HAVE_SCANDIR */ + +/* Define to 1 if you have the `semget' function. */ +/* #undef HAVE_SEMGET */ + +/* Define to 1 if you have the `semop' function. */ +/* #undef HAVE_SEMOP */ + +/* Define to 1 if you have the `setenv' function. */ +#define HAVE_SETENV 1 + +/* Define to 1 if you have the `setlocale' function. */ +#define HAVE_SETLOCALE 1 + +/* Define if you have the shl_load function. */ +/* #undef HAVE_SHL_LOAD */ + +/* Define to 1 if you have the `shmget' function. */ +/* #undef HAVE_SHMGET */ + +/* Define to 1 if you have the `snprintf' function. */ +/* #undef HAVE_SNPRINTF */ + +/* Define to 1 if you have the `socket' function. */ +#define HAVE_SOCKET 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDARG_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDDEF_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the `strcasecmp' function. */ +#define HAVE_STRCASECMP 1 + +/* Define to 1 if you have the `strchr' function. */ +#define HAVE_STRCHR 1 + +/* Define to 1 if you have the `strdup' function. */ +#define HAVE_STRDUP 1 + +/* Define to 1 if you have the `stricmp' function. */ +/* #undef HAVE_STRICMP */ + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `strlcat' function. */ +/* #undef HAVE_STRLCAT */ + +/* Define to 1 if you have the `strlcpy' function. */ +/* #undef HAVE_STRLCPY */ + +/* Define to 1 if you have the `strncasecmp' function. 
*/ +#define HAVE_STRNCASECMP 1 + +/* Define to 1 if you have the `strnicmp' function. */ +/* #undef HAVE_STRNICMP */ + +/* Define to 1 if you have the `strstr' function. */ +#define HAVE_STRSTR 1 + +/* Define to 1 if you have the `strtol' function. */ +#define HAVE_STRTOL 1 + +/* Define to 1 if you have the `strtoll' function. */ +#define HAVE_STRTOLL 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYNCH_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_DIR_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_DL_H */ + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_MALLOC_H */ + +/* Define to 1 if you have the header file, and it defines `DIR'. + */ +/* #undef HAVE_SYS_NDIR_H */ + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_SEM_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIMEB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the `time' function. */ +#define HAVE_TIME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TIME_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_VARARGS_H */ + +/* Define to 1 if you have the `vprintf' function. */ +#define HAVE_VPRINTF 1 + +/* Define to 1 if you have the `vsnprintf' function. */ +#define HAVE_VSNPRINTF 1 + +/* This value is set to 1 to indicate that the system argz facility works */ +#define HAVE_WORKING_ARGZ 1 + +/* Define as const if the declaration of iconv() needs const. */ +#define ICONV_CONST + +/* Install includedir */ +#define INCLUDE_PREFIX "/usr/local/include" + +/* Lib directory */ +#define LIB_PREFIX "/usr/local/lib" + +/* Define if the OS needs help to load dependent libraries for dlopen(). */ +/* #undef LTDL_DLOPEN_DEPLIBS */ + +/* Define to the system default library search path. */ +#define LT_DLSEARCH_PATH "/lib:/usr/lib:/usr/lib/x86_64-linux-gnu/libfakeroot:/usr/local/lib:/usr/local/lib/x86_64-linux-gnu:/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu:/lib32:/usr/lib32" + +/* The archive extension */ +#define LT_LIBEXT "a" + +/* The archive prefix */ +#define LT_LIBPREFIX "lib" + +/* Define to the extension used for runtime loadable modules, say, ".so". */ +#define LT_MODULE_EXT ".so" + +/* Define to the name of the environment variable that determines the run-time + module search path. */ +#define LT_MODULE_PATH_VAR "LD_LIBRARY_PATH" + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#define LT_OBJDIR ".libs/" + +/* Define to the shared library suffix, say, ".dylib". */ +/* #undef LT_SHARED_EXT */ + +/* Define to the shared archive member specification, say "(shr.o)". */ +/* #undef LT_SHARED_LIB_MEMBER */ + +/* Define if you need semundo union */ +/* #undef NEED_SEMUNDO_UNION */ + +/* Define if dlsym() requires a leading underscore in symbol names. */ +/* #undef NEED_USCORE */ + +/* Using OSX */ +/* #undef OSXHEADER */ + +/* Name of package */ +#define PACKAGE "unixODBC" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "nick@unixodbc.org" + +/* Define to the full name of this package. 
*/ +#define PACKAGE_NAME "unixODBC" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "unixODBC 2.3.6" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "unixODBC" + +/* Define to the home page for this package. */ +#define PACKAGE_URL "" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "2.3.6" + +/* Platform is 64 bit */ +#define PLATFORM64 /**/ + +/* Install prefix */ +#define PREFIX "/usr/local" + +/* Using QNX */ +/* #undef QNX_LIBLTDL */ + +/* Shared lib extension */ +#define SHLIBEXT ".so" + +/* The size of `long', as computed by sizeof. */ +#define SIZEOF_LONG 8 + +/* The size of `long int', as computed by sizeof. */ +#define SIZEOF_LONG_INT 8 + +/* If using the C implementation of alloca, define if you know the + direction of stack growth for your system; otherwise it will be + automatically deduced at runtime. + STACK_DIRECTION > 0 => grows toward higher addresses + STACK_DIRECTION < 0 => grows toward lower addresses + STACK_DIRECTION = 0 => direction of growth unknown */ +/* #undef STACK_DIRECTION */ + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* don't include unixODBC prefix in driver error messages */ +#define STRICT_ODBC_ERROR /**/ + +/* System file path */ +#define SYSTEM_FILE_PATH "/etc" + +/* Lib path */ +#define SYSTEM_LIB_PATH "/usr/local/lib" + +/* Define to 1 if you can safely include both and . */ +#define TIME_WITH_SYS_TIME 1 + +/* Define to 1 if your declares `struct tm'. */ +/* #undef TM_IN_SYS_TIME */ + +/* Encoding to use for UNICODE */ +#define UNICODE_ENCODING "auto-search" + +/* Flag that we are not using another DM */ +#define UNIXODBC /**/ + +/* We are building inside the unixODBC source tree */ +#define UNIXODBC_SOURCE /**/ + +/* Version number of package */ +#define VERSION "2.3.6" + +/* Work with IBM drivers that use 32 bit handles on 64 bit platforms */ +/* #undef WITH_HANDLE_REDIRECT */ + +/* Define to 1 if `lex' declares `yytext' as a `char *' by default, not a + `char[]'. */ +/* #undef YYTEXT_POINTER */ + +/* Build flag for AIX */ +/* #undef _ALL_SOURCE */ + +/* Build flag for AIX */ +/* #undef _LONG_LONG */ + +/* Build flag for AIX */ +/* #undef _THREAD_SAFE */ + +/* Define so that glibc/gnulib argp.h does not typedef error_t. */ +/* #undef __error_t_defined */ + +/* Define to empty if `const' does not conform to ANSI C. */ +/* #undef const */ + +/* Define to a type to use for 'error_t' if it is not otherwise available. */ +/* #undef error_t */ + +/* Define to `int' if doesn't define. */ +/* #undef gid_t */ + +/* Define to `unsigned int' if does not define. */ +/* #undef size_t */ + +/* Define to `int' if doesn't define. */ +/* #undef uid_t */ diff --git a/contrib/unixodbc-cmake/linux_x86_64/unixodbc_conf.h b/contrib/unixodbc-cmake/linux_x86_64/unixodbc_conf.h new file mode 100644 index 00000000000..6597c85cea6 --- /dev/null +++ b/contrib/unixodbc-cmake/linux_x86_64/unixodbc_conf.h @@ -0,0 +1,60 @@ +/* unixodbc_conf.h. Generated from unixodbc_conf.h.in by configure. 
+#ifndef HAVE_UNISTD_H
+#define HAVE_UNISTD_H 1
+#endif
+
+#ifndef HAVE_PWD_H
+#define HAVE_PWD_H 1
+#endif
+
+#ifndef HAVE_SYS_TIME_H
+#define HAVE_SYS_TIME_H 1
+#endif
+
+#ifndef ODBC_STD
+/* #undef ODBC_STD */
+#endif
+
+#ifndef UNICODE
+/* #undef UNICODE */
+#endif
+
+#ifndef GUID_DEFINED
+/* #undef GUID_DEFINED */
+#endif
+
+#ifndef SQL_WCHART_CONVERT
+/* #undef SQL_WCHART_CONVERT */
+#endif
+
+#ifndef HAVE_LONG_LONG
+#define HAVE_LONG_LONG 1
+#endif
+
+#ifndef ODBCINT64_TYPEA
+/* #undef ODBCINT64_TYPEA */
+#endif
+
+#ifndef UODBCINT64_TYPE
+/* #undef UODBCINT64_TYPE */
+#endif
+
+#ifndef DISABLE_INI_CACHING
+/* #undef DISABLE_INI_CACHING */
+#endif
+
+#ifndef SIZEOF_LONG_INT
+#define SIZEOF_LONG_INT 8
+#endif
+
+#ifndef ALLREADY_HAVE_WINDOWS_TYPE
+/* #undef ALLREADY_HAVE_WINDOWS_TYPE */
+#endif
+
+#ifndef DONT_TD_VOID
+/* #undef DONT_TD_VOID */
+#endif
+
+#ifndef DO_YOU_KNOW_WHAT_YOUR_ARE_DOING
+/* #undef DO_YOU_KNOW_WHAT_YOUR_ARE_DOING */
+#endif
diff --git a/dbms/CMakeLists.txt b/dbms/CMakeLists.txt
index eaf21b0b6ac..91d5b7676a2 100644
--- a/dbms/CMakeLists.txt
+++ b/dbms/CMakeLists.txt
@@ -53,6 +53,7 @@ add_headers_and_sources(dbms src/Interpreters/ClusterProxy)
 add_headers_and_sources(dbms src/Columns)
 add_headers_and_sources(dbms src/Storages)
 add_headers_and_sources(dbms src/Storages/Distributed)
+add_headers_and_sources(dbms src/Storages/Kafka)
 add_headers_and_sources(dbms src/Storages/MergeTree)
 add_headers_and_sources(dbms src/Client)
 add_headers_and_sources(dbms src/Formats)
@@ -84,7 +85,7 @@ list (APPEND dbms_headers src/TableFunctions/ITableFunction.h src/TableFunctions
 
 add_library(clickhouse_common_io ${SPLIT_SHARED} ${clickhouse_common_io_headers} ${clickhouse_common_io_sources})
 
-if (ARCH_FREEBSD)
+if (OS_FREEBSD)
     target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST)
 endif ()
@@ -144,6 +145,7 @@ target_link_libraries (clickhouse_common_io
         ${EXECINFO_LIBRARY}
         ${ELF_LIBRARY}
         ${Boost_SYSTEM_LIBRARY}
+        apple_rt
         ${CMAKE_DL_LIBS}
 )
@@ -244,8 +246,6 @@ add_subdirectory (programs)
 add_subdirectory (tests)
 
 if (ENABLE_TESTS)
-    include (${ClickHouse_SOURCE_DIR}/cmake/find_gtest.cmake)
-
     if (USE_INTERNAL_GTEST_LIBRARY)
         # Google Test from sources
         add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
diff --git a/dbms/cmake/version.cmake b/dbms/cmake/version.cmake
index 131e6f26aaa..4e1fb44caa4 100644
--- a/dbms/cmake/version.cmake
+++ b/dbms/cmake/version.cmake
@@ -1,11 +1,11 @@
 # This strings autochanged from release_lib.sh:
-set(VERSION_REVISION 54396 CACHE STRING "")
+set(VERSION_REVISION 54404 CACHE STRING "")
 set(VERSION_MAJOR 18 CACHE STRING "")
-set(VERSION_MINOR 1 CACHE STRING "")
+set(VERSION_MINOR 9 CACHE STRING "")
 set(VERSION_PATCH 0 CACHE STRING "")
-set(VERSION_GITHASH 550f41bc65cb03201acad489e7b96ea346ed8259 CACHE STRING "")
-set(VERSION_DESCRIBE v18.1.0-testing CACHE STRING "")
-set(VERSION_STRING 18.1.0 CACHE STRING "")
+set(VERSION_GITHASH c83721a02db002eef7ff864f82d53ca89d47f9e6 CACHE STRING "")
+set(VERSION_DESCRIBE v18.9.0-testing CACHE STRING "")
+set(VERSION_STRING 18.9.0 CACHE STRING "")
 # end of autochange
 
 set(VERSION_EXTRA "" CACHE STRING "")
@@ -14,18 +14,11 @@ set(VERSION_TWEAK "" CACHE STRING "")
 if (VERSION_TWEAK)
     string(CONCAT VERSION_STRING ${VERSION_STRING} "." ${VERSION_TWEAK})
 endif ()
+
 if (VERSION_EXTRA)
     string(CONCAT VERSION_STRING ${VERSION_STRING} "."
${VERSION_EXTRA}) endif () -set (VERSION_NAME "${PROJECT_NAME}") -set (VERSION_FULL "${VERSION_NAME} ${VERSION_STRING}") - -if (APPLE) - # dirty hack: ld: malformed 64-bit a.b.c.d.e version number: 1.1.54160 - math (EXPR VERSION_SO1 "${VERSION_REVISION}/255") - math (EXPR VERSION_SO2 "${VERSION_REVISION}%255") - set (VERSION_SO "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_SO1}.${VERSION_SO2}") -else () - set (VERSION_SO "${VERSION_STRING}") -endif () +set (VERSION_NAME "${PROJECT_NAME}" CACHE STRING "") +set (VERSION_FULL "${VERSION_NAME} ${VERSION_STRING}" CACHE STRING "") +set (VERSION_SO "${VERSION_STRING}" CACHE STRING "") diff --git a/dbms/programs/CMakeLists.txt b/dbms/programs/CMakeLists.txt index 20baa6b039c..a5692d81c09 100644 --- a/dbms/programs/CMakeLists.txt +++ b/dbms/programs/CMakeLists.txt @@ -152,6 +152,6 @@ else () endif () -if (USE_EMBEDDED_COMPILER AND ENABLE_CLICKHOUSE_SERVER) +if (TARGET clickhouse-server AND TARGET copy-headers) add_dependencies(clickhouse-server copy-headers) endif () diff --git a/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt b/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt index e07a570a434..d02d266d5a5 100644 --- a/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt +++ b/dbms/programs/clang/Compiler-5.0.0/CMakeLists.txt @@ -43,4 +43,7 @@ LLVMSupport #PollyPPCG PUBLIC ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} Threads::Threads +${MALLOC_LIBRARIES} +${GLIBC_COMPATIBILITY_LIBRARIES} +${MEMCPY_LIBRARIES} ) diff --git a/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt b/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt index 4f5e703bd63..701b99d08e3 100644 --- a/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt +++ b/dbms/programs/clang/Compiler-6.0.0/CMakeLists.txt @@ -43,4 +43,7 @@ ${REQUIRED_LLVM_LIBRARIES} #PollyPPCG PUBLIC ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} Threads::Threads +${MALLOC_LIBRARIES} +${GLIBC_COMPATIBILITY_LIBRARIES} +${MEMCPY_LIBRARIES} ) diff --git a/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt b/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt index f46e8ef0dc1..081037cdeed 100644 --- a/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt +++ b/dbms/programs/clang/Compiler-7.0.0/CMakeLists.txt @@ -39,4 +39,7 @@ lldCore ${REQUIRED_LLVM_LIBRARIES} PUBLIC ${ZLIB_LIBRARIES} ${EXECINFO_LIBRARY} Threads::Threads +${MALLOC_LIBRARIES} +${GLIBC_COMPATIBILITY_LIBRARIES} +${MEMCPY_LIBRARIES} ) diff --git a/dbms/programs/client/Client.cpp b/dbms/programs/client/Client.cpp index eb7673c947b..a52dadcc5dc 100644 --- a/dbms/programs/client/Client.cpp +++ b/dbms/programs/client/Client.cpp @@ -86,9 +86,106 @@ namespace ErrorCodes extern const int UNKNOWN_PACKET_FROM_SERVER; extern const int UNEXPECTED_PACKET_FROM_SERVER; extern const int CLIENT_OUTPUT_FORMAT_SPECIFIED; + extern const int LOGICAL_ERROR; } +/// Checks expected server and client error codes in testmode. +/// To enable it add special comment after the query: "-- { serverError 60 }" or "-- { clientError 20 }". +class TestHint +{ +public: + TestHint(bool enabled_, const String & query) + : enabled(enabled_), + server_error(0), + client_error(0) + { + if (!enabled_) + return; + + size_t pos = query.find("--"); + if (pos != String::npos && query.find("--", pos + 2) != String::npos) + return; /// It's not last comment. Hint belongs to commented query. 
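+        /// E.g. in "SELECT 1 -- comment -- { serverError 60 }" the hint is ignored:
+        /// with more than one "--", the hint may belong to a commented-out query rather than the current one.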
+
+        if (pos != String::npos)
+        {
+            pos = query.find('{', pos + 2);
+            if (pos != String::npos)
+            {
+                String hint = query.substr(pos + 1);
+                pos = hint.find('}');
+                hint.resize(pos);
+                parse(hint);
+            }
+        }
+    }
+
+    /// @returns true if it's possible to continue without reconnect
+    bool checkActual(int & actual_server_error, int & actual_client_error,
+                     bool & got_exception, std::unique_ptr<Exception> & last_exception) const
+    {
+        if (!enabled)
+            return true;
+
+        if (allErrorsExpected(actual_server_error, actual_client_error))
+        {
+            got_exception = false;
+            last_exception.reset();
+            actual_server_error = 0;
+            actual_client_error = 0;
+            return false;
+        }
+
+        if (lostExpectedError(actual_server_error, actual_client_error))
+        {
+            std::cerr << "Success when error expected. It expects server error "
+                << server_error << ", client error " << client_error << "." << std::endl;
+            got_exception = true;
+            last_exception = std::make_unique<Exception>("Success when error expected", ErrorCodes::LOGICAL_ERROR); /// return error to OS
+            return false;
+        }
+
+        return true;
+    }
+
+    int serverError() const { return server_error; }
+    int clientError() const { return client_error; }
+
+private:
+    bool enabled;
+    int server_error;
+    int client_error;
+
+    void parse(const String & hint)
+    {
+        std::stringstream ss;
+        ss << hint;
+        while (!ss.eof())
+        {
+            String item;
+            ss >> item;
+            if (item.empty())
+                break;
+
+            if (item == "serverError")
+                ss >> server_error;
+            else if (item == "clientError")
+                ss >> client_error;
+        }
+    }
+
+    bool allErrorsExpected(int actual_server_error, int actual_client_error) const
+    {
+        return (server_error || client_error) && (server_error == actual_server_error) && (client_error == actual_client_error);
+    }
+
+    bool lostExpectedError(int actual_server_error, int actual_client_error) const
+    {
+        return (server_error && !actual_server_error) || (client_error && !actual_client_error);
+    }
+};
+
+
 class Client : public Poco::Util::Application
 {
 public:
@@ -107,6 +204,7 @@ private:
     bool is_interactive = true;          /// Use either readline interface or batch mode.
     bool need_render_progress = true;    /// Render query execution progress.
     bool echo_queries = false;           /// Print queries before execution in batch mode.
+    bool ignore_error = false;           /// In case of errors, don't print error message, continue to next query. Only applicable for non-interactive mode.
     bool print_time_to_stderr = false;   /// Output execution time to stderr in batch mode.
 
     bool stdin_is_not_tty = false;       /// stdin is not a terminal.
@@ -157,6 +255,10 @@
     /// If the last query resulted in exception.
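+    /// (In testmode, TestHint::checkActual() inspects this flag together with last_exception and may reset both.)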
bool got_exception = false; + int expected_server_error = 0; + int expected_client_error = 0; + int actual_server_error = 0; + int actual_client_error = 0; String server_version; String server_display_name; @@ -373,6 +475,7 @@ private: { need_render_progress = config().getBool("progress", false); echo_queries = config().getBool("echo", false); + ignore_error = config().getBool("ignore-error", false); } connect(); @@ -515,6 +618,7 @@ private: String server_name; UInt64 server_version_major = 0; UInt64 server_version_minor = 0; + UInt64 server_version_patch = 0; UInt64 server_revision = 0; if (max_client_network_bandwidth) @@ -523,9 +627,9 @@ private: connection->setThrottler(throttler); } - connection->getServerVersion(server_name, server_version_major, server_version_minor, server_revision); + connection->getServerVersion(server_name, server_version_major, server_version_minor, server_version_patch, server_revision); - server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_revision); + server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch); if (server_display_name = connection->getServerDisplayName(); server_display_name.length() == 0) { @@ -536,6 +640,7 @@ private: { std::cout << "Connected to " << server_name << " server version " << server_version + << " revision " << server_revision << "." << std::endl << std::endl; } } @@ -617,10 +722,14 @@ private: } catch (const Exception & e) { - std::cerr << std::endl - << "Exception on client:" << std::endl - << "Code: " << e.code() << ". " << e.displayText() << std::endl - << std::endl; + actual_client_error = e.code(); + if (!actual_client_error || actual_client_error != expected_client_error) + { + std::cerr << std::endl + << "Exception on client:" << std::endl + << "Code: " << e.code() << ". " << e.displayText() << std::endl + << std::endl; + } /// Client-side exception during query execution can result in the loss of /// sync in the connection protocol. @@ -658,7 +767,7 @@ private: bool process(const String & text) { - const auto ignore_error = config().getBool("ignore-error", false); + const bool test_mode = config().has("testmode"); if (config().has("multiquery")) { /// Several queries separated by ';'. @@ -702,6 +811,10 @@ private: while (isWhitespaceASCII(*begin) || *begin == ';') ++begin; + TestHint test_hint(test_mode, query); + expected_client_error = test_hint.clientError(); + expected_server_error = test_hint.serverError(); + try { if (!processSingleQuery(query, ast) && !ignore_error) @@ -709,10 +822,16 @@ private: } catch (...) 
{ - std::cerr << "Error on processing query: " << query << std::endl << getCurrentExceptionMessage(true); + last_exception = std::make_unique(getCurrentExceptionMessage(true), getCurrentExceptionCode()); + actual_client_error = last_exception->code(); + if (!ignore_error && (!actual_client_error || actual_client_error != expected_client_error)) + std::cerr << "Error on processing query: " << query << std::endl << last_exception->message(); got_exception = true; } + if (!test_hint.checkActual(actual_server_error, actual_client_error, got_exception, last_exception)) + connection->forceConnected(); + if (got_exception && !ignore_error) { if (is_interactive) @@ -1286,6 +1405,14 @@ private: resetOutput(); got_exception = true; + actual_server_error = e.code(); + if (expected_server_error) + { + if (actual_server_error == expected_server_error) + return; + std::cerr << "Expected error code: " << expected_server_error << " but got: " << actual_server_error << "." << std::endl; + } + std::string text = e.displayText(); auto embedded_stack_trace_pos = text.find("Stack trace"); @@ -1411,7 +1538,8 @@ public: ("pager", boost::program_options::value(), "pager") ("multiline,m", "multiline") ("multiquery,n", "multiquery") - ("ignore-error", "Do not stop processing in multiquery mode") + ("testmode,T", "enable test hints in comments") + ("ignore-error", "do not stop processing in multiquery mode") ("format,f", boost::program_options::value(), "default output format") ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command") ("time,t", "print query execution time to stderr in non-interactive mode (for benchmarks)") @@ -1517,6 +1645,8 @@ public: config().setBool("multiline", true); if (options.count("multiquery")) config().setBool("multiquery", true); + if (options.count("testmode")) + config().setBool("testmode", true); if (options.count("ignore-error")) config().setBool("ignore-error", true); if (options.count("format")) diff --git a/dbms/programs/obfuscator/Obfuscator.cpp b/dbms/programs/obfuscator/Obfuscator.cpp index 854771b3b26..3ba6d76179e 100644 --- a/dbms/programs/obfuscator/Obfuscator.cpp +++ b/dbms/programs/obfuscator/Obfuscator.cpp @@ -58,13 +58,13 @@ It is designed to retain the following properties of data: Most of the properties above are viable for performance testing: - reading data, filtering, aggregation and sorting will work at almost the same speed - as on original data due to saved cardinalities, magnitudes, compression ratios, etc. + as on original data due to saved cardinalities, magnitudes, compression ratios, etc. It works in deterministic fashion: you define a seed value and transform is totally determined by input data and by seed. Some transforms are one to one and could be reversed, so you need to have large enough seed and keep it in secret. It use some cryptographic primitives to transform data, but from the cryptographic point of view, - it doesn't do anything properly and you should never consider the result as secure, unless you have other reasons for it. + it doesn't do anything properly and you should never consider the result as secure, unless you have other reasons for it. It may retain some data you don't want to publish. @@ -74,7 +74,7 @@ So, the user will be able to count exact ratio of mobile traffic. Another example, suppose you have some private data in your table, like user email and you don't want to publish any single email address. 
If your table is large enough and contain multiple different emails and there is no email that have very high frequency than all others, - it will perfectly anonymize all data. But if you have small amount of different values in a column, it can possibly reproduce some of them. + it will perfectly anonymize all data. But if you have small amount of different values in a column, it can possibly reproduce some of them. And you should take care and look at exact algorithm, how this tool works, and probably fine tune some of it command line parameters. This tool works fine only with reasonable amount of data (at least 1000s of rows). diff --git a/dbms/programs/performance-test/PerformanceTest.cpp b/dbms/programs/performance-test/PerformanceTest.cpp index 1f7421566a4..cf55173ad3a 100644 --- a/dbms/programs/performance-test/PerformanceTest.cpp +++ b/dbms/programs/performance-test/PerformanceTest.cpp @@ -521,11 +521,12 @@ public: std::string name; UInt64 version_major; UInt64 version_minor; + UInt64 version_patch; UInt64 version_revision; - connection.getServerVersion(name, version_major, version_minor, version_revision); + connection.getServerVersion(name, version_major, version_minor, version_patch, version_revision); std::stringstream ss; - ss << version_major << "." << version_minor << "." << version_revision; + ss << version_major << "." << version_minor << "." << version_patch; server_version = ss.str(); processTestsConfigurations(input_files); diff --git a/dbms/programs/server/CMakeLists.txt b/dbms/programs/server/CMakeLists.txt index 74297d29864..c146f40d281 100644 --- a/dbms/programs/server/CMakeLists.txt +++ b/dbms/programs/server/CMakeLists.txt @@ -19,7 +19,7 @@ if (CLICKHOUSE_SPLIT_BINARY) install (TARGETS clickhouse-server ${CLICKHOUSE_ALL_TARGETS} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse) endif () -if (NOT APPLE AND NOT ARCH_FREEBSD) +if (OS_LINUX) set (GLIBC_MAX_REQUIRED 2.4) add_test(NAME GLIBC_required_version COMMAND bash -c "readelf -s ${CMAKE_CURRENT_BINARY_DIR}/../clickhouse-server | grep '@GLIBC' | grep -oP 'GLIBC_[\\d\\.]+' | sort | uniq | sort -r | perl -lnE 'exit 1 if $_ gt q{GLIBC_${GLIBC_MAX_REQUIRED}}'") endif () diff --git a/dbms/programs/server/InterserverIOHTTPHandler.cpp b/dbms/programs/server/InterserverIOHTTPHandler.cpp index 3cdbaa69b64..39d214503ba 100644 --- a/dbms/programs/server/InterserverIOHTTPHandler.cpp +++ b/dbms/programs/server/InterserverIOHTTPHandler.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -23,14 +24,40 @@ namespace ErrorCodes extern const int TOO_MANY_SIMULTANEOUS_QUERIES; } +std::pair InterserverIOHTTPHandler::checkAuthentication(Poco::Net::HTTPServerRequest & request) const +{ + const auto & config = server.config(); + + if (config.has("interserver_http_credentials.user")) + { + if (!request.hasCredentials()) + return {"Server requires HTTP Basic authentification, but client doesn't provide it", false}; + String scheme, info; + request.getCredentials(scheme, info); + + if (scheme != "Basic") + return {"Server requires HTTP Basic authentification but client provides another method", false}; + + String user = config.getString("interserver_http_credentials.user"); + String password = config.getString("interserver_http_credentials.password", ""); + + Poco::Net::HTTPBasicCredentials credentials(info); + if (std::make_pair(user, password) != std::make_pair(credentials.getUsername(), credentials.getPassword())) + return {"Incorrect user or password in HTTP Basic authentification", false}; + } + else if 
(request.hasCredentials()) + { + return {"Client requires HTTP Basic authentification, but server doesn't provide it", false}; + } + return {"", true}; +} + void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response) { HTMLForm params(request); LOG_TRACE(log, "Request URI: " << request.getURI()); - /// NOTE: You can do authentication here if you need to. - String endpoint_name = params.get("endpoint"); bool compress = params.get("compress") == "true"; @@ -65,8 +92,18 @@ void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & requ try { - processQuery(request, response); - LOG_INFO(log, "Done processing query"); + if (auto [msg, success] = checkAuthentication(request); success) + { + processQuery(request, response); + LOG_INFO(log, "Done processing query"); + } + else + { + response.setStatusAndReason(Poco::Net::HTTPServerResponse::HTTP_UNAUTHORIZED); + if (!response.sent()) + response.send() << msg << std::endl; + LOG_WARNING(log, "Query processing failed request: '" << request.getURI() << "' authentification failed"); + } } catch (Exception & e) { diff --git a/dbms/programs/server/InterserverIOHTTPHandler.h b/dbms/programs/server/InterserverIOHTTPHandler.h index bf9fef59982..fbaf432d4f9 100644 --- a/dbms/programs/server/InterserverIOHTTPHandler.h +++ b/dbms/programs/server/InterserverIOHTTPHandler.h @@ -34,6 +34,8 @@ private: CurrentMetrics::Increment metric_increment{CurrentMetrics::InterserverConnection}; void processQuery(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response); + + std::pair checkAuthentication(Poco::Net::HTTPServerRequest & request) const; }; } diff --git a/dbms/programs/server/Server.cpp b/dbms/programs/server/Server.cpp index 9a3db8bdb12..153f48c9aef 100644 --- a/dbms/programs/server/Server.cpp +++ b/dbms/programs/server/Server.cpp @@ -58,6 +58,7 @@ namespace ErrorCodes extern const int NO_ELEMENTS_IN_CONFIG; extern const int SUPPORT_IS_DISABLED; extern const int ARGUMENT_OUT_OF_BOUND; + extern const int EXCESSIVE_ELEMENT_IN_CONFIG; } @@ -209,25 +210,49 @@ int Server::main(const std::vector & /*args*/) Poco::File(user_files_path).createDirectories(); } - if (config().has("interserver_http_port")) + if (config().has("interserver_http_port") && config().has("interserver_https_port")) + throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG); + + static const auto interserver_tags = { - String this_host = config().getString("interserver_http_host", ""); + std::make_tuple("interserver_http_host", "interserver_http_port", "http"), + std::make_tuple("interserver_https_host", "interserver_https_port", "https") + }; - if (this_host.empty()) + for (auto [host_tag, port_tag, scheme] : interserver_tags) + { + if (config().has(port_tag)) { - this_host = getFQDNOrHostName(); - LOG_DEBUG(log, - "Configuration parameter 'interserver_http_host' doesn't exist or exists and empty. Will use '" + this_host - + "' as replica host."); + String this_host = config().getString(host_tag, ""); + + if (this_host.empty()) + { + this_host = getFQDNOrHostName(); + LOG_DEBUG(log, + "Configuration parameter '" + String(host_tag) + "' doesn't exist or exists and empty. 
Will use '" + this_host + + "' as replica host."); + } + + String port_str = config().getString(port_tag); + int port = parse(port_str); + + if (port < 0 || port > 0xFFFF) + throw Exception("Out of range '" + String(port_tag) + "': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + + global_context->setInterserverIOAddress(this_host, port); + global_context->setInterserverScheme(scheme); } + } - String port_str = config().getString("interserver_http_port"); - int port = parse(port_str); + if (config().has("interserver_http_credentials")) + { + String user = config().getString("interserver_http_credentials.user", ""); + String password = config().getString("interserver_http_credentials.password", ""); - if (port < 0 || port > 0xFFFF) - throw Exception("Out of range 'interserver_http_port': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND); + if (user.empty()) + throw Exception("Configuration parameter interserver_http_credentials user can't be empty", ErrorCodes::NO_ELEMENTS_IN_CONFIG); - global_context->setInterserverIOAddress(this_host, port); + global_context->setInterserverCredentials(user, password); } if (config().has("macros")) @@ -276,6 +301,9 @@ int Server::main(const std::vector & /*args*/) if (config().has("max_table_size_to_drop")) global_context->setMaxTableSizeToDrop(config().getUInt64("max_table_size_to_drop")); + if (config().has("max_partition_size_to_drop")) + global_context->setMaxPartitionSizeToDrop(config().getUInt64("max_partition_size_to_drop")); + /// Size of cache for uncompressed blocks. Zero means disabled. size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0); if (uncompressed_cache_size) @@ -505,6 +533,27 @@ int Server::main(const std::vector & /*args*/) LOG_INFO(log, "Listening interserver http: " + address.toString()); } + + if (config().has("interserver_https_port")) + { +#if USE_POCO_NETSSL + initSSL(); + Poco::Net::SecureServerSocket socket; + auto address = socket_bind_listen(socket, listen_host, config().getInt("interserver_https_port"), /* secure = */ true); + socket.setReceiveTimeout(settings.http_receive_timeout); + socket.setSendTimeout(settings.http_send_timeout); + servers.emplace_back(new Poco::Net::HTTPServer( + new InterserverIOHTTPHandlerFactory(*this, "InterserverIOHTTPHandler-factory"), + server_pool, + socket, + http_params)); + + LOG_INFO(log, "Listening interserver https: " + address.toString()); +#else + throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.", + ErrorCodes::SUPPORT_IS_DISABLED}; +#endif + } } catch (const Poco::Net::NetException & e) { diff --git a/dbms/programs/server/TCPHandler.cpp b/dbms/programs/server/TCPHandler.cpp index 26c8ce29130..4f75f7dd6a7 100644 --- a/dbms/programs/server/TCPHandler.cpp +++ b/dbms/programs/server/TCPHandler.cpp @@ -24,7 +24,6 @@ #include #include - namespace DB { @@ -494,6 +493,7 @@ void TCPHandler::receiveHello() readStringBinary(client_name, *in); readVarUInt(client_version_major, *in); readVarUInt(client_version_minor, *in); + // NOTE For backward compatibility of the protocol, client cannot send its version_patch. readVarUInt(client_revision, *in); readStringBinary(default_database, *in); readStringBinary(user, *in); @@ -502,7 +502,8 @@ void TCPHandler::receiveHello() LOG_DEBUG(log, "Connected " << client_name << " version " << client_version_major << "." << client_version_minor - << "." << client_revision + << "." 
<< client_version_patch + << ", revision: " << client_revision << (!default_database.empty() ? ", database: " + default_database : "") << (!user.empty() ? ", user: " + user : "") << "."); @@ -519,13 +520,11 @@ void TCPHandler::sendHello() writeVarUInt(DBMS_VERSION_MINOR, *out); writeVarUInt(ClickHouseRevision::get(), *out); if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) - { writeStringBinary(DateLUT::instance().getTimeZone(), *out); - } if (client_revision >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME) - { writeStringBinary(server_display_name, *out); - } + if (client_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) + writeVarUInt(DBMS_VERSION_PATCH, *out); out->next(); } @@ -598,6 +597,7 @@ void TCPHandler::receiveQuery() client_info.client_name = client_name; client_info.client_version_major = client_version_major; client_info.client_version_minor = client_version_minor; + client_info.client_version_patch = client_version_patch; client_info.client_revision = client_revision; } diff --git a/dbms/programs/server/TCPHandler.h b/dbms/programs/server/TCPHandler.h index e01987d3bbd..af122513cf7 100644 --- a/dbms/programs/server/TCPHandler.h +++ b/dbms/programs/server/TCPHandler.h @@ -98,6 +98,7 @@ private: String client_name; UInt64 client_version_major = 0; UInt64 client_version_minor = 0; + UInt64 client_version_patch = 0; UInt64 client_revision = 0; Context connection_context; diff --git a/dbms/programs/server/config.xml b/dbms/programs/server/config.xml index 7dd7a00517e..e461d49d522 100644 --- a/dbms/programs/server/config.xml +++ b/dbms/programs/server/config.xml @@ -322,10 +322,12 @@ + diff --git a/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp b/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp index f42c5b6d142..9cb7d03bf69 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionArray.cpp @@ -18,6 +18,9 @@ public: DataTypes transformArguments(const DataTypes & arguments) const override { + if (0 == arguments.size()) + throw Exception("-Array aggregate functions require at least one argument", ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + DataTypes nested_arguments; for (const auto & type : arguments) { diff --git a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp b/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp index 762baf2451b..8c188bcbb8e 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionBitwise.cpp @@ -38,9 +38,9 @@ void registerAggregateFunctionsBitwise(AggregateFunctionFactory & factory) factory.registerFunction("groupBitXor", createAggregateFunctionBitwise); /// Aliases for compatibility with MySQL. 
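     /// E.g. `SELECT BIT_OR(x) FROM t` resolves to `groupBitOr(x)`, whatever the letter case.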
- factory.registerFunction("BIT_OR", createAggregateFunctionBitwise, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("BIT_AND", createAggregateFunctionBitwise, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("BIT_XOR", createAggregateFunctionBitwise, AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("BIT_OR", "groupBitOr", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("BIT_AND", "groupBitAnd", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("BIT_XOR", "groupBitXor", AggregateFunctionFactory::CaseInsensitive); } } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h b/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h index 4c70cc6c068..579951cecb1 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionCombinatorFactory.h @@ -15,6 +15,10 @@ namespace DB */ class AggregateFunctionCombinatorFactory final: public ext::singleton { +private: + using Dict = std::unordered_map; + Dict dict; + public: /// Not thread safe. You must register before using tryGet. void registerCombinator(const AggregateFunctionCombinatorPtr & value); @@ -22,8 +26,10 @@ public: /// Example: if the name is 'avgIf', it will return combinator -If. AggregateFunctionCombinatorPtr tryFindSuffix(const std::string & name) const; -private: - std::unordered_map dict; + const Dict & getAllAggregateFunctionCombinators() const + { + return dict; + } }; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp b/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp index 90109ff04c5..7876f0dcffb 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionFactory.cpp @@ -95,11 +95,12 @@ AggregateFunctionPtr AggregateFunctionFactory::get( AggregateFunctionPtr AggregateFunctionFactory::getImpl( - const String & name, + const String & name_param, const DataTypes & argument_types, const Array & parameters, int recursion_level) const { + String name = getAliasToOrName(name_param); /// Find by exact match. 
auto it = aggregate_functions.find(name); if (it != aggregate_functions.end()) @@ -120,8 +121,8 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl( if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name)) { - if (combinator->getName() == "Null") - throw Exception("Aggregate function combinator 'Null' is only for internal usage", ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION); + if (combinator->isForInternalUsageOnly()) + throw Exception("Aggregate function combinator '" + combinator->getName() + "' is only for internal usage", ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION); String nested_name = name.substr(0, name.size() - combinator->getName().size()); DataTypes nested_types = combinator->transformArguments(argument_types); @@ -143,10 +144,11 @@ AggregateFunctionPtr AggregateFunctionFactory::tryGet(const String & name, const bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int recursion_level) const { - if (aggregate_functions.count(name)) + if (aggregate_functions.count(name) || isAlias(name)) return true; - if (recursion_level == 0 && case_insensitive_aggregate_functions.count(Poco::toLower(name))) + String name_lowercase = Poco::toLower(name); + if (recursion_level == 0 && (case_insensitive_aggregate_functions.count(name_lowercase) || isAlias(name_lowercase))) return true; if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name)) diff --git a/dbms/src/AggregateFunctions/AggregateFunctionFactory.h b/dbms/src/AggregateFunctions/AggregateFunctionFactory.h index bc36e76c11f..92598e52509 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionFactory.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionFactory.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -20,27 +21,18 @@ class IDataType; using DataTypePtr = std::shared_ptr; using DataTypes = std::vector; +/** Creator have arguments: name of aggregate function, types of arguments, values of parameters. + * Parameters are for "parametric" aggregate functions. + * For example, in quantileWeighted(0.9)(x, weight), 0.9 is "parameter" and x, weight are "arguments". + */ +using AggregateFunctionCreator = std::function; + /** Creates an aggregate function by name. */ -class AggregateFunctionFactory final : public ext::singleton +class AggregateFunctionFactory final : public ext::singleton, public IFactoryWithAliases { - friend class StorageSystemFunctions; - public: - /** Creator have arguments: name of aggregate function, types of arguments, values of parameters. - * Parameters are for "parametric" aggregate functions. - * For example, in quantileWeighted(0.9)(x, weight), 0.9 is "parameter" and x, weight are "arguments". - */ - using Creator = std::function; - - /// For compatibility with SQL, it's possible to specify that certain aggregate function name is case insensitive. - enum CaseSensitiveness - { - CaseSensitive, - CaseInsensitive - }; - /// Register a function by its name. /// No locking, you must register all functions before usage of get. void registerFunction( @@ -77,6 +69,13 @@ private: /// Case insensitive aggregate functions will be additionally added here with lowercased name. 
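     /// E.g. "corr" is registered case-insensitively, so lookups for "CORR" or "Corr" hit this map.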
AggregateFunctions case_insensitive_aggregate_functions; + + const AggregateFunctions & getCreatorMap() const override { return aggregate_functions; } + + const AggregateFunctions & getCaseInsensitiveCreatorMap() const override { return case_insensitive_aggregate_functions; } + + String getFactoryName() const override { return "AggregateFunctionFactory"; } + }; } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h index 4b92a6231fe..322307c2bcf 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionMinMaxAny.h @@ -646,7 +646,7 @@ struct AggregateFunctionAnyHeavyData : Data } else { - if (counter < to.counter) + if ((!this->has() && to.has()) || counter < to.counter) { this->change(to, arena); return true; diff --git a/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp b/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp index 46a46a2370a..6ce7d94d970 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionNull.cpp @@ -18,6 +18,8 @@ class AggregateFunctionCombinatorNull final : public IAggregateFunctionCombinato public: String getName() const override { return "Null"; } + bool isForInternalUsageOnly() const override { return true; } + DataTypes transformArguments(const DataTypes & arguments) const override { size_t size = arguments.size(); diff --git a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp index 250ee422e8b..62455af6353 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionQuantile.cpp @@ -93,30 +93,14 @@ void registerAggregateFunctionsQuantile(AggregateFunctionFactory & factory) createAggregateFunctionQuantile); /// 'median' is an alias for 'quantile' - - factory.registerFunction("median", - createAggregateFunctionQuantile); - - factory.registerFunction("medianDeterministic", - createAggregateFunctionQuantile); - - factory.registerFunction("medianExact", - createAggregateFunctionQuantile); - - factory.registerFunction("medianExactWeighted", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTiming", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTimingWeighted", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTDigest", - createAggregateFunctionQuantile); - - factory.registerFunction("medianTDigestWeighted", - createAggregateFunctionQuantile); + factory.registerAlias("median", NameQuantile::name); + factory.registerAlias("medianDeterministic", NameQuantileDeterministic::name); + factory.registerAlias("medianExact", NameQuantileExact::name); + factory.registerAlias("medianExactWeighted", NameQuantileExactWeighted::name); + factory.registerAlias("medianTiming", NameQuantileTiming::name); + factory.registerAlias("medianTimingWeighted", NameQuantileTimingWeighted::name); + factory.registerAlias("medianTDigest", NameQuantileTDigest::name); + factory.registerAlias("medianTDigestWeighted", NameQuantileTDigestWeighted::name); } } diff --git a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h b/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h index 4ad0400d160..b62755ef00c 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h +++ b/dbms/src/AggregateFunctions/AggregateFunctionWindowFunnel.h @@ -116,7 +116,7 @@ struct 
AggregateFunctionWindowFunnelData /// TODO Protection against huge size events_list.clear(); - events_list.resize(size); + events_list.reserve(size); UInt32 timestamp; UInt8 event; diff --git a/dbms/src/AggregateFunctions/AggregateFunctionsStatisticsSimple.cpp b/dbms/src/AggregateFunctions/AggregateFunctionsStatisticsSimple.cpp index 089ea59cd79..c42372187bc 100644 --- a/dbms/src/AggregateFunctions/AggregateFunctionsStatisticsSimple.cpp +++ b/dbms/src/AggregateFunctions/AggregateFunctionsStatisticsSimple.cpp @@ -56,12 +56,12 @@ void registerAggregateFunctionsStatisticsSimple(AggregateFunctionFactory & facto factory.registerFunction("corr", createAggregateFunctionStatisticsBinary, AggregateFunctionFactory::CaseInsensitive); /// Synonims for compatibility. - factory.registerFunction("VAR_SAMP", createAggregateFunctionStatisticsUnary, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("VAR_POP", createAggregateFunctionStatisticsUnary, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("STDDEV_SAMP", createAggregateFunctionStatisticsUnary, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("STDDEV_POP", createAggregateFunctionStatisticsUnary, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("COVAR_SAMP", createAggregateFunctionStatisticsBinary, AggregateFunctionFactory::CaseInsensitive); - factory.registerFunction("COVAR_POP", createAggregateFunctionStatisticsBinary, AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("VAR_SAMP", "varSamp", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("VAR_POP", "varPop", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("STDDEV_SAMP", "stddevSamp", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("STDDEV_POP", "stddevPop", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("COVAR_SAMP", "covarSamp", AggregateFunctionFactory::CaseInsensitive); + factory.registerAlias("COVAR_POP", "covarPop", AggregateFunctionFactory::CaseInsensitive); } } diff --git a/dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h b/dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h index ba28026b1cd..0ac9a3d41cd 100644 --- a/dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h +++ b/dbms/src/AggregateFunctions/IAggregateFunctionCombinator.h @@ -32,6 +32,8 @@ class IAggregateFunctionCombinator public: virtual String getName() const = 0; + virtual bool isForInternalUsageOnly() const { return false; } + /** From the arguments for combined function (ex: UInt64, UInt8 for sumIf), * get the arguments for nested function (ex: UInt64 for sum). * If arguments are not suitable for combined function, throw an exception. diff --git a/dbms/src/Client/Connection.cpp b/dbms/src/Client/Connection.cpp index b847d905054..affd89b1c28 100644 --- a/dbms/src/Client/Connection.cpp +++ b/dbms/src/Client/Connection.cpp @@ -89,7 +89,7 @@ void Connection::connect() LOG_TRACE(log_wrapper.get(), "Connected to " << server_name << " server version " << server_version_major << "." << server_version_minor - << "." << server_revision + << "." << server_version_patch << "."); } catch (Poco::Net::NetException & e) @@ -150,6 +150,7 @@ void Connection::sendHello() writeStringBinary((DBMS_NAME " ") + client_name, *out); writeVarUInt(DBMS_VERSION_MAJOR, *out); writeVarUInt(DBMS_VERSION_MINOR, *out); + // NOTE For backward compatibility of the protocol, client cannot send its version_patch. 
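+    // (The server, by contrast, does send its patch version at the end of Hello (see TCPHandler::sendHello);
+    // receiveHello below falls back to server_revision for servers that predate DBMS_MIN_REVISION_WITH_VERSION_PATCH.)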
writeVarUInt(ClickHouseRevision::get(), *out); writeStringBinary(default_database, *out); writeStringBinary(user, *out); @@ -174,13 +175,13 @@ void Connection::receiveHello() readVarUInt(server_version_minor, *in); readVarUInt(server_revision, *in); if (server_revision >= DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE) - { readStringBinary(server_timezone, *in); - } if (server_revision >= DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME) - { readStringBinary(server_display_name, *in); - } + if (server_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) + readVarUInt(server_version_patch, *in); + else + server_version_patch = server_revision; } else if (packet_type == Protocol::Server::Exception) receiveException()->rethrow(); @@ -217,7 +218,7 @@ UInt16 Connection::getPort() const return port; } -void Connection::getServerVersion(String & name, UInt64 & version_major, UInt64 & version_minor, UInt64 & revision) +void Connection::getServerVersion(String & name, UInt64 & version_major, UInt64 & version_minor, UInt64 & version_patch, UInt64 & revision) { if (!connected) connect(); @@ -225,6 +226,7 @@ void Connection::getServerVersion(String & name, UInt64 & version_major, UInt64 name = server_name; version_major = server_version_major; version_minor = server_version_minor; + version_patch = server_version_patch; revision = server_revision; } diff --git a/dbms/src/Client/Connection.h b/dbms/src/Client/Connection.h index 2da794b8434..dabb50b53a9 100644 --- a/dbms/src/Client/Connection.h +++ b/dbms/src/Client/Connection.h @@ -104,7 +104,7 @@ public: /// Change default database. Changes will take effect on next reconnect. void setDefaultDatabase(const String & database); - void getServerVersion(String & name, UInt64 & version_major, UInt64 & version_minor, UInt64 & revision); + void getServerVersion(String & name, UInt64 & version_major, UInt64 & version_minor, UInt64 & version_patch, UInt64 & revision); const String & getServerTimezone(); const String & getServerDisplayName(); @@ -187,6 +187,7 @@ private: String server_name; UInt64 server_version_major = 0; UInt64 server_version_minor = 0; + UInt64 server_version_patch = 0; UInt64 server_revision = 0; String server_timezone; String server_display_name; diff --git a/dbms/src/Client/ConnectionPoolWithFailover.cpp b/dbms/src/Client/ConnectionPoolWithFailover.cpp index ee8c3607c43..73469fcf53f 100644 --- a/dbms/src/Client/ConnectionPoolWithFailover.cpp +++ b/dbms/src/Client/ConnectionPoolWithFailover.cpp @@ -83,6 +83,16 @@ std::vector ConnectionPoolWithFailover::getMany(const Se return entries; } +std::vector ConnectionPoolWithFailover::getManyForTableFunction(const Settings * settings, PoolMode pool_mode) +{ + TryGetEntryFunc try_get_entry = [&](NestedPool & pool, std::string & fail_message) + { + return tryGetEntry(pool, fail_message, settings); + }; + + return getManyImpl(settings, pool_mode, try_get_entry); +} + std::vector ConnectionPoolWithFailover::getManyChecked( const Settings * settings, PoolMode pool_mode, const QualifiedTableName & table_to_check) { @@ -90,6 +100,7 @@ std::vector ConnectionPoolWithFailover::g { return tryGetEntry(pool, fail_message, settings, &table_to_check); }; + return getManyImpl(settings, pool_mode, try_get_entry); } @@ -145,9 +156,10 @@ ConnectionPoolWithFailover::tryGetEntry( String server_name; UInt64 server_version_major; UInt64 server_version_minor; + UInt64 server_version_patch; UInt64 server_revision; if (table_to_check) - result.entry->getServerVersion(server_name, server_version_major, server_version_minor, server_revision); 
+ result.entry->getServerVersion(server_name, server_version_major, server_version_minor, server_version_patch, server_revision); if (!table_to_check || server_revision < DBMS_MIN_REVISION_WITH_TABLES_STATUS) { diff --git a/dbms/src/Client/ConnectionPoolWithFailover.h b/dbms/src/Client/ConnectionPoolWithFailover.h index b61fa03d711..62ca75859ba 100644 --- a/dbms/src/Client/ConnectionPoolWithFailover.h +++ b/dbms/src/Client/ConnectionPoolWithFailover.h @@ -47,6 +47,9 @@ public: */ std::vector getMany(const Settings * settings, PoolMode pool_mode); + /// The same as getMany(), but return std::vector. + std::vector getManyForTableFunction(const Settings * settings, PoolMode pool_mode); + using Base = PoolWithFailoverBase; using TryResult = Base::TryResult; diff --git a/dbms/src/Columns/Collator.cpp b/dbms/src/Columns/Collator.cpp index b6cea96ed2f..aaf917fb93d 100644 --- a/dbms/src/Columns/Collator.cpp +++ b/dbms/src/Columns/Collator.cpp @@ -87,3 +87,14 @@ const std::string & Collator::getLocale() const { return locale; } + +std::vector Collator::getAvailableCollations() +{ + std::vector result; +#if USE_ICU + size_t available_locales_count = ucol_countAvailable(); + for (size_t i = 0; i < available_locales_count; ++i) + result.push_back(ucol_getAvailable(i)); +#endif + return result; +} diff --git a/dbms/src/Columns/Collator.h b/dbms/src/Columns/Collator.h index 53341fb1aeb..0bafe6b1dba 100644 --- a/dbms/src/Columns/Collator.h +++ b/dbms/src/Columns/Collator.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include struct UCollator; @@ -15,6 +16,8 @@ public: const std::string & getLocale() const; + static std::vector getAvailableCollations(); + private: std::string locale; UCollator * collator; diff --git a/dbms/src/Common/BackgroundSchedulePool.cpp b/dbms/src/Common/BackgroundSchedulePool.cpp index 84eecdad7ff..9556c9a037b 100644 --- a/dbms/src/Common/BackgroundSchedulePool.cpp +++ b/dbms/src/Common/BackgroundSchedulePool.cpp @@ -128,7 +128,8 @@ void BackgroundSchedulePool::TaskInfo::execute() zkutil::WatchCallback BackgroundSchedulePool::TaskInfo::getWatchCallback() { - return [t=shared_from_this()](const ZooKeeperImpl::ZooKeeper::WatchResponse &) { + return [t = shared_from_this()](const ZooKeeperImpl::ZooKeeper::WatchResponse &) + { t->schedule(); }; } diff --git a/dbms/src/Common/Config/ConfigProcessor.cpp b/dbms/src/Common/Config/ConfigProcessor.cpp index 1e0cb91340b..95189affce8 100644 --- a/dbms/src/Common/Config/ConfigProcessor.cpp +++ b/dbms/src/Common/Config/ConfigProcessor.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -103,7 +104,9 @@ static ElementIdentifier getElementIdentifier(Node * element) { const Node * node = attrs->item(i); std::string name = node->nodeName(); - if (name == "replace" || name == "remove" || name == "incl" || name == "from_zk") + auto subst_name_pos = std::find(ConfigProcessor::SUBSTITUTION_ATTRS.begin(), ConfigProcessor::SUBSTITUTION_ATTRS.end(), name); + if (name == "replace" || name == "remove" || + subst_name_pos != ConfigProcessor::SUBSTITUTION_ATTRS.end()) continue; std::string value = node->nodeValue(); attrs_kv.push_back(std::make_pair(name, value)); @@ -267,12 +270,18 @@ void ConfigProcessor::doIncludesRecursive( return; } + std::map attr_nodes; NamedNodeMapPtr attributes = node->attributes(); - const Node * incl_attribute = attributes->getNamedItem("incl"); - const Node * from_zk_attribute = attributes->getNamedItem("from_zk"); + size_t substs_count = 0; + for (const auto & attr_name : SUBSTITUTION_ATTRS) + { + 
auto subst = attributes->getNamedItem(attr_name); + attr_nodes[attr_name] = subst; + substs_count += static_cast(subst == nullptr); + } - if (incl_attribute && from_zk_attribute) - throw Poco::Exception("both incl and from_zk attributes set for element <" + node->nodeName() + ">"); + if (substs_count < SUBSTITUTION_ATTRS.size() - 1) /// only one substitution is allowed + throw Poco::Exception("several substitutions attributes set for element <" + node->nodeName() + ">"); /// Replace the original contents, not add to it. bool replace = attributes->getNamedItem("replace"); @@ -296,8 +305,8 @@ void ConfigProcessor::doIncludesRecursive( { Element & element = dynamic_cast(*node); - element.removeAttribute("incl"); - element.removeAttribute("from_zk"); + for (const auto & attr_name : SUBSTITUTION_ATTRS) + element.removeAttribute(attr_name); if (replace) { @@ -324,16 +333,19 @@ void ConfigProcessor::doIncludesRecursive( } }; - auto get_incl_node = [&](const std::string & name) + if (attr_nodes["incl"]) // we have include subst { - return include_from ? include_from->getNodeByPath("yandex/" + name) : nullptr; - }; - if (incl_attribute) - process_include(incl_attribute, get_incl_node, "Include not found: "); + auto get_incl_node = [&](const std::string & name) + { + return include_from ? include_from->getNodeByPath("yandex/" + name) : nullptr; + }; - if (from_zk_attribute) + process_include(attr_nodes["incl"], get_incl_node, "Include not found: "); + } + + if (attr_nodes["from_zk"]) /// we have zookeeper subst { - contributing_zk_paths.insert(from_zk_attribute->getNodeValue()); + contributing_zk_paths.insert(attr_nodes["from_zk"]->getNodeValue()); if (zk_node_cache) { @@ -349,10 +361,27 @@ void ConfigProcessor::doIncludesRecursive( return getRootNode(zk_document.get()); }; - process_include(from_zk_attribute, get_zk_node, "Could not get ZooKeeper node: "); + process_include(attr_nodes["from_zk"], get_zk_node, "Could not get ZooKeeper node: "); } } + if (attr_nodes["from_env"]) /// we have env subst + { + XMLDocumentPtr env_document; + auto get_env_node = [&](const std::string & name) -> const Node * + { + const char * env_val = std::getenv(name.c_str()); + if (env_val == nullptr) + return nullptr; + + env_document = dom_parser.parseString("" + std::string{env_val} + ""); + + return getRootNode(env_document.get()); + }; + + process_include(attr_nodes["from_env"], get_env_node, "Env variable is not set: "); + } + if (included_something) doIncludesRecursive(config, include_from, node, zk_node_cache, contributing_zk_paths); else @@ -377,9 +406,6 @@ ConfigProcessor::Files ConfigProcessor::getConfigMergeFiles(const std::string & /// Add path_to_config/conf.d dir merge_dir_path.setBaseName("conf"); merge_dirs.insert(merge_dir_path.toString()); - /// Add path_to_config/config.d dir - merge_dir_path.setBaseName("config"); - merge_dirs.insert(merge_dir_path.toString()); for (const std::string & merge_dir_name : merge_dirs) { diff --git a/dbms/src/Common/Config/ConfigProcessor.h b/dbms/src/Common/Config/ConfigProcessor.h index 8c9048bb102..8663ecb682f 100644 --- a/dbms/src/Common/Config/ConfigProcessor.h +++ b/dbms/src/Common/Config/ConfigProcessor.h @@ -95,6 +95,8 @@ public: /// Is the file named as result of config preprocessing, not as original files. 
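+    /// (Such files are written beside the originals under a name with a "-preprocessed" suffix, e.g. config-preprocessed.xml.)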
static bool isPreprocessedFile(const std::string & config_path); + static inline const auto SUBSTITUTION_ATTRS = {"incl", "from_zk", "from_env"}; + private: const std::string path; const std::string preprocessed_path; diff --git a/dbms/src/Common/ErrorCodes.cpp b/dbms/src/Common/ErrorCodes.cpp index a1662563a1f..e719f8be4b3 100644 --- a/dbms/src/Common/ErrorCodes.cpp +++ b/dbms/src/Common/ErrorCodes.cpp @@ -377,7 +377,9 @@ namespace ErrorCodes extern const int CANNOT_STAT = 400; extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME = 401; extern const int CANNOT_IOSETUP = 402; - + extern const int INVALID_JOIN_ON_EXPRESSION = 403; + extern const int BAD_ODBC_CONNECTION_STRING = 404; + extern const int PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT = 405; extern const int KEEPER_EXCEPTION = 999; extern const int POCO_EXCEPTION = 1000; diff --git a/dbms/src/Common/FieldVisitors.cpp b/dbms/src/Common/FieldVisitors.cpp index 3132a7412ca..62b7667d936 100644 --- a/dbms/src/Common/FieldVisitors.cpp +++ b/dbms/src/Common/FieldVisitors.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -35,6 +36,13 @@ String FieldVisitorDump::operator() (const UInt64 & x) const { return formatQuot String FieldVisitorDump::operator() (const Int64 & x) const { return formatQuotedWithPrefix(x, "Int64_"); } String FieldVisitorDump::operator() (const Float64 & x) const { return formatQuotedWithPrefix(x, "Float64_"); } +String FieldVisitorDump::operator() (const UInt128 & x) const +{ + WriteBufferFromOwnString wb; + wb << "UInt128_" << x.low << "_" << x.high; + return wb.str(); + +} String FieldVisitorDump::operator() (const String & x) const { @@ -47,14 +55,14 @@ String FieldVisitorDump::operator() (const Array & x) const { WriteBufferFromOwnString wb; - wb.write("Array_[", 7); + wb << "Array_["; for (auto it = x.begin(); it != x.end(); ++it) { if (it != x.begin()) - wb.write(", ", 2); - writeString(applyVisitor(*this, *it), wb); + wb << ", "; + wb << applyVisitor(*this, *it); } - writeChar(']', wb); + wb << ']'; return wb.str(); } @@ -64,14 +72,14 @@ String FieldVisitorDump::operator() (const Tuple & x_def) const auto & x = x_def.toUnderType(); WriteBufferFromOwnString wb; - wb.write("Tuple_(", 7); + wb << "Tuple_("; for (auto it = x.begin(); it != x.end(); ++it) { if (it != x.begin()) - wb.write(", ", 2); - writeString(applyVisitor(*this, *it), wb); + wb << ", "; + wb << applyVisitor(*this, *it); } - writeChar(')', wb); + wb << ')'; return wb.str(); } @@ -105,19 +113,24 @@ String FieldVisitorToString::operator() (const Int64 & x) const { return formatQ String FieldVisitorToString::operator() (const Float64 & x) const { return formatFloat(x); } String FieldVisitorToString::operator() (const String & x) const { return formatQuoted(x); } +String FieldVisitorToString::operator() (const UInt128 & x) const +{ + /// Dummy implementation. There is no UInt128 literals in SQL. 
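+    /// (The value is rendered in the dump form "UInt128_<low>_<high>" produced by FieldVisitorDump above.)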
+ return FieldVisitorDump()(x); +} String FieldVisitorToString::operator() (const Array & x) const { WriteBufferFromOwnString wb; - writeChar('[', wb); + wb << '['; for (Array::const_iterator it = x.begin(); it != x.end(); ++it) { if (it != x.begin()) wb.write(", ", 2); - writeString(applyVisitor(*this, *it), wb); + wb << applyVisitor(*this, *it); } - writeChar(']', wb); + wb << ']'; return wb.str(); } @@ -127,14 +140,14 @@ String FieldVisitorToString::operator() (const Tuple & x_def) const auto & x = x_def.toUnderType(); WriteBufferFromOwnString wb; - writeChar('(', wb); + wb << '('; for (auto it = x.begin(); it != x.end(); ++it) { if (it != x.begin()) - wb.write(", ", 2); - writeString(applyVisitor(*this, *it), wb); + wb << ", "; + wb << applyVisitor(*this, *it); } - writeChar(')', wb); + wb << ')'; return wb.str(); } @@ -155,6 +168,13 @@ void FieldVisitorHash::operator() (const UInt64 & x) const hash.update(x); } +void FieldVisitorHash::operator() (const UInt128 & x) const +{ + UInt8 type = Field::Types::UInt128; + hash.update(type); + hash.update(x); +} + void FieldVisitorHash::operator() (const Int64 & x) const { UInt8 type = Field::Types::Int64; diff --git a/dbms/src/Common/FieldVisitors.h b/dbms/src/Common/FieldVisitors.h index b59c6a47aa7..8abf75dbc64 100644 --- a/dbms/src/Common/FieldVisitors.h +++ b/dbms/src/Common/FieldVisitors.h @@ -38,6 +38,7 @@ typename std::decay_t::ResultType applyVisitor(Visitor && visitor, F && { case Field::Types::Null: return visitor(field.template get()); case Field::Types::UInt64: return visitor(field.template get()); + case Field::Types::UInt128: return visitor(field.template get()); case Field::Types::Int64: return visitor(field.template get()); case Field::Types::Float64: return visitor(field.template get()); case Field::Types::String: return visitor(field.template get()); @@ -57,6 +58,7 @@ static typename std::decay_t::ResultType applyBinaryVisitorImpl(Visitor { case Field::Types::Null: return visitor(field1, field2.template get()); case Field::Types::UInt64: return visitor(field1, field2.template get()); + case Field::Types::UInt128: return visitor(field1, field2.template get()); case Field::Types::Int64: return visitor(field1, field2.template get()); case Field::Types::Float64: return visitor(field1, field2.template get()); case Field::Types::String: return visitor(field1, field2.template get()); @@ -79,6 +81,9 @@ typename std::decay_t::ResultType applyVisitor(Visitor && visitor, F1 & case Field::Types::UInt64: return applyBinaryVisitorImpl( std::forward(visitor), field1.template get(), std::forward(field2)); + case Field::Types::UInt128: + return applyBinaryVisitorImpl( + std::forward(visitor), field1.template get(), std::forward(field2)); case Field::Types::Int64: return applyBinaryVisitorImpl( std::forward(visitor), field1.template get(), std::forward(field2)); @@ -107,6 +112,7 @@ class FieldVisitorToString : public StaticVisitor public: String operator() (const Null & x) const; String operator() (const UInt64 & x) const; + String operator() (const UInt128 & x) const; String operator() (const Int64 & x) const; String operator() (const Float64 & x) const; String operator() (const String & x) const; @@ -121,6 +127,7 @@ class FieldVisitorDump : public StaticVisitor public: String operator() (const Null & x) const; String operator() (const UInt64 & x) const; + String operator() (const UInt128 & x) const; String operator() (const Int64 & x) const; String operator() (const Float64 & x) const; String operator() (const String & x) const; @@ -157,6 
+164,11 @@ public: T operator() (const UInt64 & x) const { return x; } T operator() (const Int64 & x) const { return x; } T operator() (const Float64 & x) const { return x; } + + T operator() (const UInt128 &) const + { + throw Exception("Cannot convert UInt128 to " + demangle(typeid(T).name()), ErrorCodes::CANNOT_CONVERT_TYPE); + } }; @@ -170,6 +182,7 @@ public: void operator() (const Null & x) const; void operator() (const UInt64 & x) const; + void operator() (const UInt128 & x) const; void operator() (const Int64 & x) const; void operator() (const Float64 & x) const; void operator() (const String & x) const; @@ -180,44 +193,60 @@ public: /** More precise comparison, used for index. * Differs from Field::operator< and Field::operator== in that it also compares values of different types. * Comparison rules are same as in FunctionsComparison (to be consistent with expression evaluation in query). + * + * TODO Comparisons of UInt128 with different type are incorrect. */ class FieldVisitorAccurateEquals : public StaticVisitor { public: bool operator() (const Null &, const Null &) const { return true; } bool operator() (const Null &, const UInt64 &) const { return false; } + bool operator() (const Null &, const UInt128 &) const { return false; } bool operator() (const Null &, const Int64 &) const { return false; } bool operator() (const Null &, const Float64 &) const { return false; } bool operator() (const Null &, const String &) const { return false; } bool operator() (const Null &, const Array &) const { return false; } bool operator() (const Null &, const Tuple &) const { return false; } - bool operator() (const UInt64 &, const Null &) const { return false; } + bool operator() (const UInt64 &, const Null &) const { return false; } bool operator() (const UInt64 & l, const UInt64 & r) const { return l == r; } + bool operator() (const UInt64 &, const UInt128) const { return true; } bool operator() (const UInt64 & l, const Int64 & r) const { return accurate::equalsOp(l, r); } bool operator() (const UInt64 & l, const Float64 & r) const { return accurate::equalsOp(l, r); } - bool operator() (const UInt64 &, const String &) const { return false; } - bool operator() (const UInt64 &, const Array &) const { return false; } - bool operator() (const UInt64 &, const Tuple &) const { return false; } + bool operator() (const UInt64 &, const String &) const { return false; } + bool operator() (const UInt64 &, const Array &) const { return false; } + bool operator() (const UInt64 &, const Tuple &) const { return false; } - bool operator() (const Int64 &, const Null &) const { return false; } + bool operator() (const UInt128 &, const Null &) const { return false; } + bool operator() (const UInt128 &, const UInt64) const { return false; } + bool operator() (const UInt128 & l, const UInt128 & r) const { return l == r; } + bool operator() (const UInt128 &, const Int64) const { return false; } + bool operator() (const UInt128 &, const Float64) const { return false; } + bool operator() (const UInt128 &, const String &) const { return false; } + bool operator() (const UInt128 &, const Array &) const { return false; } + bool operator() (const UInt128 &, const Tuple &) const { return false; } + + bool operator() (const Int64 &, const Null &) const { return false; } bool operator() (const Int64 & l, const UInt64 & r) const { return accurate::equalsOp(l, r); } + bool operator() (const Int64 &, const UInt128) const { return false; } bool operator() (const Int64 & l, const Int64 & r) const { return l == r; } bool 
operator() (const Int64 & l, const Float64 & r) const { return accurate::equalsOp(l, r); } - bool operator() (const Int64 &, const String &) const { return false; } - bool operator() (const Int64 &, const Array &) const { return false; } - bool operator() (const Int64 &, const Tuple &) const { return false; } + bool operator() (const Int64 &, const String &) const { return false; } + bool operator() (const Int64 &, const Array &) const { return false; } + bool operator() (const Int64 &, const Tuple &) const { return false; } - bool operator() (const Float64 &, const Null &) const { return false; } + bool operator() (const Float64 &, const Null &) const { return false; } bool operator() (const Float64 & l, const UInt64 & r) const { return accurate::equalsOp(l, r); } + bool operator() (const Float64 &, const UInt128) const { return false; } bool operator() (const Float64 & l, const Int64 & r) const { return accurate::equalsOp(l, r); } bool operator() (const Float64 & l, const Float64 & r) const { return l == r; } - bool operator() (const Float64 &, const String &) const { return false; } - bool operator() (const Float64 &, const Array &) const { return false; } - bool operator() (const Float64 &, const Tuple &) const { return false; } + bool operator() (const Float64 &, const String &) const { return false; } + bool operator() (const Float64 &, const Array &) const { return false; } + bool operator() (const Float64 &, const Tuple &) const { return false; } bool operator() (const String &, const Null &) const { return false; } bool operator() (const String &, const UInt64 &) const { return false; } + bool operator() (const String &, const UInt128 &) const { return false; } bool operator() (const String &, const Int64 &) const { return false; } bool operator() (const String &, const Float64 &) const { return false; } bool operator() (const String & l, const String & r) const { return l == r; } @@ -226,6 +255,7 @@ public: bool operator() (const Array &, const Null &) const { return false; } bool operator() (const Array &, const UInt64 &) const { return false; } + bool operator() (const Array &, const UInt128 &) const { return false; } bool operator() (const Array &, const Int64 &) const { return false; } bool operator() (const Array &, const Float64 &) const { return false; } bool operator() (const Array &, const String &) const { return false; } @@ -234,6 +264,7 @@ public: bool operator() (const Tuple &, const Null &) const { return false; } bool operator() (const Tuple &, const UInt64 &) const { return false; } + bool operator() (const Tuple &, const UInt128 &) const { return false; } bool operator() (const Tuple &, const Int64 &) const { return false; } bool operator() (const Tuple &, const Float64 &) const { return false; } bool operator() (const Tuple &, const String &) const { return false; } @@ -247,45 +278,60 @@ public: bool operator() (const Null &, const Null &) const { return false; } bool operator() (const Null &, const UInt64 &) const { return true; } bool operator() (const Null &, const Int64 &) const { return true; } + bool operator() (const Null &, const UInt128 &) const { return true; } bool operator() (const Null &, const Float64 &) const { return true; } bool operator() (const Null &, const String &) const { return true; } bool operator() (const Null &, const Array &) const { return true; } bool operator() (const Null &, const Tuple &) const { return true; } - bool operator() (const UInt64 &, const Null &) const { return false; } + bool operator() (const UInt64 &, const Null 
&) const { return false; } bool operator() (const UInt64 & l, const UInt64 & r) const { return l < r; } + bool operator() (const UInt64 &, const UInt128 &) const { return true; } bool operator() (const UInt64 & l, const Int64 & r) const { return accurate::lessOp(l, r); } bool operator() (const UInt64 & l, const Float64 & r) const { return accurate::lessOp(l, r); } - bool operator() (const UInt64 &, const String &) const { return true; } - bool operator() (const UInt64 &, const Array &) const { return true; } - bool operator() (const UInt64 &, const Tuple &) const { return true; } + bool operator() (const UInt64 &, const String &) const { return true; } + bool operator() (const UInt64 &, const Array &) const { return true; } + bool operator() (const UInt64 &, const Tuple &) const { return true; } - bool operator() (const Int64 &, const Null &) const { return false; } + bool operator() (const UInt128 &, const Null &) const { return false; } + bool operator() (const UInt128 &, const UInt64) const { return false; } + bool operator() (const UInt128 & l, const UInt128 & r) const { return l < r; } + bool operator() (const UInt128 &, const Int64) const { return false; } + bool operator() (const UInt128 &, const Float64) const { return false; } + bool operator() (const UInt128 &, const String &) const { return false; } + bool operator() (const UInt128 &, const Array &) const { return false; } + bool operator() (const UInt128 &, const Tuple &) const { return false; } + + bool operator() (const Int64 &, const Null &) const { return false; } bool operator() (const Int64 & l, const UInt64 & r) const { return accurate::lessOp(l, r); } + bool operator() (const Int64 &, const UInt128 &) const { return false; } bool operator() (const Int64 & l, const Int64 & r) const { return l < r; } bool operator() (const Int64 & l, const Float64 & r) const { return accurate::lessOp(l, r); } - bool operator() (const Int64 &, const String &) const { return true; } - bool operator() (const Int64 &, const Array &) const { return true; } - bool operator() (const Int64 &, const Tuple &) const { return true; } + bool operator() (const Int64 &, const String &) const { return true; } + bool operator() (const Int64 &, const Array &) const { return true; } + bool operator() (const Int64 &, const Tuple &) const { return true; } - bool operator() (const Float64 &, const Null &) const { return false; } + bool operator() (const Float64 &, const Null &) const { return false; } bool operator() (const Float64 & l, const UInt64 & r) const { return accurate::lessOp(l, r); } + bool operator() (const Float64, const UInt128 &) const { return false; } bool operator() (const Float64 & l, const Int64 & r) const { return accurate::lessOp(l, r); } bool operator() (const Float64 & l, const Float64 & r) const { return l < r; } - bool operator() (const Float64 &, const String &) const { return true; } - bool operator() (const Float64 &, const Array &) const { return true; } - bool operator() (const Float64 &, const Tuple &) const { return true; } + bool operator() (const Float64 &, const String &) const { return true; } + bool operator() (const Float64 &, const Array &) const { return true; } + bool operator() (const Float64 &, const Tuple &) const { return true; } - bool operator() (const String &, const Null &) const { return false; } - bool operator() (const String &, const UInt64 &) const { return false; } - bool operator() (const String &, const Int64 &) const { return false; } - bool operator() (const String &, const Float64 &) const { return 
false; } + bool operator() (const String &, const Null &) const { return false; } + bool operator() (const String &, const UInt64 &) const { return false; } + bool operator() (const String &, const UInt128 &) const { return false; } + bool operator() (const String &, const Int64 &) const { return false; } + bool operator() (const String &, const Float64 &) const { return false; } bool operator() (const String & l, const String & r) const { return l < r; } - bool operator() (const String &, const Array &) const { return true; } - bool operator() (const String &, const Tuple &) const { return true; } + bool operator() (const String &, const Array &) const { return true; } + bool operator() (const String &, const Tuple &) const { return true; } bool operator() (const Array &, const Null &) const { return false; } bool operator() (const Array &, const UInt64 &) const { return false; } + bool operator() (const Array &, const UInt128 &) const { return false; } bool operator() (const Array &, const Int64 &) const { return false; } bool operator() (const Array &, const Float64 &) const { return false; } bool operator() (const Array &, const String &) const { return false; } @@ -294,6 +340,7 @@ public: bool operator() (const Tuple &, const Null &) const { return false; } bool operator() (const Tuple &, const UInt64 &) const { return false; } + bool operator() (const Tuple &, const UInt128 &) const { return false; } bool operator() (const Tuple &, const Int64 &) const { return false; } bool operator() (const Tuple &, const Float64 &) const { return false; } bool operator() (const Tuple &, const String &) const { return false; } @@ -318,6 +365,7 @@ public: bool operator() (Null &) const { throw Exception("Cannot sum Nulls", ErrorCodes::LOGICAL_ERROR); } bool operator() (String &) const { throw Exception("Cannot sum Strings", ErrorCodes::LOGICAL_ERROR); } bool operator() (Array &) const { throw Exception("Cannot sum Arrays", ErrorCodes::LOGICAL_ERROR); } + bool operator() (UInt128 &) const { throw Exception("Cannot sum UUIDs", ErrorCodes::LOGICAL_ERROR); } }; } diff --git a/dbms/src/Common/FileChecker.cpp b/dbms/src/Common/FileChecker.cpp index e3b1db745ca..bd8e00e38c0 100644 --- a/dbms/src/Common/FileChecker.cpp +++ b/dbms/src/Common/FileChecker.cpp @@ -95,13 +95,14 @@ void FileChecker::save() const /// So complex JSON structure - for compatibility with the old format. writeCString("{\"yandex\":{", out); + auto settings = FormatSettings(); for (auto it = map.begin(); it != map.end(); ++it) { if (it != map.begin()) writeString(",", out); /// `escapeForFileName` is not really needed. But it is left for compatibility with the old code. 
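The save() above deliberately keeps a nested JSON layout for backward compatibility. A standalone sketch of the file content it produces; the closing braces are assumed, since the tail of save() is outside this hunk:

    #include <iostream>
    #include <map>
    #include <string>

    int main()
    {
        const std::map<std::string, size_t> map{{"file1.bin", 100}, {"file2.bin", 200}};

        std::string out = "{\"yandex\":{";
        for (auto it = map.begin(); it != map.end(); ++it)
        {
            if (it != map.begin())
                out += ",";
            // Mirrors the writes above: quoted file name, then {"size":"<n>"}.
            out += "\"" + it->first + "\":{\"size\":\"" + std::to_string(it->second) + "\"}";
        }
        out += "}}";  // assumed closing, not shown in the hunk

        std::cout << out << '\n';
    }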
- writeJSONString(escapeForFileName(it->first), out); + writeJSONString(escapeForFileName(it->first), out, settings); writeString(":{\"size\":\"", out); writeIntText(it->second, out); writeString("\"}", out); diff --git a/dbms/src/Common/IFactoryWithAliases.h b/dbms/src/Common/IFactoryWithAliases.h new file mode 100644 index 00000000000..9006a3c7cfd --- /dev/null +++ b/dbms/src/Common/IFactoryWithAliases.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include +#include + +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +/** If stored objects may have several names (aliases) + * this interface may be helpful + * template parameter is available as Creator + */ +template +class IFactoryWithAliases +{ +protected: + using Creator = CreatorFunc; + + String getAliasToOrName(const String & name) const + { + if (aliases.count(name)) + return aliases.at(name); + else if (String name_lowercase = Poco::toLower(name); case_insensitive_aliases.count(name_lowercase)) + return case_insensitive_aliases.at(name_lowercase); + else + return name; + } + +public: + /// For compatibility with SQL, it's possible to specify that certain function name is case insensitive. + enum CaseSensitiveness + { + CaseSensitive, + CaseInsensitive + }; + + /** Register additional name for creator + * real_name have to be already registered. + */ + void registerAlias(const String & alias_name, const String & real_name, CaseSensitiveness case_sensitiveness = CaseSensitive) + { + const auto & creator_map = getCreatorMap(); + const auto & case_insensitive_creator_map = getCaseInsensitiveCreatorMap(); + const String factory_name = getFactoryName(); + + String real_dict_name; + if (creator_map.count(real_name)) + real_dict_name = real_name; + else if (auto real_name_lowercase = Poco::toLower(real_name); case_insensitive_creator_map.count(real_name_lowercase)) + real_dict_name = real_name_lowercase; + else + throw Exception(factory_name + ": can't create alias '" + alias_name + "', the real name '" + real_name + "' is not registered", + ErrorCodes::LOGICAL_ERROR); + + String alias_name_lowercase = Poco::toLower(alias_name); + + if (creator_map.count(alias_name) || case_insensitive_creator_map.count(alias_name_lowercase)) + throw Exception( + factory_name + ": the alias name '" + alias_name + "' is already registered as real name", ErrorCodes::LOGICAL_ERROR); + + if (case_sensitiveness == CaseInsensitive) + if (!case_insensitive_aliases.emplace(alias_name_lowercase, real_dict_name).second) + throw Exception( + factory_name + ": case insensitive alias name '" + alias_name + "' is not unique", ErrorCodes::LOGICAL_ERROR); + + if (!aliases.emplace(alias_name, real_dict_name).second) + throw Exception(factory_name + ": alias name '" + alias_name + "' is not unique", ErrorCodes::LOGICAL_ERROR); + } + + std::vector getAllRegisteredNames() const + { + std::vector result; + auto getter = [](const auto & pair) { return pair.first; }; + std::transform(getCreatorMap().begin(), getCreatorMap().end(), std::back_inserter(result), getter); + std::transform(aliases.begin(), aliases.end(), std::back_inserter(result), getter); + return result; + } + + bool isCaseInsensitive(const String & name) const + { + String name_lowercase = Poco::toLower(name); + return getCaseInsensitiveCreatorMap().count(name_lowercase) || case_insensitive_aliases.count(name_lowercase); + } + + const String & aliasTo(const String & name) const + { + if (auto it = aliases.find(name); it != aliases.end()) + return it->second; + else if 
(auto it = case_insensitive_aliases.find(Poco::toLower(name)); it != case_insensitive_aliases.end()) + return it->second; + + throw Exception(getFactoryName() + ": name '" + name + "' is not alias", ErrorCodes::LOGICAL_ERROR); + } + + bool isAlias(const String & name) const + { + return aliases.count(name) || case_insensitive_aliases.count(name); + } + + virtual ~IFactoryWithAliases() {} + +private: + using InnerMap = std::unordered_map; // name -> creator + using AliasMap = std::unordered_map; // alias -> original type + + virtual const InnerMap & getCreatorMap() const = 0; + virtual const InnerMap & getCaseInsensitiveCreatorMap() const = 0; + virtual String getFactoryName() const = 0; + + /// Alias map to data_types from previous two maps + AliasMap aliases; + + /// Case insensitive aliases + AliasMap case_insensitive_aliases; +}; + +} diff --git a/dbms/src/Core/Block.cpp b/dbms/src/Core/Block.cpp index 9083c214662..c3e77c11a92 100644 --- a/dbms/src/Core/Block.cpp +++ b/dbms/src/Core/Block.cpp @@ -54,7 +54,7 @@ void Block::insert(size_t position, const ColumnWithTypeAndName & elem) if (name_pos.second >= position) ++name_pos.second; - index_by_name[elem.name] = position; + index_by_name.emplace(elem.name, position); data.emplace(data.begin() + position, elem); } @@ -68,20 +68,20 @@ void Block::insert(size_t position, ColumnWithTypeAndName && elem) if (name_pos.second >= position) ++name_pos.second; - index_by_name[elem.name] = position; + index_by_name.emplace(elem.name, position); data.emplace(data.begin() + position, std::move(elem)); } void Block::insert(const ColumnWithTypeAndName & elem) { - index_by_name[elem.name] = data.size(); + index_by_name.emplace(elem.name, data.size()); data.emplace_back(elem); } void Block::insert(ColumnWithTypeAndName && elem) { - index_by_name[elem.name] = data.size(); + index_by_name.emplace(elem.name, data.size()); data.emplace_back(std::move(elem)); } diff --git a/dbms/src/Core/Defines.h b/dbms/src/Core/Defines.h index b7036171e6e..331e226103d 100644 --- a/dbms/src/Core/Defines.h +++ b/dbms/src/Core/Defines.h @@ -45,6 +45,7 @@ #define DBMS_MIN_REVISION_WITH_TABLES_STATUS 54226 #define DBMS_MIN_REVISION_WITH_TIME_ZONE_PARAMETER_IN_DATETIME_DATA_TYPE 54337 #define DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME 54372 +#define DBMS_MIN_REVISION_WITH_VERSION_PATCH 54401 /// Version of ClickHouse TCP protocol. Set to git tag with latest protocol change. #define DBMS_TCP_PROTOCOL_VERSION 54226 diff --git a/dbms/src/Core/Names.h b/dbms/src/Core/Names.h index 5c3384112ae..ff8252084ac 100644 --- a/dbms/src/Core/Names.h +++ b/dbms/src/Core/Names.h @@ -12,5 +12,6 @@ namespace DB using Names = std::vector; using NameSet = std::unordered_set; using NameToNameMap = std::unordered_map; +using NameToNameSetMap = std::unordered_map; } diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp index 01127b5029b..91e68bea75f 100644 --- a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.cpp @@ -40,60 +40,45 @@ void CollapsingSortedBlockInputStream::reportIncorrectData() } -void CollapsingSortedBlockInputStream::insertRows(MutableColumns & merged_columns, size_t & merged_rows, bool last_in_stream) +void CollapsingSortedBlockInputStream::insertRows(MutableColumns & merged_columns, size_t & merged_rows) { if (count_positive == 0 && count_negative == 0) + { + /// No input rows have been read. 
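To make the simplified collapsing logic above easier to follow, here is a toy model (assumed semantics, not the stream class itself) of how many rows a fully read group emits: matched counts ending on a negative sign cancel completely; otherwise at most one "first negative" and one "last positive" row survive.

    #include <cstddef>
    #include <iostream>

    struct GroupState { size_t count_positive; size_t count_negative; bool last_is_positive; };

    size_t rowsToEmit(const GroupState & s)
    {
        if (s.count_positive == 0 && s.count_negative == 0)
            return 0;  // nothing was read for this key
        if (s.count_positive == s.count_negative && !s.last_is_positive)
            return 0;  // the group cancels out exactly
        size_t rows = 0;
        if (s.count_positive <= s.count_negative)
            ++rows;    // first negative row survives
        if (s.count_positive >= s.count_negative)
            ++rows;    // last positive row survives
        return rows;
    }

    int main()
    {
        std::cout << rowsToEmit({2, 2, false}) << '\n';  // 0: fully collapsed
        std::cout << rowsToEmit({3, 2, true}) << '\n';   // 1: last positive survives
    }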
return; + } if (count_positive == count_negative && !last_is_positive) { - /// If all the rows in the input streams was collapsed, we still want to give at least one block in the result. - if (last_in_stream && merged_rows == 0 && !blocks_written) - { - LOG_INFO(log, "All rows collapsed"); - ++merged_rows; - for (size_t i = 0; i < num_columns; ++i) - merged_columns[i]->insertFrom(*(*last_positive.columns)[i], last_positive.row_num); - ++merged_rows; - for (size_t i = 0; i < num_columns; ++i) - merged_columns[i]->insertFrom(*(*last_negative.columns)[i], last_negative.row_num); - - if (out_row_sources_buf) - { - /// true flag value means "skip row" - current_row_sources[last_positive_pos].setSkipFlag(false); - current_row_sources[last_negative_pos].setSkipFlag(false); - } - } + /// Input rows exactly cancel out. + return; } - else + + if (count_positive <= count_negative) { - if (count_positive <= count_negative) - { - ++merged_rows; - for (size_t i = 0; i < num_columns; ++i) - merged_columns[i]->insertFrom(*(*first_negative.columns)[i], first_negative.row_num); + ++merged_rows; + for (size_t i = 0; i < num_columns; ++i) + merged_columns[i]->insertFrom(*(*first_negative.columns)[i], first_negative.row_num); - if (out_row_sources_buf) - current_row_sources[first_negative_pos].setSkipFlag(false); - } + if (out_row_sources_buf) + current_row_sources[first_negative_pos].setSkipFlag(false); + } - if (count_positive >= count_negative) - { - ++merged_rows; - for (size_t i = 0; i < num_columns; ++i) - merged_columns[i]->insertFrom(*(*last_positive.columns)[i], last_positive.row_num); + if (count_positive >= count_negative) + { + ++merged_rows; + for (size_t i = 0; i < num_columns; ++i) + merged_columns[i]->insertFrom(*(*last_positive.columns)[i], last_positive.row_num); - if (out_row_sources_buf) - current_row_sources[last_positive_pos].setSkipFlag(false); - } + if (out_row_sources_buf) + current_row_sources[last_positive_pos].setSkipFlag(false); + } - if (!(count_positive == count_negative || count_positive + 1 == count_negative || count_positive == count_negative + 1)) - { - if (count_incorrect_data < MAX_ERROR_MESSAGES) - reportIncorrectData(); - ++count_incorrect_data; - } + if (!(count_positive == count_negative || count_positive + 1 == count_negative || count_positive == count_negative + 1)) + { + if (count_incorrect_data < MAX_ERROR_MESSAGES) + reportIncorrectData(); + ++count_incorrect_data; } if (out_row_sources_buf) @@ -211,7 +196,7 @@ void CollapsingSortedBlockInputStream::merge(MutableColumns & merged_columns, st } /// Write data for last primary key. - insertRows(merged_columns, merged_rows, true); + insertRows(merged_columns, merged_rows); finished = true; } diff --git a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.h b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.h index e8650b4efc5..776b39c76d0 100644 --- a/dbms/src/DataStreams/CollapsingSortedBlockInputStream.h +++ b/dbms/src/DataStreams/CollapsingSortedBlockInputStream.h @@ -74,7 +74,7 @@ private: void merge(MutableColumns & merged_columns, std::priority_queue & queue); /// Output to result rows for the current primary key. 
- void insertRows(MutableColumns & merged_columns, size_t & merged_rows, bool last_in_stream = false); + void insertRows(MutableColumns & merged_columns, size_t & merged_rows); void reportIncorrectData(); }; diff --git a/dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp b/dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp index 0a58050339a..a72784ffd96 100644 --- a/dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp +++ b/dbms/src/DataStreams/CreatingSetsBlockInputStream.cpp @@ -125,6 +125,21 @@ void CreatingSetsBlockInputStream::createOne(SubqueryForSet & subquery) if (!done_with_join) { + if (subquery.joined_block_actions) + subquery.joined_block_actions->execute(block); + + for (const auto & name_with_alias : subquery.joined_block_aliases) + { + if (block.has(name_with_alias.first)) + { + auto pos = block.getPositionByName(name_with_alias.first); + auto column = block.getByPosition(pos); + block.erase(pos); + column.name = name_with_alias.second; + block.insert(std::move(column)); + } + } + if (!subquery.join->insertFromBlock(block)) done_with_join = true; } diff --git a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp index 3d7f43a258d..70a0dc8cd77 100644 --- a/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp +++ b/dbms/src/DataStreams/PushingToViewsBlockOutputStream.cpp @@ -71,7 +71,7 @@ void PushingToViewsBlockOutputStream::write(const Block & block) try { BlockInputStreamPtr from = std::make_shared(block); - InterpreterSelectQuery select(view.query, *views_context, {}, QueryProcessingStage::Complete, 0, from); + InterpreterSelectQuery select(view.query, *views_context, from); BlockInputStreamPtr in = std::make_shared(select.execute().in); /// Squashing is needed here because the materialized view query can generate a lot of blocks /// even when only one block is inserted into the parent table (e.g. if the query is a GROUP BY diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp b/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp index a56b2928fc5..18c122ddc1a 100644 --- a/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/SummingSortedBlockInputStream.cpp @@ -195,7 +195,7 @@ SummingSortedBlockInputStream::SummingSortedBlockInputStream( } -void SummingSortedBlockInputStream::insertCurrentRowIfNeeded(MutableColumns & merged_columns, bool force_insertion) +void SummingSortedBlockInputStream::insertCurrentRowIfNeeded(MutableColumns & merged_columns) { for (auto & desc : columns_to_aggregate) { @@ -237,9 +237,9 @@ void SummingSortedBlockInputStream::insertCurrentRowIfNeeded(MutableColumns & me desc.merged_column->insertDefault(); } - /// If it is "zero" row and it is not the last row of the result block, then - /// rollback the insertion (at this moment we need rollback only cols from columns_to_aggregate) - if (!force_insertion && current_row_is_zero) + /// If it is "zero" row, then rollback the insertion + /// (at this moment we need rollback only cols from columns_to_aggregate) + if (current_row_is_zero) { for (auto & desc : columns_to_aggregate) desc.merged_column->popBack(1); @@ -252,7 +252,6 @@ void SummingSortedBlockInputStream::insertCurrentRowIfNeeded(MutableColumns & me /// Update per-block and per-group flags ++merged_rows; - output_is_non_empty = true; } @@ -333,7 +332,7 @@ void SummingSortedBlockInputStream::merge(MutableColumns & merged_columns, std:: { if (!current_key.empty()) /// Write the data for the previous group. 
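The alias-handling block added to CreatingSetsBlockInputStream above renames joined columns by detaching and re-inserting them. A minimal model of that erase-and-reinsert step, with a simplified stand-in for the Block API:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct ColumnWithName { std::string name; /* column data omitted */ };

    void renameColumn(std::vector<ColumnWithName> & block, const std::string & from, const std::string & to)
    {
        auto it = std::find_if(block.begin(), block.end(),
            [&](const ColumnWithName & col) { return col.name == from; });
        if (it == block.end())
            return;                   // nothing to rename
        ColumnWithName column = *it;  // detach the column
        block.erase(it);
        column.name = to;             // apply the alias
        block.push_back(std::move(column));
    }

    int main()
    {
        std::vector<ColumnWithName> block{{"t.x"}, {"t.y"}};
        renameColumn(block, "t.x", "x_alias");
        for (const auto & col : block)
            std::cout << col.name << '\n';  // t.y, then x_alias
    }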
- insertCurrentRowIfNeeded(merged_columns, false); + insertCurrentRowIfNeeded(merged_columns); if (merged_rows >= max_block_size) { @@ -393,7 +392,7 @@ void SummingSortedBlockInputStream::merge(MutableColumns & merged_columns, std:: /// We will write the data for the last group, if it is non-zero. /// If it is zero, and without it the output stream will be empty, we will write it anyway. - insertCurrentRowIfNeeded(merged_columns, !output_is_non_empty); + insertCurrentRowIfNeeded(merged_columns); finished = true; } diff --git a/dbms/src/DataStreams/SummingSortedBlockInputStream.h b/dbms/src/DataStreams/SummingSortedBlockInputStream.h index 8cbd3c7c0f2..52c38454598 100644 --- a/dbms/src/DataStreams/SummingSortedBlockInputStream.h +++ b/dbms/src/DataStreams/SummingSortedBlockInputStream.h @@ -134,7 +134,6 @@ private: Row current_row; bool current_row_is_zero = true; /// Are all summed columns zero (or empty)? It is updated incrementally. - bool output_is_non_empty = false; /// Have we given out at least one row as a result. size_t merged_rows = 0; /// Number of rows merged into current result block /** We support two different cursors - with Collation and without. @@ -143,8 +142,7 @@ private: void merge(MutableColumns & merged_columns, std::priority_queue & queue); /// Insert the summed row for the current group into the result and updates some of per-block flags if the row is not "zero". - /// If force_insertion=true, then the row will be inserted even if it is "zero" - void insertCurrentRowIfNeeded(MutableColumns & merged_columns, bool force_insertion); + void insertCurrentRowIfNeeded(MutableColumns & merged_columns); /// Returns true if merge result is not empty bool mergeMap(const MapDescription & map, Row & row, SortCursor & cursor); diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp b/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp index 071752137c6..863e021b279 100644 --- a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp +++ b/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.cpp @@ -15,11 +15,11 @@ namespace ErrorCodes VersionedCollapsingSortedBlockInputStream::VersionedCollapsingSortedBlockInputStream( const BlockInputStreams & inputs_, const SortDescription & description_, - const String & sign_column_, size_t max_block_size_, bool can_collapse_all_rows_, + const String & sign_column_, size_t max_block_size_, WriteBuffer * out_row_sources_buf_) : MergingSortedBlockInputStream(inputs_, description_, max_block_size_, 0, out_row_sources_buf_) , max_rows_in_queue(std::min(std::max(3, max_block_size_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 2) - , current_keys(max_rows_in_queue + 1), can_collapse_all_rows(can_collapse_all_rows_) + , current_keys(max_rows_in_queue + 1) { sign_column_number = header.getPositionByName(sign_column_); } @@ -130,10 +130,7 @@ void VersionedCollapsingSortedBlockInputStream::merge(MutableColumns & merged_co { update_queue(current); - /// If all the rows was collapsed, we still want to give at least one block in the result. - /// If queue is empty then don't collapse two last rows. 
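The SummingSorted change above makes the rollback unconditional: the summed row is always inserted first and popped back if it turns out to be "zero". A toy illustration of that insert-then-rollback pattern, with std::vector standing in for the merged column:

    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<long> merged_column{7, 3};

        long summed_value = 0;                  // sum for the current group
        merged_column.push_back(summed_value);  // insert optimistically

        bool current_row_is_zero = (summed_value == 0);
        if (current_row_is_zero)
            merged_column.pop_back();           // roll the insertion back

        std::cout << merged_column.size() << '\n';  // 2: the zero row was dropped
    }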
- if (sign == sign_in_queue || (!can_collapse_all_rows && blocks_written == 0 - && merged_rows == 0 && queue.empty() && current_keys.size() == 1)) + if (sign == sign_in_queue) current_keys.pushBack(next_key); else { diff --git a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h b/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h index 636ee5e3833..5e84284d2ee 100644 --- a/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h +++ b/dbms/src/DataStreams/VersionedCollapsingSortedBlockInputStream.h @@ -176,7 +176,7 @@ public: /// max_rows_in_queue should be about max_block_size_ if we won't store a lot of extra blocks (RowRef holds SharedBlockPtr). VersionedCollapsingSortedBlockInputStream( const BlockInputStreams & inputs_, const SortDescription & description_, - const String & sign_column_, size_t max_block_size_, bool can_collapse_all_rows_, + const String & sign_column_, size_t max_block_size_, WriteBuffer * out_row_sources_buf_ = nullptr); String getName() const override { return "VersionedCollapsingSorted"; } @@ -203,8 +203,6 @@ private: /// Sources of rows for VERTICAL merge algorithm. Size equals to (size + number of gaps) in current_keys. std::queue current_row_sources; - const bool can_collapse_all_rows; - void merge(MutableColumns & merged_columns, std::priority_queue & queue); /// Output to result row for the current primary key. diff --git a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp b/dbms/src/DataTypes/DataTypeAggregateFunction.cpp index 86d96ece5f2..f005f2e2eea 100644 --- a/dbms/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/dbms/src/DataTypes/DataTypeAggregateFunction.cpp @@ -212,9 +212,9 @@ void DataTypeAggregateFunction::deserializeTextQuoted(IColumn & column, ReadBuff } -void DataTypeAggregateFunction::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const +void DataTypeAggregateFunction::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - writeJSONString(serializeToString(function, column, row_num), ostr); + writeJSONString(serializeToString(function, column, row_num), ostr, settings); } diff --git a/dbms/src/DataTypes/DataTypeEnum.cpp b/dbms/src/DataTypes/DataTypeEnum.cpp index e9b87670928..bdc27e3f1be 100644 --- a/dbms/src/DataTypes/DataTypeEnum.cpp +++ b/dbms/src/DataTypes/DataTypeEnum.cpp @@ -165,9 +165,9 @@ void DataTypeEnum::deserializeTextQuoted(IColumn & column, ReadBuffer & is } template -void DataTypeEnum::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const +void DataTypeEnum::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - writeJSONString(getNameForValue(static_cast(column).getData()[row_num]), ostr); + writeJSONString(getNameForValue(static_cast(column).getData()[row_num]), ostr, settings); } template diff --git a/dbms/src/DataTypes/DataTypeFactory.cpp b/dbms/src/DataTypes/DataTypeFactory.cpp index bbc1d070db5..b9d25b09544 100644 --- a/dbms/src/DataTypes/DataTypeFactory.cpp +++ b/dbms/src/DataTypes/DataTypeFactory.cpp @@ -52,8 +52,10 @@ DataTypePtr DataTypeFactory::get(const ASTPtr & ast) const throw Exception("Unexpected AST element for data type.", ErrorCodes::UNEXPECTED_AST_STRUCTURE); } -DataTypePtr DataTypeFactory::get(const String & family_name, const ASTPtr & parameters) const +DataTypePtr DataTypeFactory::get(const String & family_name_param, const ASTPtr & 
parameters) const { + String family_name = getAliasToOrName(family_name_param); + if (endsWith(family_name, "WithDictionary")) { ASTPtr low_cardinality_params = std::make_shared(); @@ -77,8 +79,9 @@ DataTypePtr DataTypeFactory::get(const String & family_name, const ASTPtr & para return it->second(parameters); } + String family_name_lowercase = Poco::toLower(family_name); + { - String family_name_lowercase = Poco::toLower(family_name); DataTypesDictionary::const_iterator it = case_insensitive_data_types.find(family_name_lowercase); if (case_insensitive_data_types.end() != it) return it->second(parameters); @@ -94,11 +97,16 @@ void DataTypeFactory::registerDataType(const String & family_name, Creator creat throw Exception("DataTypeFactory: the data type family " + family_name + " has been provided " " a null constructor", ErrorCodes::LOGICAL_ERROR); + String family_name_lowercase = Poco::toLower(family_name); + + if (isAlias(family_name) || isAlias(family_name_lowercase)) + throw Exception("DataTypeFactory: the data type family name '" + family_name + "' is already registered as alias", + ErrorCodes::LOGICAL_ERROR); + if (!data_types.emplace(family_name, creator).second) throw Exception("DataTypeFactory: the data type family name '" + family_name + "' is not unique", ErrorCodes::LOGICAL_ERROR); - String family_name_lowercase = Poco::toLower(family_name); if (case_sensitiveness == CaseInsensitive && !case_insensitive_data_types.emplace(family_name_lowercase, creator).second) @@ -106,7 +114,6 @@ void DataTypeFactory::registerDataType(const String & family_name, Creator creat ErrorCodes::LOGICAL_ERROR); } - void DataTypeFactory::registerSimpleDataType(const String & name, SimpleCreator creator, CaseSensitiveness case_sensitiveness) { if (creator == nullptr) @@ -121,7 +128,6 @@ void DataTypeFactory::registerSimpleDataType(const String & name, SimpleCreator }, case_sensitiveness); } - void registerDataTypeNumbers(DataTypeFactory & factory); void registerDataTypeDate(DataTypeFactory & factory); void registerDataTypeDateTime(DataTypeFactory & factory); diff --git a/dbms/src/DataTypes/DataTypeFactory.h b/dbms/src/DataTypes/DataTypeFactory.h index cad176432de..21d22cf932e 100644 --- a/dbms/src/DataTypes/DataTypeFactory.h +++ b/dbms/src/DataTypes/DataTypeFactory.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -19,10 +20,9 @@ using ASTPtr = std::shared_ptr; /** Creates a data type by name of data type family and parameters. */ -class DataTypeFactory final : public ext::singleton +class DataTypeFactory final : public ext::singleton, public IFactoryWithAliases> { private: - using Creator = std::function; using SimpleCreator = std::function; using DataTypesDictionary = std::unordered_map; @@ -31,13 +31,6 @@ public: DataTypePtr get(const String & family_name, const ASTPtr & parameters) const; DataTypePtr get(const ASTPtr & ast) const; - /// For compatibility with SQL, it's possible to specify that certain data type name is case insensitive. - enum CaseSensitiveness - { - CaseSensitive, - CaseInsensitive - }; - /// Register a type family by its name. 
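DataTypeFactory::get() now funnels names through getAliasToOrName() from IFactoryWithAliases. A compilable sketch of that resolution order (exact alias first, then case-insensitive alias, otherwise the name unchanged); the maps here are local stand-ins for the factory's members:

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    static std::string toLower(std::string s)
    {
        std::transform(s.begin(), s.end(), s.begin(),
                       [](unsigned char c) { return std::tolower(c); });
        return s;
    }

    std::string getAliasToOrName(
        const std::unordered_map<std::string, std::string> & aliases,
        const std::unordered_map<std::string, std::string> & case_insensitive_aliases,
        const std::string & name)
    {
        if (auto it = aliases.find(name); it != aliases.end())
            return it->second;
        if (auto it = case_insensitive_aliases.find(toLower(name)); it != case_insensitive_aliases.end())
            return it->second;
        return name;  // not an alias: use as-is
    }

    int main()
    {
        std::unordered_map<std::string, std::string> aliases{{"BINARY", "FixedString"}};
        std::unordered_map<std::string, std::string> ci{{"binary", "FixedString"}};
        std::cout << getAliasToOrName(aliases, ci, "binary") << '\n';  // FixedString
        std::cout << getAliasToOrName(aliases, ci, "UInt8") << '\n';   // UInt8
    }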
void registerDataType(const String & family_name, Creator creator, CaseSensitiveness case_sensitiveness = CaseSensitive); @@ -51,6 +44,13 @@ private: DataTypesDictionary case_insensitive_data_types; DataTypeFactory(); + + const DataTypesDictionary & getCreatorMap() const override { return data_types; } + + const DataTypesDictionary & getCaseInsensitiveCreatorMap() const override { return case_insensitive_data_types; } + + String getFactoryName() const override { return "DataTypeFactory"; } + friend class ext::singleton<DataTypeFactory>; }; diff --git a/dbms/src/DataTypes/DataTypeFixedString.cpp b/dbms/src/DataTypes/DataTypeFixedString.cpp index 05fdd34c464..c256c5e6214 100644 --- a/dbms/src/DataTypes/DataTypeFixedString.cpp +++ b/dbms/src/DataTypes/DataTypeFixedString.cpp @@ -168,10 +168,10 @@ void DataTypeFixedString::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, } -void DataTypeFixedString::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const +void DataTypeFixedString::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { const char * pos = reinterpret_cast<const char *>(&static_cast<const ColumnFixedString &>(column).getChars()[n * row_num]); - writeJSONString(pos, pos + n, ostr); + writeJSONString(pos, pos + n, ostr, settings); } @@ -231,7 +231,7 @@ void registerDataTypeFixedString(DataTypeFactory & factory) factory.registerDataType("FixedString", create); /// Compatibility alias. - factory.registerDataType("BINARY", create, DataTypeFactory::CaseInsensitive); + factory.registerAlias("BINARY", "FixedString", DataTypeFactory::CaseInsensitive); } } diff --git a/dbms/src/DataTypes/DataTypeString.cpp b/dbms/src/DataTypes/DataTypeString.cpp index 4ffda6f2099..308c3e04957 100644 --- a/dbms/src/DataTypes/DataTypeString.cpp +++ b/dbms/src/DataTypes/DataTypeString.cpp @@ -262,9 +262,9 @@ void DataTypeString::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, } -void DataTypeString::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const +void DataTypeString::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - writeJSONString(static_cast<const ColumnString &>(column).getDataAt(row_num), ostr); + writeJSONString(static_cast<const ColumnString &>(column).getDataAt(row_num), ostr, settings); } @@ -312,16 +312,16 @@ void registerDataTypeString(DataTypeFactory & factory) /// These synonyms are added for compatibility. - factory.registerSimpleDataType("CHAR", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("VARCHAR", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("TEXT", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("TINYTEXT", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("MEDIUMTEXT", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("LONGTEXT", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("BLOB", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("TINYBLOB", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("MEDIUMBLOB", creator, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("LONGBLOB", creator, DataTypeFactory::CaseInsensitive); + factory.registerAlias("CHAR", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("VARCHAR", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("TEXT", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("TINYTEXT", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("MEDIUMTEXT", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("LONGTEXT", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("BLOB", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("TINYBLOB", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("MEDIUMBLOB", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("LONGBLOB", "String", DataTypeFactory::CaseInsensitive); } } diff --git a/dbms/src/DataTypes/DataTypesNumber.cpp b/dbms/src/DataTypes/DataTypesNumber.cpp index 72861eff3ac..254d6ba6852 100644 --- a/dbms/src/DataTypes/DataTypesNumber.cpp +++ b/dbms/src/DataTypes/DataTypesNumber.cpp @@ -22,13 +22,13 @@ void registerDataTypeNumbers(DataTypeFactory & factory) /// These synonyms are added for compatibility.
- factory.registerSimpleDataType("TINYINT", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("SMALLINT", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("INT", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("INTEGER", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("BIGINT", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("FLOAT", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); - factory.registerSimpleDataType("DOUBLE", [] { return DataTypePtr(std::make_shared()); }, DataTypeFactory::CaseInsensitive); + factory.registerAlias("TINYINT", "Int8", DataTypeFactory::CaseInsensitive); + factory.registerAlias("SMALLINT", "Int16", DataTypeFactory::CaseInsensitive); + factory.registerAlias("INT", "Int32", DataTypeFactory::CaseInsensitive); + factory.registerAlias("INTEGER", "Int32", DataTypeFactory::CaseInsensitive); + factory.registerAlias("BIGINT", "Int64", DataTypeFactory::CaseInsensitive); + factory.registerAlias("FLOAT", "Float32", DataTypeFactory::CaseInsensitive); + factory.registerAlias("DOUBLE", "Float64", DataTypeFactory::CaseInsensitive); } } diff --git a/dbms/src/DataTypes/FieldToDataType.cpp b/dbms/src/DataTypes/FieldToDataType.cpp index 1b4fbd53c6b..3c2e78b4295 100644 --- a/dbms/src/DataTypes/FieldToDataType.cpp +++ b/dbms/src/DataTypes/FieldToDataType.cpp @@ -18,6 +18,7 @@ namespace DB namespace ErrorCodes { extern const int EMPTY_DATA_PASSED; + extern const int NOT_IMPLEMENTED; } @@ -34,6 +35,11 @@ DataTypePtr FieldToDataType::operator() (const UInt64 & x) const return std::make_shared(); } +DataTypePtr FieldToDataType::operator() (const UInt128 &) const +{ + throw Exception("There are no UInt128 literals in SQL", ErrorCodes::NOT_IMPLEMENTED); +} + DataTypePtr FieldToDataType::operator() (const Int64 & x) const { if (x <= std::numeric_limits::max() && x >= std::numeric_limits::min()) return std::make_shared(); diff --git a/dbms/src/DataTypes/FieldToDataType.h b/dbms/src/DataTypes/FieldToDataType.h index c6256a6f04b..a60c6a725d8 100644 --- a/dbms/src/DataTypes/FieldToDataType.h +++ b/dbms/src/DataTypes/FieldToDataType.h @@ -19,6 +19,7 @@ class FieldToDataType : public StaticVisitor public: DataTypePtr operator() (const Null & x) const; DataTypePtr operator() (const UInt64 & x) const; + DataTypePtr operator() (const UInt128 & x) const; DataTypePtr operator() (const Int64 & x) const; DataTypePtr operator() (const Float64 & x) const; DataTypePtr operator() (const String & x) const; diff --git a/dbms/src/Dictionaries/CMakeLists.txt b/dbms/src/Dictionaries/CMakeLists.txt index e69de29bb2d..65172356645 100644 --- a/dbms/src/Dictionaries/CMakeLists.txt +++ b/dbms/src/Dictionaries/CMakeLists.txt @@ -0,0 +1,3 @@ +if (ENABLE_TESTS) + add_subdirectory (tests) +endif () diff --git a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp b/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp index 2dbab26acc1..978d7b9e496 100644 --- a/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp +++ b/dbms/src/Dictionaries/Embedded/RegionsHierarchy.cpp @@ -41,7 +41,6 @@ void RegionsHierarchy::reload() RegionID max_region_id = 0; - auto regions_reader = data_source->createReader(); RegionEntry region_entry; diff --git 
a/dbms/src/Dictionaries/ODBCDictionarySource.cpp b/dbms/src/Dictionaries/ODBCDictionarySource.cpp index 0d5176c2bb0..03c354d4bc3 100644 --- a/dbms/src/Dictionaries/ODBCDictionarySource.cpp +++ b/dbms/src/Dictionaries/ODBCDictionarySource.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -39,7 +40,7 @@ ODBCDictionarySource::ODBCDictionarySource(const DictionaryStructure & dict_stru { auto session = std::make_shared( config.getString(config_prefix + ".connector", "ODBC"), - config.getString(config_prefix + ".connection_string")); + validateODBCConnectionString(config.getString(config_prefix + ".connection_string"))); /// Default POCO value is 1024. Set property manually to make possible reading of longer strings. session->setProperty("maxFieldSize", Poco::Any(field_size)); diff --git a/dbms/src/Dictionaries/tests/CMakeLists.txt b/dbms/src/Dictionaries/tests/CMakeLists.txt new file mode 100644 index 00000000000..f0a4cf4ab68 --- /dev/null +++ b/dbms/src/Dictionaries/tests/CMakeLists.txt @@ -0,0 +1,2 @@ +add_executable (validate-odbc-connection-string validate-odbc-connection-string.cpp) +target_link_libraries (validate-odbc-connection-string dbms) diff --git a/dbms/src/Dictionaries/tests/validate-odbc-connection-string.cpp b/dbms/src/Dictionaries/tests/validate-odbc-connection-string.cpp new file mode 100644 index 00000000000..766a709d8fd --- /dev/null +++ b/dbms/src/Dictionaries/tests/validate-odbc-connection-string.cpp @@ -0,0 +1,24 @@ +#include +#include +#include + + +using namespace DB; + +int main(int argc, char ** argv) +try +{ + if (argc < 2) + { + std::cerr << "Usage: validate-odbc-connection-string 'ConnectionString'\n"; + return 1; + } + + std::cout << validateODBCConnectionString(argv[1]) << '\n'; + return 0; +} +catch (...) 
+{ + std::cerr << getCurrentExceptionMessage(false) << "\n"; + return 2; +} diff --git a/dbms/src/Dictionaries/tests/validate-odbc-connection-string.reference b/dbms/src/Dictionaries/tests/validate-odbc-connection-string.reference new file mode 100644 index 00000000000..2f1a0eb5bcd --- /dev/null +++ b/dbms/src/Dictionaries/tests/validate-odbc-connection-string.reference @@ -0,0 +1,39 @@ +Code: 404, e.displayText() = DB::Exception: ODBC connection string cannot be empty, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter doesn't have value, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: DSN parameter is mandatory for ODBC connection string, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter doesn't have value, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: DSN parameter is mandatory for ODBC connection string, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter value is unescaped and contains illegal character, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: DSN parameter is mandatory for ODBC connection string, e.what() = DB::Exception +DSN={hello};ABC={de[f}; +DSN={hello};ABC={de}}f}; +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter value is unescaped and contains illegal character, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter is escaped but there is no closing curly brace, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: Unexpected character found after parameter value in ODBC connection string, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: Unexpected character found after parameter value in ODBC connection string, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: Unexpected character found after parameter value in ODBC connection string, e.what() = DB::Exception +DSN={hello};ABC={de}}f}; +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter is escaped but there is no closing curly brace, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter is escaped but there is no closing curly brace, e.what() = DB::Exception +DSN={hello};ABC={ }; +DSN={hello};ABC={ }; +Code: 404, e.displayText() = DB::Exception: Unexpected character found after parameter value in ODBC connection string, e.what() = DB::Exception +DSN={hello world};ABC={ }; +Code: 404, e.displayText() = DB::Exception: Unexpected character found after parameter value in ODBC connection string, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter name doesn't begin with valid identifier character, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter name doesn't begin with valid identifier character, e.what() = DB::Exception +DSN={hello world};ABC={ };_={}; +DSN={hello world};ABC={ };_={}; +DSN={hello world};ABC={ };_={}; +DSN={hello world};ABC={ };_={}}}; +DSN={hello world};ABC={ };_={...................................................................}; +DSN={hello world};ABC={ };_={....................................................................................}; +Code: 404, e.displayText() = DB::Exception: ODBC connection string has too long keyword or value, e.what() = DB::Exception +Code: 
404, e.displayText() = DB::Exception: ODBC connection string has forbidden parameter, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string has forbidden parameter, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string has forbidden parameter, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: Duplicate parameter found in ODBC connection string, e.what() = DB::Exception +Code: 404, e.displayText() = DB::Exception: ODBC connection string parameter name doesn't begin with valid identifier character, e.what() = DB::Exception +DSN={myconnection}; +DSN={myconnection};DATABASE={my_db};HOST={127.0.0.1};PORT={5432};PWD={password};UID={username}; +DSN={MSSQL};PWD={test};UID={test}; diff --git a/dbms/src/Dictionaries/tests/validate-odbc-connection-string.sh b/dbms/src/Dictionaries/tests/validate-odbc-connection-string.sh new file mode 100755 index 00000000000..39789a8ab44 --- /dev/null +++ b/dbms/src/Dictionaries/tests/validate-odbc-connection-string.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +./validate-odbc-connection-string '' 2>&1 +./validate-odbc-connection-string 'abc' 2>&1 +./validate-odbc-connection-string 'abc=' 2>&1 +./validate-odbc-connection-string 'ab"c=' 2>&1 +./validate-odbc-connection-string 'abc=def' 2>&1 +./validate-odbc-connection-string 'abc=de[f' 2>&1 +./validate-odbc-connection-string 'abc={de[f}' 2>&1 +./validate-odbc-connection-string 'abc={de[f};dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}}f};dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc=de}}f};dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}}f;dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}f;dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}f;dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}f};dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}}f};dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}}f;dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={de}} ;dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={ } ;dsn=hello' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn=hello ' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn=hello world ' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ...' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;...' 
2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;=' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_=' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= ' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {}' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {}}}' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {...................................................................}' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {....................................................................................}' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {.....................................................................................................}' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {...}; FILEDSN=x' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {...}; FileDsn = x' 2>&1 +./validate-odbc-connection-string 'abc={ } ; dsn = {hello world} ;_= {...}; Driver=x' 2>&1 +./validate-odbc-connection-string 'abc={}; abc=def' 2>&1 +./validate-odbc-connection-string 'abc={};;' 2>&1 +./validate-odbc-connection-string 'DSN=myconnection' 2>&1 +./validate-odbc-connection-string 'DSN=myconnection;UID=username;PWD=password;HOST=127.0.0.1;PORT=5432;DATABASE=my_db' 2>&1 +./validate-odbc-connection-string 'DSN=MSSQL;UID=test;PWD=test' 2>&1 diff --git a/dbms/src/Dictionaries/validateODBCConnectionString.cpp b/dbms/src/Dictionaries/validateODBCConnectionString.cpp new file mode 100644 index 00000000000..2f199e1b88c --- /dev/null +++ b/dbms/src/Dictionaries/validateODBCConnectionString.cpp @@ -0,0 +1,241 @@ +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ODBC_CONNECTION_STRING; +} + + +std::string validateODBCConnectionString(const std::string & connection_string) +{ + /// Connection string is a list of name, value pairs. + /// name and value are separated by '='. + /// names are case insensitive. + /// name=value pairs are sepated by ';'. + /// ASCII whitespace characters are skipped before and after delimiters. + /// value may be optionally enclosed by {} + /// in enclosed value, } is escaped as }}. + /// + /// Example: PWD={a}}b} means that password is a}b + /// + /// https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqldriverconnect-function?view=sql-server-2017#comments + + /// unixODBC has fixed size buffers on stack and has buffer overflow bugs. + /// We will limit string sizes to small values. 
+ + static constexpr size_t MAX_ELEMENT_SIZE = 100; + static constexpr size_t MAX_CONNECTION_STRING_SIZE = 1000; + + if (connection_string.empty()) + throw Exception("ODBC connection string cannot be empty", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + if (connection_string.size() >= MAX_CONNECTION_STRING_SIZE) + throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + const char * pos = connection_string.data(); + const char * end = pos + connection_string.size(); + + auto skip_whitespaces = [&] + { + while (pos < end && isWhitespaceASCII(*pos)) + { + if (*pos != ' ') + throw Exception("ODBC connection string parameter contains unusual whitespace character", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + ++pos; + } + }; + + auto read_name = [&] + { + const char * begin = pos; + + if (pos < end && isValidIdentifierBegin(*pos)) + ++pos; + else + throw Exception("ODBC connection string parameter name doesn't begin with valid identifier character", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + while (pos < end && isWordCharASCII(*pos)) + ++pos; + + return std::string(begin, pos); + }; + + auto read_plain_value = [&] + { + const char * begin = pos; + + while (pos < end && *pos != ';' && !isWhitespaceASCII(*pos)) + { + signed char c = *pos; + if (c < 32 || strchr("[]{}(),;?*=!@'\"", c) != nullptr) + throw Exception("ODBC connection string parameter value is unescaped and contains illegal character", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + ++pos; + } + + return std::string(begin, pos); + }; + + auto read_escaped_value = [&] + { + std::string res; + + if (pos < end && *pos == '{') + ++pos; + else + throw Exception("ODBC connection string parameter value doesn't begin with opening curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + while (pos < end) + { + if (*pos == '}') + { + ++pos; + if (pos >= end || *pos != '}') + return res; + } + + if (*pos == 0) + throw Exception("ODBC connection string parameter value contains ASCII NUL character", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + res += *pos; + ++pos; + } + + throw Exception("ODBC connection string parameter is escaped but there is no closing curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + }; + + auto read_value = [&] + { + if (pos >= end) + return std::string{}; + + if (*pos == '{') + return read_escaped_value(); + else + return read_plain_value(); + }; + + std::map parameters; + + while (pos < end) + { + skip_whitespaces(); + std::string name = read_name(); + skip_whitespaces(); + + Poco::toUpperInPlace(name); + if (name == "FILEDSN" || name == "SAVEFILE" || name == "DRIVER") + throw Exception("ODBC connection string has forbidden parameter", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + if (pos >= end) + throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + if (*pos == '=') + ++pos; + else + throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + skip_whitespaces(); + std::string value = read_value(); + skip_whitespaces(); + + if (name.size() > MAX_ELEMENT_SIZE || value.size() > MAX_ELEMENT_SIZE) + throw Exception("ODBC connection string has too long keyword or value", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + if (!parameters.emplace(name, value).second) + throw Exception("Duplicate parameter found in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + if (pos >= end) + break; + + if (*pos == ';') + ++pos; + else + throw Exception("Unexpected 
character found after parameter value in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + } + + /// Reconstruct the connection string. + + auto it = parameters.find("DSN"); + + if (parameters.end() == it) + throw Exception("DSN parameter is mandatory for ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + std::string dsn = it->second; + + if (dsn.empty()) + throw Exception("DSN parameter cannot be empty in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + parameters.erase(it); + + std::string reconstructed_connection_string; + + auto write_plain_value = [&](const std::string & value) + { + reconstructed_connection_string += value; + }; + + auto write_escaped_value = [&](const std::string & value) + { + reconstructed_connection_string += '{'; + + const char * pos = value.data(); + const char * end = pos + value.size(); + while (true) + { + const char * next_pos = find_first_symbols<'}'>(pos, end); + + if (next_pos == end) + { + reconstructed_connection_string.append(pos, next_pos - pos); + break; + } + else + { + reconstructed_connection_string.append(pos, next_pos - pos); + reconstructed_connection_string.append("}}"); + pos = next_pos + 1; + } + } + + reconstructed_connection_string += '}'; + }; + + auto write_value = [&](const std::string & value) + { + if (std::all_of(value.begin(), value.end(), isWordCharASCII)) + write_plain_value(value); + else + write_escaped_value(value); + }; + + auto write_element = [&](const std::string & name, const std::string & value) + { + reconstructed_connection_string.append(name); + reconstructed_connection_string += '='; + write_value(value); + reconstructed_connection_string += ';'; + }; + + /// Place DSN first because that's safer. + write_element("DSN", dsn); + for (const auto & elem : parameters) + write_element(elem.first, elem.second); + + if (reconstructed_connection_string.size() >= MAX_CONNECTION_STRING_SIZE) + throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING); + + return reconstructed_connection_string; +} + +} diff --git a/dbms/src/Dictionaries/validateODBCConnectionString.h b/dbms/src/Dictionaries/validateODBCConnectionString.h new file mode 100644 index 00000000000..f0f93b1de65 --- /dev/null +++ b/dbms/src/Dictionaries/validateODBCConnectionString.h @@ -0,0 +1,21 @@ +#pragma once + +#include <string> + + +namespace DB +{ + +/** Passing an arbitrary connection string to the ODBC Driver Manager is insecure, for the following reasons: + * 1. A Driver Manager like unixODBC has multiple bugs, such as buffer overflows. + * 2. The Driver Manager can interpret some parameters as a path to a library for dlopen or a file to read, + * thus allowing arbitrary remote code execution. + * + * This function will throw an exception if the connection string has insecure parameters. + * It may also modify the connection string to harden it. + * + * Note that it is intended for the ANSI (not multibyte) variant of a connection string.
+ */ +std::string validateODBCConnectionString(const std::string & connection_string); + +} diff --git a/dbms/src/Formats/FormatFactory.cpp b/dbms/src/Formats/FormatFactory.cpp index b6c6b131c77..a1910492afd 100644 --- a/dbms/src/Formats/FormatFactory.cpp +++ b/dbms/src/Formats/FormatFactory.cpp @@ -60,6 +60,7 @@ BlockOutputStreamPtr FormatFactory::getOutput(const String & name, WriteBuffer & FormatSettings format_settings; format_settings.json.quote_64bit_integers = settings.output_format_json_quote_64bit_integers; format_settings.json.quote_denormals = settings.output_format_json_quote_denormals; + format_settings.json.escape_forward_slashes = settings.output_format_json_escape_forward_slashes; format_settings.csv.delimiter = settings.format_csv_delimiter; format_settings.csv.allow_single_quotes = settings.format_csv_allow_single_quotes; format_settings.csv.allow_double_quotes = settings.format_csv_allow_double_quotes; diff --git a/dbms/src/Formats/FormatFactory.h b/dbms/src/Formats/FormatFactory.h index f415e97abdb..7a9dc68d62e 100644 --- a/dbms/src/Formats/FormatFactory.h +++ b/dbms/src/Formats/FormatFactory.h @@ -58,6 +58,11 @@ public: void registerInputFormat(const String & name, InputCreator input_creator); void registerOutputFormat(const String & name, OutputCreator output_creator); + const FormatsDictionary & getAllFormats() const + { + return dict; + } + private: FormatsDictionary dict; diff --git a/dbms/src/Formats/FormatSettings.h b/dbms/src/Formats/FormatSettings.h index ad951bd798f..d6779116807 100644 --- a/dbms/src/Formats/FormatSettings.h +++ b/dbms/src/Formats/FormatSettings.h @@ -17,6 +17,7 @@ struct FormatSettings { bool quote_64bit_integers = true; bool quote_denormals = true; + bool escape_forward_slashes = true; }; JSON json; diff --git a/dbms/src/Formats/JSONEachRowRowOutputStream.cpp b/dbms/src/Formats/JSONEachRowRowOutputStream.cpp index fec80cd1421..40508b36856 100644 --- a/dbms/src/Formats/JSONEachRowRowOutputStream.cpp +++ b/dbms/src/Formats/JSONEachRowRowOutputStream.cpp @@ -18,7 +18,7 @@ JSONEachRowRowOutputStream::JSONEachRowRowOutputStream(WriteBuffer & ostr_, cons for (size_t i = 0; i < columns; ++i) { WriteBufferFromString out(fields[i]); - writeJSONString(sample.getByPosition(i).name, out); + writeJSONString(sample.getByPosition(i).name, out, settings); } } diff --git a/dbms/src/Formats/JSONRowOutputStream.cpp b/dbms/src/Formats/JSONRowOutputStream.cpp index a549ff4395f..89f57669a3e 100644 --- a/dbms/src/Formats/JSONRowOutputStream.cpp +++ b/dbms/src/Formats/JSONRowOutputStream.cpp @@ -21,7 +21,7 @@ JSONRowOutputStream::JSONRowOutputStream(WriteBuffer & ostr_, const Block & samp need_validate_utf8 = true; WriteBufferFromOwnString out; - writeJSONString(fields[i].name, out); + writeJSONString(fields[i].name, out, settings); fields[i].name = out.str(); } @@ -50,7 +50,7 @@ void JSONRowOutputStream::writePrefix() writeString(fields[i].name, *ostr); writeCString(",\n", *ostr); writeCString("\t\t\t\"type\": ", *ostr); - writeJSONString(fields[i].type->getName(), *ostr); + writeJSONString(fields[i].type->getName(), *ostr, settings); writeChar('\n', *ostr); writeCString("\t\t}", *ostr); @@ -149,7 +149,7 @@ void JSONRowOutputStream::writeTotals() writeCString(",\n", *ostr); writeCString("\t\t", *ostr); - writeJSONString(column.name, *ostr); + writeJSONString(column.name, *ostr, settings); writeCString(": ", *ostr); column.type->serializeTextJSON(*column.column.get(), 0, *ostr, settings); } @@ -176,7 +176,7 @@ static void writeExtremesElement(const char * title, 
const Block & extremes, siz writeCString(",\n", ostr); writeCString("\t\t\t", ostr); - writeJSONString(column.name, ostr); + writeJSONString(column.name, ostr, settings); writeCString(": ", ostr); column.type->serializeTextJSON(*column.column.get(), row_num, ostr, settings); } diff --git a/dbms/src/Formats/ValuesRowInputStream.cpp b/dbms/src/Formats/ValuesRowInputStream.cpp index c291f147184..559ac658a6a 100644 --- a/dbms/src/Formats/ValuesRowInputStream.cpp +++ b/dbms/src/Formats/ValuesRowInputStream.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -29,7 +30,7 @@ namespace ErrorCodes ValuesRowInputStream::ValuesRowInputStream(ReadBuffer & istr_, const Block & header_, const Context & context_, const FormatSettings & format_settings) - : istr(istr_), header(header_), context(context_), format_settings(format_settings) + : istr(istr_), header(header_), context(std::make_unique(context_)), format_settings(format_settings) { /// In this format, BOM at beginning of stream cannot be confused with value, so it is safe to skip it. skipBOMIfExists(istr); @@ -112,7 +113,7 @@ bool ValuesRowInputStream::read(MutableColumns & columns) istr.position() = const_cast(token_iterator->begin); - std::pair value_raw = evaluateConstantExpression(ast, context); + std::pair value_raw = evaluateConstantExpression(ast, *context); Field value = convertFieldToType(value_raw.first, type, value_raw.second.get()); if (value.isNull()) diff --git a/dbms/src/Formats/ValuesRowInputStream.h b/dbms/src/Formats/ValuesRowInputStream.h index 00fa9071947..49775861746 100644 --- a/dbms/src/Formats/ValuesRowInputStream.h +++ b/dbms/src/Formats/ValuesRowInputStream.h @@ -28,7 +28,7 @@ public: private: ReadBuffer & istr; Block header; - const Context & context; + std::unique_ptr context; /// pimpl const FormatSettings format_settings; }; diff --git a/dbms/src/Functions/CMakeLists.txt b/dbms/src/Functions/CMakeLists.txt index 6856cb95cf2..ef285659be2 100644 --- a/dbms/src/Functions/CMakeLists.txt +++ b/dbms/src/Functions/CMakeLists.txt @@ -41,7 +41,7 @@ generate_function_register(Array FunctionArrayEnumerate FunctionArrayEnumerateUniq FunctionArrayUniq - FunctionArrayDistinct + FunctionArrayDistinct FunctionEmptyArrayUInt8 FunctionEmptyArrayUInt16 FunctionEmptyArrayUInt32 @@ -91,7 +91,7 @@ list(REMOVE_ITEM clickhouse_functions_headers IFunction.h FunctionFactory.h Func add_library(clickhouse_functions ${clickhouse_functions_sources}) -target_link_libraries(clickhouse_functions PUBLIC dbms PRIVATE libconsistent-hashing ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES}) +target_link_libraries(clickhouse_functions PUBLIC dbms PRIVATE libconsistent-hashing ${FARMHASH_LIBRARIES} ${METROHASH_LIBRARIES} murmurhash) target_include_directories (clickhouse_functions SYSTEM BEFORE PUBLIC ${DIVIDE_INCLUDE_DIR}) diff --git a/dbms/src/Functions/FunctionFactory.cpp b/dbms/src/Functions/FunctionFactory.cpp index 9bb2abbb013..0b2f042089d 100644 --- a/dbms/src/Functions/FunctionFactory.cpp +++ b/dbms/src/Functions/FunctionFactory.cpp @@ -6,7 +6,6 @@ #include - namespace DB { @@ -26,8 +25,13 @@ void FunctionFactory::registerFunction(const throw Exception("FunctionFactory: the function name '" + name + "' is not unique", ErrorCodes::LOGICAL_ERROR); + String function_name_lowercase = Poco::toLower(name); + if (isAlias(name) || isAlias(function_name_lowercase)) + throw Exception("FunctionFactory: the function name '" + name + "' is already registered as alias", + ErrorCodes::LOGICAL_ERROR); + if (case_sensitiveness == 
CaseInsensitive - && !case_insensitive_functions.emplace(Poco::toLower(name), creator).second) + && !case_insensitive_functions.emplace(function_name_lowercase, creator).second) throw Exception("FunctionFactory: the case insensitive function name '" + name + "' is not unique", ErrorCodes::LOGICAL_ERROR); } @@ -45,9 +49,11 @@ FunctionBuilderPtr FunctionFactory::get( FunctionBuilderPtr FunctionFactory::tryGet( - const std::string & name, + const std::string & name_param, const Context & context) const { + String name = getAliasToOrName(name_param); + auto it = functions.find(name); if (functions.end() != it) return it->second(context); diff --git a/dbms/src/Functions/FunctionFactory.h b/dbms/src/Functions/FunctionFactory.h index a061c3103fd..7fa0f81f475 100644 --- a/dbms/src/Functions/FunctionFactory.h +++ b/dbms/src/Functions/FunctionFactory.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -20,19 +21,9 @@ class Context; * Function could use for initialization (take ownership of shared_ptr, for example) * some dictionaries from Context. */ -class FunctionFactory : public ext::singleton +class FunctionFactory : public ext::singleton, public IFactoryWithAliases> { - friend class StorageSystemFunctions; - public: - using Creator = std::function; - - /// For compatibility with SQL, it's possible to specify that certain function name is case insensitive. - enum CaseSensitiveness - { - CaseSensitive, - CaseInsensitive - }; template void registerFunction(CaseSensitiveness case_sensitiveness = CaseSensitive) @@ -67,6 +58,12 @@ private: return std::make_shared(Function::create(context)); } + const Functions & getCreatorMap() const override { return functions; } + + const Functions & getCaseInsensitiveCreatorMap() const override { return case_insensitive_functions; } + + String getFactoryName() const override { return "FunctionFactory"; } + /// Register a function by its name. /// No locking, you must register all functions before usage of get. 
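For context on the refactoring above: name resolution now goes through `IFactoryWithAliases`, so a lookup first maps a compatibility alias (e.g. `ceiling`) to its canonical name (`ceil`), and, as the hunk above enforces, `registerFunction` refuses a name that an alias already claims. A minimal self-contained model of that resolution order (illustrative only; `MiniFactory` is not ClickHouse's actual API):

```cpp
#include <iostream>
#include <map>
#include <string>

// Simplified model of a factory with compatibility aliases:
// registerAlias("ceiling", "ceil") makes lookups of "ceiling" resolve to "ceil".
struct MiniFactory
{
    std::map<std::string, std::string> aliases;   // alias name -> canonical name
    std::map<std::string, int> functions;         // canonical name -> creator (stubbed as int)

    void registerFunction(const std::string & name) { functions[name] = 1; }
    void registerAlias(const std::string & alias, const std::string & real_name) { aliases[alias] = real_name; }

    // Mirrors the getAliasToOrName step in tryGet above: resolve first, then look up.
    bool tryGet(const std::string & name) const
    {
        auto it = aliases.find(name);
        const std::string & resolved = (it != aliases.end()) ? it->second : name;
        return functions.count(resolved) != 0;
    }
};

int main()
{
    MiniFactory factory;
    factory.registerFunction("ceil");
    factory.registerAlias("ceiling", "ceil");
    std::cout << factory.tryGet("ceiling") << '\n';  // 1: "ceiling" resolves through the alias
}
```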
void registerFunction( diff --git a/dbms/src/Functions/FunctionsArray.cpp b/dbms/src/Functions/FunctionsArray.cpp index d72dcf6f670..466610bcd45 100644 --- a/dbms/src/Functions/FunctionsArray.cpp +++ b/dbms/src/Functions/FunctionsArray.cpp @@ -1286,12 +1286,12 @@ DataTypePtr FunctionArrayDistinct::getReturnTypeImpl(const DataTypes & arguments { const DataTypeArray * array_type = checkAndGetDataType(arguments[0].get()); if (!array_type) - throw Exception("Argument for function " + getName() + " must be array but it " + throw Exception("Argument for function " + getName() + " must be array but it " " has type " + arguments[0]->getName() + ".", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - + auto nested_type = removeNullable(array_type->getNestedType()); - + return std::make_shared(nested_type); } @@ -1307,7 +1307,7 @@ void FunctionArrayDistinct::executeImpl(Block & block, const ColumnNumbers & arg const IColumn & src_data = array->getData(); const ColumnArray::Offsets & offsets = array->getOffsets(); - + ColumnRawPtrs original_data_columns; original_data_columns.push_back(&src_data); @@ -1416,7 +1416,7 @@ bool FunctionArrayDistinct::executeString( HashTableAllocatorWithStackMemory<(1ULL << INITIAL_SIZE_DEGREE) * sizeof(StringRef)>>; const PaddedPODArray * src_null_map = nullptr; - + if (nullable_col) { src_null_map = &static_cast(&nullable_col->getNullMapColumn())->getData(); @@ -1471,7 +1471,7 @@ void FunctionArrayDistinct::executeHashed( res_data_col.insertFrom(*columns[0], j); } } - + res_offsets.emplace_back(set.size() + prev_off); prev_off = off; } diff --git a/dbms/src/Functions/FunctionsArray.h b/dbms/src/Functions/FunctionsArray.h index 3cd1a8968f7..15fd5b420e2 100644 --- a/dbms/src/Functions/FunctionsArray.h +++ b/dbms/src/Functions/FunctionsArray.h @@ -1011,10 +1011,11 @@ public: DataTypePtr observed_type0 = removeNullable(array_type->getNestedType()); DataTypePtr observed_type1 = removeNullable(arguments[1]); - if (!(observed_type0->isNumber() && observed_type1->isNumber()) + /// We also support arrays of Enum type (that are represented by number) to search numeric values. + if (!(observed_type0->isValueRepresentedByNumber() && observed_type1->isNumber()) && !observed_type0->equals(*observed_type1)) throw Exception("Types of array and 2nd argument of function " - + getName() + " must be identical up to nullability. Passed: " + + getName() + " must be identical up to nullability or numeric types or Enum and numeric type. 
Passed: " + arguments[0]->getName() + " and " + arguments[1]->getName() + ".", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } @@ -1249,7 +1250,7 @@ private: IColumn & res_data_col, ColumnArray::Offsets & res_offsets, const ColumnNullable * nullable_col); - + void executeHashed( const ColumnArray::Offsets & offsets, const ColumnRawPtrs & columns, diff --git a/dbms/src/Functions/FunctionsHashing.cpp b/dbms/src/Functions/FunctionsHashing.cpp index 2aca07e477d..bafd205e16c 100644 --- a/dbms/src/Functions/FunctionsHashing.cpp +++ b/dbms/src/Functions/FunctionsHashing.cpp @@ -20,22 +20,10 @@ void registerFunctionsHashing(FunctionFactory & factory) factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); + factory.registerFunction(); } - -template <> -UInt64 toInteger(Float32 x) -{ - UInt32 res; - memcpy(&res, &x, sizeof(x)); - return res; -} - -template <> -UInt64 toInteger(Float64 x) -{ - UInt64 res; - memcpy(&res, &x, sizeof(x)); - return res; -} - } diff --git a/dbms/src/Functions/FunctionsHashing.h b/dbms/src/Functions/FunctionsHashing.h index 86d5274b881..dce5a51baf1 100644 --- a/dbms/src/Functions/FunctionsHashing.h +++ b/dbms/src/Functions/FunctionsHashing.h @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include @@ -29,6 +31,7 @@ #include #include +#include namespace DB @@ -62,6 +65,8 @@ namespace ErrorCodes struct HalfMD5Impl { + using ReturnType = UInt64; + static UInt64 apply(const char * begin, size_t size) { union @@ -137,12 +142,15 @@ struct SHA256Impl struct SipHash64Impl { + using ReturnType = UInt64; + static UInt64 apply(const char * begin, size_t size) { return sipHash64(begin, size); } }; + struct SipHash128Impl { static constexpr auto name = "sipHash128"; @@ -176,58 +184,6 @@ struct IntHash64Impl }; -template -class FunctionStringHash64 : public IFunction -{ -public: - static constexpr auto name = Name::name; - static FunctionPtr create(const Context &) { return std::make_shared(); } - - String getName() const override - { - return name; - } - - size_t getNumberOfArguments() const override { return 1; } - - DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override - { - if (!arguments[0]->isString()) - throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - - return std::make_shared(); - } - - bool useDefaultImplementationForConstants() const override { return true; } - - void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) override - { - if (const ColumnString * col_from = checkAndGetColumn(block.getByPosition(arguments[0]).column.get())) - { - auto col_to = ColumnUInt64::create(); - - const typename ColumnString::Chars_t & data = col_from->getChars(); - const typename ColumnString::Offsets & offsets = col_from->getOffsets(); - typename ColumnUInt64::Container & vec_to = col_to->getData(); - size_t size = offsets.size(); - vec_to.resize(size); - - for (size_t i = 0; i < size; ++i) - vec_to[i] = Impl::apply( - reinterpret_cast(&data[i == 0 ? 0 : offsets[i - 1]]), - i == 0 ? 
offsets[i] - 1 : (offsets[i] - 1 - offsets[i - 1])); - - block.getByPosition(result).column = std::move(col_to); - } - else - throw Exception("Illegal column " + block.getByPosition(arguments[0]).column->getName() - + " of first argument of function " + Name::name, - ErrorCodes::ILLEGAL_COLUMN); - } -}; - - template class FunctionStringHashFixedString : public IFunction { @@ -265,12 +221,17 @@ public: const auto size = offsets.size(); chars_to.resize(size * Impl::length); + ColumnString::Offset current_offset = 0; for (size_t i = 0; i < size; ++i) + { Impl::apply( - reinterpret_cast(&data[i == 0 ? 0 : offsets[i - 1]]), - i == 0 ? offsets[i] - 1 : (offsets[i] - 1 - offsets[i - 1]), + reinterpret_cast(&data[current_offset]), + offsets[i] - current_offset - 1, &chars_to[i * Impl::length]); + current_offset = offsets[i]; + } + block.getByPosition(result).column = std::move(col_to); } else @@ -354,19 +315,6 @@ public: }; -template -static UInt64 toInteger(T x) -{ - return x; -} - -template <> -UInt64 toInteger(Float32 x); - -template <> -UInt64 toInteger(Float64 x); - - /** We use hash functions called CityHash, FarmHash, MetroHash. * In this regard, this template is named with the words `NeighborhoodHash`. */ @@ -387,7 +335,7 @@ private: size_t size = vec_from.size(); for (size_t i = 0; i < size; ++i) { - UInt64 h = IntHash64Impl::apply(toInteger(vec_from[i])); + UInt64 h = IntHash64Impl::apply(ext::bit_cast(vec_from[i])); if (first) vec_to[i] = h; else @@ -396,7 +344,7 @@ private: } else if (auto col_from = checkAndGetColumnConst>(column)) { - const UInt64 hash = IntHash64Impl::apply(toInteger(col_from->template getValue())); + const UInt64 hash = IntHash64Impl::apply(ext::bit_cast(col_from->template getValue())); size_t size = vec_to.size(); if (first) { @@ -423,15 +371,19 @@ private: const typename ColumnString::Offsets & offsets = col_from->getOffsets(); size_t size = offsets.size(); + ColumnString::Offset current_offset = 0; for (size_t i = 0; i < size; ++i) { const UInt64 h = Impl::Hash64( - reinterpret_cast(&data[i == 0 ? 0 : offsets[i - 1]]), - i == 0 ? offsets[i] - 1 : (offsets[i] - 1 - offsets[i - 1])); + reinterpret_cast(&data[current_offset]), + offsets[i] - current_offset - 1); + if (first) vec_to[i] = h; else vec_to[i] = Impl::Hash128to64(typename Impl::uint128_t(vec_to[i], h)); + + current_offset = offsets[i]; } } else if (const ColumnFixedString * col_from = checkAndGetColumn(column)) @@ -439,6 +391,7 @@ private: const typename ColumnString::Chars_t & data = col_from->getChars(); size_t n = col_from->getN(); size_t size = data.size() / n; + for (size_t i = 0; i < size; ++i) { const UInt64 h = Impl::Hash64(reinterpret_cast(&data[i * n]), n); @@ -453,6 +406,7 @@ private: String value = col_from->getValue().data(); const UInt64 hash = Impl::Hash64(value.data(), value.size()); const size_t size = vec_to.size(); + if (first) { vec_to.assign(size, hash); @@ -487,19 +441,21 @@ private: const size_t size = offsets.size(); + ColumnArray::Offset current_offset = 0; for (size_t i = 0; i < size; ++i) { - const size_t begin = i == 0 ? 
0 : offsets[i - 1]; - const size_t end = offsets[i]; + ColumnArray::Offset next_offset = offsets[i]; - UInt64 h = IntHash64Impl::apply(end - begin); + UInt64 h = IntHash64Impl::apply(next_offset - current_offset); if (first) vec_to[i] = h; else vec_to[i] = Impl::Hash128to64(typename Impl::uint128_t(vec_to[i], h)); - for (size_t j = begin; j < end; ++j) + for (size_t j = current_offset; j < next_offset; ++j) vec_to[i] = Impl::Hash128to64(typename Impl::uint128_t(vec_to[i], vec_temp[j])); + + current_offset = offsets[i]; } } else if (const ColumnConst * col_from = checkAndGetColumnConst(column)) @@ -614,6 +570,181 @@ public: }; +template +class FunctionStringHash : public IFunction +{ +public: + static constexpr auto name = Name::name; + static FunctionPtr create(const Context &) { return std::make_shared(); } + + String getName() const override { return name; } + + bool isVariadic() const override { return false; } + + size_t getNumberOfArguments() const override { return 1; } + + DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments */) const override + { return std::make_shared>(); } + + bool useDefaultImplementationForConstants() const override { return true; } + + void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t input_rows_count) override + { + auto col_to = ColumnVector::create(input_rows_count); + typename ColumnVector::Container & vec_to = col_to->getData(); + + const ColumnWithTypeAndName & col = block.getByPosition(arguments[0]); + const IDataType * from_type = col.type.get(); + const IColumn * icolumn = col.column.get(); + + if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeIntType(icolumn, vec_to); + else if (checkDataType(from_type)) executeString(icolumn, vec_to); + else if (checkDataType(from_type)) executeString(icolumn, vec_to); + else + throw Exception("Unexpected type " + from_type->getName() + " of argument of function " + getName(), + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + block.getByPosition(result).column = std::move(col_to); + } +private: + using ToType = typename Impl::ReturnType; + + template + void executeIntType(const IColumn * column, typename ColumnVector::Container & vec_to) + { + if (const ColumnVector * col_from = checkAndGetColumn>(column)) + { + const typename ColumnVector::Container & vec_from = col_from->getData(); + size_t size = vec_from.size(); + for (size_t i = 0; i < size; ++i) + { + vec_to[i] = Impl::apply(reinterpret_cast(&vec_from[i]), sizeof(FromType)); + } + } + else + throw Exception("Illegal column " + column->getName() + + " of argument of function " + getName(), + ErrorCodes::ILLEGAL_COLUMN); + } + + void 
executeString(const IColumn * column, typename ColumnVector::Container & vec_to) + { + if (const ColumnString * col_from = checkAndGetColumn(column)) + { + const typename ColumnString::Chars_t & data = col_from->getChars(); + const typename ColumnString::Offsets & offsets = col_from->getOffsets(); + size_t size = offsets.size(); + + ColumnString::Offset current_offset = 0; + for (size_t i = 0; i < size; ++i) + { + vec_to[i] = Impl::apply( + reinterpret_cast(&data[current_offset]), + offsets[i] - current_offset - 1); + + current_offset = offsets[i]; + } + } + else if (const ColumnFixedString * col_from = checkAndGetColumn(column)) + { + const typename ColumnString::Chars_t & data = col_from->getChars(); + size_t n = col_from->getN(); + size_t size = data.size() / n; + for (size_t i = 0; i < size; ++i) + vec_to[i] = Impl::apply(reinterpret_cast(&data[i * n]), n); + } + else + throw Exception("Illegal column " + column->getName() + + " of first argument of function " + getName(), + ErrorCodes::ILLEGAL_COLUMN); + } +}; + + +/** Why we need MurmurHash2? + * MurmurHash2 is an outdated hash function, superseded by MurmurHash3 and subsequently by CityHash, xxHash, HighwayHash. + * Usually there is no reason to use MurmurHash. + * It is needed for the cases when you already have MurmurHash in some applications and you want to reproduce it + * in ClickHouse as is. For example, it is needed to reproduce the behaviour + * for NGINX a/b testing module: https://nginx.ru/en/docs/http/ngx_http_split_clients_module.html + */ +struct MurmurHash2Impl32 +{ + using ReturnType = UInt32; + + static UInt32 apply(const char * data, const size_t size) + { + return MurmurHash2(data, size, 0); + } +}; + +struct MurmurHash2Impl64 +{ + using ReturnType = UInt64; + + static UInt64 apply(const char * data, const size_t size) + { + return MurmurHash64A(data, size, 0); + } +}; + +struct MurmurHash3Impl32 +{ + using ReturnType = UInt32; + + static UInt32 apply(const char * data, const size_t size) + { + union + { + UInt32 h; + char bytes[sizeof(h)]; + }; + MurmurHash3_x86_32(data, size, 0, bytes); + return h; + } +}; + +struct MurmurHash3Impl64 +{ + using ReturnType = UInt64; + + static UInt64 apply(const char * data, const size_t size) + { + union + { + UInt64 h[2]; + char bytes[16]; + }; + MurmurHash3_x64_128(data, size, 0, bytes); + return h[0] ^ h[1]; + } +}; + +struct MurmurHash3Impl128 +{ + static constexpr auto name = "murmurHash3_128"; + enum { length = 16 }; + + static void apply(const char * begin, const size_t size, unsigned char * out_char_data) + { + MurmurHash3_x64_128(begin, size, 0, out_char_data); + } +}; + + struct URLHashImpl { static UInt64 apply(const char * data, const size_t size) @@ -748,10 +879,15 @@ private: const auto & offsets = col_from->getOffsets(); auto & out = col_to->getData(); - for (const auto i : ext::range(0, size)) + ColumnString::Offset current_offset = 0; + for (size_t i = 0; i < size; ++i) + { out[i] = URLHashImpl::apply( - reinterpret_cast(&chars[i == 0 ? 0 : offsets[i - 1]]), - i == 0 ? offsets[i] - 1 : (offsets[i] - 1 - offsets[i - 1])); + reinterpret_cast(&chars[current_offset]), + offsets[i] - current_offset - 1); + + current_offset = offsets[i]; + } block.getByPosition(result).column = std::move(col_to); } @@ -778,10 +914,16 @@ private: const auto & offsets = col_from->getOffsets(); auto & out = col_to->getData(); - for (const auto i : ext::range(0, size)) - out[i] = URLHierarchyHashImpl::apply(level, - reinterpret_cast(&chars[i == 0 ? 0 : offsets[i - 1]]), - i == 0 ? 
offsets[i] - 1 : (offsets[i] - 1 - offsets[i - 1])); + ColumnString::Offset current_offset = 0; + for (size_t i = 0; i < size; ++i) + { + out[i] = URLHierarchyHashImpl::apply( + level, + reinterpret_cast(&chars[current_offset]), + offsets[i] - current_offset - 1); + + current_offset = offsets[i]; + } block.getByPosition(result).column = std::move(col_to); } @@ -796,6 +938,12 @@ struct NameHalfMD5 { static constexpr auto name = "halfMD5"; }; struct NameSipHash64 { static constexpr auto name = "sipHash64"; }; struct NameIntHash32 { static constexpr auto name = "intHash32"; }; struct NameIntHash64 { static constexpr auto name = "intHash64"; }; +struct NameMurmurHash2_32 { static constexpr auto name = "murmurHash2_32"; }; +struct NameMurmurHash2_64 { static constexpr auto name = "murmurHash2_64"; }; +struct NameMurmurHash3_32 { static constexpr auto name = "murmurHash3_32"; }; +struct NameMurmurHash3_64 { static constexpr auto name = "murmurHash3_64"; }; +struct NameMurmurHash3_128 { static constexpr auto name = "murmurHash3_128"; }; + struct ImplCityHash64 { @@ -836,8 +984,8 @@ struct ImplMetroHash64 } }; -using FunctionHalfMD5 = FunctionStringHash64; -using FunctionSipHash64 = FunctionStringHash64; +using FunctionHalfMD5 = FunctionStringHash; +using FunctionSipHash64 = FunctionStringHash; using FunctionIntHash32 = FunctionIntHash; using FunctionIntHash64 = FunctionIntHash; using FunctionMD5 = FunctionStringHashFixedString; @@ -848,5 +996,9 @@ using FunctionSipHash128 = FunctionStringHashFixedString; using FunctionCityHash64 = FunctionNeighbourhoodHash64; using FunctionFarmHash64 = FunctionNeighbourhoodHash64; using FunctionMetroHash64 = FunctionNeighbourhoodHash64; - +using FunctionMurmurHash2_32 = FunctionStringHash; +using FunctionMurmurHash2_64 = FunctionStringHash; +using FunctionMurmurHash3_32 = FunctionStringHash; +using FunctionMurmurHash3_64 = FunctionStringHash; +using FunctionMurmurHash3_128 = FunctionStringHashFixedString; } diff --git a/dbms/src/Functions/FunctionsNull.cpp b/dbms/src/Functions/FunctionsNull.cpp index af2831db383..979c92823ff 100644 --- a/dbms/src/Functions/FunctionsNull.cpp +++ b/dbms/src/Functions/FunctionsNull.cpp @@ -19,9 +19,9 @@ void registerFunctionsNull(FunctionFactory & factory) { factory.registerFunction(); factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); - factory.registerFunction(); + factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(FunctionFactory::CaseInsensitive); + factory.registerFunction(FunctionFactory::CaseInsensitive); factory.registerFunction(); factory.registerFunction(); } diff --git a/dbms/src/Functions/FunctionsRound.cpp b/dbms/src/Functions/FunctionsRound.cpp index 7bf7eb791ad..9cb9e1001ae 100644 --- a/dbms/src/Functions/FunctionsRound.cpp +++ b/dbms/src/Functions/FunctionsRound.cpp @@ -16,8 +16,8 @@ void registerFunctionsRound(FunctionFactory & factory) factory.registerFunction("trunc", FunctionFactory::CaseInsensitive); /// Compatibility aliases. 
- factory.registerFunction("ceiling", FunctionFactory::CaseInsensitive); - factory.registerFunction("truncate", FunctionFactory::CaseInsensitive); + factory.registerAlias("ceiling", "ceil", FunctionFactory::CaseInsensitive); + factory.registerAlias("truncate", "trunc", FunctionFactory::CaseInsensitive); } } diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.cpp b/dbms/src/IO/ReadWriteBufferFromHTTP.cpp index af0f34babbf..52ec808bd68 100644 --- a/dbms/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/dbms/src/IO/ReadWriteBufferFromHTTP.cpp @@ -18,6 +18,7 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP(const Poco::URI & uri, const std::string & method_, OutStreamCallback out_stream_callback, const ConnectionTimeouts & timeouts, + const Poco::Net::HTTPBasicCredentials & credentials, size_t buffer_size_) : ReadBuffer(nullptr, 0), uri{uri}, @@ -30,6 +31,9 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP(const Poco::URI & uri, if (out_stream_callback) request.setChunkedTransferEncoding(true); + if (!credentials.getUsername().empty()) + credentials.authenticate(request); + Poco::Net::HTTPResponse response; LOG_TRACE((&Logger::get("ReadWriteBufferFromHTTP")), "Sending request to " << uri.toString()); diff --git a/dbms/src/IO/ReadWriteBufferFromHTTP.h b/dbms/src/IO/ReadWriteBufferFromHTTP.h index 93a8232f93d..d370bb3d4c7 100644 --- a/dbms/src/IO/ReadWriteBufferFromHTTP.h +++ b/dbms/src/IO/ReadWriteBufferFromHTTP.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -32,6 +33,7 @@ public: const std::string & method = {}, OutStreamCallback out_stream_callback = {}, const ConnectionTimeouts & timeouts = {}, + const Poco::Net::HTTPBasicCredentials & credentials = {}, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE); bool nextImpl() override; diff --git a/dbms/src/IO/WriteHelpers.h b/dbms/src/IO/WriteHelpers.h index e45edf8b836..932b0744d0c 100644 --- a/dbms/src/IO/WriteHelpers.h +++ b/dbms/src/IO/WriteHelpers.h @@ -23,6 +23,7 @@ #include #include #include +#include namespace DB @@ -173,7 +174,7 @@ inline void writeString(const StringRef & ref, WriteBuffer & buf) * - it is assumed that string is in UTF-8, the invalid UTF-8 is not processed * - all other non-ASCII characters remain as is */ -inline void writeJSONString(const char * begin, const char * end, WriteBuffer & buf) +inline void writeJSONString(const char * begin, const char * end, WriteBuffer & buf, const FormatSettings & settings) { writeChar('"', buf); for (const char * it = begin; it != end; ++it) @@ -205,7 +206,8 @@ inline void writeJSONString(const char * begin, const char * end, WriteBuffer & writeChar('\\', buf); break; case '/': - writeChar('\\', buf); + if (settings.json.escape_forward_slashes) + writeChar('\\', buf); writeChar('/', buf); break; case '"': @@ -311,15 +313,15 @@ void writeAnyEscapedString(const char * begin, const char * end, WriteBuffer & b } -inline void writeJSONString(const String & s, WriteBuffer & buf) +inline void writeJSONString(const String & s, WriteBuffer & buf, const FormatSettings & settings) { - writeJSONString(s.data(), s.data() + s.size(), buf); + writeJSONString(s.data(), s.data() + s.size(), buf, settings); } -inline void writeJSONString(const StringRef & ref, WriteBuffer & buf) +inline void writeJSONString(const StringRef & ref, WriteBuffer & buf, const FormatSettings & settings) { - writeJSONString(ref.data, ref.data + ref.size, buf); + writeJSONString(ref.data, ref.data + ref.size, buf, settings); } diff --git a/dbms/src/IO/tests/CMakeLists.txt b/dbms/src/IO/tests/CMakeLists.txt 
index 7b427a6ae00..324baa8278c 100644 --- a/dbms/src/IO/tests/CMakeLists.txt +++ b/dbms/src/IO/tests/CMakeLists.txt @@ -60,7 +60,7 @@ add_check (hashing_read_buffer) add_executable (io_operators operators.cpp) target_link_libraries (io_operators clickhouse_common_io) -if (NOT APPLE AND NOT ARCH_FREEBSD) +if (OS_LINUX) add_executable(write_buffer_aio write_buffer_aio.cpp) target_link_libraries (write_buffer_aio clickhouse_common_io ${Boost_FILESYSTEM_LIBRARY}) diff --git a/dbms/src/Interpreters/AsynchronousMetrics.cpp b/dbms/src/Interpreters/AsynchronousMetrics.cpp index 611711b317e..9fa890d6850 100644 --- a/dbms/src/Interpreters/AsynchronousMetrics.cpp +++ b/dbms/src/Interpreters/AsynchronousMetrics.cpp @@ -27,6 +27,10 @@ } malloc_extension_initializer; #endif +#if USE_JEMALLOC + #include +#endif + namespace DB { @@ -229,6 +233,36 @@ void AsynchronousMetrics::update() } #endif +#if USE_JEMALLOC + { + #define FOR_EACH_METRIC(M) \ + M("allocated", size_t) \ + M("active", size_t) \ + M("metadata", size_t) \ + M("metadata_thp", size_t) \ + M("resident", size_t) \ + M("mapped", size_t) \ + M("retained", size_t) \ + M("background_thread.num_threads", size_t) \ + M("background_thread.num_runs", uint64_t) \ + M("background_thread.run_interval", uint64_t) \ + + #define GET_METRIC(NAME, TYPE) \ + do \ + { \ + TYPE value{}; \ + size_t size = sizeof(value); \ + mallctl("stats." NAME, &value, &size, nullptr, 0); \ + set("jemalloc." NAME, value); \ + } while (0); + + FOR_EACH_METRIC(GET_METRIC); + + #undef GET_METRIC + #undef FOR_EACH_METRIC + } +#endif + /// Add more metrics as you wish. } diff --git a/dbms/src/Interpreters/CMakeLists.txt b/dbms/src/Interpreters/CMakeLists.txt index 6fb123d2677..a6c043ddf6c 100644 --- a/dbms/src/Interpreters/CMakeLists.txt +++ b/dbms/src/Interpreters/CMakeLists.txt @@ -1,5 +1,5 @@ -if (ARCH_FREEBSD) +if (OS_FREEBSD) set (PATH_SHARE "/usr/local/share" CACHE STRING "") else () set (PATH_SHARE "/usr/share" CACHE STRING "") diff --git a/dbms/src/Interpreters/ClientInfo.cpp b/dbms/src/Interpreters/ClientInfo.cpp index c6bd8cd6b71..2752d0231b1 100644 --- a/dbms/src/Interpreters/ClientInfo.cpp +++ b/dbms/src/Interpreters/ClientInfo.cpp @@ -51,6 +51,12 @@ void ClientInfo::write(WriteBuffer & out, const UInt64 server_protocol_revision) if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO) writeBinary(quota_key, out); + + if (interface == Interface::TCP) + { + if (server_protocol_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) + writeVarUInt(client_version_patch, out); + } } @@ -96,6 +102,14 @@ void ClientInfo::read(ReadBuffer & in, const UInt64 client_protocol_revision) if (client_protocol_revision >= DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO) readBinary(quota_key, in); + + if (interface == Interface::TCP) + { + if (client_protocol_revision >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) + readVarUInt(client_version_patch, in); + else + client_version_patch = client_revision; + } } @@ -111,6 +125,7 @@ void ClientInfo::fillOSUserHostNameAndVersionInfo() client_version_major = DBMS_VERSION_MAJOR; client_version_minor = DBMS_VERSION_MINOR; + client_version_patch = DBMS_VERSION_PATCH; client_revision = ClickHouseRevision::get(); } diff --git a/dbms/src/Interpreters/ClientInfo.h b/dbms/src/Interpreters/ClientInfo.h index 58a6c250b55..ac65bc158fa 100644 --- a/dbms/src/Interpreters/ClientInfo.h +++ b/dbms/src/Interpreters/ClientInfo.h @@ -65,6 +65,7 @@ public: String client_name; UInt64 client_version_major = 0; UInt64 client_version_minor = 0; + UInt64 
client_version_patch = 0; unsigned client_revision = 0; /// For http diff --git a/dbms/src/Interpreters/Cluster.cpp b/dbms/src/Interpreters/Cluster.cpp index efa2ad60732..cd1a3a2da11 100644 --- a/dbms/src/Interpreters/Cluster.cpp +++ b/dbms/src/Interpreters/Cluster.cpp @@ -127,11 +127,7 @@ String Cluster::Address::toStringFull() const Clusters::Clusters(Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & config_name) { - Poco::Util::AbstractConfiguration::Keys config_keys; - config.keys(config_name, config_keys); - - for (const auto & key : config_keys) - impl.emplace(key, std::make_shared(config, settings, config_name + "." + key)); + updateClusters(config, settings, config_name); } @@ -158,19 +154,9 @@ void Clusters::updateClusters(Poco::Util::AbstractConfiguration & config, const std::lock_guard lock(mutex); + impl.clear(); for (const auto & key : config_keys) - { - auto it = impl.find(key); - auto new_cluster = std::make_shared(config, settings, config_name + "." + key); - - if (it == impl.end()) - impl.emplace(key, std::move(new_cluster)); - else - { - //TODO: Check that cluster update is necessarily - it->second = std::move(new_cluster); - } - } + impl.emplace(key, std::make_shared(config, settings, config_name + "." + key)); } Clusters::Impl Clusters::getContainer() const diff --git a/dbms/src/Interpreters/ClusterProxy/DescribeStreamFactory.cpp b/dbms/src/Interpreters/ClusterProxy/DescribeStreamFactory.cpp deleted file mode 100644 index 2638399f8ff..00000000000 --- a/dbms/src/Interpreters/ClusterProxy/DescribeStreamFactory.cpp +++ /dev/null @@ -1,59 +0,0 @@ -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace -{ - -BlockExtraInfo toBlockExtraInfo(const Cluster::Address & address) -{ - BlockExtraInfo block_extra_info; - block_extra_info.host = address.host_name; - block_extra_info.resolved_address = address.getResolvedAddress().toString(); - block_extra_info.port = address.port; - block_extra_info.user = address.user; - block_extra_info.is_valid = true; - return block_extra_info; -} - -} - -namespace ClusterProxy -{ - -void DescribeStreamFactory::createForShard( - const Cluster::ShardInfo & shard_info, - const String & query, const ASTPtr & query_ast, - const Context & context, const ThrottlerPtr & throttler, - BlockInputStreams & res) -{ - for (const Cluster::Address & local_address : shard_info.local_addresses) - { - InterpreterDescribeQuery interpreter{query_ast, context}; - BlockInputStreamPtr stream = interpreter.execute().in; - - /** Materialization is needed, since from remote servers the constants come materialized. - * If you do not do this, different types (Const and non-Const) columns will be produced in different threads, - * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same. 
- */ - BlockInputStreamPtr materialized_stream = std::make_shared(stream); - res.emplace_back(std::make_shared(materialized_stream, toBlockExtraInfo(local_address))); - } - - if (shard_info.hasRemoteConnections()) - { - auto remote_stream = std::make_shared( - shard_info.pool, query, InterpreterDescribeQuery::getSampleBlock(), context, nullptr, throttler); - remote_stream->setPoolMode(PoolMode::GET_ALL); - remote_stream->appendExtraInfo(); - res.emplace_back(std::move(remote_stream)); - } -} - -} -} diff --git a/dbms/src/Interpreters/ClusterProxy/DescribeStreamFactory.h b/dbms/src/Interpreters/ClusterProxy/DescribeStreamFactory.h deleted file mode 100644 index 05befc59305..00000000000 --- a/dbms/src/Interpreters/ClusterProxy/DescribeStreamFactory.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -#include - -namespace DB -{ - -namespace ClusterProxy -{ - -class DescribeStreamFactory final : public IStreamFactory -{ -public: - void createForShard( - const Cluster::ShardInfo & shard_info, - const String & query, const ASTPtr & query_ast, - const Context & context, const ThrottlerPtr & throttler, - BlockInputStreams & res) override; -}; - -} - -} diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 24060eedcd7..cd6bed0c1d7 100644 --- a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -28,13 +29,26 @@ namespace ClusterProxy { SelectStreamFactory::SelectStreamFactory( - const Block & header, + const Block & header_, QueryProcessingStage::Enum processed_stage_, QualifiedTableName main_table_, const Tables & external_tables_) - : header(header), + : header(header_), processed_stage{processed_stage_}, main_table(std::move(main_table_)), + table_func_ptr{nullptr}, + external_tables{external_tables_} +{ +} + +SelectStreamFactory::SelectStreamFactory( + const Block & header_, + QueryProcessingStage::Enum processed_stage_, + ASTPtr table_func_ptr_, + const Tables & external_tables_) + : header(header_), + processed_stage{processed_stage_}, + table_func_ptr{table_func_ptr_}, external_tables{external_tables_} { } @@ -44,7 +58,7 @@ namespace BlockInputStreamPtr createLocalStream(const ASTPtr & query_ast, const Context & context, QueryProcessingStage::Enum processed_stage) { - InterpreterSelectQuery interpreter{query_ast, context, {}, processed_stage}; + InterpreterSelectQuery interpreter{query_ast, context, Names{}, processed_stage}; BlockInputStreamPtr stream = interpreter.execute().in; /** Materialization is needed, since from remote servers the constants come materialized. 
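The comment preserved here states the invariant that motivates the materializing wrapper: every branch feeding one block stream must use the same column representation, so constants produced by local execution are expanded before being merged with remote results. A minimal self-contained model of that normalization (illustrative types, not ClickHouse's `IColumn`):

```cpp
#include <iostream>
#include <variant>
#include <vector>

// One thread may hand back a "constant" column (single value + row count), another
// a fully materialized vector. Code that assumes one uniform representation per
// column breaks unless constants are expanded first.
struct ConstColumn { int value; std::size_t rows; };
struct FullColumn  { std::vector<int> values; };
using Column = std::variant<ConstColumn, FullColumn>;

// Analogue of wrapping a stream in a materializing stage.
FullColumn materialize(const Column & col)
{
    if (auto * c = std::get_if<ConstColumn>(&col))
        return FullColumn{std::vector<int>(c->rows, c->value)};
    return std::get<FullColumn>(col);
}

int main()
{
    Column local  = ConstColumn{42, 3};        // local server: the constant survives
    Column remote = FullColumn{{42, 42, 42}};  // remote server: already materialized

    // Normalize both to the same representation before concatenating results.
    FullColumn a = materialize(local), b = materialize(remote);
    std::cout << a.values.size() + b.values.size() << " rows\n";
}
```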
@@ -71,13 +85,24 @@ void SelectStreamFactory::createForShard( { auto stream = std::make_shared(shard_info.pool, query, header, context, nullptr, throttler, external_tables, processed_stage); stream->setPoolMode(PoolMode::GET_MANY); - stream->setMainTable(main_table); + if (!table_func_ptr) + stream->setMainTable(main_table); res.emplace_back(std::move(stream)); }; if (shard_info.isLocal()) { - StoragePtr main_table_storage = context.tryGetTable(main_table.database, main_table.table); + StoragePtr main_table_storage; + + if (table_func_ptr) + { + auto table_function = static_cast(table_func_ptr.get()); + main_table_storage = TableFunctionFactory::instance().get(table_function->name, context)->execute(table_func_ptr, context); + } + else + main_table_storage = context.tryGetTable(main_table.database, main_table.table); + + if (!main_table_storage) /// Table is absent on a local server. { ProfileEvents::increment(ProfileEvents::DistributedConnectionMissingTable); @@ -158,14 +183,17 @@ void SelectStreamFactory::createForShard( auto lazily_create_stream = [ pool = shard_info.pool, shard_num = shard_info.shard_num, query, header = header, query_ast, context, throttler, - main_table = main_table, external_tables = external_tables, stage = processed_stage, + main_table = main_table, table_func_ptr = table_func_ptr, external_tables = external_tables, stage = processed_stage, local_delay]() -> BlockInputStreamPtr { std::vector try_results; try { - try_results = pool->getManyChecked(&context.getSettingsRef(), PoolMode::GET_MANY, main_table); + if (table_func_ptr) + try_results = pool->getManyForTableFunction(&context.getSettingsRef(), PoolMode::GET_MANY); + else + try_results = pool->getManyChecked(&context.getSettingsRef(), PoolMode::GET_MANY, main_table); } catch (const Exception & ex) { diff --git a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h index 5325e5d463c..75c6938b56b 100644 --- a/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/dbms/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -13,12 +13,20 @@ namespace ClusterProxy class SelectStreamFactory final : public IStreamFactory { public: + /// Database in a query. SelectStreamFactory( - const Block & header, - QueryProcessingStage::Enum processed_stage, - QualifiedTableName main_table, + const Block & header_, + QueryProcessingStage::Enum processed_stage_, + QualifiedTableName main_table_, const Tables & external_tables); + /// TableFunction in a query. 
+ SelectStreamFactory( + const Block & header_, + QueryProcessingStage::Enum processed_stage_, + ASTPtr table_func_ptr_, + const Tables & external_tables_); + void createForShard( const Cluster::ShardInfo & shard_info, const String & query, const ASTPtr & query_ast, @@ -29,6 +37,7 @@ private: const Block header; QueryProcessingStage::Enum processed_stage; QualifiedTableName main_table; + ASTPtr table_func_ptr; Tables external_tables; }; diff --git a/dbms/src/Interpreters/Context.cpp b/dbms/src/Interpreters/Context.cpp index 9fed370cfbc..7a35cf2644c 100644 --- a/dbms/src/Interpreters/Context.cpp +++ b/dbms/src/Interpreters/Context.cpp @@ -81,6 +81,7 @@ namespace ErrorCodes extern const int NO_ELEMENTS_IN_CONFIG; extern const int DDL_GUARD_IS_ACTIVE; extern const int TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT; + extern const int PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT; extern const int SESSION_NOT_FOUND; extern const int SESSION_IS_LOCKED; extern const int CANNOT_GET_CREATE_TABLE_QUERY; @@ -109,6 +110,9 @@ struct ContextShared String interserver_io_host; /// The host name by which this server is available for other servers. UInt16 interserver_io_port = 0; /// and port. + String interserver_io_user; + String interserver_io_password; + String interserver_scheme; /// http or https String path; /// Path to the data directory, with a slash at the end. String tmp_path; /// The path to the temporary files that occur when processing the request. @@ -140,6 +144,7 @@ struct ContextShared mutable std::unique_ptr compression_settings_selector; std::unique_ptr merge_tree_settings; /// Settings of MergeTree* engines. size_t max_table_size_to_drop = 50000000000lu; /// Protects MergeTree tables from accidental DROP (50GB by default) + size_t max_partition_size_to_drop = 50000000000lu; /// Protects MergeTree partitions from accidental DROP (50GB by default) String format_schema_path; /// Path to a directory that contains schema files used by input formats. 
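The new `interserver_io_user`, `interserver_io_password` and `interserver_scheme` fields above back the HTTP Basic replication authentication wired through `ReadWriteBufferFromHTTP` earlier in this diff. With Poco, attaching the credentials to an interserver request looks roughly like this (a sketch; the host path and the literal credentials are placeholders, not values from the patch):

```cpp
#include <iostream>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Net/HTTPBasicCredentials.h>

int main()
{
    // In the server these values come from configuration, not literals.
    Poco::Net::HTTPBasicCredentials credentials("replica_user", "replica_password");

    Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_POST,
                                   "/?endpoint=example", Poco::Net::HTTPMessage::HTTP_1_1);

    // Same pattern as in ReadWriteBufferFromHTTP: only authenticate when a user is set.
    if (!credentials.getUsername().empty())
        credentials.authenticate(request);  // adds "Authorization: Basic <base64>"

    std::cout << request.get("Authorization") << '\n';
}
```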
ActionLocksManagerPtr action_locks_manager; /// Set of storages' action lockers @@ -1378,16 +1383,36 @@ void Context::setInterserverIOAddress(const String & host, UInt16 port) shared->interserver_io_port = port; } - std::pair Context::getInterserverIOAddress() const { if (shared->interserver_io_host.empty() || shared->interserver_io_port == 0) - throw Exception("Parameter 'interserver_http_port' required for replication is not specified in configuration file.", - ErrorCodes::NO_ELEMENTS_IN_CONFIG); + throw Exception("Parameter 'interserver_http(s)_port' required for replication is not specified in configuration file.", + ErrorCodes::NO_ELEMENTS_IN_CONFIG); return { shared->interserver_io_host, shared->interserver_io_port }; } +void Context::setInterserverCredentials(const String & user, const String & password) +{ + shared->interserver_io_user = user; + shared->interserver_io_password = password; +} + +std::pair Context::getInterserverCredentials() const +{ + return { shared->interserver_io_user, shared->interserver_io_password }; +} + +void Context::setInterserverScheme(const String & scheme) +{ + shared->interserver_scheme = scheme; +} + +String Context::getInterserverScheme() const +{ + return shared->interserver_scheme; +} + UInt16 Context::getTCPPort() const { auto lock = getLock(); @@ -1603,17 +1628,9 @@ const MergeTreeSettings & Context::getMergeTreeSettings() } -void Context::setMaxTableSizeToDrop(size_t max_size) +void Context::checkCanBeDropped(const String & database, const String & table, const size_t & size, const size_t & max_size_to_drop) { - // Is initialized at server startup - shared->max_table_size_to_drop = max_size; -} - -void Context::checkTableCanBeDropped(const String & database, const String & table, size_t table_size) -{ - size_t max_table_size_to_drop = shared->max_table_size_to_drop; - - if (!max_table_size_to_drop || table_size <= max_table_size_to_drop) + if (!max_size_to_drop || size <= max_size_to_drop) return; Poco::File force_file(getFlagsPath() + "force_drop_table"); @@ -1629,22 +1646,22 @@ void Context::checkTableCanBeDropped(const String & database, const String & tab catch (...) { /// User should recreate force file on each drop, it shouldn't be protected - tryLogCurrentException("Drop table check", "Can't remove force file to enable table drop"); + tryLogCurrentException("Drop table check", "Can't remove force file to enable table or partition drop"); } } - String table_size_str = formatReadableSizeWithDecimalSuffix(table_size); - String max_table_size_to_drop_str = formatReadableSizeWithDecimalSuffix(max_table_size_to_drop); + String size_str = formatReadableSizeWithDecimalSuffix(size); + String max_size_to_drop_str = formatReadableSizeWithDecimalSuffix(max_size_to_drop); std::stringstream ostr; - ostr << "Table " << backQuoteIfNeed(database) << "." << backQuoteIfNeed(table) << " was not dropped.\n" + ostr << "Table or Partition in " << backQuoteIfNeed(database) << "." << backQuoteIfNeed(table) << " was not dropped.\n" << "Reason:\n" - << "1. Table size (" << table_size_str << ") is greater than max_table_size_to_drop (" << max_table_size_to_drop_str << ")\n" + << "1. Size (" << size_str << ") is greater than max_size_to_drop (" << max_size_to_drop_str << ")\n" << "2. File '" << force_file.path() << "' intended to force DROP " - << (force_file_exists ? "exists but not writeable (could not be removed)" : "doesn't exist") << "\n"; + << (force_file_exists ? 
"exists but not writeable (could not be removed)" : "doesn't exist") << "\n"; ostr << "How to fix this:\n" - << "1. Either increase (or set to zero) max_table_size_to_drop in server config and restart ClickHouse\n" + << "1. Either increase (or set to zero) max_size_to_drop in server config and restart ClickHouse\n" << "2. Either create forcing file " << force_file.path() << " and make sure that ClickHouse has write permission for it.\n" << "Example:\nsudo touch '" << force_file.path() << "' && sudo chmod 666 '" << force_file.path() << "'"; @@ -1652,6 +1669,36 @@ void Context::checkTableCanBeDropped(const String & database, const String & tab } +void Context::setMaxTableSizeToDrop(size_t max_size) +{ + // Is initialized at server startup + shared->max_table_size_to_drop = max_size; +} + + +void Context::checkTableCanBeDropped(const String & database, const String & table, const size_t & table_size) +{ + size_t max_table_size_to_drop = shared->max_table_size_to_drop; + + checkCanBeDropped(database, table, table_size, max_table_size_to_drop); +} + + +void Context::setMaxPartitionSizeToDrop(size_t max_size) +{ + // Is initialized at server startup + shared->max_partition_size_to_drop = max_size; +} + + +void Context::checkPartitionCanBeDropped(const String & database, const String & table, const size_t & partition_size) +{ + size_t max_partition_size_to_drop = shared->max_partition_size_to_drop; + + checkCanBeDropped(database, table, partition_size, max_partition_size_to_drop); +} + + BlockInputStreamPtr Context::getInputFormat(const String & name, ReadBuffer & buf, const Block & sample, size_t max_block_size) const { return FormatFactory::instance().getInput(name, buf, sample, *this, max_block_size); diff --git a/dbms/src/Interpreters/Context.h b/dbms/src/Interpreters/Context.h index 1c867d65e8f..7eadb4479cd 100644 --- a/dbms/src/Interpreters/Context.h +++ b/dbms/src/Interpreters/Context.h @@ -249,6 +249,15 @@ public: /// How other servers can access this for downloading replicated data. void setInterserverIOAddress(const String & host, UInt16 port); std::pair getInterserverIOAddress() const; + + /// Credentials which server will use to communicate with others + void setInterserverCredentials(const String & user, const String & password); + std::pair getInterserverCredentials() const; + + /// Interserver requests scheme (http or https) + void setInterserverScheme(const String & scheme); + String getInterserverScheme() const; + /// The port that the server listens for executing SQL queries. UInt16 getTCPPort() const; @@ -366,7 +375,11 @@ public: /// Prevents DROP TABLE if its size is greater than max_size (50GB by default, max_size=0 turn off this check) void setMaxTableSizeToDrop(size_t max_size); - void checkTableCanBeDropped(const String & database, const String & table, size_t table_size); + void checkTableCanBeDropped(const String & database, const String & table, const size_t & table_size); + + /// Prevents DROP PARTITION if its size is greater than max_size (50GB by default, max_size=0 turn off this check) + void setMaxPartitionSizeToDrop(size_t max_size); + void checkPartitionCanBeDropped(const String & database, const String & table, const size_t & partition_size); /// Lets you select the compression settings according to the conditions described in the configuration file. CompressionSettings chooseCompressionSettings(size_t part_size, double part_size_ratio) const; @@ -423,6 +436,8 @@ private: /// Session will be closed after specified timeout. 
void scheduleCloseSession(const SessionKey & key, std::chrono::steady_clock::duration timeout); + + void checkCanBeDropped(const String & database, const String & table, const size_t & size, const size_t & max_size_to_drop); }; diff --git a/dbms/src/Interpreters/ExpressionActions.cpp b/dbms/src/Interpreters/ExpressionActions.cpp index ebf3ef3aac8..1a00f5c43d7 100644 --- a/dbms/src/Interpreters/ExpressionActions.cpp +++ b/dbms/src/Interpreters/ExpressionActions.cpp @@ -41,6 +41,8 @@ Names ExpressionAction::getNeededColumns() const res.insert(res.end(), array_joined_columns.begin(), array_joined_columns.end()); + res.insert(res.end(), join_key_names_left.begin(), join_key_names_left.end()); + for (const auto & column : projection) res.push_back(column.first); @@ -146,11 +148,14 @@ ExpressionAction ExpressionAction::arrayJoin(const NameSet & array_joined_column return a; } -ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr join_, const NamesAndTypesList & columns_added_by_join_) +ExpressionAction ExpressionAction::ordinaryJoin(std::shared_ptr join_, + const Names & join_key_names_left, + const NamesAndTypesList & columns_added_by_join_) { ExpressionAction a; a.type = JOIN; - a.join = join_; + a.join = std::move(join_); + a.join_key_names_left = join_key_names_left; a.columns_added_by_join = columns_added_by_join_; return a; } diff --git a/dbms/src/Interpreters/ExpressionActions.h b/dbms/src/Interpreters/ExpressionActions.h index 8da5fe2a279..993ba772d75 100644 --- a/dbms/src/Interpreters/ExpressionActions.h +++ b/dbms/src/Interpreters/ExpressionActions.h @@ -34,11 +34,14 @@ using DataTypePtr = std::shared_ptr; class IBlockInputStream; using BlockInputStreamPtr = std::shared_ptr; +class ExpressionActions; /** Action on the block. */ struct ExpressionAction { +private: + using ExpressionActionsPtr = std::shared_ptr; public: enum Type { @@ -85,6 +88,7 @@ public: /// For JOIN std::shared_ptr join; + Names join_key_names_left; NamesAndTypesList columns_added_by_join; /// For PROJECT. @@ -103,7 +107,8 @@ public: static ExpressionAction project(const NamesWithAliases & projected_columns_); static ExpressionAction project(const Names & projected_columns_); static ExpressionAction arrayJoin(const NameSet & array_joined_columns, bool array_join_is_left, const Context & context); - static ExpressionAction ordinaryJoin(std::shared_ptr join_, const NamesAndTypesList & columns_added_by_join_); + static ExpressionAction ordinaryJoin(std::shared_ptr join_, const Names & join_key_names_left, + const NamesAndTypesList & columns_added_by_join_); /// Which columns necessary to perform this action. Names getNeededColumns() const; diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.cpp b/dbms/src/Interpreters/ExpressionAnalyzer.cpp index ebf9e92adf9..c17fb435f88 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.cpp +++ b/dbms/src/Interpreters/ExpressionAnalyzer.cpp @@ -61,6 +61,10 @@ #include #include #include +#include +#include +#include +#include namespace DB @@ -89,6 +93,7 @@ namespace ErrorCodes extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int CONDITIONAL_TREE_PARENT_NOT_FOUND; extern const int TYPE_MISMATCH; + extern const int INVALID_JOIN_ON_EXPRESSION; } @@ -159,6 +164,34 @@ void removeDuplicateColumns(NamesAndTypesList & columns) } +String DatabaseAndTableWithAlias::getQualifiedNamePrefix() const +{ + return (!alias.empty() ? alias : (database + '.' 
+        table)) + '.';
+}
+
+
+void DatabaseAndTableWithAlias::makeQualifiedName(const ASTPtr & ast) const
+{
+    if (auto identifier = typeid_cast<ASTIdentifier *>(ast.get()))
+    {
+        String prefix = getQualifiedNamePrefix();
+        identifier->name.insert(identifier->name.begin(), prefix.begin(), prefix.end());
+
+        Names qualifiers;
+        if (!alias.empty())
+            qualifiers.push_back(alias);
+        else
+        {
+            qualifiers.push_back(database);
+            qualifiers.push_back(table);
+        }
+
+        for (const auto & qualifier : qualifiers)
+            identifier->children.emplace_back(std::make_shared<ASTIdentifier>(qualifier));
+    }
+}
+
+
 ExpressionAnalyzer::ExpressionAnalyzer(
     const ASTPtr & ast_,
     const Context & context_,
@@ -170,7 +203,7 @@ ExpressionAnalyzer::ExpressionAnalyzer(
     const SubqueriesForSets & subqueries_for_set_)
     : ast(ast_), context(context_), settings(context.getSettings()),
     subquery_depth(subquery_depth_),
-    source_columns(source_columns_), required_result_columns(required_result_columns_.begin(), required_result_columns_.end()),
+    source_columns(source_columns_), required_result_columns(required_result_columns_),
     storage(storage_),
     do_global(do_global_), subqueries_for_sets(subqueries_for_set_)
 {
@@ -215,7 +248,7 @@ ExpressionAnalyzer::ExpressionAnalyzer(
     /// Common subexpression elimination. Rewrite rules.
     normalizeTree();
 
-    /// Remove unneeded columns according to 'required_source_columns'.
+    /// Remove unneeded columns according to 'required_result_columns'.
     /// Leave all selected columns in case of DISTINCT; columns that contain arrayJoin function inside.
     /// Must be after 'normalizeTree' (after expanding aliases, so that aliases do not get lost)
     /// and before 'executeScalarSubqueries', 'analyzeAggregation', etc. to avoid excessive calculations.
@@ -256,112 +289,171 @@ ExpressionAnalyzer::ExpressionAnalyzer(
     analyzeAggregation();
 }
 
-
-void ExpressionAnalyzer::translateQualifiedNames()
+static DatabaseAndTableWithAlias getTableNameWithAliasFromTableExpression(const ASTTableExpression & table_expression,
+                                                                          const Context & context)
 {
-    String database_name;
-    String table_name;
-    String alias;
-
-    if (!select_query || !select_query->tables || select_query->tables->children.empty())
-        return;
-
-    ASTTablesInSelectQueryElement & element = static_cast<ASTTablesInSelectQueryElement &>(*select_query->tables->children[0]);
-
-    if (!element.table_expression)        /// This is ARRAY JOIN without a table at the left side.
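// [Editor's note] A toy model (not from the patch) of the qualification rule
// implemented by DatabaseAndTableWithAlias above: the alias, when present,
// wins over "database.table". The real makeQualifiedName also appends child
// identifiers to the AST, which this sketch omits.
#include <cassert>
#include <string>

struct TableWithAlias { std::string database, table, alias; };

static std::string qualifiedNamePrefix(const TableWithAlias & t)
{
    return (!t.alias.empty() ? t.alias : t.database + '.' + t.table) + '.';
}

int main()
{
    assert(qualifiedNamePrefix({"db", "hits", ""}) == "db.hits.");
    assert(qualifiedNamePrefix({"db", "hits", "h"}) == "h.");   /// alias takes priority
}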
-        return;
-
-    ASTTableExpression & table_expression = static_cast<ASTTableExpression &>(*element.table_expression);
+    DatabaseAndTableWithAlias database_and_table_with_alias;
 
     if (table_expression.database_and_table_name)
     {
-        const ASTIdentifier & identifier = static_cast<const ASTIdentifier &>(*table_expression.database_and_table_name);
+        const auto & identifier = static_cast<const ASTIdentifier &>(*table_expression.database_and_table_name);
 
-        alias = identifier.tryGetAlias();
+        database_and_table_with_alias.alias = identifier.tryGetAlias();
 
         if (table_expression.database_and_table_name->children.empty())
         {
-            database_name = context.getCurrentDatabase();
-            table_name = identifier.name;
+            database_and_table_with_alias.database = context.getCurrentDatabase();
+            database_and_table_with_alias.table = identifier.name;
         }
         else
         {
             if (table_expression.database_and_table_name->children.size() != 2)
                 throw Exception("Logical error: number of components in table expression not equal to two", ErrorCodes::LOGICAL_ERROR);
 
-            database_name = static_cast<const ASTIdentifier &>(*identifier.children[0]).name;
-            table_name = static_cast<const ASTIdentifier &>(*identifier.children[1]).name;
+            database_and_table_with_alias.database = static_cast<const ASTIdentifier &>(*identifier.children[0]).name;
+            database_and_table_with_alias.table = static_cast<const ASTIdentifier &>(*identifier.children[1]).name;
         }
     }
     else if (table_expression.table_function)
     {
-        alias = table_expression.table_function->tryGetAlias();
+        database_and_table_with_alias.alias = table_expression.table_function->tryGetAlias();
     }
     else if (table_expression.subquery)
     {
-        alias = table_expression.subquery->tryGetAlias();
+        database_and_table_with_alias.alias = table_expression.subquery->tryGetAlias();
     }
     else
         throw Exception("Logical error: no known elements in ASTTableExpression", ErrorCodes::LOGICAL_ERROR);
 
-    translateQualifiedNamesImpl(ast, database_name, table_name, alias);
+    return database_and_table_with_alias;
+}
+
+
+void ExpressionAnalyzer::translateQualifiedNames()
+{
+    if (!select_query || !select_query->tables || select_query->tables->children.empty())
+        return;
+
+    auto & element = static_cast<ASTTablesInSelectQueryElement &>(*select_query->tables->children[0]);
+
+    if (!element.table_expression)        /// This is ARRAY JOIN without a table at the left side.
+        return;
+
+    auto & table_expression = static_cast<ASTTableExpression &>(*element.table_expression);
+    auto * join = select_query->join();
+
+    std::vector<DatabaseAndTableWithAlias> tables = {getTableNameWithAliasFromTableExpression(table_expression, context)};
+
+    if (join)
+    {
+        const auto & join_table_expression = static_cast<const ASTTableExpression &>(*join->table_expression);
+        tables.emplace_back(getTableNameWithAliasFromTableExpression(join_table_expression, context));
+    }
+
+    translateQualifiedNamesImpl(ast, tables);
 }
 
-void ExpressionAnalyzer::translateQualifiedNamesImpl(ASTPtr & ast, const String & database_name, const String & table_name, const String & alias)
+/// Get the number of components of an identifier which correspond to 'alias.', 'table.' or 'database.table.' from names.
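// [Editor's note] The rule implemented by the function below, restated on
// plain string components as a self-checking sketch (illustrative names, not
// the patch's code): "db.tab.col" strips two qualifiers, "tab.col" or
// "alias.col" strips one, a bare "col" strips none.
#include <cassert>
#include <string>
#include <vector>

static size_t componentsToStrip(const std::vector<std::string> & parts,
                                const std::string & database,
                                const std::string & table,
                                const std::string & alias)
{
    size_t strip = 0;
    if (parts.size() >= 3 && !database.empty()
        && parts[0] == database && parts[1] == table)
        strip = 2;                               /// database.table.column
    if (parts.size() >= 2 && ((!table.empty() && parts[0] == table)
                              || (!alias.empty() && parts[0] == alias)))
        strip = 1;                               /// table.column or alias.column
    return strip;
}

int main()
{
    assert(componentsToStrip({"db", "tab", "col"}, "db", "tab", "") == 2);
    assert(componentsToStrip({"tab", "col"}, "db", "tab", "") == 1);
    assert(componentsToStrip({"col"}, "db", "tab", "") == 0);
}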
+static size_t getNumComponentsToStripInOrderToTranslateQualifiedName(const ASTIdentifier & identifier, + const DatabaseAndTableWithAlias & names) { - if (ASTIdentifier * ident = typeid_cast(ast.get())) + size_t num_qualifiers_to_strip = 0; + + auto get_identifier_name = [](const ASTPtr & ast) { return static_cast(*ast).name; }; + + /// It is compound identifier + if (!identifier.children.empty()) { - if (ident->kind == ASTIdentifier::Column) + size_t num_components = identifier.children.size(); + + /// database.table.column + if (num_components >= 3 + && !names.database.empty() + && get_identifier_name(identifier.children[0]) == names.database + && get_identifier_name(identifier.children[1]) == names.table) { - /// It is compound identifier - if (!ast->children.empty()) + num_qualifiers_to_strip = 2; + } + + /// table.column or alias.column. If num_components > 2, it is like table.nested.column. + if (num_components >= 2 + && ((!names.table.empty() && get_identifier_name(identifier.children[0]) == names.table) + || (!names.alias.empty() && get_identifier_name(identifier.children[0]) == names.alias))) + { + num_qualifiers_to_strip = 1; + } + } + + return num_qualifiers_to_strip; +} + + +/// Checks that ast is ASTIdentifier and remove num_qualifiers_to_strip components from left. +/// Example: 'database.table.name' -> (num_qualifiers_to_strip = 2) -> 'name'. +static void stripIdentifier(ASTPtr & ast, size_t num_qualifiers_to_strip) +{ + ASTIdentifier * identifier = typeid_cast(ast.get()); + + if (!identifier) + throw Exception("ASTIdentifier expected for stripIdentifier", ErrorCodes::LOGICAL_ERROR); + + if (num_qualifiers_to_strip) + { + size_t num_components = identifier->children.size(); + + /// plain column + if (num_components - num_qualifiers_to_strip == 1) + { + String node_alias = identifier->tryGetAlias(); + ast = identifier->children.back(); + if (!node_alias.empty()) + ast->setAlias(node_alias); + } + else + /// nested column + { + identifier->children.erase(identifier->children.begin(), identifier->children.begin() + num_qualifiers_to_strip); + String new_name; + for (const auto & child : identifier->children) { - size_t num_components = ast->children.size(); - size_t num_qualifiers_to_strip = 0; + if (!new_name.empty()) + new_name += '.'; + new_name += static_cast(*child.get()).name; + } + identifier->name = new_name; + } + } +} - /// database.table.column - if (num_components >= 3 - && !database_name.empty() - && static_cast(*ast->children[0]).name == database_name - && static_cast(*ast->children[1]).name == table_name) - { - num_qualifiers_to_strip = 2; - } - /// table.column or alias.column. If num_components > 2, it is like table.nested.column. - if (num_components >= 2 - && ((!table_name.empty() && static_cast(*ast->children[0]).name == table_name) - || (!alias.empty() && static_cast(*ast->children[0]).name == alias))) - { - num_qualifiers_to_strip = 1; - } +void ExpressionAnalyzer::translateQualifiedNamesImpl(ASTPtr & ast, const std::vector & tables) +{ + if (auto * identifier = typeid_cast(ast.get())) + { + if (identifier->kind == ASTIdentifier::Column) + { + /// Select first table name with max number of qualifiers which can be stripped. 
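// [Editor's note] What stripIdentifier above does to a dotted name, sketched
// on plain strings; the real function edits AST children and re-attaches the
// node's alias, which is omitted here.
#include <cassert>
#include <string>

static std::string stripQualifiers(std::string name, size_t num_to_strip)
{
    while (num_to_strip--)
        name.erase(0, name.find('.') + 1);   /// drop one leading "qualifier."
    return name;
}

int main()
{
    assert(stripQualifiers("database.table.name", 2) == "name");           /// plain column
    assert(stripQualifiers("table.nested.column", 1) == "nested.column");  /// nested column
}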
+ size_t max_num_qualifiers_to_strip = 0; + size_t best_table_pos = 0; - if (num_qualifiers_to_strip) + for (size_t table_pos = 0; table_pos < tables.size(); ++table_pos) + { + const auto & table = tables[table_pos]; + auto num_qualifiers_to_strip = getNumComponentsToStripInOrderToTranslateQualifiedName(*identifier, table); + + if (num_qualifiers_to_strip > max_num_qualifiers_to_strip) { - /// plain column - if (num_components - num_qualifiers_to_strip == 1) - { - String node_alias = ast->tryGetAlias(); - ast = ast->children.back(); - if (!node_alias.empty()) - ast->setAlias(node_alias); - } - else - /// nested column - { - ident->children.erase(ident->children.begin(), ident->children.begin() + num_qualifiers_to_strip); - String new_name; - for (const auto & child : ident->children) - { - if (!new_name.empty()) - new_name += '.'; - new_name += static_cast(*child.get()).name; - } - ident->name = new_name; - } + max_num_qualifiers_to_strip = num_qualifiers_to_strip; + best_table_pos = table_pos; } } + + stripIdentifier(ast, max_num_qualifiers_to_strip); + + /// In case if column from the joined table are in source columns, change it's name to qualified. + if (best_table_pos && source_columns.contains(ast->getColumnName())) + tables[best_table_pos].makeQualifiedName(ast); } } else if (typeid_cast(ast.get())) @@ -377,19 +469,28 @@ void ExpressionAnalyzer::translateQualifiedNamesImpl(ASTPtr & ast, const String if (num_components > 2) throw Exception("Qualified asterisk cannot have more than two qualifiers", ErrorCodes::UNKNOWN_ELEMENT_IN_AST); - /// database.table.*, table.* or alias.* - if ( (num_components == 2 - && !database_name.empty() - && static_cast(*ident->children[0]).name == database_name - && static_cast(*ident->children[1]).name == table_name) - || (num_components == 0 - && ((!table_name.empty() && ident->name == table_name) - || (!alias.empty() && ident->name == alias)))) + for (const auto & table_names : tables) { - /// Replace to plain asterisk. - ast = std::make_shared(); + /// database.table.*, table.* or alias.* + if ((num_components == 2 + && !table_names.database.empty() + && static_cast(*ident->children[0]).name == table_names.database + && static_cast(*ident->children[1]).name == table_names.table) + || (num_components == 0 + && ((!table_names.table.empty() && ident->name == table_names.table) + || (!table_names.alias.empty() && ident->name == table_names.alias)))) + { + /// Replace to plain asterisk. + ast = std::make_shared(); + } } } + else if (auto * join = typeid_cast(ast.get())) + { + /// Don't translate on_expression here in order to resolve equation parts later. 
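// [Editor's note] The table-selection loop above, reduced to its core: each
// candidate table reports how many qualifiers it could strip, and the
// identifier is resolved against the best match. The strictly-greater
// comparison means the leftmost (first) table wins ties. Illustrative sketch only.
#include <cassert>
#include <cstddef>
#include <vector>

static size_t pickBestTable(const std::vector<size_t> & strippable_per_table)
{
    size_t best_pos = 0;
    for (size_t pos = 0; pos < strippable_per_table.size(); ++pos)
        if (strippable_per_table[pos] > strippable_per_table[best_pos])
            best_pos = pos;
    return best_pos;
}

int main()
{
    assert(pickBestTable({1, 2}) == 1);   /// right table matches more qualifiers
    assert(pickBestTable({1, 1}) == 0);   /// tie: keep the left table
}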
+ if (join->using_expression_list) + translateQualifiedNamesImpl(join->using_expression_list, tables); + } else { for (auto & child : ast->children) @@ -398,7 +499,7 @@ void ExpressionAnalyzer::translateQualifiedNamesImpl(ASTPtr & ast, const String if (!typeid_cast(child.get()) && !typeid_cast(child.get())) { - translateQualifiedNamesImpl(child, database_name, table_name, alias); + translateQualifiedNamesImpl(child, tables); } } } @@ -525,8 +626,12 @@ void ExpressionAnalyzer::analyzeAggregation() const ASTTablesInSelectQueryElement * join = select_query->join(); if (join) { - if (static_cast(*join->table_join).using_expression_list) - getRootActions(static_cast(*join->table_join).using_expression_list, true, false, temp_actions); + const auto table_join = static_cast(*join->table_join); + if (table_join.using_expression_list) + getRootActions(table_join.using_expression_list, true, false, temp_actions); + if (table_join.on_expression) + for (const auto & key_ast : analyzed_join.key_asts_left) + getRootActions(key_ast, true, false, temp_actions); addJoinAction(temp_actions, true); } @@ -557,7 +662,7 @@ void ExpressionAnalyzer::analyzeAggregation() const auto & col = block.getByName(column_name); /// Constant expressions have non-null column pointer at this stage. - if (const auto is_constexpr = col.column) + if (col.column && col.column->isColumnConst()) { /// But don't remove last key column if no aggregate functions, otherwise aggregation will not work. if (!aggregate_descriptions.empty() || size > 1) @@ -1528,7 +1633,8 @@ void ExpressionAnalyzer::makeSetsForIndexImpl(const ASTPtr & node, const Block & { NamesAndTypesList temp_columns = source_columns; temp_columns.insert(temp_columns.end(), array_join_columns.begin(), array_join_columns.end()); - temp_columns.insert(temp_columns.end(), columns_added_by_join.begin(), columns_added_by_join.end()); + for (const auto & joined_column : analyzed_join.columns_added_by_join) + temp_columns.push_back(joined_column.name_and_type); ExpressionActionsPtr temp_actions = std::make_shared(temp_columns, settings); getRootActions(func->arguments->children.at(0), true, false, temp_actions); @@ -1813,16 +1919,15 @@ const Block & ScopeStack::getSampleBlock() const void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions) { ScopeStack scopes(actions, settings); + ProjectionManipulatorPtr projection_manipulator; if (!isThereArrayJoin(ast) && settings.enable_conditional_computation && !only_consts) - { projection_manipulator = std::make_shared(scopes, context); - } else - { projection_manipulator = std::make_shared(scopes); - } + getActionsImpl(ast, no_subqueries, only_consts, scopes, projection_manipulator); + actions = scopes.popLevel(); } @@ -1981,6 +2086,28 @@ bool ExpressionAnalyzer::isThereArrayJoin(const ASTPtr & ast) } } +void ExpressionAnalyzer::getActionsFromJoinKeys(const ASTTableJoin & table_join, bool no_subqueries, bool only_consts, + ExpressionActionsPtr & actions) +{ + ScopeStack scopes(actions, settings); + + ProjectionManipulatorPtr projection_manipulator; + if (!isThereArrayJoin(ast) && settings.enable_conditional_computation && !only_consts) + projection_manipulator = std::make_shared(scopes, context); + else + projection_manipulator = std::make_shared(scopes); + + if (table_join.using_expression_list) + getActionsImpl(table_join.using_expression_list, no_subqueries, only_consts, scopes, projection_manipulator); + else if (table_join.on_expression) + { + for (const auto 
& ast : analyzed_join.key_asts_left) + getActionsImpl(ast, no_subqueries, only_consts, scopes, projection_manipulator); + } + + actions = scopes.popLevel(); +} + void ExpressionAnalyzer::getActionsImpl(const ASTPtr & ast, bool no_subqueries, bool only_consts, ScopeStack & actions_stack, ProjectionManipulatorPtr projection_manipulator) { @@ -2411,13 +2538,65 @@ bool ExpressionAnalyzer::appendArrayJoin(ExpressionActionsChain & chain, bool on void ExpressionAnalyzer::addJoinAction(ExpressionActionsPtr & actions, bool only_types) const { if (only_types) - actions->add(ExpressionAction::ordinaryJoin(nullptr, columns_added_by_join)); + actions->add(ExpressionAction::ordinaryJoin(nullptr, analyzed_join.key_names_left, + analyzed_join.getColumnsAddedByJoin())); else for (auto & subquery_for_set : subqueries_for_sets) if (subquery_for_set.second.join) - actions->add(ExpressionAction::ordinaryJoin(subquery_for_set.second.join, columns_added_by_join)); + actions->add(ExpressionAction::ordinaryJoin(subquery_for_set.second.join, analyzed_join.key_names_left, + analyzed_join.getColumnsAddedByJoin())); } + +void ExpressionAnalyzer::AnalyzedJoin::createJoinedBlockActions(const ASTSelectQuery * select_query, + const Context & context) +{ + if (!select_query) + return; + + const ASTTablesInSelectQueryElement * join = select_query->join(); + + if (!join) + return; + + const auto & join_params = static_cast(*join->table_join); + + /// Create custom expression list with join keys from right table. + auto expression_list = std::make_shared(); + ASTs & children = expression_list->children; + + if (join_params.on_expression) + for (const auto & join_right_key : key_asts_right) + children.emplace_back(join_right_key); + + NameSet required_columns_set(key_names_right.begin(), key_names_right.end()); + for (const auto & joined_column : columns_added_by_join) + required_columns_set.insert(joined_column.original_name); + + required_columns_set.insert(key_names_right.begin(), key_names_right.end()); + + required_columns_from_joined_table.insert(required_columns_from_joined_table.end(), + required_columns_set.begin(), required_columns_set.end()); + + ExpressionAnalyzer analyzer(expression_list, context, nullptr, columns_from_joined_table, required_columns_from_joined_table); + joined_block_actions = analyzer.getActions(false); + + for (const auto & column_required_from_actions : joined_block_actions->getRequiredColumns()) + if (!required_columns_set.count(column_required_from_actions)) + required_columns_from_joined_table.push_back(column_required_from_actions); +} + + +NamesAndTypesList ExpressionAnalyzer::AnalyzedJoin::getColumnsAddedByJoin() const +{ + NamesAndTypesList result; + for (const auto & joined_column : columns_added_by_join) + result.push_back(joined_column.name_and_type); + + return result; +} + + bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_types) { assertSelect(); @@ -2428,12 +2607,11 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty initChain(chain, source_columns); ExpressionActionsChain::Step & step = chain.steps.back(); - const ASTTablesInSelectQueryElement & join_element = static_cast(*select_query->join()); - const ASTTableJoin & join_params = static_cast(*join_element.table_join); - const ASTTableExpression & table_to_join = static_cast(*join_element.table_expression); + const auto & join_element = static_cast(*select_query->join()); + const auto & join_params = static_cast(*join_element.table_join); + const auto & table_to_join = 
static_cast(*join_element.table_expression); - if (join_params.using_expression_list) - getRootActions(join_params.using_expression_list, only_types, false, step.actions); + getActionsFromJoinKeys(join_params, only_types, false, step.actions); /// Two JOINs are not supported with the same subquery, but different USINGs. auto join_hash = join_element.getTreeHash(); @@ -2444,7 +2622,8 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty /// TODO This syntax does not support specifying a database name. if (table_to_join.database_and_table_name) { - auto database_table = getDatabaseAndTableNameFromIdentifier(static_cast(*table_to_join.database_and_table_name)); + const auto & identifier = static_cast(*table_to_join.database_and_table_name); + auto database_table = getDatabaseAndTableNameFromIdentifier(identifier); StoragePtr table = context.tryGetTable(database_table.first, database_table.second); if (table) @@ -2465,14 +2644,10 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty if (!subquery_for_set.join) { JoinPtr join = std::make_shared( - join_key_names_left, join_key_names_right, + analyzed_join.key_names_left, analyzed_join.key_names_right, analyzed_join.columns_added_by_join_from_right_keys, settings.join_use_nulls, SizeLimits(settings.max_rows_in_join, settings.max_bytes_in_join, settings.join_overflow_mode), join_params.kind, join_params.strictness); - Names required_joined_columns(join_key_names_right.begin(), join_key_names_right.end()); - for (const auto & name_type : columns_added_by_join) - required_joined_columns.push_back(name_type.name); - /** For GLOBAL JOINs (in the case, for example, of the push method for executing GLOBAL subqueries), the following occurs * - in the addExternalStorage function, the JOIN (SELECT ...) subquery is replaced with JOIN _data1, * in the subquery_for_set object this subquery is exposed as source and the temporary table _data1 as the `table`. @@ -2481,20 +2656,44 @@ bool ExpressionAnalyzer::appendJoin(ExpressionActionsChain & chain, bool only_ty if (!subquery_for_set.source) { ASTPtr table; + if (table_to_join.database_and_table_name) table = table_to_join.database_and_table_name; else table = table_to_join.subquery; - auto interpreter = interpretSubquery(table, context, subquery_depth, required_joined_columns); + auto interpreter = interpretSubquery(table, context, subquery_depth, analyzed_join.required_columns_from_joined_table); subquery_for_set.source = std::make_shared( interpreter->getSampleBlock(), [interpreter]() mutable { return interpreter->execute().in; }); } + /// Alias duplicating columns. + for (const auto & joined_column : analyzed_join.columns_added_by_join) + { + const auto & qualified_name = joined_column.name_and_type.name; + if (joined_column.original_name != qualified_name) + subquery_for_set.joined_block_aliases.emplace_back(joined_column.original_name, qualified_name); + } + + auto sample_block = subquery_for_set.source->getHeader(); + analyzed_join.joined_block_actions->execute(sample_block); + for (const auto & name_with_alias : subquery_for_set.joined_block_aliases) + { + if (sample_block.has(name_with_alias.first)) + { + auto pos = sample_block.getPositionByName(name_with_alias.first); + auto column = sample_block.getByPosition(pos); + sample_block.erase(pos); + column.name = name_with_alias.second; + sample_block.insert(std::move(column)); + } + } + /// TODO You do not need to set this up when JOIN is only needed on remote servers. 
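// [Editor's note] The joined_block_aliases fix-up above, modelled on a plain
// map from column name to column (a stand-in for the real Block): each
// (original_name, qualified_name) pair re-registers the subquery's column
// under the name the outer query expects. Illustrative sketch only.
#include <map>
#include <string>
#include <utility>
#include <vector>

using Column = std::vector<int>;   /// stand-in for a real column

static void applyJoinedBlockAliases(
    std::map<std::string, Column> & block,
    const std::vector<std::pair<std::string, std::string>> & aliases)
{
    for (const auto & [original, qualified] : aliases)
    {
        auto it = block.find(original);
        if (it == block.end())
            continue;
        Column column = std::move(it->second);
        block.erase(it);                             /// drop the old name
        block.emplace(qualified, std::move(column)); /// re-insert under the new one
    }
}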
subquery_for_set.join = join; - subquery_for_set.join->setSampleBlock(subquery_for_set.source->getHeader()); + subquery_for_set.join->setSampleBlock(sample_block); + subquery_for_set.joined_block_actions = analyzed_join.joined_block_actions; } addJoinAction(step.actions, false); @@ -2648,7 +2847,8 @@ void ExpressionAnalyzer::appendProjectResult(ExpressionActionsChain & chain) con for (size_t i = 0; i < asts.size(); ++i) { String result_name = asts[i]->getAliasOrColumnName(); - if (required_result_columns.empty() || required_result_columns.count(result_name)) + if (required_result_columns.empty() + || std::find(required_result_columns.begin(), required_result_columns.end(), result_name) != required_result_columns.end()) { result_columns.emplace_back(asts[i]->getColumnName(), result_name); step.required_output.push_back(result_columns.back().second); @@ -2772,19 +2972,30 @@ void ExpressionAnalyzer::collectUsedColumns() * (Do not assume that they are required for reading from the "left" table). */ NameSet available_joined_columns; - collectJoinedColumns(available_joined_columns, columns_added_by_join); + collectJoinedColumns(available_joined_columns); NameSet required_joined_columns; + + for (const auto & left_key_ast : analyzed_join.key_asts_left) + getRequiredSourceColumnsImpl(left_key_ast, available_columns, required, ignored, {}, required_joined_columns); + getRequiredSourceColumnsImpl(ast, available_columns, required, ignored, available_joined_columns, required_joined_columns); - for (NamesAndTypesList::iterator it = columns_added_by_join.begin(); it != columns_added_by_join.end();) + for (auto it = analyzed_join.columns_added_by_join.begin(); it != analyzed_join.columns_added_by_join.end();) { - if (required_joined_columns.count(it->name)) + if (required_joined_columns.count(it->name_and_type.name)) ++it; else - columns_added_by_join.erase(it++); + analyzed_join.columns_added_by_join.erase(it++); } + analyzed_join.createJoinedBlockActions(select_query, context); + + /// Some columns from right join key may be used in query. This columns will be appended to block during join. + for (const auto & right_key_name : analyzed_join.key_names_right) + if (required_joined_columns.count(right_key_name)) + analyzed_join.columns_added_by_join_from_right_keys.insert(right_key_name); + /// Insert the columns required for the ARRAY JOIN calculation into the required columns list. 
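// [Editor's note] The pruning loop above relies on the classic list
// erase-while-iterating idiom; here it is in isolation (illustrative names):
// erase(it++) removes the current node while the post-increment keeps a
// valid iterator to the next one.
#include <list>
#include <set>
#include <string>

static void keepOnlyRequired(std::list<std::string> & joined_columns,
                             const std::set<std::string> & required)
{
    for (auto it = joined_columns.begin(); it != joined_columns.end();)
    {
        if (required.count(*it))
            ++it;
        else
            joined_columns.erase(it++);
    }
}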
NameSet array_join_sources; for (const auto & result_source : array_join_result_to_source) @@ -2830,7 +3041,163 @@ void ExpressionAnalyzer::collectUsedColumns() throw Exception("Unknown identifier: " + *unknown_required_source_columns.begin(), ErrorCodes::UNKNOWN_IDENTIFIER); } -void ExpressionAnalyzer::collectJoinedColumns(NameSet & joined_columns, NamesAndTypesList & joined_columns_name_type) + +void ExpressionAnalyzer::collectJoinedColumnsFromJoinOnExpr() +{ + const auto & tables = static_cast(*select_query->tables); + const auto * left_tables_element = static_cast(tables.children.at(0).get()); + const auto * right_tables_element = select_query->join(); + + if (!left_tables_element || !right_tables_element) + return; + + const auto & table_join = static_cast(*right_tables_element->table_join); + if (!table_join.on_expression) + return; + + const auto & left_table_expression = static_cast(*left_tables_element->table_expression); + const auto & right_table_expression = static_cast(*right_tables_element->table_expression); + + auto left_source_names = getTableNameWithAliasFromTableExpression(left_table_expression, context); + auto right_source_names = getTableNameWithAliasFromTableExpression(right_table_expression, context); + + /// Stores examples of columns which are only from one table. + struct TableBelonging + { + const ASTIdentifier * example_only_from_left = nullptr; + const ASTIdentifier * example_only_from_right = nullptr; + }; + + /// Check all identifiers in ast and decide their possible table belonging. + /// Throws if there are two identifiers definitely from different tables. + std::function get_table_belonging; + get_table_belonging = [&](const ASTPtr & ast) -> TableBelonging + { + auto * identifier = typeid_cast(ast.get()); + if (identifier) + { + if (identifier->kind == ASTIdentifier::Column) + { + auto left_num_components = getNumComponentsToStripInOrderToTranslateQualifiedName(*identifier, left_source_names); + auto right_num_components = getNumComponentsToStripInOrderToTranslateQualifiedName(*identifier, right_source_names); + + /// Assume that component from definite table if num_components is greater than for the other table. + if (left_num_components > right_num_components) + return {identifier, nullptr}; + if (left_num_components < right_num_components) + return {nullptr, identifier}; + } + return {}; + } + + TableBelonging table_belonging; + for (const auto & child : ast->children) + { + auto children_belonging = get_table_belonging(child); + if (!table_belonging.example_only_from_left) + table_belonging.example_only_from_left = children_belonging.example_only_from_left; + if (!table_belonging.example_only_from_right) + table_belonging.example_only_from_right = children_belonging.example_only_from_right; + } + + if (table_belonging.example_only_from_left && table_belonging.example_only_from_right) + throw Exception("Invalid columns in JOIN ON section. 
Columns " + + table_belonging.example_only_from_left->getAliasOrColumnName() + " and " + + table_belonging.example_only_from_right->getAliasOrColumnName() + + " are from different tables.", ErrorCodes::INVALID_JOIN_ON_EXPRESSION); + + return table_belonging; + }; + + std::function translate_qualified_names; + translate_qualified_names = [&](ASTPtr & ast, const DatabaseAndTableWithAlias & source_names) + { + auto * identifier = typeid_cast(ast.get()); + if (identifier) + { + if (identifier->kind == ASTIdentifier::Column) + { + auto num_components = getNumComponentsToStripInOrderToTranslateQualifiedName(*identifier, source_names); + stripIdentifier(ast, num_components); + } + return; + } + + for (auto & child : ast->children) + translate_qualified_names(child, source_names); + }; + + const auto supported_syntax = " Supported syntax: JOIN ON Expr([table.]column, ...) = Expr([table.]column, ...) " + "[AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]"; + auto throwSyntaxException = [&](const String & msg) + { + throw Exception("Invalid expression for JOIN ON. " + msg + supported_syntax, ErrorCodes::INVALID_JOIN_ON_EXPRESSION); + }; + + /// For equal expression find out corresponding table for each part, translate qualified names and add asts to join keys. + auto add_columns_from_equals_expr = [&](const ASTPtr & expr) + { + auto * func_equals = typeid_cast(expr.get()); + if (!func_equals || func_equals->name != "equals") + throwSyntaxException("Expected equals expression, got " + queryToString(expr) + "."); + + ASTPtr left_ast = func_equals->arguments->children.at(0)->clone(); + ASTPtr right_ast = func_equals->arguments->children.at(1)->clone(); + + auto left_table_belonging = get_table_belonging(left_ast); + auto right_table_belonging = get_table_belonging(right_ast); + + bool can_be_left_part_from_left_table = left_table_belonging.example_only_from_right == nullptr; + bool can_be_left_part_from_right_table = left_table_belonging.example_only_from_left == nullptr; + bool can_be_right_part_from_left_table = right_table_belonging.example_only_from_right == nullptr; + bool can_be_right_part_from_right_table = right_table_belonging.example_only_from_left == nullptr; + + auto add_join_keys = [&](ASTPtr & ast_to_left_table, ASTPtr & ast_to_right_table) + { + translate_qualified_names(ast_to_left_table, left_source_names); + translate_qualified_names(ast_to_right_table, right_source_names); + + analyzed_join.key_asts_left.push_back(ast_to_left_table); + analyzed_join.key_names_left.push_back(ast_to_left_table->getColumnName()); + analyzed_join.key_asts_right.push_back(ast_to_right_table); + analyzed_join.key_names_right.push_back(ast_to_right_table->getAliasOrColumnName()); + }; + + /// Default variant when all identifiers may be from any table. + if (can_be_left_part_from_left_table && can_be_right_part_from_right_table) + add_join_keys(left_ast, right_ast); + else if (can_be_left_part_from_right_table && can_be_right_part_from_left_table) + add_join_keys(right_ast, left_ast); + else + { + auto * left_example = left_table_belonging.example_only_from_left ? + left_table_belonging.example_only_from_left : + left_table_belonging.example_only_from_right; + + auto * right_example = right_table_belonging.example_only_from_left ? 
+ right_table_belonging.example_only_from_left : + right_table_belonging.example_only_from_right; + + auto left_name = queryToString(*left_example); + auto right_name = queryToString(*right_example); + auto expr_name = queryToString(expr); + + throwSyntaxException("In expression " + expr_name + " columns " + left_name + " and " + right_name + + " are from the same table but from different arguments of equal function."); + } + }; + + auto * func = typeid_cast(table_join.on_expression.get()); + if (func && func->name == "and") + { + for (const auto & expr : func->arguments->children) + add_columns_from_equals_expr(expr); + } + else + add_columns_from_equals_expr(table_join.on_expression); +} + +void ExpressionAnalyzer::collectJoinedColumns(NameSet & joined_columns) { if (!select_query) return; @@ -2840,13 +3207,15 @@ void ExpressionAnalyzer::collectJoinedColumns(NameSet & joined_columns, NamesAnd if (!node) return; - const ASTTableJoin & table_join = static_cast(*node->table_join); - const ASTTableExpression & table_expression = static_cast(*node->table_expression); + const auto & table_join = static_cast(*node->table_join); + const auto & table_expression = static_cast(*node->table_expression); + auto joined_table_name = getTableNameWithAliasFromTableExpression(table_expression, context); Block nested_result_sample; if (table_expression.database_and_table_name) { - auto database_table = getDatabaseAndTableNameFromIdentifier(static_cast(*table_expression.database_and_table_name)); + const auto & identifier = static_cast(*table_expression.database_and_table_name); + auto database_table = getDatabaseAndTableNameFromIdentifier(identifier); const auto & table = context.getTable(database_table.first, database_table.second); nested_result_sample = table->getSampleBlockNonMaterialized(); } @@ -2855,34 +3224,57 @@ void ExpressionAnalyzer::collectJoinedColumns(NameSet & joined_columns, NamesAnd const auto & subquery = table_expression.subquery->children.at(0); nested_result_sample = InterpreterSelectWithUnionQuery::getSampleBlock(subquery, context); } + analyzed_join.columns_from_joined_table = nested_result_sample.getNamesAndTypesList(); + + auto add_name_to_join_keys = [](Names & join_keys, ASTs & join_asts, const String & name, const ASTPtr & ast) + { + if (join_keys.end() == std::find(join_keys.begin(), join_keys.end(), name)) + { + join_keys.push_back(name); + join_asts.push_back(ast); + } + else + throw Exception("Duplicate column " + name + " in USING list", ErrorCodes::DUPLICATE_COLUMN); + }; if (table_join.using_expression_list) { auto & keys = typeid_cast(*table_join.using_expression_list); for (const auto & key : keys.children) { - if (join_key_names_left.end() == std::find(join_key_names_left.begin(), join_key_names_left.end(), key->getColumnName())) - join_key_names_left.push_back(key->getColumnName()); - else - throw Exception("Duplicate column " + key->getColumnName() + " in USING list", ErrorCodes::DUPLICATE_COLUMN); - - if (join_key_names_right.end() == std::find(join_key_names_right.begin(), join_key_names_right.end(), key->getAliasOrColumnName())) - join_key_names_right.push_back(key->getAliasOrColumnName()); - else - throw Exception("Duplicate column " + key->getAliasOrColumnName() + " in USING list", ErrorCodes::DUPLICATE_COLUMN); + add_name_to_join_keys(analyzed_join.key_names_left, analyzed_join.key_asts_left, key->getColumnName(), key); + add_name_to_join_keys(analyzed_join.key_names_right, analyzed_join.key_asts_right, key->getAliasOrColumnName(), key); } } + else if 
(table_join.on_expression) + collectJoinedColumnsFromJoinOnExpr(); + + /// When we use JOIN ON syntax, non_joined_columns are columns from join_key_names_left, + /// because even if a column from join_key_names_right, we may need to join it if it has different name. + /// If we use USING syntax, join_key_names_left and join_key_names_right are almost the same, but we need to use + /// join_key_names_right in order to support aliases in USING list. Example: + /// SELECT x FROM tab1 ANY LEFT JOIN tab2 USING (x as y) - will join column x from tab1 with column y from tab2. + auto & not_joined_columns = table_join.using_expression_list ? analyzed_join.key_names_right : analyzed_join.key_names_left; for (const auto i : ext::range(0, nested_result_sample.columns())) { const auto & col = nested_result_sample.safeGetByPosition(i); - if (join_key_names_right.end() == std::find(join_key_names_right.begin(), join_key_names_right.end(), col.name) - && !joined_columns.count(col.name)) /// Duplicate columns in the subquery for JOIN do not make sense. + if (not_joined_columns.end() == std::find(not_joined_columns.begin(), not_joined_columns.end(), col.name)) { - joined_columns.insert(col.name); + auto name = col.name; + /// Change name for duplicate column form joined table. + if (source_columns.contains(name)) + name = joined_table_name.getQualifiedNamePrefix() + name; - bool make_nullable = settings.join_use_nulls && (table_join.kind == ASTTableJoin::Kind::Left || table_join.kind == ASTTableJoin::Kind::Full); - joined_columns_name_type.emplace_back(col.name, make_nullable ? makeNullable(col.type) : col.type); + if (joined_columns.count(name)) /// Duplicate columns in the subquery for JOIN do not make sense. + continue; + + joined_columns.insert(name); + + bool make_nullable = settings.join_use_nulls && (table_join.kind == ASTTableJoin::Kind::Left || + table_join.kind == ASTTableJoin::Kind::Full); + auto type = make_nullable ? makeNullable(col.type) : col.type; + analyzed_join.columns_added_by_join.emplace_back(NameAndTypePair(name, std::move(type)), col.name); } } } @@ -2975,7 +3367,8 @@ void ExpressionAnalyzer::getRequiredSourceColumnsImpl(const ASTPtr & ast, */ if (!typeid_cast(child.get()) && !typeid_cast(child.get()) - && !typeid_cast(child.get())) + && !typeid_cast(child.get()) + && !typeid_cast(child.get())) getRequiredSourceColumnsImpl(child, available_columns, required_source_columns, ignored_names, available_joined_columns, required_joined_columns); } @@ -3001,15 +3394,37 @@ void ExpressionAnalyzer::removeUnneededColumnsFromSelectClause() if (!select_query) return; - if (required_result_columns.empty() || select_query->distinct) + if (required_result_columns.empty()) return; ASTs & elements = select_query->select_expression_list->children; - elements.erase(std::remove_if(elements.begin(), elements.end(), [this](const auto & node) + ASTs new_elements; + new_elements.reserve(elements.size()); + + /// Some columns may be queried multiple times, like SELECT x, y, y FROM table. + /// In that case we keep them exactly same number of times. 
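// [Editor's note] The USING bookkeeping above in miniature. For
//   SELECT x FROM tab1 ANY LEFT JOIN tab2 USING (x AS y)
// the left key name is the column name ("x") and the right key name is the
// alias-or-column name ("y"); a repeated name in the USING list is rejected,
// matching add_name_to_join_keys above. Sketch with illustrative names.
#include <algorithm>
#include <stdexcept>
#include <string>
#include <vector>

static void addJoinKey(std::vector<std::string> & key_names, const std::string & name)
{
    if (std::find(key_names.begin(), key_names.end(), name) != key_names.end())
        throw std::runtime_error("Duplicate column " + name + " in USING list");
    key_names.push_back(name);
}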
+ std::map required_columns_with_duplicate_count; + for (const auto & name : required_result_columns) + ++required_columns_with_duplicate_count[name]; + + for (const auto & elem : elements) { - return !required_result_columns.count(node->getAliasOrColumnName()) && !hasArrayJoin(node); - }), elements.end()); + String name = elem->getAliasOrColumnName(); + + auto it = required_columns_with_duplicate_count.find(name); + if (required_columns_with_duplicate_count.end() != it && it->second) + { + new_elements.push_back(elem); + --it->second; + } + else if (select_query->distinct || hasArrayJoin(elem)) + { + new_elements.push_back(elem); + } + } + + elements = std::move(new_elements); } } diff --git a/dbms/src/Interpreters/ExpressionAnalyzer.h b/dbms/src/Interpreters/ExpressionAnalyzer.h index 4b8d21daa0f..25a861a2123 100644 --- a/dbms/src/Interpreters/ExpressionAnalyzer.h +++ b/dbms/src/Interpreters/ExpressionAnalyzer.h @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB { @@ -51,6 +52,10 @@ struct SubqueryForSet /// If set, build it from result. SetPtr set; JoinPtr join; + /// Apply this actions to joined block. + ExpressionActionsPtr joined_block_actions; + /// Rename column from joined block from this list. + NamesWithAliases joined_block_aliases; /// If set, put the result into the table. /// This is a temporary table for transferring to remote servers for distributed query processing. @@ -86,6 +91,19 @@ struct ScopeStack const Block & getSampleBlock() const; }; +struct DatabaseAndTableWithAlias +{ + String database; + String table; + String alias; + + /// "alias." or "database.table." if alias is empty + String getQualifiedNamePrefix() const; + + /// If ast is ASTIdentifier, prepend getQualifiedNamePrefix() to it's name. + void makeQualifiedName(const ASTPtr & ast) const; +}; + /** Transforms an expression from a syntax tree into a sequence of actions to execute it. * * NOTE: if `ast` is a SELECT query from a table, the structure of this table should not change during the lifetime of ExpressionAnalyzer. @@ -186,7 +204,7 @@ private: /** If non-empty, ignore all expressions in not from this list. */ - NameSet required_result_columns; + Names required_result_columns; /// Columns after ARRAY JOIN, JOIN, and/or aggregation. NamesAndTypesList aggregated_columns; @@ -207,19 +225,57 @@ private: PreparedSets prepared_sets; - /// NOTE: So far, only one JOIN per query is supported. + struct AnalyzedJoin + { - /** Query of the form `SELECT expr(x) AS FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k` - * The join is made by column k. - * During the JOIN, - * - in the "right" table, it will be available by alias `k`, since `Project` action for the subquery was executed. - * - in the "left" table, it will be accessible by the name `expr(x)`, since `Project` action has not been executed yet. - * You must remember both of these options. - */ - Names join_key_names_left; - Names join_key_names_right; + /// NOTE: So far, only one JOIN per query is supported. - NamesAndTypesList columns_added_by_join; + /** Query of the form `SELECT expr(x) AS k FROM t1 ANY LEFT JOIN (SELECT expr(x) AS k FROM t2) USING k` + * The join is made by column k. + * During the JOIN, + * - in the "right" table, it will be available by alias `k`, since `Project` action for the subquery was executed. + * - in the "left" table, it will be accessible by the name `expr(x)`, since `Project` action has not been executed yet. + * You must remember both of these options. + * + * Query of the form `SELECT ... 
from t1 ANY LEFT JOIN (SELECT ... from t2) ON expr(t1 columns) = expr(t2 columns)` + * to the subquery will be added expression `expr(t2 columns)`. + * It's possible to use name `expr(t2 columns)`. + */ + Names key_names_left; + Names key_names_right; + ASTs key_asts_left; + ASTs key_asts_right; + + struct JoinedColumn + { + /// Column will be joined to block. + NameAndTypePair name_and_type; + /// original column name from joined source. + String original_name; + + JoinedColumn(const NameAndTypePair & name_and_type_, const String & original_name_) + : name_and_type(name_and_type_), original_name(original_name_) {} + }; + + using JoinedColumnsList = std::list; + + /// All columns which can be read from joined table. + NamesAndTypesList columns_from_joined_table; + /// Columns which will be used in query to the joined query. + Names required_columns_from_joined_table; + /// Columns which will be added to block, possible including some columns from right join key. + JoinedColumnsList columns_added_by_join; + /// Such columns will be copied from left join keys during join. + NameSet columns_added_by_join_from_right_keys; + /// Actions which need to be calculated on joined block. + ExpressionActionsPtr joined_block_actions; + + void createJoinedBlockActions(const ASTSelectQuery * select_query, const Context & context); + + NamesAndTypesList getColumnsAddedByJoin() const; + }; + + AnalyzedJoin analyzed_join; using Aliases = std::unordered_map; Aliases aliases; @@ -251,7 +307,9 @@ private: /** Find the columns that are obtained by JOIN. */ - void collectJoinedColumns(NameSet & joined_columns, NamesAndTypesList & joined_columns_name_type); + void collectJoinedColumns(NameSet & joined_columns); + /// Parse JOIN ON expression and collect ASTs for joined columns. + void collectJoinedColumnsFromJoinOnExpr(); /** Create a dictionary of aliases. */ @@ -308,6 +366,9 @@ private: void getActionsImpl(const ASTPtr & ast, bool no_subqueries, bool only_consts, ScopeStack & actions_stack, ProjectionManipulatorPtr projection_manipulator); + /// If ast is ASTSelectQuery with JOIN, add actions for JOIN key columns. + void getActionsFromJoinKeys(const ASTTableJoin & table_join, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions); + void getRootActions(const ASTPtr & ast, bool no_subqueries, bool only_consts, ExpressionActionsPtr & actions); void getActionsBeforeAggregation(const ASTPtr & ast, ExpressionActionsPtr & actions, bool no_subqueries); @@ -354,7 +415,7 @@ private: * only one ("main") table is supported. Ambiguity is not detected or resolved. */ void translateQualifiedNames(); - void translateQualifiedNamesImpl(ASTPtr & node, const String & database_name, const String & table_name, const String & alias); + void translateQualifiedNamesImpl(ASTPtr & node, const std::vector & tables); /** Sometimes we have to calculate more columns in SELECT clause than will be returned from query. * This is the case when we have DISTINCT or arrayJoin: we require more columns in SELECT even if we need less columns in result. 
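// [Editor's note] The ExternalLoader change that follows tracks, per config
// file, which object names that file currently defines, then drops loaded
// objects whose definitions disappeared. A sketch with standard containers
// and illustrative names (the real code keys objects by name and remembers
// the config path as their origin):
#include <iterator>
#include <map>
#include <set>
#include <string>

struct LoadedObject { std::string origin; /* config file that defined it */ };

static void eraseRemovedObjects(
    std::map<std::string, LoadedObject> & loaded,
    const std::map<std::string, std::set<std::string>> & defined_in_config)
{
    for (auto it = loaded.begin(); it != loaded.end();)
    {
        const auto config_it = defined_in_config.find(it->second.origin);
        const bool still_defined = config_it != defined_in_config.end()
            && config_it->second.count(it->first);
        it = still_defined ? std::next(it) : loaded.erase(it);
    }
}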
diff --git a/dbms/src/Interpreters/ExternalLoader.cpp b/dbms/src/Interpreters/ExternalLoader.cpp
index fc25327558e..751af361d0f 100644
--- a/dbms/src/Interpreters/ExternalLoader.cpp
+++ b/dbms/src/Interpreters/ExternalLoader.cpp
@@ -228,6 +228,17 @@ void ExternalLoader::reloadFromConfigFiles(const bool throw_on_error, const bool
             throw;
         }
     }
+
+    /// Erase loadable objects whose definitions were removed from the config files.
+    std::list<std::string> removed_loadable_objects;
+    for (const auto & loadable : loadable_objects)
+    {
+        const auto & current_config = loadable_objects_defined_in_config[loadable.second.origin];
+        if (current_config.find(loadable.first) == std::end(current_config))
+            removed_loadable_objects.emplace_back(loadable.first);
+    }
+    for (const auto & name : removed_loadable_objects)
+        loadable_objects.erase(name);
 }
 
 void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const bool throw_on_error,
@@ -250,6 +261,8 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const
     if (force_reload || last_modified > config_last_modified)
     {
         auto config = config_repository->load(config_path);
+
+        loadable_objects_defined_in_config[config_path].clear();
 
         /// Definitions of loadable objects may have changed, recreate all of them
@@ -282,7 +295,8 @@ void ExternalLoader::reloadFromConfigFile(const std::string & config_path, const
                     LOG_WARNING(log, config_path << ": " + config_settings.external_name + " name cannot be empty");
                     continue;
                 }
-
+
+                loadable_objects_defined_in_config[config_path].emplace(name);
                 if (!loadable_name.empty() && name != loadable_name)
                     continue;
diff --git a/dbms/src/Interpreters/ExternalLoader.h b/dbms/src/Interpreters/ExternalLoader.h
index 47163ca60d6..76d6cfc6f96 100644
--- a/dbms/src/Interpreters/ExternalLoader.h
+++ b/dbms/src/Interpreters/ExternalLoader.h
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include <unordered_set>
 #include
 #include
 #include
@@ -146,6 +147,8 @@ private:
     /// Both for loadable_objects and failed_loadable_objects.
std::unordered_map update_times; + std::unordered_map> loadable_objects_defined_in_config; + pcg64 rnd_engine{randomSeed()}; const Configuration & config; @@ -166,8 +169,8 @@ private: /// Check objects definitions in config files and reload or/and add new ones if the definition is changed /// If loadable_name is not empty, load only loadable object with name loadable_name void reloadFromConfigFiles(bool throw_on_error, bool force_reload = false, const std::string & loadable_name = ""); - void reloadFromConfigFile(const std::string & config_path, bool throw_on_error, bool force_reload, - const std::string & loadable_name); + void reloadFromConfigFile(const std::string & config_path, const bool throw_on_error, + const bool force_reload, const std::string & loadable_name); /// Check config files and update expired loadable objects void reloadAndUpdate(bool throw_on_error = false); diff --git a/dbms/src/Interpreters/InterpreterAlterQuery.cpp b/dbms/src/Interpreters/InterpreterAlterQuery.cpp index c58d358dd63..e652065f2e1 100644 --- a/dbms/src/Interpreters/InterpreterAlterQuery.cpp +++ b/dbms/src/Interpreters/InterpreterAlterQuery.cpp @@ -59,6 +59,7 @@ BlockIO InterpreterAlterQuery::execute() switch (command.type) { case PartitionCommand::DROP_PARTITION: + table->checkPartitionCanBeDropped(command.partition); table->dropPartition(query_ptr, command.partition, command.detach, context); break; @@ -68,6 +69,7 @@ BlockIO InterpreterAlterQuery::execute() case PartitionCommand::REPLACE_PARTITION: { + table->checkPartitionCanBeDropped(command.partition); String from_database = command.from_database.empty() ? context.getCurrentDatabase() : command.from_database; auto from_storage = context.getTable(from_database, command.from_table); table->replacePartitionFrom(from_storage, command.partition, command.replace, context); diff --git a/dbms/src/Interpreters/InterpreterCheckQuery.cpp b/dbms/src/Interpreters/InterpreterCheckQuery.cpp index 2657775919c..bb8a1d46143 100644 --- a/dbms/src/Interpreters/InterpreterCheckQuery.cpp +++ b/dbms/src/Interpreters/InterpreterCheckQuery.cpp @@ -1,92 +1,16 @@ #include #include +#include #include -#include #include -#include -#include #include -#include #include #include -#include -#include -#include namespace DB { -namespace ErrorCodes -{ - extern const int INVALID_BLOCK_EXTRA_INFO; - extern const int RECEIVED_EMPTY_DATA; -} - - -namespace -{ - -/// A helper structure for performing a response to a DESCRIBE TABLE query with a Distributed table. -/// Contains information about the local table that was retrieved from a single replica. 
-struct TableDescription -{ - TableDescription(const Block & block, const BlockExtraInfo & extra_info_) - : extra_info(extra_info_) - { - const auto & name_column = typeid_cast(*block.getByName("name").column); - const auto & type_column = typeid_cast(*block.getByName("type").column); - const auto & default_type_column = typeid_cast(*block.getByName("default_type").column); - const auto & default_expression_column = typeid_cast(*block.getByName("default_expression").column); - - size_t row_count = block.rows(); - - names_with_types.reserve(name_column.byteSize() + type_column.byteSize() + (3 * row_count)); - - SHA512_CTX ctx; - SHA512_Init(&ctx); - - bool is_first = true; - for (size_t i = 0; i < row_count; ++i) - { - const auto & name = name_column.getDataAt(i).toString(); - const auto & type = type_column.getDataAt(i).toString(); - const auto & default_type = default_type_column.getDataAt(i).toString(); - const auto & default_expression = default_expression_column.getDataAt(i).toString(); - - names_with_types.append(is_first ? "" : ", "); - names_with_types.append(name); - names_with_types.append(" "); - names_with_types.append(type); - - SHA512_Update(&ctx, reinterpret_cast(name.data()), name.size()); - SHA512_Update(&ctx, reinterpret_cast(type.data()), type.size()); - SHA512_Update(&ctx, reinterpret_cast(default_type.data()), default_type.size()); - SHA512_Update(&ctx, reinterpret_cast(default_expression.data()), default_expression.size()); - - is_first = false; - } - - SHA512_Final(hash.data(), &ctx); - } - - using Hash = std::array; - - BlockExtraInfo extra_info; - std::string names_with_types; - Hash hash; - UInt32 structure_class; -}; - -inline bool operator<(const TableDescription & lhs, const TableDescription & rhs) -{ - return lhs.hash < rhs.hash; -} - -using TableDescriptions = std::deque; - -} - InterpreterCheckQuery::InterpreterCheckQuery(const ASTPtr & query_ptr_, const Context & context_) : query_ptr(query_ptr_), context(context_) { @@ -101,120 +25,14 @@ BlockIO InterpreterCheckQuery::execute() StoragePtr table = context.getTable(database_name, table_name); - auto distributed_table = dynamic_cast(&*table); - if (distributed_table != nullptr) - { - /// For tables with the Distributed engine, the CHECK TABLE query sends a DESCRIBE TABLE request to all replicas. - /// The identity of the structures is checked (column names + column types + default types + expressions - /// by default) of the tables that the distributed table looks at. + auto column = ColumnUInt8::create(); + column->insert(UInt64(table->checkData())); + result = Block{{ std::move(column), std::make_shared(), "result" }}; - const auto & settings = context.getSettingsRef(); + BlockIO res; + res.in = std::make_shared(result); - BlockInputStreams streams = distributed_table->describe(context, settings); - streams[0] = std::make_shared>( - streams, nullptr, settings.max_distributed_connections); - streams.resize(1); - - auto stream_ptr = dynamic_cast(&*streams[0]); - if (stream_ptr == nullptr) - throw Exception("InterpreterCheckQuery: Internal error", ErrorCodes::LOGICAL_ERROR); - auto & stream = *stream_ptr; - - /// Get all data from the DESCRIBE TABLE queries. 
- - TableDescriptions table_descriptions; - - while (true) - { - if (stream.isCancelledOrThrowIfKilled()) - { - BlockIO res; - res.in = std::make_shared(result); - return res; - } - - Block block = stream.read(); - if (!block) - break; - - BlockExtraInfo info = stream.getBlockExtraInfo(); - if (!info.is_valid) - throw Exception("Received invalid block extra info", ErrorCodes::INVALID_BLOCK_EXTRA_INFO); - - table_descriptions.emplace_back(block, info); - } - - if (table_descriptions.empty()) - throw Exception("Received empty data", ErrorCodes::RECEIVED_EMPTY_DATA); - - /// Define an equivalence class for each table structure. - - std::sort(table_descriptions.begin(), table_descriptions.end()); - - UInt32 structure_class = 0; - - auto it = table_descriptions.begin(); - it->structure_class = structure_class; - - auto prev = it; - for (++it; it != table_descriptions.end(); ++it) - { - if (*prev < *it) - ++structure_class; - it->structure_class = structure_class; - prev = it; - } - - /// Construct the result. - - MutableColumnPtr status_column = ColumnUInt8::create(); - MutableColumnPtr host_name_column = ColumnString::create(); - MutableColumnPtr host_address_column = ColumnString::create(); - MutableColumnPtr port_column = ColumnUInt16::create(); - MutableColumnPtr user_column = ColumnString::create(); - MutableColumnPtr structure_class_column = ColumnUInt32::create(); - MutableColumnPtr structure_column = ColumnString::create(); - - /// This value is 1 if the structure is not disposed of anywhere, but 0 otherwise. - UInt8 status_value = (structure_class == 0) ? 1 : 0; - - for (const auto & desc : table_descriptions) - { - status_column->insert(static_cast(status_value)); - structure_class_column->insert(static_cast(desc.structure_class)); - host_name_column->insert(desc.extra_info.host); - host_address_column->insert(desc.extra_info.resolved_address); - port_column->insert(static_cast(desc.extra_info.port)); - user_column->insert(desc.extra_info.user); - structure_column->insert(desc.names_with_types); - } - - Block block; - - block.insert(ColumnWithTypeAndName(std::move(status_column), std::make_shared(), "status")); - block.insert(ColumnWithTypeAndName(std::move(host_name_column), std::make_shared(), "host_name")); - block.insert(ColumnWithTypeAndName(std::move(host_address_column), std::make_shared(), "host_address")); - block.insert(ColumnWithTypeAndName(std::move(port_column), std::make_shared(), "port")); - block.insert(ColumnWithTypeAndName(std::move(user_column), std::make_shared(), "user")); - block.insert(ColumnWithTypeAndName(std::move(structure_class_column), std::make_shared(), "structure_class")); - block.insert(ColumnWithTypeAndName(std::move(structure_column), std::make_shared(), "structure")); - - BlockIO res; - res.in = std::make_shared(block); - - return res; - } - else - { - auto column = ColumnUInt8::create(); - column->insert(UInt64(table->checkData())); - result = Block{{ std::move(column), std::make_shared(), "result" }}; - - BlockIO res; - res.in = std::make_shared(result); - - return res; - } + return res; } } diff --git a/dbms/src/Interpreters/InterpreterDropQuery.cpp b/dbms/src/Interpreters/InterpreterDropQuery.cpp index efe6aae87a8..fb6fe2c8c38 100644 --- a/dbms/src/Interpreters/InterpreterDropQuery.cpp +++ b/dbms/src/Interpreters/InterpreterDropQuery.cpp @@ -75,10 +75,7 @@ BlockIO InterpreterDropQuery::executeToTable(String & database_name_, String & t } else if (kind == ASTDropQuery::Kind::Truncate) { - if (!database_and_table.second->checkTableCanBeDropped()) 
- throw Exception("Table " + database_name + "." + database_and_table.second->getTableName() + - " couldn't be truncated due to failed pre-drop check", - ErrorCodes::TABLE_WAS_NOT_DROPPED); + database_and_table.second->checkTableCanBeDropped(); /// If table was already dropped by anyone, an exception will be thrown auto table_lock = database_and_table.second->lockDataForAlter(__PRETTY_FUNCTION__); @@ -87,10 +84,7 @@ BlockIO InterpreterDropQuery::executeToTable(String & database_name_, String & t } else if (kind == ASTDropQuery::Kind::Drop) { - if (!database_and_table.second->checkTableCanBeDropped()) - throw Exception("Table " + database_name + "." + database_and_table.second->getTableName() + - " couldn't be dropped due to failed pre-drop check", - ErrorCodes::TABLE_WAS_NOT_DROPPED); + database_and_table.second->checkTableCanBeDropped(); database_and_table.second->shutdown(); /// If table was already dropped by anyone, an exception will be thrown diff --git a/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp b/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp index 336b45e7a5c..f0add31dc38 100644 --- a/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/dbms/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,9 @@ BlockIO InterpreterKillQueryQuery::execute() { ASTKillQueryQuery & query = typeid_cast(*query_ptr); + if (!query.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context, {"system"}); + BlockIO res_io; Block processes_block = getSelectFromSystemProcessesResult(); if (!processes_block) diff --git a/dbms/src/Interpreters/InterpreterOptimizeQuery.cpp b/dbms/src/Interpreters/InterpreterOptimizeQuery.cpp index 2472cff1876..80a64d83f90 100644 --- a/dbms/src/Interpreters/InterpreterOptimizeQuery.cpp +++ b/dbms/src/Interpreters/InterpreterOptimizeQuery.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include @@ -18,6 +19,9 @@ BlockIO InterpreterOptimizeQuery::execute() { const ASTOptimizeQuery & ast = typeid_cast(*query_ptr); + if (!ast.cluster.empty()) + return executeDDLQueryOnCluster(query_ptr, context, {ast.database}); + StoragePtr table = context.getTable(ast.database, ast.table); auto table_lock = table->lockStructure(true, __PRETTY_FUNCTION__); table->optimize(query_ptr, ast.partition, ast.final, ast.deduplicate, context); diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.cpp b/dbms/src/Interpreters/InterpreterSelectQuery.cpp index e7923f48297..c821eea43d5 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectQuery.cpp @@ -64,23 +64,33 @@ namespace ErrorCodes InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, - const Names & required_result_column_names_, + const Names & required_result_column_names, QueryProcessingStage::Enum to_stage_, size_t subquery_depth_, - const BlockInputStreamPtr & input, - bool only_analyze) - : query_ptr(query_ptr_->clone()) /// Note: the query is cloned because it will be modified during analysis. 
- , query(typeid_cast(*query_ptr)) - , context(context_) - , to_stage(to_stage_) - , subquery_depth(subquery_depth_) - , only_analyze(only_analyze) - , input(input) - , log(&Logger::get("InterpreterSelectQuery")) + bool only_analyze_) + : InterpreterSelectQuery(query_ptr_, context_, nullptr, nullptr, required_result_column_names, to_stage_, subquery_depth_, only_analyze_) { - init(required_result_column_names_); } +InterpreterSelectQuery::InterpreterSelectQuery( + const ASTPtr & query_ptr_, + const Context & context_, + const BlockInputStreamPtr & input_, + QueryProcessingStage::Enum to_stage_, + bool only_analyze_) + : InterpreterSelectQuery(query_ptr_, context_, input_, nullptr, Names{}, to_stage_, 0, only_analyze_) +{ +} + +InterpreterSelectQuery::InterpreterSelectQuery( + const ASTPtr & query_ptr_, + const Context & context_, + const StoragePtr & storage_, + QueryProcessingStage::Enum to_stage_, + bool only_analyze_) + : InterpreterSelectQuery(query_ptr_, context_, nullptr, storage_, Names{}, to_stage_, 0, only_analyze_) +{ +} InterpreterSelectQuery::~InterpreterSelectQuery() = default; @@ -100,8 +110,24 @@ static Context getSubqueryContext(const Context & context) return subquery_context; } - -void InterpreterSelectQuery::init(const Names & required_result_column_names) +InterpreterSelectQuery::InterpreterSelectQuery( + const ASTPtr & query_ptr_, + const Context & context_, + const BlockInputStreamPtr & input_, + const StoragePtr & storage_, + const Names & required_result_column_names, + QueryProcessingStage::Enum to_stage_, + size_t subquery_depth_, + bool only_analyze_) + : query_ptr(query_ptr_->clone()) /// Note: the query is cloned because it will be modified during analysis. + , query(typeid_cast(*query_ptr)) + , context(context_) + , to_stage(to_stage_) + , subquery_depth(subquery_depth_) + , only_analyze(only_analyze_) + , storage(storage_) + , input(input_) + , log(&Logger::get("InterpreterSelectQuery")) { if (!context.hasQueryContext()) context.setQueryContext(context); @@ -130,20 +156,23 @@ void InterpreterSelectQuery::init(const Names & required_result_column_names) source_header = interpreter_subquery->getSampleBlock(); } - else if (table_expression && typeid_cast(table_expression.get())) + else if (!storage) { - /// Read from table function. - storage = context.getQueryContext().executeTableFunction(table_expression); - } - else - { - /// Read from table. Even without table expression (implicit SELECT ... FROM system.one). - String database_name; - String table_name; + if (table_expression && typeid_cast(table_expression.get())) + { + /// Read from table function. + storage = context.getQueryContext().executeTableFunction(table_expression); + } + else + { + /// Read from table. Even without table expression (implicit SELECT ... FROM system.one). + String database_name; + String table_name; - getDatabaseAndTableNames(database_name, table_name); + getDatabaseAndTableNames(database_name, table_name); - storage = context.getTable(database_name, table_name); + storage = context.getTable(database_name, table_name); + } } if (storage) @@ -184,7 +213,7 @@ void InterpreterSelectQuery::init(const Names & required_result_column_names) /// Calculate structure of the result. 
{ Pipeline pipeline; - executeImpl(pipeline, input, true); + executeImpl(pipeline, nullptr, true); result_header = pipeline.firstStream()->getHeader(); } } @@ -332,9 +361,6 @@ InterpreterSelectQuery::AnalysisResult InterpreterSelectQuery::analyzeExpression void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputStreamPtr & input, bool dry_run) { - if (input) - pipeline.streams.push_back(input); - /** Streams of data. When the query is executed in parallel, we have several data streams. * If there is no GROUP BY, then perform all operations before ORDER BY and LIMIT in parallel, then * if there is an ORDER BY, then glue the streams using UnionBlockInputStream, and then MergeSortingBlockInputStream, @@ -354,6 +380,9 @@ void InterpreterSelectQuery::executeImpl(Pipeline & pipeline, const BlockInputSt } else { + if (input) + pipeline.streams.push_back(input); + /** Read the data from Storage. from_stage - to what stage the request was completed in Storage. */ QueryProcessingStage::Enum from_stage = executeFetchColumns(pipeline); diff --git a/dbms/src/Interpreters/InterpreterSelectQuery.h b/dbms/src/Interpreters/InterpreterSelectQuery.h index fc71b713daa..26f6c2f15ab 100644 --- a/dbms/src/Interpreters/InterpreterSelectQuery.h +++ b/dbms/src/Interpreters/InterpreterSelectQuery.h @@ -38,9 +38,6 @@ public: * - to control the limit on the depth of nesting of subqueries. For subqueries, a value that is incremented by one is passed; * for INSERT SELECT, a value 1 is passed instead of 0. * - * input - * - if given - read not from the table specified in the query, but from prepared source. - * * required_result_column_names * - don't calculate all columns except the specified ones from the query * - it is used to remove calculation (and reading) of unnecessary columns from subqueries. @@ -53,8 +50,23 @@ public: const Names & required_result_column_names = Names{}, QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete, size_t subquery_depth_ = 0, - const BlockInputStreamPtr & input = nullptr, - bool only_analyze = false); + bool only_analyze_ = false); + + /// Read data not from the table specified in the query, but from the prepared source `input`. + InterpreterSelectQuery( + const ASTPtr & query_ptr_, + const Context & context_, + const BlockInputStreamPtr & input_, + QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete, + bool only_analyze_ = false); + + /// Read data not from the table specified in the query, but from the specified `storage_`. + InterpreterSelectQuery( + const ASTPtr & query_ptr_, + const Context & context_, + const StoragePtr & storage_, + QueryProcessingStage::Enum to_stage_ = QueryProcessingStage::Complete, + bool only_analyze_ = false); ~InterpreterSelectQuery() override; @@ -69,6 +81,17 @@ public: void ignoreWithTotals(); private: + InterpreterSelectQuery( + const ASTPtr & query_ptr_, + const Context & context_, + const BlockInputStreamPtr & input_, + const StoragePtr & storage_, + const Names & required_result_column_names, + QueryProcessingStage::Enum to_stage_, + size_t subquery_depth_, + bool only_analyze_); + + struct Pipeline { /** Streams of data. 
@@ -102,8 +125,6 @@ private: } }; - void init(const Names & required_result_column_names); - void executeImpl(Pipeline & pipeline, const BlockInputStreamPtr & input, bool dry_run); @@ -179,7 +200,7 @@ private: ASTSelectQuery & query; Context context; QueryProcessingStage::Enum to_stage; - size_t subquery_depth; + size_t subquery_depth = 0; std::unique_ptr query_analyzer; /// How many streams we ask for storage to produce, and in how many threads we will do further processing. diff --git a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 45d17e45b78..fa8d8bc2c86 100644 --- a/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/dbms/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -11,6 +11,7 @@ #include #include + namespace DB { @@ -57,7 +58,7 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery( /// We use it to determine positions of 'required_result_column_names' in SELECT clause. Block full_result_header = InterpreterSelectQuery( - ast.list_of_selects->children.at(0), context, Names(), to_stage, subquery_depth, nullptr, true).getSampleBlock(); + ast.list_of_selects->children.at(0), context, Names(), to_stage, subquery_depth, true).getSampleBlock(); std::vector positions_of_required_result_columns(required_result_column_names.size()); for (size_t required_result_num = 0, size = required_result_column_names.size(); required_result_num < size; ++required_result_num) @@ -66,10 +67,14 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery( for (size_t query_num = 1; query_num < num_selects; ++query_num) { Block full_result_header_for_current_select = InterpreterSelectQuery( - ast.list_of_selects->children.at(query_num), context, Names(), to_stage, subquery_depth, nullptr, true).getSampleBlock(); + ast.list_of_selects->children.at(query_num), context, Names(), to_stage, subquery_depth, true).getSampleBlock(); if (full_result_header_for_current_select.columns() != full_result_header.columns()) - throw Exception("Different number of columns in UNION ALL elements", ErrorCodes::UNION_ALL_RESULT_STRUCTURES_MISMATCH); + throw Exception("Different number of columns in UNION ALL elements:\n" + + full_result_header.dumpNames() + + "\nand\n" + + full_result_header_for_current_select.dumpNames() + "\n", + ErrorCodes::UNION_ALL_RESULT_STRUCTURES_MISMATCH); required_result_column_names_for_other_selects[query_num].reserve(required_result_column_names.size()); for (const auto & pos : positions_of_required_result_columns) @@ -84,10 +89,10 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery( : required_result_column_names_for_other_selects[query_num]; nested_interpreters.emplace_back(std::make_unique( - ast.list_of_selects->children.at(query_num), context, current_required_result_column_names, to_stage, subquery_depth, nullptr, only_analyze)); + ast.list_of_selects->children.at(query_num), context, current_required_result_column_names, to_stage, subquery_depth, only_analyze)); } - /// Determine structure of result. + /// Determine structure of the result. 
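// For context: a minimal, self-contained sketch (illustrative names only, not
// ClickHouse's actual helpers) of how the improved UNION ALL diagnostic above
// is assembled from the column names of the two mismatched headers, in the
// spirit of the dumpNames() calls added in this hunk.
#include <string>
#include <vector>

// Join column names into one comma-separated list, as a header dump would.
static std::string dumpNames(const std::vector<std::string> & names)
{
    std::string res;
    for (const auto & name : names)
    {
        if (!res.empty())
            res += ", ";
        res += name;
    }
    return res;
}

static std::string unionMismatchMessage(const std::vector<std::string> & expected,
                                        const std::vector<std::string> & got)
{
    return "Different number of columns in UNION ALL elements:\n"
        + dumpNames(expected) + "\nand\n" + dumpNames(got) + "\n";
}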
if (num_selects == 1) { @@ -104,7 +109,11 @@ InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery( for (size_t query_num = 1; query_num < num_selects; ++query_num) if (headers[query_num].columns() != num_columns) - throw Exception("Different number of columns in UNION ALL elements", ErrorCodes::UNION_ALL_RESULT_STRUCTURES_MISMATCH); + throw Exception("Different number of columns in UNION ALL elements:\n" + + result_header.dumpNames() + + "\nand\n" + + headers[query_num].dumpNames() + "\n", + ErrorCodes::UNION_ALL_RESULT_STRUCTURES_MISMATCH); for (size_t column_num = 0; column_num < num_columns; ++column_num) { diff --git a/dbms/src/Interpreters/Join.cpp b/dbms/src/Interpreters/Join.cpp index 7409af75986..e49fb089d18 100644 --- a/dbms/src/Interpreters/Join.cpp +++ b/dbms/src/Interpreters/Join.cpp @@ -30,11 +30,12 @@ namespace ErrorCodes } -Join::Join(const Names & key_names_left_, const Names & key_names_right_, bool use_nulls_, - const SizeLimits & limits, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_) +Join::Join(const Names & key_names_left_, const Names & key_names_right_, const NameSet & needed_key_names_right_, + bool use_nulls_, const SizeLimits & limits, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_) : kind(kind_), strictness(strictness_), key_names_left(key_names_left_), key_names_right(key_names_right_), + needed_key_names_right(needed_key_names_right_), use_nulls(use_nulls_), log(&Logger::get("Join")), limits(limits) @@ -776,6 +777,19 @@ void Join::joinBlockImpl(Block & block, const Maps & maps) const if (offsets_to_replicate) for (size_t i = 0; i < existing_columns; ++i) block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); + + /// Add join key columns from right block if they has different name. + for (size_t i = 0; i < key_names_right.size(); ++i) + { + auto & right_name = key_names_right[i]; + auto & left_name = key_names_left[i]; + + if (needed_key_names_right.count(right_name) && !block.has(right_name)) + { + const auto & col = block.getByName(left_name); + block.insert({col.column, col.type, right_name}); + } + } } diff --git a/dbms/src/Interpreters/Join.h b/dbms/src/Interpreters/Join.h index dbaa1b1812b..38527aa3cec 100644 --- a/dbms/src/Interpreters/Join.h +++ b/dbms/src/Interpreters/Join.h @@ -219,8 +219,8 @@ struct JoinKeyGetterHashed class Join { public: - Join(const Names & key_names_left_, const Names & key_names_right_, bool use_nulls_, - const SizeLimits & limits, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_); + Join(const Names & key_names_left_, const Names & key_names_right_, const NameSet & needed_key_names_right_, + bool use_nulls_, const SizeLimits & limits, ASTTableJoin::Kind kind_, ASTTableJoin::Strictness strictness_); bool empty() { return type == Type::EMPTY; } @@ -361,6 +361,8 @@ private: const Names key_names_left; /// Names of key columns (columns for equi-JOIN) in "right" table (in the order they appear in USING clause). const Names key_names_right; + /// Names of key columns in the "right" table which should stay in block after join. + const NameSet needed_key_names_right; /// Substitute NULLs for non-JOINed rows. 
    bool use_nulls;
diff --git a/dbms/src/Interpreters/QueryLog.cpp b/dbms/src/Interpreters/QueryLog.cpp
index 47d2b4ad310..633d594cb8a 100644
--- a/dbms/src/Interpreters/QueryLog.cpp
+++ b/dbms/src/Interpreters/QueryLog.cpp
@@ -59,6 +59,9 @@ Block QueryLogElement::createBlock()
         {ColumnString::create(), std::make_shared<DataTypeString>(), "client_hostname"},
         {ColumnString::create(), std::make_shared<DataTypeString>(), "client_name"},
         {ColumnUInt32::create(), std::make_shared<DataTypeUInt32>(), "client_revision"},
+        {ColumnUInt32::create(), std::make_shared<DataTypeUInt32>(), "client_version_major"},
+        {ColumnUInt32::create(), std::make_shared<DataTypeUInt32>(), "client_version_minor"},
+        {ColumnUInt32::create(), std::make_shared<DataTypeUInt32>(), "client_version_patch"},
 
         {ColumnUInt8::create(), std::make_shared<DataTypeUInt8>(), "http_method"},
         {ColumnString::create(), std::make_shared<DataTypeString>(), "http_user_agent"},
@@ -138,6 +141,9 @@ void QueryLogElement::appendToBlock(Block & block) const
     columns[i++]->insert(client_info.client_hostname);
     columns[i++]->insert(client_info.client_name);
     columns[i++]->insert(UInt64(client_info.client_revision));
+    columns[i++]->insert(UInt64(client_info.client_version_major));
+    columns[i++]->insert(UInt64(client_info.client_version_minor));
+    columns[i++]->insert(UInt64(client_info.client_version_patch));
 
     columns[i++]->insert(UInt64(client_info.http_method));
     columns[i++]->insert(client_info.http_user_agent);
diff --git a/dbms/src/Interpreters/Settings.h b/dbms/src/Interpreters/Settings.h
index 64cbe7b39b3..b5384309de9 100644
--- a/dbms/src/Interpreters/Settings.h
+++ b/dbms/src/Interpreters/Settings.h
@@ -154,6 +154,8 @@ struct Settings
     \
     M(SettingBool, output_format_json_quote_denormals, false, "Enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format.") \
     \
+    M(SettingBool, output_format_json_escape_forward_slashes, true, "Controls escaping forward slashes for string outputs in JSON output format. This is intended for compatibility with JavaScript. Don't confuse with backslashes that are always escaped.") \
+    \
     M(SettingUInt64, output_format_pretty_max_rows, 10000, "Rows limit for Pretty formats.") \
     M(SettingBool, output_format_pretty_color, true, "Use ANSI escape sequences to paint colors in Pretty formats") \
     \
diff --git a/dbms/src/Interpreters/evaluateConstantExpression.cpp b/dbms/src/Interpreters/evaluateConstantExpression.cpp
index 8ab3ca7bf1a..3fee9a8e2a7 100644
--- a/dbms/src/Interpreters/evaluateConstantExpression.cpp
+++ b/dbms/src/Interpreters/evaluateConstantExpression.cpp
@@ -3,6 +3,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -10,6 +11,7 @@
 #include
 #include
 #include
+#include
 
 namespace DB
 {
@@ -52,13 +54,18 @@ std::pair<Field, std::shared_ptr<const IDataType>> evaluateConstantExpression(co
 
 ASTPtr evaluateConstantExpressionAsLiteral(const ASTPtr & node, const Context & context)
 {
+    /// Branch with string in query.
     if (typeid_cast<ASTLiteral *>(node.get()))
         return node;
 
+    /// Branch with TableFunction in query.
+ if (auto table_func_ptr = typeid_cast(node.get())) + if (TableFunctionFactory::instance().isTableFunctionName(table_func_ptr->name)) + return node; + return std::make_shared(evaluateConstantExpression(node, context).first); } - ASTPtr evaluateConstantExpressionOrIdentifierAsLiteral(const ASTPtr & node, const Context & context) { if (auto id = typeid_cast(node.get())) diff --git a/dbms/src/Parsers/ASTKillQueryQuery.cpp b/dbms/src/Parsers/ASTKillQueryQuery.cpp index 8be944e8481..0f3e5406fdd 100644 --- a/dbms/src/Parsers/ASTKillQueryQuery.cpp +++ b/dbms/src/Parsers/ASTKillQueryQuery.cpp @@ -8,9 +8,22 @@ String ASTKillQueryQuery::getID() const return "KillQueryQuery_" + (where_expression ? where_expression->getID() : "") + "_" + String(sync ? "SYNC" : "ASYNC"); } +ASTPtr ASTKillQueryQuery::getRewrittenASTWithoutOnCluster(const std::string & /*new_database*/) const +{ + auto query_ptr = clone(); + ASTKillQueryQuery & query = static_cast(*query_ptr); + + query.cluster.clear(); + + return query_ptr; +} + void ASTKillQueryQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { - settings.ostr << (settings.hilite ? hilite_keyword : "") << "KILL QUERY WHERE " << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << "KILL QUERY "; + + formatOnCluster(settings); + settings.ostr << " WHERE " << (settings.hilite ? hilite_none : ""); if (where_expression) where_expression->formatImpl(settings, state, frame); diff --git a/dbms/src/Parsers/ASTKillQueryQuery.h b/dbms/src/Parsers/ASTKillQueryQuery.h index 4df1f28f733..086ee55e3bd 100644 --- a/dbms/src/Parsers/ASTKillQueryQuery.h +++ b/dbms/src/Parsers/ASTKillQueryQuery.h @@ -1,10 +1,11 @@ #include #include +#include namespace DB { -class ASTKillQueryQuery : public ASTQueryWithOutput +class ASTKillQueryQuery : public ASTQueryWithOutput, public ASTQueryWithOnCluster { public: ASTPtr where_expression; // expression to filter processes from system.processes table @@ -22,6 +23,8 @@ public: String getID() const override; void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; + + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &new_database) const override; }; } diff --git a/dbms/src/Parsers/ASTOptimizeQuery.cpp b/dbms/src/Parsers/ASTOptimizeQuery.cpp new file mode 100644 index 00000000000..dd37b665173 --- /dev/null +++ b/dbms/src/Parsers/ASTOptimizeQuery.cpp @@ -0,0 +1,39 @@ +#include + +namespace DB +{ + + +ASTPtr ASTOptimizeQuery::getRewrittenASTWithoutOnCluster(const std::string & new_database) const +{ + auto query_ptr = clone(); + ASTOptimizeQuery & query = static_cast(*query_ptr); + + query.cluster.clear(); + if (query.database.empty()) + query.database = new_database; + + return query_ptr; +} + +void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const +{ + settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "") + << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + + formatOnCluster(settings); + + if (partition) + { + settings.ostr << (settings.hilite ? hilite_keyword : "") << " PARTITION " << (settings.hilite ? hilite_none : ""); + partition->formatImpl(settings, state, frame); + } + + if (final) + settings.ostr << (settings.hilite ? hilite_keyword : "") << " FINAL" << (settings.hilite ? 
hilite_none : ""); + + if (deduplicate) + settings.ostr << (settings.hilite ? hilite_keyword : "") << " DEDUPLICATE" << (settings.hilite ? hilite_none : ""); +} + +} diff --git a/dbms/src/Parsers/ASTOptimizeQuery.h b/dbms/src/Parsers/ASTOptimizeQuery.h index 571b04d22ef..0b329d59559 100644 --- a/dbms/src/Parsers/ASTOptimizeQuery.h +++ b/dbms/src/Parsers/ASTOptimizeQuery.h @@ -1,7 +1,8 @@ #pragma once #include - +#include +#include namespace DB { @@ -9,7 +10,7 @@ namespace DB /** OPTIMIZE query */ -class ASTOptimizeQuery : public IAST +class ASTOptimizeQuery : public ASTQueryWithOutput, public ASTQueryWithOnCluster { public: String database; @@ -23,7 +24,8 @@ public: bool deduplicate; /** Get the text that identifies this element. */ - String getID() const override { return "OptimizeQuery_" + database + "_" + table + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : ""); } + String getID() const override + { return "OptimizeQuery_" + database + "_" + table + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : ""); } ASTPtr clone() const override { @@ -39,24 +41,10 @@ public: return res; } -protected: - void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override - { - settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; - if (partition) - { - settings.ostr << (settings.hilite ? hilite_keyword : "") << " PARTITION " << (settings.hilite ? hilite_none : ""); - partition->formatImpl(settings, state, frame); - } + ASTPtr getRewrittenASTWithoutOnCluster(const std::string &new_database) const override; - if (final) - settings.ostr << (settings.hilite ? hilite_keyword : "") << " FINAL" << (settings.hilite ? hilite_none : ""); - - if (deduplicate) - settings.ostr << (settings.hilite ? hilite_keyword : "") << " DEDUPLICATE" << (settings.hilite ? 
hilite_none : ""); - } }; } diff --git a/dbms/src/Parsers/ASTSelectQuery.cpp b/dbms/src/Parsers/ASTSelectQuery.cpp index f234b0ae4b5..f4b4c217de1 100644 --- a/dbms/src/Parsers/ASTSelectQuery.cpp +++ b/dbms/src/Parsers/ASTSelectQuery.cpp @@ -388,5 +388,27 @@ void ASTSelectQuery::replaceDatabaseAndTable(const String & database_name, const } } + +void ASTSelectQuery::addTableFunction(ASTPtr & table_function_ptr) +{ + ASTTableExpression * table_expression = getFirstTableExpression(*this); + + if (!table_expression) + { + auto tables_list = std::make_shared(); + auto element = std::make_shared(); + auto table_expr = std::make_shared(); + element->table_expression = table_expr; + element->children.emplace_back(table_expr); + tables_list->children.emplace_back(element); + tables = tables_list; + children.emplace_back(tables_list); + table_expression = table_expr.get(); + } + + table_expression->table_function = table_function_ptr; + table_expression->database_and_table_name = nullptr; +} + }; diff --git a/dbms/src/Parsers/ASTSelectQuery.h b/dbms/src/Parsers/ASTSelectQuery.h index d45f45c34d8..91d8d52172c 100644 --- a/dbms/src/Parsers/ASTSelectQuery.h +++ b/dbms/src/Parsers/ASTSelectQuery.h @@ -47,6 +47,7 @@ public: bool final() const; void setDatabaseIfNeeded(const String & database_name); void replaceDatabaseAndTable(const String & database_name, const String & table_name); + void addTableFunction(ASTPtr & table_function_ptr); protected: void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override; diff --git a/dbms/src/Parsers/ParserKillQueryQuery.cpp b/dbms/src/Parsers/ParserKillQueryQuery.cpp index e6d1bae2e05..5e674d9da83 100644 --- a/dbms/src/Parsers/ParserKillQueryQuery.cpp +++ b/dbms/src/Parsers/ParserKillQueryQuery.cpp @@ -11,29 +11,36 @@ namespace DB bool ParserKillQueryQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { + String cluster_str; auto query = std::make_shared(); - if (!ParserKeyword{"KILL QUERY"}.ignore(pos, expected)) - return false; - - if (!ParserKeyword{"WHERE"}.ignore(pos, expected)) - return false; - + ParserKeyword p_on{"ON"}; + ParserKeyword p_test{"TEST"}; + ParserKeyword p_sync{"SYNC"}; + ParserKeyword p_async{"ASYNC"}; + ParserKeyword p_where{"WHERE"}; + ParserKeyword p_kill_query{"KILL QUERY"}; ParserExpression p_where_expression; - if (!p_where_expression.parse(pos, query->where_expression, expected)) + + if (!p_kill_query.ignore(pos, expected)) return false; - query->children.emplace_back(query->where_expression); + if (p_on.ignore(pos, expected) && !ASTQueryWithOnCluster::parse(pos, cluster_str, expected)) + return false; - if (ParserKeyword{"SYNC"}.ignore(pos)) + if (p_where.ignore(pos, expected) && !p_where_expression.parse(pos, query->where_expression, expected)) + return false; + + if (p_sync.ignore(pos, expected)) query->sync = true; - else if (ParserKeyword{"ASYNC"}.ignore(pos)) + else if (p_async.ignore(pos, expected)) query->sync = false; - else if (ParserKeyword{"TEST"}.ignore(pos)) + else if (p_test.ignore(pos, expected)) query->test = true; + query->cluster = cluster_str; + query->children.emplace_back(query->where_expression); node = std::move(query); - return true; } diff --git a/dbms/src/Parsers/ParserOptimizeQuery.cpp b/dbms/src/Parsers/ParserOptimizeQuery.cpp index c01a1a7b5df..e0dcf7ffb47 100644 --- a/dbms/src/Parsers/ParserOptimizeQuery.cpp +++ b/dbms/src/Parsers/ParserOptimizeQuery.cpp @@ -28,6 +28,7 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & 
expecte ASTPtr partition; bool final = false; bool deduplicate = false; + String cluster_str; if (!s_optimize_table.ignore(pos, expected)) return false; @@ -42,6 +43,9 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte return false; } + if (ParserKeyword{"ON"}.ignore(pos, expected) && !ASTQueryWithOnCluster::parse(pos, cluster_str, expected)) + return false; + if (s_partition.ignore(pos, expected)) { if (!partition_p.parse(pos, partition, expected)) @@ -61,6 +65,8 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte query->database = typeid_cast(*database).name; if (table) query->table = typeid_cast(*table).name; + + query->cluster = cluster_str; query->partition = partition; query->final = final; query->deduplicate = deduplicate; diff --git a/dbms/src/Parsers/ParserQuery.cpp b/dbms/src/Parsers/ParserQuery.cpp index efdac16d74c..7285e03bad7 100644 --- a/dbms/src/Parsers/ParserQuery.cpp +++ b/dbms/src/Parsers/ParserQuery.cpp @@ -21,14 +21,12 @@ bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserInsertQuery insert_p(end); ParserUseQuery use_p; ParserSetQuery set_p; - ParserOptimizeQuery optimize_p; ParserSystemQuery system_p; bool res = query_with_output_p.parse(pos, node, expected) || insert_p.parse(pos, node, expected) || use_p.parse(pos, node, expected) || set_p.parse(pos, node, expected) - || optimize_p.parse(pos, node, expected) || system_p.parse(pos, node, expected); return res; diff --git a/dbms/src/Parsers/ParserQueryWithOutput.cpp b/dbms/src/Parsers/ParserQueryWithOutput.cpp index e7fdc390dd6..3ec71de5f0c 100644 --- a/dbms/src/Parsers/ParserQueryWithOutput.cpp +++ b/dbms/src/Parsers/ParserQueryWithOutput.cpp @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB @@ -27,6 +28,7 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec ParserRenameQuery rename_p; ParserDropQuery drop_p; ParserCheckQuery check_p; + ParserOptimizeQuery optimize_p; ParserKillQueryQuery kill_query_p; ASTPtr query; @@ -41,7 +43,8 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec || rename_p.parse(pos, query, expected) || drop_p.parse(pos, query, expected) || check_p.parse(pos, query, expected) - || kill_query_p.parse(pos, query, expected); + || kill_query_p.parse(pos, query, expected) + || optimize_p.parse(pos, query, expected); if (!parsed) return false; diff --git a/dbms/src/Storages/IStorage.h b/dbms/src/Storages/IStorage.h index c580bd1f749..58ac6302ff4 100644 --- a/dbms/src/Storages/IStorage.h +++ b/dbms/src/Storages/IStorage.h @@ -325,9 +325,14 @@ public: virtual bool checkData() const { throw DB::Exception("Check query is not supported for " + getName() + " storage"); } /// Checks that table could be dropped right now - /// If it can - returns true - /// Otherwise - throws an exception with detailed information or returns false - virtual bool checkTableCanBeDropped() const { return true; } + /// Otherwise - throws an exception with detailed information. + /// We do not use mutex because it is not very important that the size could change during the operation. + virtual void checkTableCanBeDropped() const {} + + /// Checks that Partition could be dropped right now + /// Otherwise - throws an exception with detailed information. + /// We do not use mutex because it is not very important that the size could change during the operation. 
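// A sketch of the calling pattern the checkTableCanBeDropped() change above
// enables (hypothetical storage class; the real methods are the virtuals
// declared in this hunk). Instead of "if (!check()) throw ...;" repeated at
// every call site, the check itself throws a storage-specific message, so
// callers reduce to a single call, as in InterpreterDropQuery earlier.
#include <stdexcept>
#include <string>

struct ExampleStorage
{
    size_t bytes = 0;
    size_t max_bytes_to_drop = 0;

    // Throws if the table is too big to drop; returns normally otherwise.
    void checkTableCanBeDropped() const
    {
        if (max_bytes_to_drop && bytes > max_bytes_to_drop)
            throw std::runtime_error("Table is too big to drop: "
                + std::to_string(bytes) + " bytes");
    }
};

void dropExample(ExampleStorage & storage)
{
    storage.checkTableCanBeDropped();  // single line at the call site
    // ... proceed with shutdown and drop ...
}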
+ virtual void checkPartitionCanBeDropped(const ASTPtr & /*partition*/) {} /** Notify engine about updated dependencies for this storage. */ virtual void updateDependencies() {} diff --git a/dbms/src/Storages/ITableDeclaration.h b/dbms/src/Storages/ITableDeclaration.h index 74d5b6db6d7..5f15ad626f7 100644 --- a/dbms/src/Storages/ITableDeclaration.h +++ b/dbms/src/Storages/ITableDeclaration.h @@ -39,9 +39,9 @@ public: */ void check(const NamesAndTypesList & columns, const Names & column_names) const; - /** Check that the data block for the record contains all the columns of the table with the correct types, + /** Check that the data block contains all the columns of the table with the correct types, * contains only the columns of the table, and all the columns are different. - * If need_all, still checks that all the columns of the table are in the block. + * If need_all, checks that all the columns of the table are in the block. */ void check(const Block & block, bool need_all = false) const; diff --git a/dbms/src/Storages/Kafka/KafkaSettings.cpp b/dbms/src/Storages/Kafka/KafkaSettings.cpp new file mode 100644 index 00000000000..be6c3b11b05 --- /dev/null +++ b/dbms/src/Storages/Kafka/KafkaSettings.cpp @@ -0,0 +1,44 @@ +#include +#if USE_RDKAFKA + +#include +#include +#include + + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +void KafkaSettings::loadFromQuery(ASTStorage & storage_def) +{ + if (storage_def.settings) + { + for (const ASTSetQuery::Change & setting : storage_def.settings->changes) + { +#define SET(TYPE, NAME, DEFAULT, DESCRIPTION) \ + else if (setting.name == #NAME) NAME.set(setting.value); + + if (false) {} + APPLY_FOR_KAFKA_SETTINGS(SET) + else + throw Exception( + "Unknown setting " + setting.name + " for storage " + storage_def.engine->name, + ErrorCodes::BAD_ARGUMENTS); +#undef SET + } + } + else + { + auto settings_ast = std::make_shared(); + settings_ast->is_standalone = false; + storage_def.set(storage_def.settings, settings_ast); + } +} + +} +#endif diff --git a/dbms/src/Storages/Kafka/KafkaSettings.h b/dbms/src/Storages/Kafka/KafkaSettings.h new file mode 100644 index 00000000000..bd7a5cc0bbb --- /dev/null +++ b/dbms/src/Storages/Kafka/KafkaSettings.h @@ -0,0 +1,43 @@ +#pragma once +#include +#if USE_RDKAFKA + +#include +#include +#include +#include + + +namespace DB +{ + +class ASTStorage; + +/** Settings for the Kafka engine. + * Could be loaded from a CREATE TABLE query (SETTINGS clause). 
+ */ +struct KafkaSettings +{ + +#define APPLY_FOR_KAFKA_SETTINGS(M) \ + M(SettingString, kafka_broker_list, "", "A comma-separated list of brokers for Kafka engine.") \ + M(SettingString, kafka_topic_list, "", "A list of Kafka topics.") \ + M(SettingString, kafka_group_name, "", "A group of Kafka consumers.") \ + M(SettingString, kafka_format, "", "Message format for Kafka engine.") \ + M(SettingChar, kafka_row_delimiter, '\0', "The character to be considered as a delimiter in Kafka message.") \ + M(SettingString, kafka_schema, "", "Schema identifier (used by schema-based formats) for Kafka engine") \ + M(SettingUInt64, kafka_num_consumers, 1, "The number of consumers per table for Kafka engine.") + +#define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) \ + TYPE NAME {DEFAULT}; + + APPLY_FOR_KAFKA_SETTINGS(DECLARE) + +#undef DECLARE + +public: + void loadFromQuery(ASTStorage & storage_def); +}; + +} +#endif diff --git a/dbms/src/Storages/StorageKafka.cpp b/dbms/src/Storages/Kafka/StorageKafka.cpp similarity index 70% rename from dbms/src/Storages/StorageKafka.cpp rename to dbms/src/Storages/Kafka/StorageKafka.cpp index a9666bab22c..d43996e65b6 100644 --- a/dbms/src/Storages/StorageKafka.cpp +++ b/dbms/src/Storages/Kafka/StorageKafka.cpp @@ -23,7 +23,9 @@ #include #include #include -#include // Y_IGNORE +#include +#include +#include // Y_IGNORE #include #include #include @@ -62,12 +64,19 @@ class ReadBufferFromKafkaConsumer : public ReadBuffer { rd_kafka_t * consumer; rd_kafka_message_t * current; + bool current_pending; Poco::Logger * log; size_t read_messages; + char row_delimiter; bool nextImpl() override { - reset(); + if (current_pending) + { + BufferBase::set(reinterpret_cast(current->payload), current->len, 0); + current_pending = false; + return true; + } // Process next buffered message rd_kafka_message_t * msg = rd_kafka_consumer_poll(consumer, READ_POLL_MS); @@ -88,13 +97,24 @@ class ReadBufferFromKafkaConsumer : public ReadBuffer rd_kafka_message_destroy(msg); return nextImpl(); } + ++read_messages; + + // Now we've received a new message. 
Check if we need to produce a delimiter + if (row_delimiter != '\0' && current != nullptr) + { + BufferBase::set(&row_delimiter, 1, 0); + reset(); + current = msg; + current_pending = true; + return true; + } // Consume message and mark the topic/partition offset - // The offsets will be committed in the insertSuffix() method after the block is completed - // If an exception is thrown before that would occur, the client will rejoin without comitting offsets - BufferBase::set(reinterpret_cast(msg->payload), msg->len, 0); + // The offsets will be committed in the readSuffix() method after the block is completed + // If an exception is thrown before that would occur, the client will rejoin without committing offsets + reset(); current = msg; - ++read_messages; + BufferBase::set(reinterpret_cast(current->payload), current->len, 0); return true; } @@ -108,8 +128,12 @@ class ReadBufferFromKafkaConsumer : public ReadBuffer } public: - ReadBufferFromKafkaConsumer(rd_kafka_t * consumer_, Poco::Logger * log_) - : ReadBuffer(nullptr, 0), consumer(consumer_), current(nullptr), log(log_), read_messages(0) {} + ReadBufferFromKafkaConsumer(rd_kafka_t * consumer_, Poco::Logger * log_, char row_delimiter_) + : ReadBuffer(nullptr, 0), consumer(consumer_), current(nullptr), + current_pending(false), log(log_), read_messages(0), row_delimiter(row_delimiter_) + { + LOG_TRACE(log, "Row delimiter is: " << row_delimiter); + } ~ReadBufferFromKafkaConsumer() { reset(); } @@ -143,7 +167,7 @@ public: // Create a formatted reader on Kafka messages LOG_TRACE(storage.log, "Creating formatted reader"); - read_buf = std::make_unique(consumer->stream, storage.log); + read_buf = std::make_unique(consumer->stream, storage.log, storage.row_delimiter); reader = FormatFactory::instance().getInput(storage.format_name, *read_buf, storage.getSampleBlock(), context, max_block_size); } @@ -226,13 +250,14 @@ StorageKafka::StorageKafka( Context & context_, const ColumnsDescription & columns_, const String & brokers_, const String & group_, const Names & topics_, - const String & format_name_, const String & schema_name_, size_t num_consumers_) + const String & format_name_, char row_delimiter_, const String & schema_name_, size_t num_consumers_) : IStorage{columns_}, table_name(table_name_), database_name(database_name_), context(context_), topics(context.getMacros()->expand(topics_)), brokers(context.getMacros()->expand(brokers_)), group(context.getMacros()->expand(group_)), format_name(context.getMacros()->expand(format_name_)), + row_delimiter(row_delimiter_), schema_name(context.getMacros()->expand(schema_name_)), num_consumers(num_consumers_), log(&Logger::get("StorageKafka (" + table_name_ + ")")), semaphore(0, num_consumers_), mutex(), consumers(), event_update() @@ -543,77 +568,204 @@ void registerStorageKafka(StorageFactory & factory) factory.registerStorage("Kafka", [](const StorageFactory::Arguments & args) { ASTs & engine_args = args.engine_args; + size_t args_count = engine_args.size(); + bool has_settings = args.storage_def->settings; + + KafkaSettings kafka_settings; + if (has_settings) + { + kafka_settings.loadFromQuery(*args.storage_def); + } /** Arguments of engine is following: * - Kafka broker list * - List of topics * - Group ID (may be a constaint expression with a string result) * - Message format (string) + * - Row delimiter * - Schema (optional, if the format supports it) + * - Number of consumers */ - if (engine_args.size() < 3 || engine_args.size() > 6) - throw Exception( - "Storage Kafka requires 3-6 
parameters" - " - Kafka broker list, list of topics to consume, consumer group ID, message format, schema, number of consumers", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + // Check arguments and settings + #define CHECK_KAFKA_STORAGE_ARGUMENT(ARG_NUM, PAR_NAME) \ + /* One of the four required arguments is not specified */ \ + if (args_count < ARG_NUM && ARG_NUM <= 4 && \ + !kafka_settings.PAR_NAME.changed) \ + { \ + throw Exception( \ + "Required parameter '" #PAR_NAME "' " \ + "for storage Kafka not specified", \ + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); \ + } \ + /* The same argument is given in two places */ \ + if (has_settings && \ + kafka_settings.PAR_NAME.changed && \ + args_count >= ARG_NUM) \ + { \ + throw Exception( \ + "The argument №" #ARG_NUM " of storage Kafka " \ + "and the parameter '" #PAR_NAME "' " \ + "in SETTINGS cannot be specified at the same time", \ + ErrorCodes::BAD_ARGUMENTS); \ + } + CHECK_KAFKA_STORAGE_ARGUMENT(1, kafka_broker_list) + CHECK_KAFKA_STORAGE_ARGUMENT(2, kafka_topic_list) + CHECK_KAFKA_STORAGE_ARGUMENT(3, kafka_group_name) + CHECK_KAFKA_STORAGE_ARGUMENT(4, kafka_format) + CHECK_KAFKA_STORAGE_ARGUMENT(5, kafka_row_delimiter) + CHECK_KAFKA_STORAGE_ARGUMENT(6, kafka_schema) + CHECK_KAFKA_STORAGE_ARGUMENT(7, kafka_num_consumers) + #undef CHECK_KAFKA_STORAGE_ARGUMENT + + // Get and check broker list String brokers; - auto ast = typeid_cast(engine_args[0].get()); - if (ast && ast->value.getType() == Field::Types::String) - brokers = safeGet(ast->value); - else - throw Exception(String("Kafka broker list must be a string"), ErrorCodes::BAD_ARGUMENTS); + if (args_count >= 1) + { + auto ast = typeid_cast(engine_args[0].get()); + if (ast && ast->value.getType() == Field::Types::String) + { + brokers = safeGet(ast->value); + } + else + { + throw Exception(String("Kafka broker list must be a string"), ErrorCodes::BAD_ARGUMENTS); + } + } + else if (kafka_settings.kafka_broker_list.changed) + { + brokers = kafka_settings.kafka_broker_list.value; + } - engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.local_context); - engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.local_context); - engine_args[3] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[3], args.local_context); + // Get and check topic list + String topic_list; + if (args_count >= 2) + { + engine_args[1] = evaluateConstantExpressionAsLiteral(engine_args[1], args.local_context); + topic_list = static_cast(*engine_args[1]).value.safeGet(); + } + else if (kafka_settings.kafka_topic_list.changed) + { + topic_list = kafka_settings.kafka_topic_list.value; + } + Names topics; + boost::split(topics, topic_list , [](char c){ return c == ','; }); + for (String & topic : topics) + { + boost::trim(topic); + } - // Parse format schema if supported (optional) - String schema; - if (engine_args.size() >= 5) + // Get and check group name + String group; + if (args_count >= 3) + { + engine_args[2] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[2], args.local_context); + group = static_cast(*engine_args[2]).value.safeGet(); + } + else if (kafka_settings.kafka_group_name.changed) + { + group = kafka_settings.kafka_group_name.value; + } + + // Get and check message format name + String format; + if (args_count >= 4) + { + engine_args[3] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[3], args.local_context); + + auto ast = typeid_cast(engine_args[3].get()); + if (ast && ast->value.getType() == 
Field::Types::String) + { + format = safeGet(ast->value); + } + else + { + throw Exception("Format must be a string", ErrorCodes::BAD_ARGUMENTS); + } + } + else if (kafka_settings.kafka_format.changed) + { + format = kafka_settings.kafka_format.value; + } + + // Parse row delimiter (optional) + char row_delimiter = '\0'; + if (args_count >= 5) { engine_args[4] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[4], args.local_context); auto ast = typeid_cast(engine_args[4].get()); + String arg; if (ast && ast->value.getType() == Field::Types::String) - schema = safeGet(ast->value); + { + arg = safeGet(ast->value); + } else + { + throw Exception("Row delimiter must be a char", ErrorCodes::BAD_ARGUMENTS); + } + if (arg.size() > 1) + { + throw Exception("Row delimiter must be a char", ErrorCodes::BAD_ARGUMENTS); + } + else if (arg.size() == 0) + { + row_delimiter = '\0'; + } + else + { + row_delimiter = arg[0]; + } + } + else if (kafka_settings.kafka_row_delimiter.changed) + { + row_delimiter = kafka_settings.kafka_row_delimiter.value; + } + + // Parse format schema if supported (optional) + String schema; + if (args_count >= 6) + { + engine_args[5] = evaluateConstantExpressionOrIdentifierAsLiteral(engine_args[4], args.local_context); + + auto ast = typeid_cast(engine_args[5].get()); + if (ast && ast->value.getType() == Field::Types::String) + { + schema = safeGet(ast->value); + } + else + { throw Exception("Format schema must be a string", ErrorCodes::BAD_ARGUMENTS); + } + } + else if (kafka_settings.kafka_schema.changed) + { + schema = kafka_settings.kafka_schema.value; } // Parse number of consumers (optional) UInt64 num_consumers = 1; - if (engine_args.size() >= 6) + if (args_count >= 7) { - auto ast = typeid_cast(engine_args[5].get()); + auto ast = typeid_cast(engine_args[6].get()); if (ast && ast->value.getType() == Field::Types::UInt64) + { num_consumers = safeGet(ast->value); + } else + { throw Exception("Number of consumers must be a positive integer", ErrorCodes::BAD_ARGUMENTS); + } + } + else if (kafka_settings.kafka_num_consumers.changed) + { + num_consumers = kafka_settings.kafka_num_consumers.value; } - - // Parse topic list - Names topics; - String topic_arg = static_cast(*engine_args[1]).value.safeGet(); - boost::split(topics, topic_arg , [](char c){ return c == ','; }); - for(String & topic : topics) - boost::trim(topic); - - // Parse consumer group - String group = static_cast(*engine_args[2]).value.safeGet(); - - // Parse format from string - String format; - ast = typeid_cast(engine_args[3].get()); - if (ast && ast->value.getType() == Field::Types::String) - format = safeGet(ast->value); - else - throw Exception("Format must be a string", ErrorCodes::BAD_ARGUMENTS); return StorageKafka::create( args.table_name, args.database_name, args.context, args.columns, - brokers, group, topics, format, schema, num_consumers); + brokers, group, topics, format, row_delimiter, schema, num_consumers); }); } diff --git a/dbms/src/Storages/StorageKafka.h b/dbms/src/Storages/Kafka/StorageKafka.h similarity index 92% rename from dbms/src/Storages/StorageKafka.h rename to dbms/src/Storages/Kafka/StorageKafka.h index 45530517e94..9652d1d6a46 100644 --- a/dbms/src/Storages/StorageKafka.h +++ b/dbms/src/Storages/Kafka/StorageKafka.h @@ -75,6 +75,9 @@ private: const String brokers; const String group; const String format_name; + // Optional row delimiter for generating char delimited stream + // in order to make various input stream parsers happy. 
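// A sketch of what the row_delimiter member declared just below achieves at
// the stream level (illustrative helper, not the actual buffer code): the
// consumer yields one delimiter character between consecutive Kafka message
// payloads, so row-oriented parsers see row breaks even when messages lack
// trailing newlines. '\0' means "no delimiter".
#include <string>
#include <vector>

std::string concatWithRowDelimiter(const std::vector<std::string> & messages,
                                   char row_delimiter)
{
    std::string stream;
    bool first = true;
    for (const auto & msg : messages)
    {
        if (!first && row_delimiter != '\0')
            stream += row_delimiter;  // emitted between messages, as nextImpl() does above
        stream += msg;
        first = false;
    }
    return stream;
}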
+ char row_delimiter; const String schema_name; /// Total number of consumers size_t num_consumers; @@ -109,7 +112,7 @@ protected: Context & context_, const ColumnsDescription & columns_, const String & brokers_, const String & group_, const Names & topics_, - const String & format_name_, const String & schema_name_, size_t num_consumers_); + const String & format_name_, char row_delimiter_, const String & schema_name_, size_t num_consumers_); }; } diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp b/dbms/src/Storages/MergeTree/DataPartsExchange.cpp index 15d1c56b051..bffbb8a11a6 100644 --- a/dbms/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/dbms/src/Storages/MergeTree/DataPartsExchange.cpp @@ -161,11 +161,14 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( const String & host, int port, const ConnectionTimeouts & timeouts, + const String & user, + const String & password, + const String & interserver_scheme, bool to_detached, const String & tmp_prefix_) { Poco::URI uri; - uri.setScheme("http"); + uri.setScheme(interserver_scheme); uri.setHost(host); uri.setPort(port); uri.setQueryParameters( @@ -175,7 +178,14 @@ MergeTreeData::MutableDataPartPtr Fetcher::fetchPart( {"compress", "false"} }); - ReadWriteBufferFromHTTP in{uri, Poco::Net::HTTPRequest::HTTP_POST, {}, timeouts}; + Poco::Net::HTTPBasicCredentials creds{}; + if (!user.empty()) + { + creds.setUsername(user); + creds.setPassword(password); + } + + ReadWriteBufferFromHTTP in{uri, Poco::Net::HTTPRequest::HTTP_POST, {}, timeouts, creds}; static const String TMP_PREFIX = "tmp_fetch_"; String tmp_prefix = tmp_prefix_.empty() ? TMP_PREFIX : tmp_prefix_; diff --git a/dbms/src/Storages/MergeTree/DataPartsExchange.h b/dbms/src/Storages/MergeTree/DataPartsExchange.h index 0ebc2ec358a..d97687da886 100644 --- a/dbms/src/Storages/MergeTree/DataPartsExchange.h +++ b/dbms/src/Storages/MergeTree/DataPartsExchange.h @@ -54,6 +54,9 @@ public: const String & host, int port, const ConnectionTimeouts & timeouts, + const String & user, + const String & password, + const String & interserver_scheme, bool to_detached = false, const String & tmp_prefix_ = ""); diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.cpp b/dbms/src/Storages/MergeTree/MergeTreeData.cpp index 35cd278c512..e7447f07b09 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeData.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeData.cpp @@ -1784,6 +1784,21 @@ size_t MergeTreeData::getMaxPartsCountForPartition() const } +std::optional MergeTreeData::getMinPartDataVersion() const +{ + std::lock_guard lock(data_parts_mutex); + + std::optional result; + for (const DataPartPtr & part : getDataPartsStateRange(DataPartState::Committed)) + { + if (!result || *result > part->info.getDataVersion()) + result = part->info.getDataVersion(); + } + + return result; +} + + void MergeTreeData::delayInsertOrThrowIfNeeded(Poco::Event *until) const { const size_t parts_count = getMaxPartsCountForPartition(); @@ -2354,12 +2369,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::cloneAndLoadDataPart(const Merg const String & tmp_part_prefix, const MergeTreePartInfo & dst_part_info) { - String dst_part_name; - if (format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) - dst_part_name = dst_part_info.getPartNameV0(src_part->getMinDate(), src_part->getMaxDate()); - else - dst_part_name = dst_part_info.getPartName(); - + String dst_part_name = src_part->getNewName(dst_part_info); String tmp_dst_part_name = tmp_part_prefix + dst_part_name; Poco::Path 
dst_part_absolute_path = Poco::Path(full_path + tmp_dst_part_name).absolute();
diff --git a/dbms/src/Storages/MergeTree/MergeTreeData.h b/dbms/src/Storages/MergeTree/MergeTreeData.h
index 2458a36dd3b..102378861fe 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeData.h
+++ b/dbms/src/Storages/MergeTree/MergeTreeData.h
@@ -384,9 +384,13 @@ public:
 
     size_t getMaxPartsCountForPartition() const;
 
+    /// Get min value of part->info.getDataVersion() for all active parts.
+    /// Makes sense only for ordinary MergeTree engines because for them block numbering doesn't depend on partition.
+    std::optional<Int64> getMinPartDataVersion() const;
+
     /// If the table contains too many active parts, sleep for a while to give them time to merge.
     /// If until is non-null, wake up from the sleep earlier if the event happened.
-    void delayInsertOrThrowIfNeeded(Poco::Event *until = nullptr) const;
+    void delayInsertOrThrowIfNeeded(Poco::Event * until = nullptr) const;
     void throwInsertIfNeeded() const;
 
     /// Renames temporary part to a permanent part and adds it to the parts set.
diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
index 3597482388e..b318ac92ce7 100644
--- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
+++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp
@@ -5,6 +5,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -18,14 +20,16 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
+#include
 #include
 
 
@@ -660,7 +664,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
 
         case MergeTreeData::MergingParams::VersionedCollapsing:
             merged_stream = std::make_unique<VersionedCollapsingSortedBlockInputStream>(
-                src_streams, sort_description, data.merging_params.sign_column, DEFAULT_MERGE_BLOCK_SIZE, false, rows_sources_write_buf.get());
+                src_streams, sort_description, data.merging_params.sign_column, DEFAULT_MERGE_BLOCK_SIZE, rows_sources_write_buf.get());
             break;
 
         default:
@@ -811,6 +815,124 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mergePartsToTempor
 }
 
+
+static bool isStorageTouchedByMutation(
+    const StoragePtr & storage, const std::vector<MutationCommand> & commands, const Context & context)
+{
+    if (commands.empty())
+        return false;
+
+    for (const MutationCommand & command : commands)
+    {
+        if (!command.predicate) /// The command touches all rows.
+            return true;
+    }
+
+    /// Execute `SELECT count() FROM storage WHERE predicate1 OR predicate2 OR ...` query.
+    /// The result is the number of affected rows.
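// The AST construction that follows, sketched on plain strings (hypothetical
// helper, for illustration only): fold all mutation predicates into one
// disjunction and count the rows it matches.
#include <string>
#include <vector>

std::string buildTouchedRowsQuery(const std::string & table,
                                  const std::vector<std::string> & predicates)
{
    std::string where;
    for (const auto & pred : predicates)
    {
        if (!where.empty())
            where += " OR ";
        where += "(" + pred + ")";
    }
    return "SELECT count() FROM " + table + " WHERE " + where;
}
// buildTouchedRowsQuery("t", {"x = 1", "y < 0"}) ->
//   "SELECT count() FROM t WHERE (x = 1) OR (y < 0)"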
+
+    auto select = std::make_shared<ASTSelectQuery>();
+
+    select->select_expression_list = std::make_shared<ASTExpressionList>();
+    select->children.push_back(select->select_expression_list);
+    auto count_func = std::make_shared<ASTFunction>();
+    count_func->name = "count";
+    count_func->arguments = std::make_shared<ASTExpressionList>();
+    select->select_expression_list->children.push_back(count_func);
+
+    if (commands.size() == 1)
+        select->where_expression = commands[0].predicate;
+    else
+    {
+        auto coalesced_predicates = std::make_shared<ASTFunction>();
+        coalesced_predicates->name = "or";
+        coalesced_predicates->arguments = std::make_shared<ASTExpressionList>();
+        coalesced_predicates->children.push_back(coalesced_predicates->arguments);
+
+        for (const MutationCommand & command : commands)
+            coalesced_predicates->arguments->children.push_back(command.predicate);
+
+        select->where_expression = std::move(coalesced_predicates);
+    }
+    select->children.push_back(select->where_expression);
+
+    auto context_copy = context;
+    context_copy.getSettingsRef().merge_tree_uniform_read_distribution = 0;
+    context_copy.getSettingsRef().max_threads = 1;
+
+    InterpreterSelectQuery interpreter_select(select, context_copy, storage, QueryProcessingStage::Complete);
+    BlockInputStreamPtr in = interpreter_select.execute().in;
+
+    Block block = in->read();
+    if (!block.rows())
+        return false;
+    else if (block.rows() != 1)
+        throw Exception("count() expression returned " + toString(block.rows()) + " rows, not 1",
+            ErrorCodes::LOGICAL_ERROR);
+
+    auto count = (*block.getByName("count()").column)[0].get<UInt64>();
+    return count != 0;
+}
+
+static BlockInputStreamPtr createInputStreamWithMutatedData(
+    const StoragePtr & storage, std::vector<MutationCommand> commands, const Context & context)
+{
+    auto select = std::make_shared<ASTSelectQuery>();
+
+    select->select_expression_list = std::make_shared<ASTExpressionList>();
+    select->children.push_back(select->select_expression_list);
+    select->select_expression_list->children.push_back(std::make_shared<ASTAsterisk>());
+
+    /// For all commands that are in front of the list and are DELETE commands, we can push them down
+    /// to the SELECT statement and remove them from commands.
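// The same push-down, sketched on strings (hypothetical helper): leading
// DELETE predicates become negated conjuncts of the SELECT's WHERE clause,
// so deleted rows are simply never read back.
#include <string>
#include <vector>

std::string buildWhereForDeletes(const std::vector<std::string> & delete_predicates)
{
    std::string where;
    for (const auto & pred : delete_predicates)
    {
        if (!where.empty())
            where += " AND ";
        where += "NOT (" + pred + ")";
    }
    return where;  // empty string means no WHERE clause is needed
}
// buildWhereForDeletes({"x = 1", "y < 0"}) -> "NOT (x = 1) AND NOT (y < 0)"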
+ + auto deletes_end = commands.begin(); + for (; deletes_end != commands.end(); ++deletes_end) + { + if (deletes_end->type != MutationCommand::DELETE) + break; + } + + std::vector predicates; + for (auto it = commands.begin(); it != deletes_end; ++it) + { + auto predicate = std::make_shared(); + predicate->name = "not"; + predicate->arguments = std::make_shared(); + predicate->arguments->children.push_back(it->predicate); + predicate->children.push_back(predicate->arguments); + predicates.push_back(predicate); + } + + commands.erase(commands.begin(), deletes_end); + + if (!predicates.empty()) + { + ASTPtr where_expression; + if (predicates.size() == 1) + where_expression = predicates[0]; + else + { + auto coalesced_predicates = std::make_shared(); + coalesced_predicates->name = "and"; + coalesced_predicates->arguments = std::make_shared(); + coalesced_predicates->children.push_back(coalesced_predicates->arguments); + coalesced_predicates->arguments->children = predicates; + + where_expression = std::move(coalesced_predicates); + } + select->where_expression = where_expression; + select->children.push_back(where_expression); + } + + InterpreterSelectQuery interpreter_select(select, context, storage); + BlockInputStreamPtr in = interpreter_select.execute().in; + + if (!commands.empty()) + in = std::make_shared(in, commands, context); + + return in; +} + MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTemporaryPart( const FuturePart & future_part, const std::vector & commands, @@ -826,7 +948,19 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor CurrentMetrics::Increment num_mutations{CurrentMetrics::PartMutation}; const auto & source_part = future_part.parts[0]; - LOG_TRACE(log, "Mutating part " << source_part->name << " to mutation version " << future_part.part_info.mutation); + auto storage_from_source_part = StorageFromMergeTreeDataPart::create(source_part); + + auto context_for_reading = context; + context_for_reading.getSettingsRef().merge_tree_uniform_read_distribution = 0; + context_for_reading.getSettingsRef().max_threads = 1; + + if (!isStorageTouchedByMutation(storage_from_source_part, commands, context_for_reading)) + { + LOG_TRACE(log, "Part " << source_part->name << " doesn't change up to mutation version " << future_part.part_info.mutation); + return data.cloneAndLoadDataPart(source_part, "tmp_clone_", future_part.part_info); + } + else + LOG_TRACE(log, "Mutating part " << source_part->name << " to mutation version " << future_part.part_info.mutation); MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared( data, future_part.name, future_part.part_info); @@ -835,21 +969,16 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor String new_part_tmp_path = new_data_part->getFullPath(); - Poco::File(new_part_tmp_path).createDirectories(); - - NamesAndTypesList all_columns = data.getColumns().getAllPhysical(); - - BlockInputStreamPtr in = std::make_shared( - data, source_part, DEFAULT_MERGE_BLOCK_SIZE, 0, 0, all_columns.getNames(), - MarkRanges(1, MarkRange(0, source_part->marks_count)), - false, nullptr, String(), true, 0, DBMS_DEFAULT_BUFFER_SIZE, false); - - in = std::make_shared(in, commands, context); + auto in = createInputStreamWithMutatedData(storage_from_source_part, commands, context_for_reading); if (data.hasPrimaryKey()) in = std::make_shared( std::make_shared(in, data.getPrimaryExpression())); + Poco::File(new_part_tmp_path).createDirectories(); + + NamesAndTypesList 
all_columns = data.getColumns().getAllPhysical(); + auto compression_settings = context.chooseCompressionSettings( source_part->bytes_on_disk, static_cast(source_part->bytes_on_disk) / data.getTotalActiveSizeInBytes()); diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 5e6c9b634be..31251680415 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/dbms/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -92,6 +92,7 @@ public: MergeListEntry & merge_entry, size_t aio_threshold, time_t time_of_merge, DiskSpaceMonitor::Reservation * disk_reservation, bool deduplication); + /// Mutate a single data part with the specified commands. Will create and return a temporary part. MergeTreeData::MutableDataPartPtr mutatePartToTemporaryPart( const FuturePart & future_part, const std::vector & commands, diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index ffec81639bd..af4ccf2e72a 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -140,9 +140,22 @@ BlockInputStreams MergeTreeDataSelectExecutor::read( const unsigned num_streams, Int64 max_block_number_to_read) const { - size_t part_index = 0; + return readFromParts( + data.getDataPartsVector(), column_names_to_return, query_info, context, processed_stage, + max_block_size, num_streams, max_block_number_to_read); +} - MergeTreeData::DataPartsVector parts = data.getDataPartsVector(); +BlockInputStreams MergeTreeDataSelectExecutor::readFromParts( + MergeTreeData::DataPartsVector parts, + const Names & column_names_to_return, + const SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum & processed_stage, + const size_t max_block_size, + const unsigned num_streams, + Int64 max_block_number_to_read) const +{ + size_t part_index = 0; /// If query contains restrictions on the virtual column `_part` or `_part_index`, select only parts suitable for it. /// The virtual column `_sample_factor` (which is equal to 1 / used sample rate) can be requested in the query. 
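// The refactoring pattern used in readFromParts() above, reduced to a sketch
// (illustrative types, not the real interfaces): the public entry point fixes
// the data set and delegates to a more general overload that takes an explicit
// list of parts, so other callers (such as a single-part storage wrapper used
// by mutations) can reuse the same selection logic.
#include <vector>

struct Part { /* ... */ };
using Parts = std::vector<Part>;

struct SelectExecutorSketch
{
    Parts all_parts;

    Parts read() const
    {
        return readFromParts(all_parts);  // default: read from all active parts
    }

    Parts readFromParts(Parts parts) const
    {
        // ... range selection, sampling, and stream creation would go here ...
        return parts;
    }
};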
@@ -820,7 +833,7 @@ BlockInputStreams MergeTreeDataSelectExecutor::spreadMarkRangesAmongStreamsFinal case MergeTreeData::MergingParams::VersionedCollapsing: /// TODO Make VersionedCollapsingFinalBlockInputStream merged = std::make_shared( - to_merge, sort_description, data.merging_params.sign_column, max_block_size, true); + to_merge, sort_description, data.merging_params.sign_column, max_block_size); break; case MergeTreeData::MergingParams::Graphite: diff --git a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index e40baa9c6da..96788cea015 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/dbms/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -31,6 +31,16 @@ public: unsigned num_streams, Int64 max_block_number_to_read) const; + BlockInputStreams readFromParts( + MergeTreeData::DataPartsVector parts, + const Names & column_names, + const SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum & processed_stage, + size_t max_block_size, + unsigned num_streams, + Int64 max_block_number_to_read) const; + private: MergeTreeData & data; diff --git a/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h b/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h index 68d00b03e1d..95a6e32d204 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h +++ b/dbms/src/Storages/MergeTree/MergeTreeMutationEntry.h @@ -21,6 +21,8 @@ struct MergeTreeMutationEntry /// Create a new entry and write it to a temporary file. MergeTreeMutationEntry(MutationCommands commands_, const String & path_prefix_, Int64 tmp_number); + MergeTreeMutationEntry(const MergeTreeMutationEntry &) = delete; + MergeTreeMutationEntry(MergeTreeMutationEntry &&) = default; /// Commit entry and rename it to a permanent file. void commit(Int64 block_number_); diff --git a/dbms/src/Storages/MergeTree/MergeTreeSettings.h b/dbms/src/Storages/MergeTree/MergeTreeSettings.h index aa29dccc195..fad54a9bb57 100644 --- a/dbms/src/Storages/MergeTree/MergeTreeSettings.h +++ b/dbms/src/Storages/MergeTree/MergeTreeSettings.h @@ -139,7 +139,11 @@ struct MergeTreeSettings * instead of ordinary ones (dozens KB). \ * Before enabling check that all replicas support new format. \ */ \ - M(SettingBool, use_minimalistic_checksums_in_zookeeper, false) + M(SettingBool, use_minimalistic_checksums_in_zookeeper, true) \ + \ + /** How many records about mutations that are done to keep. \ + * If zero, then keep all of them */ \ + M(SettingUInt64, finished_mutations_to_keep, 100) /// Settings that should not change after the creation of a table. #define APPLY_FOR_IMMUTABLE_MERGE_TREE_SETTINGS(M) \ diff --git a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp index b839490633f..470193bad68 100644 --- a/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -340,6 +340,9 @@ void MergedBlockOutputStream::writeSuffixAndFinalizePart( new_part->partition.store(storage, part_path, checksums); if (new_part->minmax_idx.initialized) new_part->minmax_idx.store(storage, part_path, checksums); + else if (rows_count) + throw Exception("MinMax index was not initialized for new non-empty part " + new_part->name + + ". 
It is a bug.", ErrorCodes::LOGICAL_ERROR); WriteBufferFromFile count_out(part_path + "count.txt", 4096); HashingWriteBuffer count_out_hashing(count_out); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp index 16a84b4b2f6..d0f4b50fa34 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.cpp @@ -14,7 +14,9 @@ void ReplicatedMergeTreeAddress::writeText(WriteBuffer & out) const << "port: " << replication_port << '\n' << "tcp_port: " << queries_port << '\n' << "database: " << escape << database << '\n' - << "table: " << escape << table << '\n'; + << "table: " << escape << table << '\n' + << "scheme: " << escape << scheme << '\n'; + } void ReplicatedMergeTreeAddress::readText(ReadBuffer & in) @@ -25,6 +27,11 @@ void ReplicatedMergeTreeAddress::readText(ReadBuffer & in) >> "tcp_port: " >> queries_port >> "\n" >> "database: " >> escape >> database >> "\n" >> "table: " >> escape >> table >> "\n"; + + if (!in.eof()) + in >> "scheme: " >> escape >> scheme >> "\n"; + else + scheme = "http"; } String ReplicatedMergeTreeAddress::toString() const diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h index b50ec72f3a5..2a620515278 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAddress.h @@ -16,6 +16,7 @@ struct ReplicatedMergeTreeAddress UInt16 queries_port; String database; String table; + String scheme; ReplicatedMergeTreeAddress() = default; explicit ReplicatedMergeTreeAddress(const String & str) diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp index 72ff7084bc6..2450be70a40 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.cpp @@ -20,7 +20,6 @@ ReplicatedMergeTreeAlterThread::ReplicatedMergeTreeAlterThread(StorageReplicated , log(&Logger::get(log_name)) { task = storage_.context.getSchedulePool().createTask(log_name, [this]{ run(); }); - task->schedule(); } void ReplicatedMergeTreeAlterThread::run() diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.h index 257ef0a7659..c713f42ae29 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeAlterThread.h @@ -23,6 +23,14 @@ class ReplicatedMergeTreeAlterThread public: ReplicatedMergeTreeAlterThread(StorageReplicatedMergeTree & storage_); + void start() + { + task->activate(); + task->schedule(); + } + + void stop() { task->deactivate(); } + private: void run(); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp index d47a06432ff..2d857be819f 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeBlockOutputStream.cpp @@ -108,7 +108,7 @@ void ReplicatedMergeTreeBlockOutputStream::write(const Block & block) last_block_is_duplicate = false; /// TODO Is it possible to not lock the table structure here? 
-    storage.data.delayInsertOrThrowIfNeeded(&storage.restarting_thread->getWakeupEvent());
+    storage.data.delayInsertOrThrowIfNeeded(&storage.partial_shutdown_event);
 
     auto zookeeper = storage.getZooKeeper();
     assertSessionIsNotExpired(zookeeper);
diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
index df8de692488..a4bc98df293 100644
--- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
+++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp
@@ -21,7 +21,6 @@ ReplicatedMergeTreeCleanupThread::ReplicatedMergeTreeCleanupThread(StorageReplic
     , log(&Logger::get(log_name))
 {
     task = storage.context.getSchedulePool().createTask(log_name, [this]{ run(); });
-    task->schedule();
 }
 
 void ReplicatedMergeTreeCleanupThread::run()
@@ -60,6 +59,7 @@ void ReplicatedMergeTreeCleanupThread::iterate()
     {
         clearOldLogs();
         clearOldBlocks();
+        clearOldMutations();
     }
 }
 
@@ -237,4 +237,63 @@ void ReplicatedMergeTreeCleanupThread::getBlocksSortedByTime(zkutil::ZooKeeper &
     std::sort(timed_blocks.begin(), timed_blocks.end(), NodeWithStat::greaterByTime);
 }
 
+
+void ReplicatedMergeTreeCleanupThread::clearOldMutations()
+{
+    if (!storage.data.settings.finished_mutations_to_keep)
+        return;
+
+    if (storage.queue.countFinishedMutations() <= storage.data.settings.finished_mutations_to_keep)
+    {
+        /// Not strictly necessary, but helps to avoid unnecessary ZooKeeper requests.
+        /// If even this replica hasn't finished enough mutations yet, then we don't need to clean anything.
+        return;
+    }
+
+    auto zookeeper = storage.getZooKeeper();
+
+    zkutil::Stat replicas_stat;
+    Strings replicas = zookeeper->getChildren(storage.zookeeper_path + "/replicas", &replicas_stat);
+
+    UInt64 min_pointer = std::numeric_limits<UInt64>::max();
+    for (const String & replica : replicas)
+    {
+        String pointer;
+        zookeeper->tryGet(storage.zookeeper_path + "/replicas/" + replica + "/mutation_pointer", pointer);
+        if (pointer.empty())
+            return; /// One replica hasn't done anything yet, so we can't delete any mutations.
+        min_pointer = std::min(parse<UInt64>(pointer), min_pointer);
+    }
+
+    Strings entries = zookeeper->getChildren(storage.zookeeper_path + "/mutations");
+    std::sort(entries.begin(), entries.end());
+
+    /// Do not remove entries that are greater than `min_pointer` (they are not done yet).
+    entries.erase(std::upper_bound(entries.begin(), entries.end(), padIndex(min_pointer)), entries.end());
+    /// Do not remove the last `storage.data.settings.finished_mutations_to_keep` entries.
+    if (entries.size() <= storage.data.settings.finished_mutations_to_keep)
+        return;
+    entries.erase(entries.end() - storage.data.settings.finished_mutations_to_keep, entries.end());
+
+    if (entries.empty())
+        return;
+
+    zkutil::Requests ops;
+    size_t batch_start_i = 0;
+    for (size_t i = 0; i < entries.size(); ++i)
+    {
+        ops.emplace_back(zkutil::makeRemoveRequest(storage.zookeeper_path + "/mutations/" + entries[i], -1));
+
+        if (ops.size() > 4 * zkutil::MULTI_BATCH_SIZE || i + 1 == entries.size())
+        {
+            /// Simultaneously with clearing the log, check that no replica was added since we received the replicas list.
+ ops.emplace_back(zkutil::makeCheckRequest(storage.zookeeper_path + "/replicas", replicas_stat.version)); + zookeeper->multi(ops); + LOG_DEBUG(log, "Removed " << (i + 1 - batch_start_i) << " old mutation entries: " << entries[batch_start_i] << " - " << entries[i]); + batch_start_i = i + 1; + ops.clear(); + } + } +} + } diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h index 7d45a158c4c..2223a42717d 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.h @@ -24,7 +24,15 @@ class ReplicatedMergeTreeCleanupThread public: ReplicatedMergeTreeCleanupThread(StorageReplicatedMergeTree & storage_); - void schedule() { task->schedule(); } + void start() + { + task->activate(); + task->schedule(); + } + + void wakeup() { task->schedule(); } + + void stop() { task->deactivate(); } private: StorageReplicatedMergeTree & storage; @@ -42,6 +50,9 @@ private: /// Remove old block hashes from ZooKeeper. This is done by the leader replica. void clearOldBlocks(); + /// Remove old mutations that are done from ZooKeeper. This is done by the leader replica. + void clearOldMutations(); + using NodeCTimeCache = std::map; NodeCTimeCache cached_block_stats; diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index f5c91b9c94d..ab1a11ba12c 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -666,8 +666,25 @@ ReplicatedMergeTreeQueue::StringSet ReplicatedMergeTreeQueue::moveSiblingPartsFo return parts_for_merge; } +bool ReplicatedMergeTreeQueue::checkReplaceRangeCanBeRemoved(const MergeTreePartInfo & part_info, const LogEntryPtr entry_ptr, const ReplicatedMergeTreeLogEntryData & current) const +{ + if (entry_ptr->type != LogEntry::REPLACE_RANGE) + return false; -void ReplicatedMergeTreeQueue::removePartProducingOpsInRange(zkutil::ZooKeeperPtr zookeeper, const MergeTreePartInfo & part_info) + if (current.type != LogEntry::REPLACE_RANGE && current.type != LogEntry::DROP_RANGE) + return false; + + if (entry_ptr->replace_range_entry != nullptr && entry_ptr->replace_range_entry == current.replace_range_entry) /// same partition, don't want to drop ourselves + return false; + + for (const String & new_part_name : entry_ptr->replace_range_entry->new_part_names) + if (!part_info.contains(MergeTreePartInfo::fromPartName(new_part_name, format_version))) + return false; + + return true; +} + +void ReplicatedMergeTreeQueue::removePartProducingOpsInRange(zkutil::ZooKeeperPtr zookeeper, const MergeTreePartInfo & part_info, const ReplicatedMergeTreeLogEntryData & current) { Queue to_wait; size_t removed_entries = 0; @@ -680,8 +697,9 @@ void ReplicatedMergeTreeQueue::removePartProducingOpsInRange(zkutil::ZooKeeperPt { auto type = (*it)->type; - if ((type == LogEntry::GET_PART || type == LogEntry::MERGE_PARTS || type == LogEntry::MUTATE_PART) - && part_info.contains(MergeTreePartInfo::fromPartName((*it)->new_part_name, format_version))) + if (((type == LogEntry::GET_PART || type == LogEntry::MERGE_PARTS || type == LogEntry::MUTATE_PART) + && part_info.contains(MergeTreePartInfo::fromPartName((*it)->new_part_name, format_version))) + || checkReplaceRangeCanBeRemoved(part_info, *it, current)) { if ((*it)->currently_executing) to_wait.push_back(*it); @@ -1031,6 +1049,45 @@ bool 
ReplicatedMergeTreeQueue::processEntry( } +size_t ReplicatedMergeTreeQueue::countMergesAndPartMutations() const +{ + std::lock_guard lock(state_mutex); + + size_t count = 0; + for (const auto & entry : queue) + if (entry->type == ReplicatedMergeTreeLogEntry::MERGE_PARTS + || entry->type == ReplicatedMergeTreeLogEntry::MUTATE_PART) + ++count; + + return count; +} + + +size_t ReplicatedMergeTreeQueue::countMutations() const +{ + std::lock_guard lock(state_mutex); + return mutations_by_znode.size(); +} + + +size_t ReplicatedMergeTreeQueue::countFinishedMutations() const +{ + std::lock_guard lock(state_mutex); + + size_t count = 0; + for (const auto & pair : mutations_by_znode) + { + const auto & mutation = pair.second; + if (!mutation.is_done) + break; + + ++count; + } + + return count; +} + + ReplicatedMergeTreeMergePredicate ReplicatedMergeTreeQueue::getMergePredicate(zkutil::ZooKeeperPtr & zookeeper) { return ReplicatedMergeTreeMergePredicate(*this, zookeeper); @@ -1124,6 +1181,8 @@ bool ReplicatedMergeTreeQueue::tryFinalizeMutations(zkutil::ZooKeeperPtr zookeep { std::lock_guard lock(state_mutex); + mutation_pointer = finished.back()->znode_name; + for (const ReplicatedMergeTreeMutationEntry * entry : finished) { auto it = mutations_by_znode.find(entry->znode_name); @@ -1476,27 +1535,6 @@ bool ReplicatedMergeTreeMergePredicate::operator()( } -size_t ReplicatedMergeTreeMergePredicate::countMergesAndPartMutations() const -{ - std::lock_guard lock(queue.state_mutex); - - size_t count = 0; - for (const auto & entry : queue.queue) - if (entry->type == ReplicatedMergeTreeLogEntry::MERGE_PARTS - || entry->type == ReplicatedMergeTreeLogEntry::MUTATE_PART) - ++count; - - return count; -} - - -size_t ReplicatedMergeTreeMergePredicate::countMutations() const -{ - std::lock_guard lock(queue.state_mutex); - return queue.mutations_by_znode.size(); -} - - std::optional ReplicatedMergeTreeMergePredicate::getDesiredMutationVersion(const MergeTreeData::DataPartPtr & part) const { /// Assigning mutations is easier than assigning merges because mutations appear in the same order as diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 2e642ad148c..dab8dbca7a2 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -140,6 +140,8 @@ private: /// Notify subscribers about queue change void notifySubscribers(size_t new_queue_size); + /// Check that entry_ptr is REPLACE_RANGE entry and can be removed from queue because current entry covers it + bool checkReplaceRangeCanBeRemoved(const MergeTreePartInfo & part_info, const LogEntryPtr entry_ptr, const ReplicatedMergeTreeLogEntryData & current) const; /// Ensures that only one thread is simultaneously updating mutations. std::mutex update_mutations_mutex; @@ -249,7 +251,7 @@ public: /** Remove the action from the queue with the parts covered by part_name (from ZK and from the RAM). * And also wait for the completion of their execution, if they are now being executed. */ - void removePartProducingOpsInRange(zkutil::ZooKeeperPtr zookeeper, const MergeTreePartInfo & part_info); + void removePartProducingOpsInRange(zkutil::ZooKeeperPtr zookeeper, const MergeTreePartInfo & part_info, const ReplicatedMergeTreeLogEntryData & current); /** Throws and exception if there are currently executing entries in the range . 
*/ @@ -275,6 +277,15 @@ public: */ bool processEntry(std::function get_zookeeper, LogEntryPtr & entry, const std::function func); + /// Count the number of merges and mutations of single parts in the queue. + size_t countMergesAndPartMutations() const; + + /// Count the total number of active mutations. + size_t countMutations() const; + + /// Count the total number of active mutations that are finished (is_done = true). + size_t countFinishedMutations() const; + ReplicatedMergeTreeMergePredicate getMergePredicate(zkutil::ZooKeeperPtr & zookeeper); /// Return the version (block number) of the last mutation that we don't need to apply to the part @@ -345,12 +356,6 @@ public: const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right, String * out_reason = nullptr) const; - /// Count the number of merges and mutations of single parts in the queue. - size_t countMergesAndPartMutations() const; - - /// Count the total number of active mutations. - size_t countMutations() const; - /// Return nonempty optional if the part can and should be mutated. /// Returned mutation version number is always the biggest possible. std::optional getDesiredMutationVersion(const MergeTreeData::DataPartPtr & part) const; diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 88895a473ae..38d6668475c 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -57,7 +57,29 @@ ReplicatedMergeTreeRestartingThread::ReplicatedMergeTreeRestartingThread(Storage ReplicatedMergeTreeRestartingThread::~ReplicatedMergeTreeRestartingThread() { - completeShutdown(); + try + { + /// Stop restarting_thread before stopping other tasks - so that it won't restart them again. + need_stop = true; + task->deactivate(); + LOG_TRACE(log, "Restarting thread finished"); + + /// Cancel fetches, merges and mutations to force the queue_task to finish ASAP. + storage.fetcher.blocker.cancelForever(); + storage.merger_mutator.actions_blocker.cancelForever(); + + /// Stop other tasks. + + partialShutdown(); + + if (storage.queue_task_handle) + storage.context.getBackgroundPool().removeTask(storage.queue_task_handle); + storage.queue_task_handle.reset(); + } + catch (...) + { + tryLogCurrentException(log, __PRETTY_FUNCTION__); + } } void ReplicatedMergeTreeRestartingThread::run() @@ -167,29 +189,6 @@ void ReplicatedMergeTreeRestartingThread::run() task->scheduleAfter(check_period_ms); } -void ReplicatedMergeTreeRestartingThread::completeShutdown() -{ - try - { - storage.data_parts_exchange_endpoint_holder->getBlocker().cancelForever(); - storage.data_parts_exchange_endpoint_holder = nullptr; - - /// Cancel fetches, merges and mutations to force the queue_task to finish ASAP. - storage.fetcher.blocker.cancelForever(); - storage.merger_mutator.actions_blocker.cancelForever(); - - partialShutdown(); - - if (storage.queue_task_handle) - storage.context.getBackgroundPool().removeTask(storage.queue_task_handle); - storage.queue_task_handle.reset(); - } - catch (...) - { - tryLogCurrentException(log, __PRETTY_FUNCTION__); - } -} - bool ReplicatedMergeTreeRestartingThread::tryStartup() { @@ -205,16 +204,16 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup() /// Anything above can throw a KeeperException if something is wrong with ZK. /// Anything below should not throw exceptions. 
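The restarting-thread change above folds completeShutdown() into the destructor: the thread stops itself first, so it cannot resurrect the tasks it is about to stop, and then tears the rest down inside try/catch, since a destructor must not throw. A reduced sketch of that RAII shape, assuming a plain std::thread in place of the real BackgroundSchedulePool task:

#include <atomic>
#include <iostream>
#include <thread>

// Shutdown lives in the destructor: set the stop flag first (so the worker
// won't restart anything), then join, and never let an exception escape.
class RestartingThread
{
public:
    RestartingThread() : worker([this] { while (!need_stop) std::this_thread::yield(); }) {}

    ~RestartingThread()
    {
        try
        {
            need_stop = true; // stop ourselves before stopping dependent tasks
            worker.join();    // then tear down the rest
        }
        catch (...)
        {
            std::cerr << "error during shutdown\n"; // log, don't throw
        }
    }

private:
    std::atomic<bool> need_stop{false};
    std::thread worker;
};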
- storage.shutdown_called = false; - storage.shutdown_event.reset(); + storage.partial_shutdown_called = false; + storage.partial_shutdown_event.reset(); storage.queue_updating_task->activate(); storage.queue_updating_task->schedule(); storage.mutations_updating_task->activate(); storage.mutations_updating_task->schedule(); + storage.cleanup_thread.start(); + storage.alter_thread.start(); storage.part_check_thread.start(); - storage.alter_thread = std::make_unique(storage); - storage.cleanup_thread = std::make_unique(storage); if (!storage.queue_task_handle) storage.queue_task_handle = storage.context.getBackgroundPool().addTask( @@ -350,8 +349,8 @@ void ReplicatedMergeTreeRestartingThread::partialShutdown() { ProfileEvents::increment(ProfileEvents::ReplicaPartialShutdown); - storage.shutdown_called = true; - storage.shutdown_event.set(); + storage.partial_shutdown_called = true; + storage.partial_shutdown_event.set(); storage.alter_query_event->set(); storage.replica_is_active_node = nullptr; @@ -362,8 +361,8 @@ void ReplicatedMergeTreeRestartingThread::partialShutdown() storage.queue_updating_task->deactivate(); storage.mutations_updating_task->deactivate(); - storage.cleanup_thread.reset(); - storage.alter_thread.reset(); + storage.cleanup_thread.stop(); + storage.alter_thread.stop(); storage.part_check_thread.stop(); LOG_TRACE(log, "Threads finished"); diff --git a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h index 7747f008ddf..b9eca428cde 100644 --- a/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h +++ b/dbms/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.h @@ -25,28 +25,12 @@ public: ReplicatedMergeTreeRestartingThread(StorageReplicatedMergeTree & storage_); ~ReplicatedMergeTreeRestartingThread(); - void wakeup() - { - wakeup_event.set(); - task->schedule(); - } - - Poco::Event & getWakeupEvent() - { - return wakeup_event; - } - - void stop() - { - need_stop = true; - wakeup_event.set(); - } + void wakeup() { task->schedule(); } private: StorageReplicatedMergeTree & storage; String log_name; Logger * log; - Poco::Event wakeup_event; std::atomic need_stop {false}; /// The random data we wrote into `/replicas/me/is_active`. @@ -59,7 +43,6 @@ private: bool startup_completed = false; void run(); - void completeShutdown(); /// Start or stop background threads. Used for partial reinitialization when re-creating a session in ZooKeeper. bool tryStartup(); /// Returns false if ZooKeeper is not available. diff --git a/dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h new file mode 100644 index 00000000000..d7fffa2ac90 --- /dev/null +++ b/dbms/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include +#include + +#include + + +namespace DB +{ + +/// A Storage that allows reading from a single MergeTree data part. 
+class StorageFromMergeTreeDataPart : public ext::shared_ptr_helper<StorageFromMergeTreeDataPart>, public IStorage
+{
+public:
+    String getName() const override { return "FromMergeTreeDataPart"; }
+    String getTableName() const override { return part->storage.getTableName() + " (part " + part->name + ")"; }
+
+    BlockInputStreams read(
+        const Names & column_names,
+        const SelectQueryInfo & query_info,
+        const Context & context,
+        QueryProcessingStage::Enum & processed_stage,
+        size_t max_block_size,
+        unsigned num_streams) override
+    {
+        return MergeTreeDataSelectExecutor(part->storage).readFromParts(
+            {part}, column_names, query_info, context, processed_stage, max_block_size, num_streams, 0);
+    }
+
+protected:
+    StorageFromMergeTreeDataPart(const MergeTreeData::DataPartPtr & part_)
+        : IStorage(part_->storage.getColumns()), part(part_)
+    {}
+
+private:
+    MergeTreeData::DataPartPtr part;
+};
+
+}
diff --git a/dbms/src/Storages/StorageBuffer.cpp b/dbms/src/Storages/StorageBuffer.cpp
index 3d6bc462e20..c1e783c4c7a 100644
--- a/dbms/src/Storages/StorageBuffer.cpp
+++ b/dbms/src/Storages/StorageBuffer.cpp
@@ -135,7 +135,7 @@ BlockInputStreams StorageBuffer::read(
      */
     if (processed_stage > QueryProcessingStage::FetchColumns)
         for (auto & stream : streams_from_buffers)
-            stream = InterpreterSelectQuery(query_info.query, context, {}, processed_stage, 0, stream).execute().in;
+            stream = InterpreterSelectQuery(query_info.query, context, stream, processed_stage).execute().in;
 
     streams_from_dst.insert(streams_from_dst.end(), streams_from_buffers.begin(), streams_from_buffers.end());
     return streams_from_dst;
diff --git a/dbms/src/Storages/StorageDistributed.cpp b/dbms/src/Storages/StorageDistributed.cpp
index 2225e18bdf8..8d1887bb1bc 100644
--- a/dbms/src/Storages/StorageDistributed.cpp
+++ b/dbms/src/Storages/StorageDistributed.cpp
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -65,12 +64,15 @@ namespace ErrorCodes
 
 namespace
 {
 
-/// select query has database and table names as AST pointers
-/// Creates a copy of query, changes database and table names.
-ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, const std::string & table)
+/// select query has database, table and table function names as AST pointers
+/// Creates a copy of query, changes database, table and table function names.
+ASTPtr rewriteSelectQuery(const ASTPtr & query, const std::string & database, const std::string & table, ASTPtr table_function_ptr = nullptr) { auto modified_query_ast = query->clone(); - typeid_cast(*modified_query_ast).replaceDatabaseAndTable(database, table); + if (table_function_ptr) + typeid_cast(*modified_query_ast).addTableFunction(table_function_ptr); + else + typeid_cast(*modified_query_ast).replaceDatabaseAndTable(database, table); return modified_query_ast; } @@ -170,16 +172,48 @@ StorageDistributed::StorageDistributed( } -StoragePtr StorageDistributed::createWithOwnCluster( - const std::string & name_, +StorageDistributed::StorageDistributed( + const String & database_name, + const String & table_name_, const ColumnsDescription & columns_, - const String & remote_database_, - const String & remote_table_, + ASTPtr remote_table_function_ptr_, + const String & cluster_name_, + const Context & context_, + const ASTPtr & sharding_key_, + const String & data_path_, + bool attach) + : StorageDistributed(database_name, table_name_, columns_, String{}, String{}, cluster_name_, context_, sharding_key_, data_path_, attach) +{ + remote_table_function_ptr = remote_table_function_ptr_; +} + + +StoragePtr StorageDistributed::createWithOwnCluster( + const std::string & table_name_, + const ColumnsDescription & columns_, + const String & remote_database_, /// database on remote servers. + const String & remote_table_, /// The name of the table on the remote servers. + ClusterPtr owned_cluster_, + const Context & context_) +{ + auto res = ext::shared_ptr_helper::create( + String{}, table_name_, columns_, remote_database_, remote_table_, String{}, context_, ASTPtr(), String(), false); + + res->owned_cluster = owned_cluster_; + + return res; +} + + +StoragePtr StorageDistributed::createWithOwnCluster( + const std::string & table_name_, + const ColumnsDescription & columns_, + ASTPtr & remote_table_function_ptr_, ClusterPtr & owned_cluster_, const Context & context_) { auto res = ext::shared_ptr_helper::create( - String{}, name_, columns_, remote_database_, remote_table_, String{}, context_, ASTPtr(), String(), false); + String{}, table_name_, columns_, remote_table_function_ptr_, String{}, context_, ASTPtr(), String(), false); res->owned_cluster = owned_cluster_; @@ -211,12 +245,15 @@ BlockInputStreams StorageDistributed::read( : QueryProcessingStage::WithMergeableState; const auto & modified_query_ast = rewriteSelectQuery( - query_info.query, remote_database, remote_table); + query_info.query, remote_database, remote_table, remote_table_function_ptr); - Block header = materializeBlock(InterpreterSelectQuery(query_info.query, context, {}, processed_stage).getSampleBlock()); + Block header = materializeBlock(InterpreterSelectQuery(query_info.query, context, Names{}, processed_stage).getSampleBlock()); - ClusterProxy::SelectStreamFactory select_stream_factory( - header, processed_stage, QualifiedTableName{remote_database, remote_table}, context.getExternalTables()); + ClusterProxy::SelectStreamFactory select_stream_factory = remote_table_function_ptr ? 
+ ClusterProxy::SelectStreamFactory( + header, processed_stage, remote_table_function_ptr, context.getExternalTables()) + : ClusterProxy::SelectStreamFactory( + header, processed_stage, QualifiedTableName{remote_database, remote_table}, context.getExternalTables()); return ClusterProxy::executeQuery( select_stream_factory, cluster, modified_query_ast, context, settings); @@ -279,34 +316,6 @@ void StorageDistributed::shutdown() } -BlockInputStreams StorageDistributed::describe(const Context & context, const Settings & settings) -{ - /// Create DESCRIBE TABLE query. - auto cluster = getCluster(); - - auto describe_query = std::make_shared(); - - std::string name = remote_database + '.' + remote_table; - - auto id = std::make_shared(name); - - auto desc_database = std::make_shared(remote_database); - auto desc_table = std::make_shared(remote_table); - - id->children.push_back(desc_database); - id->children.push_back(desc_table); - - auto table_expression = std::make_shared(); - table_expression->database_and_table_name = id; - - describe_query->table_expression = table_expression; - - ClusterProxy::DescribeStreamFactory describe_stream_factory; - - return ClusterProxy::executeQuery( - describe_stream_factory, cluster, describe_query, context, settings); -} - void StorageDistributed::truncate(const ASTPtr &) { std::lock_guard lock(cluster_nodes_mutex); diff --git a/dbms/src/Storages/StorageDistributed.h b/dbms/src/Storages/StorageDistributed.h index bdfd654ea6e..fdb08c31c00 100644 --- a/dbms/src/Storages/StorageDistributed.h +++ b/dbms/src/Storages/StorageDistributed.h @@ -9,6 +9,7 @@ #include #include #include +#include #include @@ -36,8 +37,15 @@ public: static StoragePtr createWithOwnCluster( const std::string & table_name_, const ColumnsDescription & columns_, - const String & remote_database_, /// database on remote servers. - const String & remote_table_, /// The name of the table on the remote servers. + const String & remote_database_, /// database on remote servers. + const String & remote_table_, /// The name of the table on the remote servers. + ClusterPtr owned_cluster_, + const Context & context_); + + static StoragePtr createWithOwnCluster( + const std::string & table_name_, + const ColumnsDescription & columns_, + ASTPtr & remote_table_function_ptr_, /// Table function ptr. ClusterPtr & owned_cluster_, const Context & context_); @@ -77,9 +85,6 @@ public: String getDataPath() const override { return path; } - /// From each replica, get a description of the corresponding local table. 
- BlockInputStreams describe(const Context & context, const Settings & settings); - const ExpressionActionsPtr & getShardingKeyExpr() const { return sharding_key_expr; } const String & getShardingKeyColumnName() const { return sharding_key_column_name; } size_t getShardCount() const; @@ -101,6 +106,7 @@ public: String table_name; String remote_database; String remote_table; + ASTPtr remote_table_function_ptr; const Context & context; Logger * log = &Logger::get("StorageDistributed"); @@ -146,6 +152,17 @@ protected: const ASTPtr & sharding_key_, const String & data_path_, bool attach); + + StorageDistributed( + const String & database_name, + const String & table_name_, + const ColumnsDescription & columns_, + ASTPtr remote_table_function_ptr_, + const String & cluster_name_, + const Context & context_, + const ASTPtr & sharding_key_, + const String & data_path_, + bool attach); }; } diff --git a/dbms/src/Storages/StorageFactory.cpp b/dbms/src/Storages/StorageFactory.cpp index d56e7ee4d80..9ceb59abbcb 100644 --- a/dbms/src/Storages/StorageFactory.cpp +++ b/dbms/src/Storages/StorageFactory.cpp @@ -87,11 +87,19 @@ StoragePtr StorageFactory::get( name = engine_def.name; - if ((storage_def->partition_by || storage_def->order_by || storage_def->sample_by || storage_def->settings) + if (storage_def->settings && !endsWith(name, "MergeTree") && name != "Kafka") + { + throw Exception( + "Engine " + name + " doesn't support SETTINGS clause. " + "Currently only the MergeTree family of engines and Kafka engine supports it", + ErrorCodes::BAD_ARGUMENTS); + } + + if ((storage_def->partition_by || storage_def->order_by || storage_def->sample_by) && !endsWith(name, "MergeTree")) { throw Exception( - "Engine " + name + " doesn't support PARTITION BY, ORDER BY, SAMPLE BY or SETTINGS clauses. " + "Engine " + name + " doesn't support PARTITION BY, ORDER BY or SAMPLE BY clauses. " "Currently only the MergeTree family of engines supports them", ErrorCodes::BAD_ARGUMENTS); } diff --git a/dbms/src/Storages/StorageFactory.h b/dbms/src/Storages/StorageFactory.h index 2acb9fb7c00..4addfcd9794 100644 --- a/dbms/src/Storages/StorageFactory.h +++ b/dbms/src/Storages/StorageFactory.h @@ -53,6 +53,11 @@ public: /// No locking, you must register all engines before usage of get. void registerStorage(const std::string & name, Creator creator); + const auto & getAllStorages() const + { + return storages; + } + private: using Storages = std::unordered_map; Storages storages; diff --git a/dbms/src/Storages/StorageJoin.cpp b/dbms/src/Storages/StorageJoin.cpp index 49b07db2772..35d39c5b29c 100644 --- a/dbms/src/Storages/StorageJoin.cpp +++ b/dbms/src/Storages/StorageJoin.cpp @@ -35,7 +35,7 @@ StorageJoin::StorageJoin( /// NOTE StorageJoin doesn't use join_use_nulls setting. 
- join = std::make_shared(key_names, key_names, false /* use_nulls */, SizeLimits(), kind, strictness); + join = std::make_shared(key_names, key_names, NameSet(), false /* use_nulls */, SizeLimits(), kind, strictness); join->setSampleBlock(getSampleBlock().sortColumns()); restore(); } @@ -48,7 +48,7 @@ void StorageJoin::truncate(const ASTPtr &) Poco::File(path + "tmp/").createDirectories(); increment = 0; - join = std::make_shared(key_names, key_names, false /* use_nulls */, SizeLimits(), kind, strictness); + join = std::make_shared(key_names, key_names, NameSet(), false /* use_nulls */, SizeLimits(), kind, strictness); join->setSampleBlock(getSampleBlock().sortColumns()); }; diff --git a/dbms/src/Storages/StorageMaterializedView.cpp b/dbms/src/Storages/StorageMaterializedView.cpp index c4776925140..4e9fb726324 100644 --- a/dbms/src/Storages/StorageMaterializedView.cpp +++ b/dbms/src/Storages/StorageMaterializedView.cpp @@ -280,19 +280,31 @@ String StorageMaterializedView::getDataPath() const return {}; } -bool StorageMaterializedView::checkTableCanBeDropped() const +void StorageMaterializedView::checkTableCanBeDropped() const { /// Don't drop the target table if it was created manually via 'TO inner_table' statement if (!has_inner_table) - return true; + return; auto target_table = tryGetTargetTable(); if (!target_table) - return true; + return; - return target_table->checkTableCanBeDropped(); + target_table->checkTableCanBeDropped(); } +void StorageMaterializedView::checkPartitionCanBeDropped(const ASTPtr & partition) +{ + /// Don't drop the partition in target table if it was created manually via 'TO inner_table' statement + if (!has_inner_table) + return; + + auto target_table = tryGetTargetTable(); + if (!target_table) + return; + + target_table->checkPartitionCanBeDropped(partition); +} void registerStorageMaterializedView(StorageFactory & factory) { diff --git a/dbms/src/Storages/StorageMaterializedView.h b/dbms/src/Storages/StorageMaterializedView.h index c3c96be50d1..1f0c4c7af47 100644 --- a/dbms/src/Storages/StorageMaterializedView.h +++ b/dbms/src/Storages/StorageMaterializedView.h @@ -41,7 +41,10 @@ public: void freezePartition(const ASTPtr & partition, const String & with_name, const Context & context) override; void shutdown() override; - bool checkTableCanBeDropped() const override; + + void checkTableCanBeDropped() const override; + + void checkPartitionCanBeDropped(const ASTPtr & partition) override; BlockInputStreams read( const Names & column_names, diff --git a/dbms/src/Storages/StorageMerge.cpp b/dbms/src/Storages/StorageMerge.cpp index 1702d628d6c..0a001fd9c2f 100644 --- a/dbms/src/Storages/StorageMerge.cpp +++ b/dbms/src/Storages/StorageMerge.cpp @@ -241,12 +241,10 @@ BlockInputStreams StorageMerge::read( header = getSampleBlockForColumns(column_names); break; case QueryProcessingStage::WithMergeableState: - header = materializeBlock(InterpreterSelectQuery(query_info.query, context, {}, QueryProcessingStage::WithMergeableState, 0, - std::make_shared(getSampleBlockForColumns(column_names)), true).getSampleBlock()); - break; case QueryProcessingStage::Complete: - header = materializeBlock(InterpreterSelectQuery(query_info.query, context, {}, QueryProcessingStage::Complete, 0, - std::make_shared(getSampleBlockForColumns(column_names)), true).getSampleBlock()); + header = materializeBlock(InterpreterSelectQuery( + query_info.query, context, std::make_shared(getSampleBlockForColumns(column_names)), + processed_stage_in_source_table, true).getSampleBlock()); break; } } 
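The StorageMergeTree changes below introduce checkPartitionCanBeDropped(): sum bytes_on_disk over the committed parts of the partition and let a context-level limit veto the DROP PARTITION. A simplified standalone sketch of that size check (PartInfo and max_size_to_drop are invented stand-ins for the real part objects and the limit held by Context):

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Sum the on-disk size of all parts belonging to the partition and refuse
// the drop when it exceeds the configured limit.
struct PartInfo { std::string partition_id; uint64_t bytes_on_disk; };

void checkPartitionCanBeDropped(
    const std::vector<PartInfo> & parts, const std::string & partition_id, uint64_t max_size_to_drop)
{
    uint64_t partition_size = 0;
    for (const auto & part : parts)
        if (part.partition_id == partition_id)
            partition_size += part.bytes_on_disk;

    if (partition_size > max_size_to_drop)
        throw std::runtime_error(
            "Partition " + partition_id + " is too big to drop: "
            + std::to_string(partition_size) + " bytes");
}

Note also that both check methods switched from returning bool to returning void and throwing, which is why StorageMaterializedView above now simply forwards the call to its target table.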
diff --git a/dbms/src/Storages/StorageMergeTree.cpp b/dbms/src/Storages/StorageMergeTree.cpp index e3f78e746b5..c4949345470 100644 --- a/dbms/src/Storages/StorageMergeTree.cpp +++ b/dbms/src/Storages/StorageMergeTree.cpp @@ -122,11 +122,26 @@ BlockOutputStreamPtr StorageMergeTree::write(const ASTPtr & /*query*/, const Set return std::make_shared(*this); } -bool StorageMergeTree::checkTableCanBeDropped() const +void StorageMergeTree::checkTableCanBeDropped() const { const_cast(getData()).recalculateColumnSizes(); context.checkTableCanBeDropped(database_name, table_name, getData().getTotalActiveSizeInBytes()); - return true; +} + +void StorageMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition) +{ + const_cast(getData()).recalculateColumnSizes(); + + const String partition_id = data.getPartitionIDFromQuery(partition, context); + auto parts_to_remove = data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id); + + UInt64 partition_size = 0; + + for (const auto & part : parts_to_remove) + { + partition_size += part->bytes_on_disk; + } + context.checkPartitionCanBeDropped(database_name, table_name, partition_size); } void StorageMergeTree::drop() @@ -311,6 +326,7 @@ std::vector StorageMergeTree::getMutationsStatus() cons part_data_versions.reserve(data_parts.size()); for (const auto & part : data_parts) part_data_versions.push_back(part->info.getDataVersion()); + std::sort(part_data_versions.begin(), part_data_versions.end()); std::vector result; for (const auto & kv : current_mutations_by_version) @@ -598,6 +614,7 @@ bool StorageMergeTree::backgroundTask() { data.clearOldPartsFromFilesystem(); data.clearOldTemporaryDirectories(); + clearOldMutations(); } size_t aio_threshold = context.getSettings().min_bytes_to_use_direct_io; @@ -630,6 +647,46 @@ Int64 StorageMergeTree::getCurrentMutationVersion( return it->first; }; +void StorageMergeTree::clearOldMutations() +{ + if (!data.settings.finished_mutations_to_keep) + return; + + std::vector mutations_to_delete; + { + std::lock_guard lock(currently_merging_mutex); + + if (current_mutations_by_version.size() <= data.settings.finished_mutations_to_keep) + return; + + auto begin_it = current_mutations_by_version.begin(); + + std::optional min_version = data.getMinPartDataVersion(); + auto end_it = current_mutations_by_version.end(); + if (min_version) + end_it = current_mutations_by_version.upper_bound(*min_version); + + size_t done_count = std::distance(begin_it, end_it); + if (done_count <= data.settings.finished_mutations_to_keep) + return; + + size_t to_delete_count = done_count - data.settings.finished_mutations_to_keep; + + auto it = begin_it; + for (size_t i = 0; i < to_delete_count; ++i) + { + mutations_to_delete.push_back(std::move(it->second)); + it = current_mutations_by_version.erase(it); + } + } + + for (auto & mutation : mutations_to_delete) + { + LOG_TRACE(log, "Removing mutation: " << mutation.file_name); + mutation.removeFile(); + } +} + void StorageMergeTree::clearColumnInPartition(const ASTPtr & partition, const Field & column_name, const Context & context) { diff --git a/dbms/src/Storages/StorageMergeTree.h b/dbms/src/Storages/StorageMergeTree.h index 80bc7b421ac..762640a605b 100644 --- a/dbms/src/Storages/StorageMergeTree.h +++ b/dbms/src/Storages/StorageMergeTree.h @@ -83,7 +83,9 @@ public: void alter(const AlterCommands & params, const String & database_name, const String & table_name, const Context & context) override; - bool checkTableCanBeDropped() const override; + void 
checkTableCanBeDropped() const override; + + void checkPartitionCanBeDropped(const ASTPtr & partition) override; ActionLock getActionLock(StorageActionBlockType action_type) override; @@ -140,6 +142,8 @@ private: const MergeTreeData::DataPartPtr & part, std::lock_guard & /* currently_merging_mutex_lock */) const; + void clearOldMutations(); + friend class MergeTreeBlockOutputStream; friend class MergeTreeData; friend struct CurrentlyMergingPartsTagger; diff --git a/dbms/src/Storages/StorageODBC.cpp b/dbms/src/Storages/StorageODBC.cpp index cbda47a9244..2361597e04b 100644 --- a/dbms/src/Storages/StorageODBC.cpp +++ b/dbms/src/Storages/StorageODBC.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -29,7 +30,7 @@ StorageODBC::StorageODBC( { pool = createAndCheckResizePocoSessionPool([&] { - return std::make_shared("ODBC", connection_string); + return std::make_shared("ODBC", validateODBCConnectionString(connection_string)); }); } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.cpp b/dbms/src/Storages/StorageReplicatedMergeTree.cpp index c8b8b6d9706..75f6b045d73 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.cpp +++ b/dbms/src/Storages/StorageReplicatedMergeTree.cpp @@ -215,7 +215,7 @@ StorageReplicatedMergeTree::StorageReplicatedMergeTree( [this] (const std::string & name) { enqueuePartForCheck(name); }), reader(data), writer(data), merger_mutator(data, context.getBackgroundPool()), queue(*this), fetcher(data), - shutdown_event(false), part_check_thread(*this), + cleanup_thread(*this), alter_thread(*this), part_check_thread(*this), log(&Logger::get(database_name + "." + table_name + " (StorageReplicatedMergeTree)")) { if (path_.empty()) @@ -1619,7 +1619,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry) void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) { auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, data.format_version); - queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info); + queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry); LOG_DEBUG(log, (entry.detach ? 
"Detaching" : "Removing") << " parts."); @@ -1653,7 +1653,7 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) /// We want to remove dropped parts from disk as soon as possible /// To be removed a partition should have zero refcount, therefore call the cleanup thread at exit parts_to_remove.clear(); - cleanup_thread->schedule(); + cleanup_thread.wakeup(); } @@ -1728,7 +1728,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) /// Range with only one block has special meaning ATTACH PARTITION bool replace = drop_range.getBlocksCount() > 1; - queue.removePartProducingOpsInRange(getZooKeeper(), drop_range); + queue.removePartProducingOpsInRange(getZooKeeper(), drop_range, entry); struct PartDescription { @@ -1971,9 +1971,14 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) String replica_path = zookeeper_path + "/replicas/" + part_desc->replica; ReplicatedMergeTreeAddress address(getZooKeeper()->get(replica_path + "/host")); auto timeouts = ConnectionTimeouts::getHTTPTimeouts(context.getSettingsRef()); + auto [user, password] = context.getInterserverCredentials(); + String interserver_scheme = context.getInterserverScheme(); + + if (interserver_scheme != address.scheme) + throw Exception("Interserver schemes are different '" + interserver_scheme + "' != '" + address.scheme + "', can't fetch part from " + address.host, ErrorCodes::LOGICAL_ERROR); part_desc->res_part = fetcher.fetchPart(part_desc->found_new_part_name, replica_path, - address.host, address.replication_port, timeouts, false, TMP_PREFIX + "fetch_"); + address.host, address.replication_port, timeouts, user, password, interserver_scheme, false, TMP_PREFIX + "fetch_"); /// TODO: check columns_version of fetched part @@ -2033,7 +2038,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) tryRemovePartsFromZooKeeperWithRetries(parts_to_remove); res_parts.clear(); parts_to_remove.clear(); - cleanup_thread->schedule(); + cleanup_thread.wakeup(); return true; } @@ -2041,10 +2046,6 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) void StorageReplicatedMergeTree::queueUpdatingTask() { - //most probably this check is not relevant - if (shutdown_called) - return; - if (!queue_update_in_progress) { last_queue_update_start_time.store(time(nullptr)); @@ -2196,7 +2197,7 @@ void StorageReplicatedMergeTree::mergeSelectingTask() /// If many merges is already queued, then will queue only small enough merges. /// Otherwise merge queue could be filled with only large merges, /// and in the same time, many small parts could be created and won't be merged. - size_t merges_and_mutations_queued = merge_pred.countMergesAndPartMutations(); + size_t merges_and_mutations_queued = queue.countMergesAndPartMutations(); if (merges_and_mutations_queued >= data.settings.max_replicated_merges_in_queue) { LOG_TRACE(log, "Number of queued merges and part mutations (" << merges_and_mutations_queued @@ -2215,7 +2216,7 @@ void StorageReplicatedMergeTree::mergeSelectingTask() { success = createLogEntryToMergeParts(zookeeper, future_merged_part.parts, future_merged_part.name, deduplicate); } - else if (merge_pred.countMutations() > 0) + else if (queue.countMutations() > 0) { /// Choose a part to mutate. 
@@ -2667,7 +2668,7 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin { LOG_DEBUG(log, "Part " << part->getNameWithState() << " should be deleted after previous attempt before fetch"); /// Force immediate parts cleanup to delete the part that was left from the previous fetch attempt. - cleanup_thread->schedule(); + cleanup_thread.wakeup(); return false; } @@ -2706,10 +2707,15 @@ bool StorageReplicatedMergeTree::fetchPart(const String & part_name, const Strin ReplicatedMergeTreeAddress address(getZooKeeper()->get(replica_path + "/host")); auto timeouts = ConnectionTimeouts::getHTTPTimeouts(context.getSettingsRef()); + auto [user, password] = context.getInterserverCredentials(); + String interserver_scheme = context.getInterserverScheme(); try { - part = fetcher.fetchPart(part_name, replica_path, address.host, address.replication_port, timeouts, to_detached); + if (interserver_scheme != address.scheme) + throw Exception("Interserver schemes are different '" + interserver_scheme + "' != '" + address.scheme + "', can't fetch part from " + address.host, ErrorCodes::LOGICAL_ERROR); + + part = fetcher.fetchPart(part_name, replica_path, address.host, address.replication_port, timeouts, user, password, interserver_scheme, to_detached); if (!to_detached) { @@ -2789,11 +2795,7 @@ void StorageReplicatedMergeTree::startup() void StorageReplicatedMergeTree::shutdown() { - if (restarting_thread) - { - restarting_thread->stop(); - restarting_thread.reset(); - } + restarting_thread.reset(); if (data_parts_exchange_endpoint_holder) { @@ -3028,7 +3030,7 @@ void StorageReplicatedMergeTree::alter(const AlterCommands & params, { LOG_DEBUG(log, "Waiting for " << replica << " to apply changes"); - while (!shutdown_called) + while (!partial_shutdown_called) { /// Replica could be inactive. if (!getZooKeeper()->exists(zookeeper_path + "/replicas/" + replica + "/is_active")) @@ -3093,7 +3095,7 @@ void StorageReplicatedMergeTree::alter(const AlterCommands & params, } } - if (shutdown_called) + if (partial_shutdown_called) throw Exception("Alter is not finished because table shutdown was called. 
Alter will be done after table restart.", ErrorCodes::UNFINISHED); @@ -3343,12 +3345,28 @@ void StorageReplicatedMergeTree::attachPartition(const ASTPtr & partition, bool } -bool StorageReplicatedMergeTree::checkTableCanBeDropped() const +void StorageReplicatedMergeTree::checkTableCanBeDropped() const { /// Consider only synchronized data const_cast(getData()).recalculateColumnSizes(); context.checkTableCanBeDropped(database_name, table_name, getData().getTotalActiveSizeInBytes()); - return true; +} + + +void StorageReplicatedMergeTree::checkPartitionCanBeDropped(const ASTPtr & partition) +{ + const_cast(getData()).recalculateColumnSizes(); + + const String partition_id = data.getPartitionIDFromQuery(partition, context); + auto parts_to_remove = data.getDataPartsVectorInPartition(MergeTreeDataPartState::Committed, partition_id); + + UInt64 partition_size = 0; + + for (const auto & part : parts_to_remove) + { + partition_size += part->bytes_on_disk; + } + context.checkPartitionCanBeDropped(database_name, table_name, partition_size); } @@ -4475,7 +4493,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ } /// We are almost ready to commit changes, remove fetches and merges from drop range - queue.removePartProducingOpsInRange(zookeeper, drop_range); + queue.removePartProducingOpsInRange(zookeeper, drop_range, entry); /// Remove deduplication block_ids of replacing parts if (replace) @@ -4539,7 +4557,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(const StoragePtr & source_ /// Speedup removing of replaced parts from filesystem parts_to_remove.clear(); - cleanup_thread->schedule(); + cleanup_thread.wakeup(); /// If necessary, wait until the operation is performed on all replicas. if (context.getSettingsRef().replication_alter_partitions_sync > 1) @@ -4592,6 +4610,7 @@ ReplicatedMergeTreeAddress StorageReplicatedMergeTree::getReplicatedMergeTreeAdd res.queries_port = context.getTCPPort(); res.database = database_name; res.table = table_name; + res.scheme = context.getInterserverScheme(); return res; } @@ -4642,7 +4661,7 @@ bool StorageReplicatedMergeTree::waitForShrinkingQueueSize(size_t queue_size, UI if (cond_reached) break; - if (shutdown_called) + if (partial_shutdown_called) throw Exception("Shutdown is called for table", ErrorCodes::ABORTED); } diff --git a/dbms/src/Storages/StorageReplicatedMergeTree.h b/dbms/src/Storages/StorageReplicatedMergeTree.h index e512977d4b0..61dceb0e408 100644 --- a/dbms/src/Storages/StorageReplicatedMergeTree.h +++ b/dbms/src/Storages/StorageReplicatedMergeTree.h @@ -138,7 +138,9 @@ public: bool supportsIndexForIn() const override { return true; } bool mayBenefitFromIndexForIn(const ASTPtr & left_in_operand) const override { return data.mayBenefitFromIndexForIn(left_in_operand); } - bool checkTableCanBeDropped() const override; + void checkTableCanBeDropped() const override; + + void checkPartitionCanBeDropped(const ASTPtr & partition) override; ActionLock getActionLock(StorageActionBlockType action_type) override; @@ -264,8 +266,10 @@ private: Poco::Event startup_event; /// Do I need to complete background threads (except restarting_thread)? - std::atomic shutdown_called {false}; - Poco::Event shutdown_event; + std::atomic partial_shutdown_called {false}; + + /// Event that is signalled (and is reset) by the restarting_thread when the ZooKeeper session expires. 
+ Poco::Event partial_shutdown_event {false}; /// Poco::Event::EVENT_MANUALRESET /// Limiting parallel fetches per one table std::atomic_uint current_table_fetches {0}; @@ -283,25 +287,24 @@ private: /// A task that selects parts to merge. BackgroundSchedulePool::TaskHolder merge_selecting_task; + /// It is acquired for each iteration of the selection of parts to merge or each OPTIMIZE query. + std::mutex merge_selecting_mutex; /// A task that marks finished mutations as done. BackgroundSchedulePool::TaskHolder mutations_finalizing_task; - /// It is acquired for each iteration of the selection of parts to merge or each OPTIMIZE query. - std::mutex merge_selecting_mutex; - /// A thread that removes old parts, log entries, and blocks. - std::unique_ptr cleanup_thread; - - /// A thread that processes reconnection to ZooKeeper when the session expires. - std::unique_ptr restarting_thread; + ReplicatedMergeTreeCleanupThread cleanup_thread; /// A thread monitoring changes to the column list in ZooKeeper and updating the parts in accordance with these changes. - std::unique_ptr alter_thread; + ReplicatedMergeTreeAlterThread alter_thread; /// A thread that checks the data of the parts, as well as the queue of the parts to be checked. ReplicatedMergeTreePartCheckThread part_check_thread; + /// A thread that processes reconnection to ZooKeeper when the session expires. + std::unique_ptr restarting_thread; + /// An event that awakens `alter` method from waiting for the completion of the ALTER query. zkutil::EventPtr alter_query_event = std::make_shared(); diff --git a/dbms/src/Storages/StorageSet.cpp b/dbms/src/Storages/StorageSet.cpp index 87f73141bc9..194b9ec4e34 100644 --- a/dbms/src/Storages/StorageSet.cpp +++ b/dbms/src/Storages/StorageSet.cpp @@ -81,8 +81,8 @@ void SetOrJoinBlockOutputStream::writeSuffix() BlockOutputStreamPtr StorageSetOrJoinBase::write(const ASTPtr & /*query*/, const Settings & /*settings*/) { - ++increment; - return std::make_shared(*this, path, path + "tmp/", toString(increment) + ".bin"); + UInt64 id = ++increment; + return std::make_shared(*this, path, path + "tmp/", toString(id) + ".bin"); } diff --git a/dbms/src/Storages/StorageSet.h b/dbms/src/Storages/StorageSet.h index eca716eadb9..400e9670349 100644 --- a/dbms/src/Storages/StorageSet.h +++ b/dbms/src/Storages/StorageSet.h @@ -36,7 +36,7 @@ protected: String path; String table_name; - UInt64 increment = 0; /// For the backup file names. + std::atomic increment = 0; /// For the backup file names. /// Restore from backup. void restore(); diff --git a/dbms/src/Storages/System/IStorageSystemOneBlock.h b/dbms/src/Storages/System/IStorageSystemOneBlock.h new file mode 100644 index 00000000000..96286f56eee --- /dev/null +++ b/dbms/src/Storages/System/IStorageSystemOneBlock.h @@ -0,0 +1,54 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace DB +{ + +class Context; + + +/** Base class for system tables whose all columns have String type. 
+ */ +template +class IStorageSystemOneBlock : public IStorage +{ +protected: + virtual void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const = 0; + +public: + IStorageSystemOneBlock(const String & name_) : name(name_) + { + setColumns(ColumnsDescription(Self::getNamesAndTypes())); + } + + std::string getTableName() const override + { + return name; + } + + BlockInputStreams read(const Names & column_names, + const SelectQueryInfo & query_info, + const Context & context, + QueryProcessingStage::Enum & processed_stage, + size_t /*max_block_size*/, + unsigned /*num_streams*/) override + { + check(column_names); + processed_stage = QueryProcessingStage::FetchColumns; + + Block sample_block = getSampleBlock(); + MutableColumns res_columns = sample_block.cloneEmptyColumns(); + fillData(res_columns, context, query_info); + + return BlockInputStreams(1, std::make_shared(sample_block.cloneWithColumns(std::move(res_columns)))); + } + +private: + const String name; +}; + +} diff --git a/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp b/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp new file mode 100644 index 00000000000..8fa335faceb --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.cpp @@ -0,0 +1,25 @@ +#include +#include + +namespace DB +{ + +NamesAndTypesList StorageSystemAggregateFunctionCombinators::getNamesAndTypes() +{ + return { + {"name", std::make_shared()}, + {"is_internal", std::make_shared()}, + }; +} + +void StorageSystemAggregateFunctionCombinators::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ + const auto & combinators = AggregateFunctionCombinatorFactory::instance().getAllAggregateFunctionCombinators(); + for (const auto & pair : combinators) + { + res_columns[0]->insert(pair.first); + res_columns[1]->insert(UInt64(pair.second->isForInternalUsageOnly())); + } +} + +} diff --git a/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h b/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h new file mode 100644 index 00000000000..1d7226eda8b --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemAggregateFunctionCombinators.h @@ -0,0 +1,25 @@ +#pragma once + +#include +#include +#include +#include +namespace DB +{ +class StorageSystemAggregateFunctionCombinators : public ext::shared_ptr_helper, + public IStorageSystemOneBlock +{ +protected: + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + + using IStorageSystemOneBlock::IStorageSystemOneBlock; +public: + + std::string getName() const override + { + return "SystemAggregateFunctionCombinators"; + } + + static NamesAndTypesList getNamesAndTypes(); +}; +} diff --git a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp b/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp index bc2f76379e9..059ef708a81 100644 --- a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp +++ b/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.cpp @@ -1,51 +1,34 @@ -#include - -#include -#include -#include #include #include -#include +#include +#include namespace DB { - -StorageSystemAsynchronousMetrics::StorageSystemAsynchronousMetrics(const std::string & name_, const AsynchronousMetrics & async_metrics_) - : name(name_), - async_metrics(async_metrics_) +NamesAndTypesList StorageSystemAsynchronousMetrics::getNamesAndTypes() { - 
setColumns(ColumnsDescription({ + return { {"metric", std::make_shared()}, {"value", std::make_shared()}, - })); + }; } -BlockInputStreams StorageSystemAsynchronousMetrics::read( - const Names & column_names, - const SelectQueryInfo &, - const Context &, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +StorageSystemAsynchronousMetrics::StorageSystemAsynchronousMetrics(const std::string & name_, const AsynchronousMetrics & async_metrics_) + : IStorageSystemOneBlock(name_), async_metrics(async_metrics_) { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); +} +void StorageSystemAsynchronousMetrics::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ auto async_metrics_values = async_metrics.getValues(); - for (const auto & name_value : async_metrics_values) { res_columns[0]->insert(name_value.first); res_columns[1]->insert(name_value.second); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h b/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h index 60e50096143..853cb97c974 100644 --- a/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h +++ b/dbms/src/Storages/System/StorageSystemAsynchronousMetrics.h @@ -1,8 +1,7 @@ #pragma once #include -#include - +#include namespace DB { @@ -13,26 +12,20 @@ class Context; /** Implements system table asynchronous_metrics, which allows to get values of periodically (asynchronously) updated metrics. */ -class StorageSystemAsynchronousMetrics : public ext::shared_ptr_helper, public IStorage +class StorageSystemAsynchronousMetrics : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemAsynchronousMetrics"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; + static NamesAndTypesList getNamesAndTypes(); private: - const std::string name; const AsynchronousMetrics & async_metrics; protected: StorageSystemAsynchronousMetrics(const std::string & name_, const AsynchronousMetrics & async_metrics_); + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.cpp b/dbms/src/Storages/System/StorageSystemBuildOptions.cpp index e62e6e9bbfd..2a8ffc947be 100644 --- a/dbms/src/Storages/System/StorageSystemBuildOptions.cpp +++ b/dbms/src/Storages/System/StorageSystemBuildOptions.cpp @@ -1,46 +1,26 @@ -#include +#include #include -#include -#include #include #include -#include namespace DB { - -StorageSystemBuildOptions::StorageSystemBuildOptions(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemBuildOptions::getNamesAndTypes() { - setColumns(ColumnsDescription({ - { "name", std::make_shared() }, - { "value", std::make_shared() }, - })); + return { + {"name", std::make_shared()}, + {"value", std::make_shared()}, + }; } - -BlockInputStreams StorageSystemBuildOptions::read( - const Names & column_names, - const SelectQueryInfo &, - const Context &, - 
QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemBuildOptions::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (auto it = auto_config_build; *it; it += 2) { res_columns[0]->insert(String(it[0])); res_columns[1]->insert(String(it[1])); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemBuildOptions.h b/dbms/src/Storages/System/StorageSystemBuildOptions.h index d772b255383..749ffbddbaf 100644 --- a/dbms/src/Storages/System/StorageSystemBuildOptions.h +++ b/dbms/src/Storages/System/StorageSystemBuildOptions.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,18 @@ class Context; /** System table "build_options" with many params used for clickhouse building */ -class StorageSystemBuildOptions : public ext::shared_ptr_helper, public IStorage +class StorageSystemBuildOptions : public ext::shared_ptr_helper, public IStorageSystemOneBlock { -public: - std::string getName() const override { return "SystemBuildOptions"; } - std::string getTableName() const override { return name; } - - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; - protected: - StorageSystemBuildOptions(const std::string & name_); + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + + using IStorageSystemOneBlock::IStorageSystemOneBlock; + +public: + + std::string getName() const override { return "SystemBuildOptions"; } + + static NamesAndTypesList getNamesAndTypes(); }; } diff --git a/dbms/src/Storages/System/StorageSystemClusters.cpp b/dbms/src/Storages/System/StorageSystemClusters.cpp index fb5c4e41b82..3527de302a1 100644 --- a/dbms/src/Storages/System/StorageSystemClusters.cpp +++ b/dbms/src/Storages/System/StorageSystemClusters.cpp @@ -1,50 +1,32 @@ -#include -#include -#include -#include +#include #include #include -#include -#include +#include #include +#include namespace DB { - -StorageSystemClusters::StorageSystemClusters(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemClusters::getNamesAndTypes() { - setColumns(ColumnsDescription({ - { "cluster", std::make_shared() }, - { "shard_num", std::make_shared() }, - { "shard_weight", std::make_shared() }, - { "replica_num", std::make_shared() }, - { "host_name", std::make_shared() }, - { "host_address", std::make_shared() }, - { "port", std::make_shared() }, - { "is_local", std::make_shared() }, - { "user", std::make_shared() }, - { "default_database", std::make_shared() }, - })); + return { + {"cluster", std::make_shared()}, + {"shard_num", std::make_shared()}, + {"shard_weight", std::make_shared()}, + {"replica_num", std::make_shared()}, + {"host_name", std::make_shared()}, + {"host_address", std::make_shared()}, + {"port", std::make_shared()}, + {"is_local", std::make_shared()}, + {"user", std::make_shared()}, + {"default_database", std::make_shared()}, + }; } - -BlockInputStreams StorageSystemClusters::read( - const Names & column_names, 
- const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemClusters::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - - auto updateColumns = [&](const std::string & cluster_name, const Cluster::ShardInfo & shard_info, - const Cluster::Address & address) + auto updateColumns = [&](const std::string & cluster_name, const Cluster::ShardInfo & shard_info, const Cluster::Address & address) { size_t i = 0; res_columns[i++]->insert(cluster_name); @@ -85,8 +67,5 @@ BlockInputStreams StorageSystemClusters::read( } } } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemClusters.h b/dbms/src/Storages/System/StorageSystemClusters.h index 1e36269ded2..dde9e53b626 100644 --- a/dbms/src/Storages/System/StorageSystemClusters.h +++ b/dbms/src/Storages/System/StorageSystemClusters.h @@ -1,7 +1,9 @@ #pragma once +#include +#include #include -#include +#include namespace DB @@ -13,25 +15,17 @@ class Context; * that allows to obtain information about available clusters * (which may be specified in Distributed tables). */ -class StorageSystemClusters : public ext::shared_ptr_helper, public IStorage +class StorageSystemClusters : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemClusters"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemClusters(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemCollations.cpp b/dbms/src/Storages/System/StorageSystemCollations.cpp new file mode 100644 index 00000000000..f2a7f5e8184 --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemCollations.cpp @@ -0,0 +1,20 @@ +#include +#include + +namespace DB +{ + +NamesAndTypesList StorageSystemCollations::getNamesAndTypes() +{ + return { + {"name", std::make_shared()}, + }; +} + +void StorageSystemCollations::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ + for (const auto & collation_name : Collator::getAvailableCollations()) + res_columns[0]->insert(collation_name); +} + +} diff --git a/dbms/src/Storages/System/StorageSystemCollations.h b/dbms/src/Storages/System/StorageSystemCollations.h new file mode 100644 index 00000000000..f8b7b6ee3af --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemCollations.h @@ -0,0 +1,22 @@ +#pragma once +#include +#include + +namespace DB +{ + +class StorageSystemCollations : public ext::shared_ptr_helper, + public IStorageSystemOneBlock +{ +protected: + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + + using 
IStorageSystemOneBlock::IStorageSystemOneBlock; +public: + + std::string getName() const override { return "SystemTableCollations"; } + + static NamesAndTypesList getNamesAndTypes(); +}; + +} diff --git a/dbms/src/Storages/System/StorageSystemColumns.cpp b/dbms/src/Storages/System/StorageSystemColumns.cpp index d42d8a80394..1a5fc74d324 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.cpp +++ b/dbms/src/Storages/System/StorageSystemColumns.cpp @@ -15,10 +15,9 @@ namespace DB { -StorageSystemColumns::StorageSystemColumns(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemColumns::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { { "database", std::make_shared() }, { "table", std::make_shared() }, { "name", std::make_shared() }, @@ -28,21 +27,11 @@ StorageSystemColumns::StorageSystemColumns(const std::string & name_) { "data_compressed_bytes", std::make_shared() }, { "data_uncompressed_bytes", std::make_shared() }, { "marks_bytes", std::make_shared() }, - })); + }; } - -BlockInputStreams StorageSystemColumns::read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemColumns::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - Block block_to_filter; std::map, StoragePtr> storages; @@ -60,7 +49,7 @@ BlockInputStreams StorageSystemColumns::read( VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context); if (!block_to_filter.rows()) - return BlockInputStreams(); + return; ColumnPtr database_column = block_to_filter.getByName("database").column; size_t rows = database_column->size(); @@ -98,14 +87,12 @@ BlockInputStreams StorageSystemColumns::read( VirtualColumnUtils::filterBlockWithQuery(query_info.query, block_to_filter, context); if (!block_to_filter.rows()) - return BlockInputStreams(); + return; ColumnPtr filtered_database_column = block_to_filter.getByName("database").column; ColumnPtr filtered_table_column = block_to_filter.getByName("table").column; /// We compose the result. - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - size_t rows = filtered_database_column->size(); for (size_t i = 0; i < rows; ++i) { @@ -193,8 +180,6 @@ BlockInputStreams StorageSystemColumns::read( } } } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } } diff --git a/dbms/src/Storages/System/StorageSystemColumns.h b/dbms/src/Storages/System/StorageSystemColumns.h index ba187f7306f..dc1afc3f71a 100644 --- a/dbms/src/Storages/System/StorageSystemColumns.h +++ b/dbms/src/Storages/System/StorageSystemColumns.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -11,25 +11,17 @@ class Context; /** Implements system table 'columns', that allows to get information about columns for every table. 
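 *
 * Note on the bare `return;` statements introduced in fillData() above: the
 * base class always wraps whatever was filled into a single block, so an early
 * return now yields an empty result with the correct header, replacing the old
 * `return BlockInputStreams();`. The calling sequence in
 * IStorageSystemOneBlock<Self>::read() is, in sketch form:
 *
 *     Block sample_block = getSampleBlock();
 *     MutableColumns res_columns = sample_block.cloneEmptyColumns();
 *     fillData(res_columns, context, query_info);    /// may legitimately fill nothing
 *     return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(
 *         sample_block.cloneWithColumns(std::move(res_columns))));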
*/ -class StorageSystemColumns : public ext::shared_ptr_helper, public IStorage +class StorageSystemColumns : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemColumns"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemColumns(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; -private: - const std::string name; + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp new file mode 100644 index 00000000000..c8d692fddd8 --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.cpp @@ -0,0 +1,35 @@ +#include +#include +#include +#include +#include + +namespace DB +{ + +NamesAndTypesList StorageSystemDataTypeFamilies::getNamesAndTypes() +{ + return { + {"name", std::make_shared()}, + {"case_insensitive", std::make_shared()}, + {"alias_to", std::make_shared()}, + }; +} + +void StorageSystemDataTypeFamilies::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ + const auto & factory = DataTypeFactory::instance(); + auto names = factory.getAllRegisteredNames(); + for (const auto & name : names) + { + res_columns[0]->insert(name); + res_columns[1]->insert(UInt64(factory.isCaseInsensitive(name))); + + if (factory.isAlias(name)) + res_columns[2]->insert(factory.aliasTo(name)); + else + res_columns[2]->insert(String("")); + } +} + +} diff --git a/dbms/src/Storages/System/StorageSystemDataTypeFamilies.h b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.h new file mode 100644 index 00000000000..365e2790699 --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemDataTypeFamilies.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +namespace DB +{ + +class StorageSystemDataTypeFamilies : public ext::shared_ptr_helper, + public IStorageSystemOneBlock +{ +protected: + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + + using IStorageSystemOneBlock::IStorageSystemOneBlock; + +public: + std::string getName() const override { return "SystemTableDataTypeFamilies"; } + + static NamesAndTypesList getNamesAndTypes(); +}; + +} diff --git a/dbms/src/Storages/System/StorageSystemDatabases.cpp b/dbms/src/Storages/System/StorageSystemDatabases.cpp index 49c78688616..4df3d360a3b 100644 --- a/dbms/src/Storages/System/StorageSystemDatabases.cpp +++ b/dbms/src/Storages/System/StorageSystemDatabases.cpp @@ -1,40 +1,24 @@ -#include -#include -#include #include -#include +#include #include +#include namespace DB { - -StorageSystemDatabases::StorageSystemDatabases(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemDatabases::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { {"name", std::make_shared()}, {"engine", std::make_shared()}, {"data_path", std::make_shared()}, {"metadata_path", std::make_shared()}, - })); + }; } - -BlockInputStreams StorageSystemDatabases::read( - const Names & column_names, - const SelectQueryInfo &, - 
const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemDatabases::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - auto databases = context.getDatabases(); for (const auto & database : databases) { @@ -43,9 +27,6 @@ BlockInputStreams StorageSystemDatabases::read( res_columns[2]->insert(database.second->getDataPath()); res_columns[3]->insert(database.second->getMetadataPath()); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemDatabases.h b/dbms/src/Storages/System/StorageSystemDatabases.h index 621e490963a..c83f5a72efc 100644 --- a/dbms/src/Storages/System/StorageSystemDatabases.h +++ b/dbms/src/Storages/System/StorageSystemDatabases.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,20 @@ class Context; /** Implements `databases` system table, which allows you to get information about all databases. */ -class StorageSystemDatabases : public ext::shared_ptr_helper, public IStorage +class StorageSystemDatabases : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: - std::string getName() const override { return "SystemDatabases"; } - std::string getTableName() const override { return name; } + std::string getName() const override + { + return "SystemDatabases"; + } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemDatabases(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemDictionaries.cpp b/dbms/src/Storages/System/StorageSystemDictionaries.cpp index c57c1c7f459..665b992c829 100644 --- a/dbms/src/Storages/System/StorageSystemDictionaries.cpp +++ b/dbms/src/Storages/System/StorageSystemDictionaries.cpp @@ -1,27 +1,23 @@ -#include -#include -#include #include #include -#include -#include -#include -#include -#include +#include +#include #include #include #include +#include #include +#include + #include #include namespace DB { -StorageSystemDictionaries::StorageSystemDictionaries(const std::string & name) - : name{name} +NamesAndTypesList StorageSystemDictionaries::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { { "name", std::make_shared() }, { "origin", std::make_shared() }, { "type", std::make_shared() }, @@ -36,27 +32,14 @@ StorageSystemDictionaries::StorageSystemDictionaries(const std::string & name) { "creation_time", std::make_shared() }, { "source", std::make_shared() }, { "last_exception", std::make_shared() }, - })); + }; } - -BlockInputStreams StorageSystemDictionaries::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t, - const unsigned) +void StorageSystemDictionaries::fillData(MutableColumns & res_columns, 
const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - const auto & external_dictionaries = context.getExternalDictionaries(); auto objects_map = external_dictionaries.getObjectsMap(); const auto & dictionaries = objects_map.get(); - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (const auto & dict_info : dictionaries) { size_t i = 0; @@ -102,8 +85,6 @@ BlockInputStreams StorageSystemDictionaries::read( else res_columns[i++]->insertDefault(); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } } diff --git a/dbms/src/Storages/System/StorageSystemDictionaries.h b/dbms/src/Storages/System/StorageSystemDictionaries.h index 57ac9b0b6eb..87df9ceada7 100644 --- a/dbms/src/Storages/System/StorageSystemDictionaries.h +++ b/dbms/src/Storages/System/StorageSystemDictionaries.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -10,25 +10,17 @@ namespace DB class Context; -class StorageSystemDictionaries : public ext::shared_ptr_helper, public IStorage +class StorageSystemDictionaries : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemDictionaries"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemDictionaries(const std::string & name); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemEvents.cpp b/dbms/src/Storages/System/StorageSystemEvents.cpp index 1dc49ad37b2..eb4832c0c92 100644 --- a/dbms/src/Storages/System/StorageSystemEvents.cpp +++ b/dbms/src/Storages/System/StorageSystemEvents.cpp @@ -1,39 +1,21 @@ #include -#include #include #include -#include #include - namespace DB { - -StorageSystemEvents::StorageSystemEvents(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemEvents::getNamesAndTypes() { - setColumns(ColumnsDescription( - { + return { {"event", std::make_shared()}, {"value", std::make_shared()}, - })); + }; } - -BlockInputStreams StorageSystemEvents::read( - const Names & column_names, - const SelectQueryInfo &, - const Context &, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemEvents::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (size_t i = 0, end = ProfileEvents::end(); i < end; ++i) { UInt64 value = ProfileEvents::counters[i]; @@ -44,9 +26,6 @@ BlockInputStreams StorageSystemEvents::read( res_columns[1]->insert(value); } } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemEvents.h b/dbms/src/Storages/System/StorageSystemEvents.h index b987151e400..5b02b7739f1 100644 
--- a/dbms/src/Storages/System/StorageSystemEvents.h +++ b/dbms/src/Storages/System/StorageSystemEvents.h @@ -1,8 +1,7 @@ #pragma once #include -#include - +#include namespace DB { @@ -12,25 +11,17 @@ class Context; /** Implements `events` system table, which allows you to obtain information for profiling. */ -class StorageSystemEvents : public ext::shared_ptr_helper, public IStorage +class StorageSystemEvents : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemEvents"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemEvents(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemFormats.cpp b/dbms/src/Storages/System/StorageSystemFormats.cpp new file mode 100644 index 00000000000..96ce7ea7ed9 --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemFormats.cpp @@ -0,0 +1,32 @@ +#include +#include +#include +#include + +namespace DB +{ + +NamesAndTypesList StorageSystemFormats::getNamesAndTypes() +{ + return { + {"name", std::make_shared()}, + {"is_input", std::make_shared()}, + {"is_output", std::make_shared()}, + }; +} + +void StorageSystemFormats::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ + const auto & formats = FormatFactory::instance().getAllFormats(); + for (const auto & pair : formats) + { + const auto & [name, creator_pair] = pair; + UInt64 has_input_format(creator_pair.first != nullptr); + UInt64 has_output_format(creator_pair.second != nullptr); + res_columns[0]->insert(name); + res_columns[1]->insert(has_input_format); + res_columns[2]->insert(has_output_format); + } +} + +} diff --git a/dbms/src/Storages/System/StorageSystemFormats.h b/dbms/src/Storages/System/StorageSystemFormats.h new file mode 100644 index 00000000000..82f8303b5b0 --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemFormats.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include + +namespace DB +{ +class StorageSystemFormats : public ext::shared_ptr_helper, public IStorageSystemOneBlock +{ +protected: + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + + using IStorageSystemOneBlock::IStorageSystemOneBlock; +public: + + std::string getName() const override + { + return "SystemFormats"; + } + + static NamesAndTypesList getNamesAndTypes(); +}; +} diff --git a/dbms/src/Storages/System/StorageSystemFunctions.cpp b/dbms/src/Storages/System/StorageSystemFunctions.cpp index 909bf1d8089..f63d0b9b932 100644 --- a/dbms/src/Storages/System/StorageSystemFunctions.cpp +++ b/dbms/src/Storages/System/StorageSystemFunctions.cpp @@ -1,56 +1,53 @@ -#include -#include -#include #include -#include -#include #include #include -#include +#include +#include #include +#include namespace DB { - -StorageSystemFunctions::StorageSystemFunctions(const std::string & name_) - : name(name_) +namespace { - setColumns(ColumnsDescription({ - { "name", std::make_shared() }, - { "is_aggregate", 
std::make_shared<DataTypeUInt8>() },
-    }));
+    template <typename Factory>
+    void fillRow(MutableColumns & res_columns, const String & name, UInt64 is_aggregate, const Factory & f)
+    {
+        res_columns[0]->insert(name);
+        res_columns[1]->insert(is_aggregate);
+        res_columns[2]->insert(UInt64(f.isCaseInsensitive(name)));
+        if (f.isAlias(name))
+            res_columns[3]->insert(f.aliasTo(name));
+        else
+            res_columns[3]->insert(String{});
+    }
 }

-
-BlockInputStreams StorageSystemFunctions::read(
-    const Names & column_names,
-    const SelectQueryInfo &,
-    const Context &,
-    QueryProcessingStage::Enum & processed_stage,
-    const size_t /*max_block_size*/,
-    const unsigned /*num_streams*/)
+NamesAndTypesList StorageSystemFunctions::getNamesAndTypes()
 {
-    check(column_names);
-    processed_stage = QueryProcessingStage::FetchColumns;
+    return {
+        {"name", std::make_shared<DataTypeString>()},
+        {"is_aggregate", std::make_shared<DataTypeUInt8>()},
+        {"case_insensitive", std::make_shared<DataTypeUInt8>()},
+        {"alias_to", std::make_shared<DataTypeString>()},
+    };
+}

-    MutableColumns res_columns = getSampleBlock().cloneEmptyColumns();
-
-    const auto & functions = FunctionFactory::instance().functions;
-    for (const auto & it : functions)
+void StorageSystemFunctions::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const
+{
+    const auto & functions_factory = FunctionFactory::instance();
+    const auto & function_names = functions_factory.getAllRegisteredNames();
+    for (const auto & name : function_names)
     {
-        res_columns[0]->insert(it.first);
-        res_columns[1]->insert(UInt64(0));
+        fillRow(res_columns, name, UInt64(0), functions_factory);
     }

-    const auto & aggregate_functions = AggregateFunctionFactory::instance().aggregate_functions;
-    for (const auto & it : aggregate_functions)
+    const auto & aggregate_functions_factory = AggregateFunctionFactory::instance();
+    const auto & aggregate_function_names = aggregate_functions_factory.getAllRegisteredNames();
+    for (const auto & name : aggregate_function_names)
     {
-        res_columns[0]->insert(it.first);
-        res_columns[1]->insert(UInt64(1));
+        fillRow(res_columns, name, UInt64(1), aggregate_functions_factory);
     }
-
-    return BlockInputStreams(1, std::make_shared<OneBlockInputStream>(getSampleBlock().cloneWithColumns(std::move(res_columns))));
 }
-
 }
diff --git a/dbms/src/Storages/System/StorageSystemFunctions.h b/dbms/src/Storages/System/StorageSystemFunctions.h
index f77b9536453..baead3d8186 100644
--- a/dbms/src/Storages/System/StorageSystemFunctions.h
+++ b/dbms/src/Storages/System/StorageSystemFunctions.h
@@ -1,7 +1,7 @@
 #pragma once

 #include
-#include
+#include


 namespace DB
@@ -13,25 +13,17 @@ class Context;

 /** Implements `functions` system table, which allows you to get a list
  * of all normal and aggregate functions.
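 *
 * (The templated fillRow() above serves both factories because it relies only
 * on a duck-typed interface: getAllRegisteredNames(), isCaseInsensitive(),
 * isAlias() and aliasTo(). As a minimal illustration, with HypotheticalFactory
 * standing in for any type that provides those members:
 *
 *     struct HypotheticalFactory
 *     {
 *         std::vector<String> getAllRegisteredNames() const { return {"plus", "add"}; }
 *         bool isCaseInsensitive(const String &) const { return false; }
 *         bool isAlias(const String & name) const { return name == "add"; }
 *         String aliasTo(const String &) const { return "plus"; }
 *     };
 *
 *     /// fillRow(res_columns, "add", 0, HypotheticalFactory{}) would then emit
 *     /// the row ("add", 0, 0, "plus").
 * )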
*/ -class StorageSystemFunctions : public ext::shared_ptr_helper, public IStorage +class StorageSystemFunctions : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemFunctions"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemFunctions(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; -private: - const std::string name; + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemGraphite.cpp b/dbms/src/Storages/System/StorageSystemGraphite.cpp index c9ea685366b..7eab731bd12 100644 --- a/dbms/src/Storages/System/StorageSystemGraphite.cpp +++ b/dbms/src/Storages/System/StorageSystemGraphite.cpp @@ -124,10 +124,9 @@ static Strings getAllGraphiteSections(const AbstractConfiguration & config) } // namespace -StorageSystemGraphite::StorageSystemGraphite(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemGraphite::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { {"config_name", std::make_shared()}, {"regexp", std::make_shared()}, {"function", std::make_shared()}, @@ -135,23 +134,12 @@ StorageSystemGraphite::StorageSystemGraphite(const std::string & name_) {"precision", std::make_shared()}, {"priority", std::make_shared()}, {"is_default", std::make_shared()}, - })); + }; } -BlockInputStreams StorageSystemGraphite::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t /*max_block_size*/, - unsigned /*num_streams*/) +void StorageSystemGraphite::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - const auto & config = context.getConfigRef(); Strings sections = getAllGraphiteSections(config); @@ -172,8 +160,6 @@ BlockInputStreams StorageSystemGraphite::read( } } } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } } diff --git a/dbms/src/Storages/System/StorageSystemGraphite.h b/dbms/src/Storages/System/StorageSystemGraphite.h index 8c7a625de54..fa63c839857 100644 --- a/dbms/src/Storages/System/StorageSystemGraphite.h +++ b/dbms/src/Storages/System/StorageSystemGraphite.h @@ -1,31 +1,24 @@ #pragma once -#include +#include +#include #include namespace DB { /// Provides information about Graphite configuration. 
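// The conversion pattern applied throughout this diff, shown on a hypothetical
// table (all names below are illustrative, not part of this change): a subclass
// only declares its columns and fills one block of them, while
// IStorageSystemOneBlock<Self> supplies the constructor-time setColumns() call
// and the read() boilerplate.
//
//     class StorageSystemExample : public ext::shared_ptr_helper<StorageSystemExample>,
//                                  public IStorageSystemOneBlock<StorageSystemExample>
//     {
//     public:
//         std::string getName() const override { return "SystemExample"; }
//
//         static NamesAndTypesList getNamesAndTypes()
//         {
//             return {{"name", std::make_shared<DataTypeString>()}};
//         }
//
//     protected:
//         using IStorageSystemOneBlock::IStorageSystemOneBlock;
//
//         void fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const override
//         {
//             res_columns[0]->insert(String("example"));
//         }
//     };
//
// Instances are then obtained through the shared_ptr_helper factory, presumably
// (as with the other system tables) via StorageSystemExample::create("example").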
-class StorageSystemGraphite : public ext::shared_ptr_helper, public IStorage +class StorageSystemGraphite : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemGraphite"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemGraphite(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemMacros.cpp b/dbms/src/Storages/System/StorageSystemMacros.cpp index 456730bde4b..8e6420add8b 100644 --- a/dbms/src/Storages/System/StorageSystemMacros.cpp +++ b/dbms/src/Storages/System/StorageSystemMacros.cpp @@ -1,38 +1,21 @@ #include -#include -#include -#include -#include #include +#include namespace DB { - -StorageSystemMacros::StorageSystemMacros(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemMacros::getNamesAndTypes() { - setColumns(ColumnsDescription({ - {"macro", std::make_shared()}, - {"substitution", std::make_shared()}, - })); + return { + {"macro", std::make_shared()}, + {"substitution", std::make_shared()}, + }; } - -BlockInputStreams StorageSystemMacros::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemMacros::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - auto macros = context.getMacros(); for (const auto & macro : macros->getMacroMap()) @@ -40,9 +23,6 @@ BlockInputStreams StorageSystemMacros::read( res_columns[0]->insert(macro.first); res_columns[1]->insert(macro.second); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemMacros.h b/dbms/src/Storages/System/StorageSystemMacros.h index d4bb5ab3732..fdc091dfe1b 100644 --- a/dbms/src/Storages/System/StorageSystemMacros.h +++ b/dbms/src/Storages/System/StorageSystemMacros.h @@ -1,7 +1,8 @@ #pragma once +#include #include -#include +#include namespace DB @@ -12,25 +13,17 @@ class Context; /** Information about macros for introspection. 
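 *
 * (macros, events, metrics and build_options all reduce to the same two-column
 * key/value fill. A shared helper is not part of this change, but a sketch of
 * what one could look like, assuming any map-like range of pairs:
 *
 *     template <typename Map>
 *     void fillKeyValue(MutableColumns & res_columns, const Map & map)
 *     {
 *         for (const auto & kv : map)
 *         {
 *             res_columns[0]->insert(kv.first);
 *             res_columns[1]->insert(kv.second);
 *         }
 *     }
 * )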
*/ -class StorageSystemMacros : public ext::shared_ptr_helper, public IStorage +class StorageSystemMacros : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemMacros"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemMacros(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemMerges.cpp b/dbms/src/Storages/System/StorageSystemMerges.cpp index d3af993e29e..29d54b83e32 100644 --- a/dbms/src/Storages/System/StorageSystemMerges.cpp +++ b/dbms/src/Storages/System/StorageSystemMerges.cpp @@ -1,53 +1,36 @@ -#include -#include -#include -#include -#include -#include #include #include +#include namespace DB { -StorageSystemMerges::StorageSystemMerges(const std::string & name) - : name{name} +NamesAndTypesList StorageSystemMerges::getNamesAndTypes() { - setColumns(ColumnsDescription({ - { "database", std::make_shared() }, - { "table", std::make_shared() }, - { "elapsed", std::make_shared() }, - { "progress", std::make_shared() }, - { "num_parts", std::make_shared() }, - { "source_part_names", std::make_shared(std::make_shared()) }, - { "result_part_name", std::make_shared() }, - { "total_size_bytes_compressed", std::make_shared() }, - { "total_size_marks", std::make_shared() }, - { "bytes_read_uncompressed", std::make_shared() }, - { "rows_read", std::make_shared() }, - { "bytes_written_uncompressed", std::make_shared() }, - { "rows_written", std::make_shared() }, - { "columns_written", std::make_shared() }, - { "memory_usage", std::make_shared() }, - { "thread_number", std::make_shared() }, - })); + return { + {"database", std::make_shared()}, + {"table", std::make_shared()}, + {"elapsed", std::make_shared()}, + {"progress", std::make_shared()}, + {"num_parts", std::make_shared()}, + {"source_part_names", std::make_shared(std::make_shared())}, + {"result_part_name", std::make_shared()}, + {"total_size_bytes_compressed", std::make_shared()}, + {"total_size_marks", std::make_shared()}, + {"bytes_read_uncompressed", std::make_shared()}, + {"rows_read", std::make_shared()}, + {"bytes_written_uncompressed", std::make_shared()}, + {"rows_written", std::make_shared()}, + {"columns_written", std::make_shared()}, + {"memory_usage", std::make_shared()}, + {"thread_number", std::make_shared()}, + }; } -BlockInputStreams StorageSystemMerges::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t, - const unsigned) +void StorageSystemMerges::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (const auto & merge : context.getMergeList().get()) { size_t i = 0; @@ -68,8 +51,6 @@ BlockInputStreams StorageSystemMerges::read( res_columns[i++]->insert(merge.memory_usage); 
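// Note: fillData() relies on inserting into res_columns in exactly the order
// declared by getNamesAndTypes(); the running index `i` must end up equal to
// the number of columns. A defensive check (illustrative, not in the original)
// could follow the loop: assert(i == getNamesAndTypes().size());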
res_columns[i++]->insert(merge.thread_number); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } } diff --git a/dbms/src/Storages/System/StorageSystemMerges.h b/dbms/src/Storages/System/StorageSystemMerges.h index d48c97bfa17..f45f895d661 100644 --- a/dbms/src/Storages/System/StorageSystemMerges.h +++ b/dbms/src/Storages/System/StorageSystemMerges.h @@ -1,7 +1,10 @@ #pragma once +#include +#include +#include #include -#include +#include namespace DB @@ -10,25 +13,17 @@ namespace DB class Context; -class StorageSystemMerges : public ext::shared_ptr_helper, public IStorage +class StorageSystemMerges : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemMerges"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemMerges(const std::string & name); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemMetrics.cpp b/dbms/src/Storages/System/StorageSystemMetrics.cpp index 9d3b1cc9fbc..acfbd1b7340 100644 --- a/dbms/src/Storages/System/StorageSystemMetrics.cpp +++ b/dbms/src/Storages/System/StorageSystemMetrics.cpp @@ -1,39 +1,23 @@ + #include -#include -#include #include #include -#include #include namespace DB { - -StorageSystemMetrics::StorageSystemMetrics(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemMetrics::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { {"metric", std::make_shared()}, - {"value", std::make_shared()}, - })); + {"value", std::make_shared()}, + }; } - -BlockInputStreams StorageSystemMetrics::read( - const Names & column_names, - const SelectQueryInfo &, - const Context &, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemMetrics::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (size_t i = 0, end = CurrentMetrics::end(); i < end; ++i) { Int64 value = CurrentMetrics::values[i].load(std::memory_order_relaxed); @@ -41,9 +25,6 @@ BlockInputStreams StorageSystemMetrics::read( res_columns[0]->insert(String(CurrentMetrics::getDescription(CurrentMetrics::Metric(i)))); res_columns[1]->insert(value); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemMetrics.h b/dbms/src/Storages/System/StorageSystemMetrics.h index 7b6058de9e5..f74db926126 100644 --- a/dbms/src/Storages/System/StorageSystemMetrics.h +++ b/dbms/src/Storages/System/StorageSystemMetrics.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,17 @@ class Context; /** Implements `metrics` system table, which provides information about the operation of the server. 
*/ -class StorageSystemMetrics : public ext::shared_ptr_helper, public IStorage +class StorageSystemMetrics : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemMetrics"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemMetrics(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemModels.cpp b/dbms/src/Storages/System/StorageSystemModels.cpp index 5175989b861..2479742c8ec 100644 --- a/dbms/src/Storages/System/StorageSystemModels.cpp +++ b/dbms/src/Storages/System/StorageSystemModels.cpp @@ -2,45 +2,29 @@ #include #include #include -#include -#include -#include #include #include #include namespace DB { -StorageSystemModels::StorageSystemModels(const std::string & name) - : name{name} +NamesAndTypesList StorageSystemModels::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { { "name", std::make_shared() }, { "origin", std::make_shared() }, { "type", std::make_shared() }, { "creation_time", std::make_shared() }, { "last_exception", std::make_shared() }, - })); + }; } - -BlockInputStreams StorageSystemModels::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t, - const unsigned) +void StorageSystemModels::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - const auto & external_models = context.getExternalModels(); auto objects_map = external_models.getObjectsMap(); const auto & models = objects_map.get(); - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (const auto & model_info : models) { res_columns[0]->insert(model_info.first); @@ -73,8 +57,6 @@ BlockInputStreams StorageSystemModels::read( else res_columns[4]->insertDefault(); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } } diff --git a/dbms/src/Storages/System/StorageSystemModels.h b/dbms/src/Storages/System/StorageSystemModels.h index b32c5a804ce..ef30bd511ea 100644 --- a/dbms/src/Storages/System/StorageSystemModels.h +++ b/dbms/src/Storages/System/StorageSystemModels.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -10,25 +10,17 @@ namespace DB class Context; -class StorageSystemModels : public ext::shared_ptr_helper, public IStorage +class StorageSystemModels : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemModels"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList 
getNamesAndTypes(); protected: - StorageSystemModels(const std::string & name); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemMutations.cpp b/dbms/src/Storages/System/StorageSystemMutations.cpp index 3cf204c6a77..17580c00940 100644 --- a/dbms/src/Storages/System/StorageSystemMutations.cpp +++ b/dbms/src/Storages/System/StorageSystemMutations.cpp @@ -13,10 +13,10 @@ namespace DB { -StorageSystemMutations::StorageSystemMutations(const std::string & name_) - : name(name_) + +NamesAndTypesList StorageSystemMutations::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { { "database", std::make_shared() }, { "table", std::make_shared() }, { "mutation_id", std::make_shared() }, @@ -28,21 +28,12 @@ StorageSystemMutations::StorageSystemMutations(const std::string & name_) std::make_shared()) }, { "parts_to_do", std::make_shared() }, { "is_done", std::make_shared() }, - })); + }; } -BlockInputStreams StorageSystemMutations::read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemMutations::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - /// Collect a set of *MergeTree tables. std::map> merge_tree_tables; for (const auto & db : context.getDatabases()) @@ -83,13 +74,12 @@ BlockInputStreams StorageSystemMutations::read( VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context); if (!filtered_block.rows()) - return BlockInputStreams(); + return; col_database = filtered_block.getByName("database").column; col_table = filtered_block.getByName("table").column; } - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); for (size_t i_storage = 0; i_storage < col_database->size(); ++i_storage) { auto database = (*col_database)[i_storage].safeGet(); @@ -129,12 +119,6 @@ BlockInputStreams StorageSystemMutations::read( res_columns[col_num++]->insert(UInt64(status.is_done)); } } - - Block res = getSampleBlock().cloneEmpty(); - for (size_t i_col = 0; i_col < res.columns(); ++i_col) - res.getByPosition(i_col).column = std::move(res_columns[i_col]); - - return BlockInputStreams(1, std::make_shared(res)); } } diff --git a/dbms/src/Storages/System/StorageSystemMutations.h b/dbms/src/Storages/System/StorageSystemMutations.h index 3b82f3f46be..d2dcf99aa46 100644 --- a/dbms/src/Storages/System/StorageSystemMutations.h +++ b/dbms/src/Storages/System/StorageSystemMutations.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,17 @@ class Context; /// Implements the `mutations` system table, which provides information about the status of mutations /// in the MergeTree tables. 
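// Both this table and system.replication_queue keep the same pre-filtering
// idiom inside fillData(): the candidate (database, table) pairs are first
// materialized into a small block, the query's WHERE clause is applied to that
// block, and fillData() returns early if nothing survives. Condensed from the
// code above:
//
//     Block filtered_block { /* "database" and "table" columns of candidates */ };
//     VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context);
//     if (!filtered_block.rows())
//         return;    /// the base class still emits an empty block with the right header
//     /// ...then collect status only for the rows that remain.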
-class StorageSystemMutations : public ext::shared_ptr_helper, public IStorage +class StorageSystemMutations : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: String getName() const override { return "SystemMutations"; } - String getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const String name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemMutations(const String & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemProcesses.cpp b/dbms/src/Storages/System/StorageSystemProcesses.cpp index 793e3124a2a..ca18e9835d0 100644 --- a/dbms/src/Storages/System/StorageSystemProcesses.cpp +++ b/dbms/src/Storages/System/StorageSystemProcesses.cpp @@ -1,75 +1,61 @@ -#include #include #include -#include +#include #include #include -#include namespace DB { - -StorageSystemProcesses::StorageSystemProcesses(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemProcesses::getNamesAndTypes() { - setColumns(ColumnsDescription({ - { "is_initial_query", std::make_shared() }, + return { + {"is_initial_query", std::make_shared()}, - { "user", std::make_shared() }, - { "query_id", std::make_shared() }, - { "address", std::make_shared() }, - { "port", std::make_shared() }, + {"user", std::make_shared()}, + {"query_id", std::make_shared()}, + {"address", std::make_shared()}, + {"port", std::make_shared()}, - { "initial_user", std::make_shared() }, - { "initial_query_id", std::make_shared() }, - { "initial_address", std::make_shared() }, - { "initial_port", std::make_shared() }, + {"initial_user", std::make_shared()}, + {"initial_query_id", std::make_shared()}, + {"initial_address", std::make_shared()}, + {"initial_port", std::make_shared()}, - { "interface", std::make_shared() }, + {"interface", std::make_shared()}, - { "os_user", std::make_shared() }, - { "client_hostname", std::make_shared() }, - { "client_name", std::make_shared() }, - { "client_version_major", std::make_shared() }, - { "client_version_minor", std::make_shared() }, - { "client_revision", std::make_shared() }, + {"os_user", std::make_shared()}, + {"client_hostname", std::make_shared()}, + {"client_name", std::make_shared()}, + {"client_version_major", std::make_shared()}, + {"client_version_minor", std::make_shared()}, + {"client_version_patch", std::make_shared()}, + {"client_revision", std::make_shared()}, - { "http_method", std::make_shared() }, - { "http_user_agent", std::make_shared() }, + {"http_method", std::make_shared()}, + {"http_user_agent", std::make_shared()}, - { "quota_key", std::make_shared() }, + {"quota_key", std::make_shared()}, - { "elapsed", std::make_shared() }, - { "is_cancelled", std::make_shared() }, - { "read_rows", std::make_shared() }, - { "read_bytes", std::make_shared() }, - { "total_rows_approx", std::make_shared() }, - { "written_rows", std::make_shared() }, - { "written_bytes", std::make_shared() }, - { "memory_usage", std::make_shared() }, - { "peak_memory_usage", std::make_shared() }, - { "query", std::make_shared() }, - })); + {"elapsed", std::make_shared()}, + {"is_cancelled", std::make_shared()}, + {"read_rows", 
std::make_shared()}, + {"read_bytes", std::make_shared()}, + {"total_rows_approx", std::make_shared()}, + {"written_rows", std::make_shared()}, + {"written_bytes", std::make_shared()}, + {"memory_usage", std::make_shared()}, + {"peak_memory_usage", std::make_shared()}, + {"query", std::make_shared()}, + }; } -BlockInputStreams StorageSystemProcesses::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemProcesses::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - ProcessList::Info info = context.getProcessList().getInfo(); - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (const auto & process : info) { size_t i = 0; @@ -88,6 +74,7 @@ BlockInputStreams StorageSystemProcesses::read( res_columns[i++]->insert(process.client_info.client_name); res_columns[i++]->insert(process.client_info.client_version_major); res_columns[i++]->insert(process.client_info.client_version_minor); + res_columns[i++]->insert(process.client_info.client_version_patch); res_columns[i++]->insert(UInt64(process.client_info.client_revision)); res_columns[i++]->insert(UInt64(process.client_info.http_method)); res_columns[i++]->insert(process.client_info.http_user_agent); @@ -103,9 +90,6 @@ BlockInputStreams StorageSystemProcesses::read( res_columns[i++]->insert(process.peak_memory_usage); res_columns[i++]->insert(process.query); } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemProcesses.h b/dbms/src/Storages/System/StorageSystemProcesses.h index f8f26d13d35..3cbe0028af3 100644 --- a/dbms/src/Storages/System/StorageSystemProcesses.h +++ b/dbms/src/Storages/System/StorageSystemProcesses.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,17 @@ class Context; /** Implements `processes` system table, which allows you to get information about the queries that are currently executing. 
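 *
 * (This change also adds the `client_version_patch` column. Note the paired
 * edit: the new entry in getNamesAndTypes() sits between client_version_minor
 * and client_revision, and fillData() inserts
 *     res_columns[i++]->insert(process.client_info.client_version_patch);
 * at the same relative position, since columns are filled positionally.)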
*/ -class StorageSystemProcesses : public ext::shared_ptr_helper, public IStorage +class StorageSystemProcesses : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemProcesses"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemProcesses(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp b/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp index 69fc73bd89c..51b0805c4c2 100644 --- a/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp +++ b/dbms/src/Storages/System/StorageSystemReplicationQueue.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -17,10 +16,10 @@ namespace DB { -StorageSystemReplicationQueue::StorageSystemReplicationQueue(const std::string & name_) - : name(name_) + +NamesAndTypesList StorageSystemReplicationQueue::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { /// Table properties. { "database", std::make_shared() }, { "table", std::make_shared() }, @@ -43,21 +42,12 @@ StorageSystemReplicationQueue::StorageSystemReplicationQueue(const std::string & { "num_postponed", std::make_shared() }, { "postpone_reason", std::make_shared() }, { "last_postpone_time", std::make_shared() }, - })); + }; } -BlockInputStreams StorageSystemReplicationQueue::read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemReplicationQueue::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - std::map> replicated_tables; for (const auto & db : context.getDatabases()) for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next()) @@ -90,7 +80,7 @@ BlockInputStreams StorageSystemReplicationQueue::read( VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context); if (!filtered_block.rows()) - return BlockInputStreams(); + return; col_database_to_filter = filtered_block.getByName("database").column; col_table_to_filter = filtered_block.getByName("table").column; @@ -99,8 +89,6 @@ BlockInputStreams StorageSystemReplicationQueue::read( StorageReplicatedMergeTree::LogEntriesData queue; String replica_name; - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (size_t i = 0, tables_size = col_database_to_filter->size(); i < tables_size; ++i) { String database = (*col_database_to_filter)[i].safeGet(); @@ -139,9 +127,6 @@ BlockInputStreams StorageSystemReplicationQueue::read( res_columns[col_num++]->insert(UInt64(entry.last_postpone_time)); } } - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git 
a/dbms/src/Storages/System/StorageSystemReplicationQueue.h b/dbms/src/Storages/System/StorageSystemReplicationQueue.h index 8554361e0df..63dc58118cd 100644 --- a/dbms/src/Storages/System/StorageSystemReplicationQueue.h +++ b/dbms/src/Storages/System/StorageSystemReplicationQueue.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,17 @@ class Context; /** Implements the `replication_queue` system table, which allows you to view the replication queues for the replicated tables. */ -class StorageSystemReplicationQueue : public ext::shared_ptr_helper, public IStorage +class StorageSystemReplicationQueue : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemReplicationQueue"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemReplicationQueue(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemSettings.cpp b/dbms/src/Storages/System/StorageSystemSettings.cpp index efb50c559cc..fee9467f6f9 100644 --- a/dbms/src/Storages/System/StorageSystemSettings.cpp +++ b/dbms/src/Storages/System/StorageSystemSettings.cpp @@ -1,8 +1,5 @@ -#include -#include #include #include -#include #include #include @@ -10,44 +7,27 @@ namespace DB { - -StorageSystemSettings::StorageSystemSettings(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemSettings::getNamesAndTypes() { - setColumns(ColumnsDescription({ - { "name", std::make_shared() }, - { "value", std::make_shared() }, - { "changed", std::make_shared() }, - { "description", std::make_shared() }, - })); + return { + {"name", std::make_shared()}, + {"value", std::make_shared()}, + {"changed", std::make_shared()}, + {"description", std::make_shared()}, + }; } - -BlockInputStreams StorageSystemSettings::read( - const Names & column_names, - const SelectQueryInfo &, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemSettings::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo &) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - const Settings & settings = context.getSettingsRef(); - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - -#define ADD_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) \ - res_columns[0]->insert(String(#NAME)); \ - res_columns[1]->insert(settings.NAME.toString()); \ +#define ADD_SETTING(TYPE, NAME, DEFAULT, DESCRIPTION) \ + res_columns[0]->insert(String(#NAME)); \ + res_columns[1]->insert(settings.NAME.toString()); \ res_columns[2]->insert(UInt64(settings.NAME.changed)); \ res_columns[3]->insert(String(DESCRIPTION)); APPLY_FOR_SETTINGS(ADD_SETTING) #undef ADD_SETTING - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } - } diff --git a/dbms/src/Storages/System/StorageSystemSettings.h 
b/dbms/src/Storages/System/StorageSystemSettings.h index 153b9213ef8..e44e0abbcd4 100644 --- a/dbms/src/Storages/System/StorageSystemSettings.h +++ b/dbms/src/Storages/System/StorageSystemSettings.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,17 @@ class Context; /** implements system table "settings", which allows to get information about the current settings. */ -class StorageSystemSettings : public ext::shared_ptr_helper, public IStorage +class StorageSystemSettings : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemSettings"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemSettings(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/StorageSystemTableEngines.cpp b/dbms/src/Storages/System/StorageSystemTableEngines.cpp new file mode 100644 index 00000000000..d40fc6fa49e --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemTableEngines.cpp @@ -0,0 +1,22 @@ +#include +#include +#include + +namespace DB +{ + +NamesAndTypesList StorageSystemTableEngines::getNamesAndTypes() +{ + return {{"name", std::make_shared()}}; +} + +void StorageSystemTableEngines::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ + const auto & storages = StorageFactory::instance().getAllStorages(); + for (const auto & pair : storages) + { + res_columns[0]->insert(pair.first); + } +} + +} diff --git a/dbms/src/Storages/System/StorageSystemTableEngines.h b/dbms/src/Storages/System/StorageSystemTableEngines.h new file mode 100644 index 00000000000..f0f6b62d59d --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemTableEngines.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +class StorageSystemTableEngines : public ext::shared_ptr_helper, + public IStorageSystemOneBlock +{ +protected: + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + + using IStorageSystemOneBlock::IStorageSystemOneBlock; + +public: + std::string getName() const override + { + return "SystemTableEngines"; + } + + static NamesAndTypesList getNamesAndTypes(); +}; + +} diff --git a/dbms/src/Storages/System/StorageSystemTableFunctions.cpp b/dbms/src/Storages/System/StorageSystemTableFunctions.cpp new file mode 100644 index 00000000000..15067bbc41f --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemTableFunctions.cpp @@ -0,0 +1,21 @@ +#include + +#include +namespace DB +{ + +NamesAndTypesList StorageSystemTableFunctions::getNamesAndTypes() +{ + return {{"name", std::make_shared()}}; +} + +void StorageSystemTableFunctions::fillData(MutableColumns & res_columns, const Context &, const SelectQueryInfo &) const +{ + const auto & functions = TableFunctionFactory::instance().getAllTableFunctions(); + for (const auto & pair : functions) + { + res_columns[0]->insert(pair.first); + } +} + +} diff --git 
a/dbms/src/Storages/System/StorageSystemTableFunctions.h b/dbms/src/Storages/System/StorageSystemTableFunctions.h new file mode 100644 index 00000000000..413af0f5c66 --- /dev/null +++ b/dbms/src/Storages/System/StorageSystemTableFunctions.h @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +namespace DB +{ + +class StorageSystemTableFunctions : public ext::shared_ptr_helper, + public IStorageSystemOneBlock +{ +protected: + + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; + +public: + + std::string getName() const override + { + return "SystemTableFunctions"; + } + + static NamesAndTypesList getNamesAndTypes(); + +}; + +} diff --git a/dbms/src/Storages/System/StorageSystemZooKeeper.cpp b/dbms/src/Storages/System/StorageSystemZooKeeper.cpp index e506802ec74..612b0d782d7 100644 --- a/dbms/src/Storages/System/StorageSystemZooKeeper.cpp +++ b/dbms/src/Storages/System/StorageSystemZooKeeper.cpp @@ -1,9 +1,6 @@ -#include -#include #include #include #include -#include #include #include #include @@ -19,10 +16,9 @@ namespace DB { -StorageSystemZooKeeper::StorageSystemZooKeeper(const std::string & name_) - : name(name_) +NamesAndTypesList StorageSystemZooKeeper::getNamesAndTypes() { - setColumns(ColumnsDescription({ + return { { "name", std::make_shared() }, { "value", std::make_shared() }, { "czxid", std::make_shared() }, @@ -37,7 +33,7 @@ StorageSystemZooKeeper::StorageSystemZooKeeper(const std::string & name_) { "numChildren", std::make_shared() }, { "pzxid", std::make_shared() }, { "path", std::make_shared() }, - })); + }; } @@ -103,17 +99,8 @@ static String extractPath(const ASTPtr & query) } -BlockInputStreams StorageSystemZooKeeper::read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - const size_t /*max_block_size*/, - const unsigned /*num_streams*/) +void StorageSystemZooKeeper::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const { - check(column_names); - processed_stage = QueryProcessingStage::FetchColumns; - String path = extractPath(query_info.query); if (path.empty()) throw Exception("SELECT from system.zookeeper table must contain condition like path = 'path' in WHERE clause."); @@ -136,8 +123,6 @@ BlockInputStreams StorageSystemZooKeeper::read( for (const String & node : nodes) futures.push_back(zookeeper->asyncTryGet(path_part + '/' + node)); - MutableColumns res_columns = getSampleBlock().cloneEmptyColumns(); - for (size_t i = 0, size = nodes.size(); i < size; ++i) { auto res = futures[i].get(); @@ -162,8 +147,6 @@ BlockInputStreams StorageSystemZooKeeper::read( res_columns[col_num++]->insert(Int64(stat.pzxid)); res_columns[col_num++]->insert(path); /// This is the original path. In order to process the request, condition in WHERE should be triggered. 
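        /// Usage example (the path value is illustrative): SELECT name, numChildren FROM system.zookeeper WHERE path = '/clickhouse';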
} - - return BlockInputStreams(1, std::make_shared(getSampleBlock().cloneWithColumns(std::move(res_columns)))); } diff --git a/dbms/src/Storages/System/StorageSystemZooKeeper.h b/dbms/src/Storages/System/StorageSystemZooKeeper.h index 45625ebab12..9644fe96162 100644 --- a/dbms/src/Storages/System/StorageSystemZooKeeper.h +++ b/dbms/src/Storages/System/StorageSystemZooKeeper.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB @@ -12,25 +12,17 @@ class Context; /** Implements `zookeeper` system table, which allows you to view the data in ZooKeeper for debugging purposes. */ -class StorageSystemZooKeeper : public ext::shared_ptr_helper, public IStorage +class StorageSystemZooKeeper : public ext::shared_ptr_helper, public IStorageSystemOneBlock { public: std::string getName() const override { return "SystemZooKeeper"; } - std::string getTableName() const override { return name; } - BlockInputStreams read( - const Names & column_names, - const SelectQueryInfo & query_info, - const Context & context, - QueryProcessingStage::Enum & processed_stage, - size_t max_block_size, - unsigned num_streams) override; - -private: - const std::string name; + static NamesAndTypesList getNamesAndTypes(); protected: - StorageSystemZooKeeper(const std::string & name_); + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const override; }; } diff --git a/dbms/src/Storages/System/attachSystemTables.cpp b/dbms/src/Storages/System/attachSystemTables.cpp index 705d01fb9c2..479337d1b41 100644 --- a/dbms/src/Storages/System/attachSystemTables.cpp +++ b/dbms/src/Storages/System/attachSystemTables.cpp @@ -1,13 +1,17 @@ #include #include +#include #include #include +#include #include #include #include +#include #include #include +#include #include #include #include @@ -23,6 +27,8 @@ #include #include #include +#include +#include #include #include @@ -42,6 +48,12 @@ void attachSystemTablesLocal(IDatabase & system_database) system_database.attachTable("events", StorageSystemEvents::create("events")); system_database.attachTable("settings", StorageSystemSettings::create("settings")); system_database.attachTable("build_options", StorageSystemBuildOptions::create("build_options")); + system_database.attachTable("formats", StorageSystemFormats::create("formats")); + system_database.attachTable("table_functions", StorageSystemTableFunctions::create("table_functions")); + system_database.attachTable("aggregate_function_combinators", StorageSystemAggregateFunctionCombinators::create("aggregate_function_combinators")); + system_database.attachTable("data_type_families", StorageSystemDataTypeFamilies::create("data_type_families")); + system_database.attachTable("collations", StorageSystemCollations::create("collations")); + system_database.attachTable("table_engines", StorageSystemTableEngines::create("table_engines")); } void attachSystemTablesServer(IDatabase & system_database, bool has_zookeeper) diff --git a/dbms/src/Storages/getStructureOfRemoteTable.cpp b/dbms/src/Storages/getStructureOfRemoteTable.cpp index bdbc04103a9..174ec49a4f1 100644 --- a/dbms/src/Storages/getStructureOfRemoteTable.cpp +++ b/dbms/src/Storages/getStructureOfRemoteTable.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include namespace DB @@ -22,21 +24,40 @@ ColumnsDescription getStructureOfRemoteTable( const Cluster & cluster, const std::string & database, const std::string & table, - const Context & context) + 
const Context & context, + const ASTPtr & table_func_ptr) { /// Send to the first any remote shard. const auto & shard_info = cluster.getAnyShardInfo(); - if (shard_info.isLocal()) - return context.getTable(database, table)->getColumns(); + String query; + + if (table_func_ptr) + { + if (shard_info.isLocal()) + { + auto table_function = static_cast(table_func_ptr.get()); + return TableFunctionFactory::instance().get(table_function->name, context)->execute(table_func_ptr, context)->getColumns(); + } + + auto table_func_name = queryToString(table_func_ptr); + query = "DESC TABLE " + table_func_name; + } + else + { + if (shard_info.isLocal()) + return context.getTable(database, table)->getColumns(); + + /// Request for a table description + query = "DESC TABLE " + backQuoteIfNeed(database) + "." + backQuoteIfNeed(table); + } - /// Request for a table description - String query = "DESC TABLE " + backQuoteIfNeed(database) + "." + backQuoteIfNeed(table); ColumnsDescription res; auto input = std::make_shared(shard_info.pool, query, InterpreterDescribeQuery::getSampleBlock(), context); input->setPoolMode(PoolMode::GET_ONE); - input->setMainTable(QualifiedTableName{database, table}); + if (!table_func_ptr) + input->setMainTable(QualifiedTableName{database, table}); input->readPrefix(); const DataTypeFactory & data_type_factory = DataTypeFactory::instance(); diff --git a/dbms/src/Storages/getStructureOfRemoteTable.h b/dbms/src/Storages/getStructureOfRemoteTable.h index 20417ef50e1..9f1769a7096 100644 --- a/dbms/src/Storages/getStructureOfRemoteTable.h +++ b/dbms/src/Storages/getStructureOfRemoteTable.h @@ -1,6 +1,8 @@ #pragma once #include +#include +#include namespace DB @@ -15,6 +17,7 @@ ColumnsDescription getStructureOfRemoteTable( const Cluster & cluster, const std::string & database, const std::string & table, - const Context & context); + const Context & context, + const ASTPtr & table_func_ptr = nullptr); } diff --git a/dbms/src/TableFunctions/TableFunctionFactory.cpp b/dbms/src/TableFunctions/TableFunctionFactory.cpp index b6188ee5967..8fb8533176b 100644 --- a/dbms/src/TableFunctions/TableFunctionFactory.cpp +++ b/dbms/src/TableFunctions/TableFunctionFactory.cpp @@ -37,4 +37,9 @@ TableFunctionPtr TableFunctionFactory::get( return it->second(); } +bool TableFunctionFactory::isTableFunctionName(const std::string & name) const +{ + return functions.count(name); +} + } diff --git a/dbms/src/TableFunctions/TableFunctionFactory.h b/dbms/src/TableFunctions/TableFunctionFactory.h index 27c0f566a78..22bc5cdb99f 100644 --- a/dbms/src/TableFunctions/TableFunctionFactory.h +++ b/dbms/src/TableFunctions/TableFunctionFactory.h @@ -23,6 +23,7 @@ class TableFunctionFactory final: public ext::singleton public: using Creator = std::function; + using TableFunctions = std::unordered_map; /// Register a function by its name. /// No locking, you must register all functions before usage of get. 
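    /// Illustrative registration (example name, not part of this patch):
    ///     TableFunctionFactory::instance().registerFunction("merge", [] { return std::make_shared<TableFunctionMerge>(); });
    /// This registry now also backs the new system.table_functions table and the
    /// isTableFunctionName() check used by TableFunctionRemote further down.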
void registerFunction(const std::string & name, Creator creator); @@ -42,9 +43,14 @@ public: const std::string & name, const Context & context) const; -private: - using TableFunctions = std::unordered_map; + bool isTableFunctionName(const std::string & name) const; + const TableFunctions & getAllTableFunctions() const + { + return functions; + } + +private: TableFunctions functions; }; diff --git a/dbms/src/TableFunctions/TableFunctionODBC.cpp b/dbms/src/TableFunctions/TableFunctionODBC.cpp index 75f73146485..a45283c65f2 100644 --- a/dbms/src/TableFunctions/TableFunctionODBC.cpp +++ b/dbms/src/TableFunctions/TableFunctionODBC.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -75,7 +76,7 @@ StoragePtr TableFunctionODBC::executeImpl(const ASTPtr & ast_function, const Con for (int i = 0; i < 2; ++i) args[i] = evaluateConstantExpressionOrIdentifierAsLiteral(args[i], context); - std::string connection_string = static_cast(*args[0]).value.safeGet(); + std::string connection_string = validateODBCConnectionString(static_cast(*args[0]).value.safeGet()); std::string table_name = static_cast(*args[1]).value.safeGet(); Poco::Data::ODBC::SessionImpl session(connection_string, DBMS_DEFAULT_CONNECT_TIMEOUT_SEC); diff --git a/dbms/src/TableFunctions/TableFunctionRemote.cpp b/dbms/src/TableFunctions/TableFunctionRemote.cpp index ac79f0ac2f2..4f7d1c11ac2 100644 --- a/dbms/src/TableFunctions/TableFunctionRemote.cpp +++ b/dbms/src/TableFunctions/TableFunctionRemote.cpp @@ -198,6 +198,7 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C String cluster_description; String remote_database; String remote_table; + ASTPtr remote_table_function_ptr; String username; String password; @@ -230,24 +231,40 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C ++arg_num; args[arg_num] = evaluateConstantExpressionOrIdentifierAsLiteral(args[arg_num], context); - remote_database = static_cast(*args[arg_num]).value.safeGet(); - ++arg_num; - size_t dot = remote_database.find('.'); - if (dot != String::npos) + const auto function = typeid_cast(args[arg_num].get()); + + if (function && TableFunctionFactory::instance().isTableFunctionName(function->name)) { - /// NOTE Bad - do not support identifiers in backquotes. - remote_table = remote_database.substr(dot + 1); - remote_database = remote_database.substr(0, dot); + remote_table_function_ptr = args[arg_num]; + ++arg_num; } else { - if (arg_num >= args.size()) - throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + remote_database = static_cast(*args[arg_num]).value.safeGet(); - args[arg_num] = evaluateConstantExpressionOrIdentifierAsLiteral(args[arg_num], context); - remote_table = static_cast(*args[arg_num]).value.safeGet(); ++arg_num; + + size_t dot = remote_database.find('.'); + if (dot != String::npos) + { + /// NOTE Bad - do not support identifiers in backquotes. 
+ remote_table = remote_database.substr(dot + 1); + remote_database = remote_database.substr(0, dot); + } + else + { + if (arg_num >= args.size()) + { + throw Exception(help_message, ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + } + else + { + args[arg_num] = evaluateConstantExpressionOrIdentifierAsLiteral(args[arg_num], context); + remote_table = static_cast(*args[arg_num]).value.safeGet(); + ++arg_num; + } + } } /// Username and password parameters are prohibited in cluster version of the function @@ -299,13 +316,23 @@ StoragePtr TableFunctionRemote::executeImpl(const ASTPtr & ast_function, const C cluster = std::make_shared(context.getSettings(), names, username, password, context.getTCPPort(), false); } - auto res = StorageDistributed::createWithOwnCluster( - getName(), - getStructureOfRemoteTable(*cluster, remote_database, remote_table, context), - remote_database, - remote_table, - cluster, - context); + auto structure_remote_table = getStructureOfRemoteTable(*cluster, remote_database, remote_table, context, remote_table_function_ptr); + + StoragePtr res = remote_table_function_ptr + ? StorageDistributed::createWithOwnCluster( + getName(), + structure_remote_table, + remote_table_function_ptr, + cluster, + context) + : StorageDistributed::createWithOwnCluster( + getName(), + structure_remote_table, + remote_database, + remote_table, + cluster, + context); + res->startup(); return res; } diff --git a/dbms/tests/clickhouse-test b/dbms/tests/clickhouse-test index f734b784f9b..e8e1f940bff 100755 --- a/dbms/tests/clickhouse-test +++ b/dbms/tests/clickhouse-test @@ -81,7 +81,6 @@ def main(args): os.environ.setdefault("CLICKHOUSE_CONFIG_CLIENT", args.configclient) os.environ.setdefault("CLICKHOUSE_TMP", tmp_dir) - # TODO ! use clickhouse-extract-from-config here: if args.zookeeper is None: code, out = commands.getstatusoutput(args.binary + "-extract-from-config --try --config " + args.configserver + ' --key zookeeper | grep . | wc -l') try: @@ -126,7 +125,7 @@ def main(args): failures = 0 failures_chain = 0 if 'stateful' in suite and not is_data_present(): - print("Won't run stateful tests because test data wasn't loaded. See README.txt.") + print("Won't run stateful tests because test data wasn't loaded.") continue # Reverse sort order: we want run newest test first. @@ -195,7 +194,7 @@ def main(args): stderr_file = os.path.join(suite_tmp_dir, name) + '.stderr' if ext == '.sql': - command = "{0} --multiquery < {1} > {2} 2> {3}".format(args.client, case_file, stdout_file, stderr_file) + command = "{0} --testmode --multiquery < {1} > {2} 2> {3}".format(args.client, case_file, stdout_file, stderr_file) else: command = "{0} > {1} 2> {2}".format(case_file, stdout_file, stderr_file) diff --git a/dbms/tests/clickhouse-test-server b/dbms/tests/clickhouse-test-server index 1cf2fea62d8..810f84aff67 100755 --- a/dbms/tests/clickhouse-test-server +++ b/dbms/tests/clickhouse-test-server @@ -9,8 +9,9 @@ ROOT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && cd ../.. 
&& pwd) #TODO: DATA_DIR=${DATA_DIR:=`mktemp -d /tmp/clickhouse.test..XXXXX`} DATA_DIR=${DATA_DIR:=/tmp/clickhouse} LOG_DIR=${LOG_DIR:=$DATA_DIR/log} -BUILD_DIR=${BUILD_DIR:=$ROOT_DIR/build${BUILD_TYPE}} export CLICKHOUSE_BINARY=${CLICKHOUSE_BINARY:="clickhouse"} +[ -x "$ROOT_DIR/dbms/programs/${CLICKHOUSE_BINARY}-server" ] && BUILD_DIR=${BUILD_DIR:=$ROOT_DIR} # Build without separate build dir +BUILD_DIR=${BUILD_DIR:=$ROOT_DIR/build${BUILD_TYPE}} [ -x "$CUR_DIR/clickhouse-server" ] && [ -x "${CUR_DIR}/${CLICKHOUSE_BINARY}-client" ] && BIN_DIR= # Allow run in /usr/bin [ -x "$BUILD_DIR/dbms/programs/${CLICKHOUSE_BINARY}-server" ] && BIN_DIR=${BIN_DIR:=$BUILD_DIR/dbms/programs/} [ -f "$CUR_DIR/server-test.xml" ] && CONFIG_DIR=${CONFIG_DIR=$CUR_DIR}/ @@ -52,9 +53,9 @@ CERT=`${BIN_DIR}clickhouse-extract-from-config --config=$CLICKHOUSE_CONFIG --key [ -n "$DHPARAM" ] && openssl dhparam -out $DHPARAM 256 [ -n "$PRIVATEKEY" ] && [ -n "$CERT" ] && openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout $PRIVATEKEY -out $CERT -if [ "$TEST_GDB" ]; then +if [ "$TEST_GDB" ] || [ "$GDB" ]; then echo -e "run \nset pagination off \nset logging file $DATA_DIR/gdb.log \nset logging on \nthread apply all backtrace \ndetach \nquit " > $DATA_DIR/gdb.cmd - GDB="gdb -x $DATA_DIR/gdb.cmd --args " + GDB=${GDB:="gdb -x $DATA_DIR/gdb.cmd --args "} fi # Start a local clickhouse server which will be used to run tests diff --git a/dbms/tests/external_dictionaries/dictionary_library/dictionary_library.cpp b/dbms/tests/external_dictionaries/dictionary_library/dictionary_library.cpp index 59b75d0a26f..2a411ebcb00 100644 --- a/dbms/tests/external_dictionaries/dictionary_library/dictionary_library.cpp +++ b/dbms/tests/external_dictionaries/dictionary_library/dictionary_library.cpp @@ -62,7 +62,8 @@ void MakeColumnsFromVector(DataHolder * ptr) ptr->ctable.data = ptr->rowHolder.get(); } -extern "C" { +extern "C" +{ void * ClickHouseDictionary_v3_loadIds(void * data_ptr, ClickHouseLibrary::CStrings * settings, @@ -151,7 +152,8 @@ void * ClickHouseDictionary_v3_loadKeys(void * data_ptr, ClickHouseLibrary::CStr if (requested_keys) { LOG(ptr->lib->log, "requested_keys columns passed: " << requested_keys->size); - for (size_t i = 0; i < requested_keys->size; ++i) { + for (size_t i = 0; i < requested_keys->size; ++i) + { LOG(ptr->lib->log, "requested_keys at column " << i << " passed: " << requested_keys->data[i].size); } } diff --git a/dbms/tests/instructions/sanitizers.md b/dbms/tests/instructions/sanitizers.md index 5c8b464703e..ad7ce179927 100644 --- a/dbms/tests/instructions/sanitizers.md +++ b/dbms/tests/instructions/sanitizers.md @@ -6,12 +6,10 @@ Note: We use Address Sanitizer to run functional tests for every commit automati mkdir build && cd build ``` -Note: -ENABLE_TCMALLOC=0 is optional. -CC=clang CXX=clang++ is strongly recommended. +Note: using clang instead of gcc is strongly recommended. ``` -CC=clang CXX=clang++ cmake -D CMAKE_BUILD_TYPE=ASan -D ENABLE_TCMALLOC=0 .. +CC=clang CXX=clang++ cmake -D SANITIZE=address .. ninja ``` @@ -37,7 +35,7 @@ mkdir build && cd build ## Note: All parameters are mandatory. ``` -CC=clang CXX=clang++ cmake -D CMAKE_BUILD_TYPE=TSan -D ENABLE_TCMALLOC=0 .. +CC=clang CXX=clang++ cmake -D SANITIZE=thread .. ninja ``` @@ -75,5 +73,5 @@ mkdir build && cd build ``` ``` -CC=clang CXX=clang++ cmake -D CMAKE_BUILD_TYPE=MSan -D LIBCXX_PATH=/home/milovidov/libcxx_msan .. +CC=clang CXX=clang++ cmake -D SANITIZE=memory -D LIBCXX_PATH=/home/milovidov/libcxx_msan .. 
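# Note: MSan only gives meaningful reports when the whole userspace is instrumented,
# so LIBCXX_PATH must point at a libc++ built with -fsanitize=memory; the path above
# is just the author's local example.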
``` diff --git a/dbms/tests/integration/helpers/client.py b/dbms/tests/integration/helpers/client.py index 1fa2b7c7643..2c2b397c900 100644 --- a/dbms/tests/integration/helpers/client.py +++ b/dbms/tests/integration/helpers/client.py @@ -19,7 +19,7 @@ class Client: command = self.command[:] if stdin is None: - command += ['--multiquery'] + command += ['--multiquery', '--testmode'] stdin = sql else: command += ['--query', sql] diff --git a/dbms/tests/integration/helpers/cluster.py b/dbms/tests/integration/helpers/cluster.py index 4242fa8fa62..0ca348c2364 100644 --- a/dbms/tests/integration/helpers/cluster.py +++ b/dbms/tests/integration/helpers/cluster.py @@ -20,8 +20,16 @@ from .client import Client, CommandRequest HELPERS_DIR = p.dirname(__file__) +DEFAULT_ENV_NAME = 'env_file' +def _create_env_file(path, variables, fname=DEFAULT_ENV_NAME): + full_path = os.path.join(path, fname) + with open(full_path, 'w') as f: + for var, value in variables.items(): + f.write("=".join([var, value]) + "\n") + return full_path + class ClickHouseCluster: """ClickHouse cluster with several instances and (possibly) ZooKeeper. @@ -49,17 +57,18 @@ class ClickHouseCluster: self.base_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', self.project_name] self.base_zookeeper_cmd = None self.base_mysql_cmd = [] + self.base_kafka_cmd = [] self.pre_zookeeper_commands = [] self.instances = {} self.with_zookeeper = False self.with_mysql = False - + self.with_kafka = False + self.docker_client = None self.is_up = False - def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macroses={}, with_zookeeper=False, with_mysql=False, - clickhouse_path_dir=None, hostname=None): + def add_instance(self, name, config_dir=None, main_configs=[], user_configs=[], macros={}, with_zookeeper=False, with_mysql=False, with_kafka=False, clickhouse_path_dir=None, hostname=None, env_variables={}): """Add an instance to the cluster. name - the name of the instance directory and the value of the 'instance' macro in ClickHouse. @@ -76,8 +85,8 @@ class ClickHouseCluster: raise Exception("Can\'t add instance `%s': there is already an instance with the same name!" 
% name) instance = ClickHouseInstance( - self, self.base_dir, name, config_dir, main_configs, user_configs, macroses, with_zookeeper, - self.zookeeper_config_path, with_mysql, self.base_configs_dir, self.server_bin_path, clickhouse_path_dir, hostname=hostname) + self, self.base_dir, name, config_dir, main_configs, user_configs, macros, with_zookeeper, + self.zookeeper_config_path, with_mysql, with_kafka, self.base_configs_dir, self.server_bin_path, clickhouse_path_dir, hostname=hostname, env_variables=env_variables) self.instances[name] = instance self.base_cmd.extend(['--file', instance.docker_compose_path]) @@ -86,13 +95,19 @@ class ClickHouseCluster: self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')]) self.base_zookeeper_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_zookeeper.yml')] - + if with_mysql and not self.with_mysql: self.with_mysql = True self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')]) self.base_mysql_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_mysql.yml')] + if with_kafka and not self.with_kafka: + self.with_kafka = True + self.base_cmd.extend(['--file', p.join(HELPERS_DIR, 'docker_compose_kafka.yml')]) + self.base_kafka_cmd = ['docker-compose', '--project-directory', self.base_dir, '--project-name', + self.project_name, '--file', p.join(HELPERS_DIR, 'docker_compose_kafka.yml')] + return instance @@ -135,6 +150,10 @@ class ClickHouseCluster: if self.with_mysql and self.base_mysql_cmd: subprocess.check_call(self.base_mysql_cmd + ['up', '-d', '--no-recreate']) + if self.with_kafka and self.base_kafka_cmd: + subprocess.check_call(self.base_kafka_cmd + ['up', '-d', '--no-recreate']) + self.kafka_docker_id = self.get_instance_docker_id('kafka1') + # Uncomment for debugging #print ' '.join(self.base_cmd + ['up', '--no-recreate']) @@ -206,14 +225,17 @@ services: - server - --config-file=/etc/clickhouse-server/config.xml - --log-file=/var/log/clickhouse-server/clickhouse-server.log + - --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log depends_on: {depends_on} + env_file: + - {env_file} ''' class ClickHouseInstance: def __init__( - self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macroses, - with_zookeeper, zookeeper_config_path, with_mysql, base_configs_dir, server_bin_path, clickhouse_path_dir, hostname=None): + self, cluster, base_path, name, custom_config_dir, custom_main_configs, custom_user_configs, macros, + with_zookeeper, zookeeper_config_path, with_mysql, with_kafka, base_configs_dir, server_bin_path, clickhouse_path_dir, hostname=None, env_variables={}): self.name = name self.base_cmd = cluster.base_cmd[:] @@ -225,7 +247,7 @@ class ClickHouseInstance: self.custom_main_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_main_configs] self.custom_user_config_paths = [p.abspath(p.join(base_path, c)) for c in custom_user_configs] self.clickhouse_path_dir = p.abspath(p.join(base_path, clickhouse_path_dir)) if clickhouse_path_dir else None - self.macroses = macroses if macroses is not None else {} + self.macros = macros if macros is not None else {} self.with_zookeeper = with_zookeeper self.zookeeper_config_path = zookeeper_config_path @@ -233,9 +255,11 @@ class ClickHouseInstance: self.server_bin_path = server_bin_path self.with_mysql = 
with_mysql + self.with_kafka = with_kafka self.path = p.join(self.cluster.instances_dir, name) self.docker_compose_path = p.join(self.path, 'docker_compose.yml') + self.env_variables = env_variables self.docker_client = None self.ip_address = None @@ -282,9 +306,10 @@ class ClickHouseInstance: deadline = start_time + timeout while True: - status = self.get_docker_handle().status + handle = self.get_docker_handle() + status = handle.status; if status == 'exited': - raise Exception("Instance `{}' failed to start. Container status: {}".format(self.name, status)) + raise Exception("Instance `{}' failed to start. Container status: {}, logs: {}".format(self.name, status, handle.logs())) current_time = time.time() time_left = deadline - current_time @@ -339,11 +364,11 @@ class ClickHouseInstance: shutil.copy(p.join(HELPERS_DIR, 'common_instance_config.xml'), config_d_dir) - # Generate and write macroses file - macroses = self.macroses.copy() - macroses['instance'] = self.name + # Generate and write macros file + macros = self.macros.copy() + macros['instance'] = self.name with open(p.join(config_d_dir, 'macros.xml'), 'w') as macros_config: - macros_config.write(self.dict_to_xml({"macros" : macroses})) + macros_config.write(self.dict_to_xml({"macros" : macros})) # Put ZooKeeper config if self.with_zookeeper: @@ -374,11 +399,16 @@ class ClickHouseInstance: if self.with_mysql: depends_on.append("mysql1") + if self.with_kafka: + depends_on.append("kafka1") + if self.with_zookeeper: depends_on.append("zoo1") depends_on.append("zoo2") depends_on.append("zoo3") + env_file = _create_env_file(os.path.dirname(self.docker_compose_path), self.env_variables) + with open(self.docker_compose_path, 'w') as docker_compose: docker_compose.write(DOCKER_COMPOSE_TEMPLATE.format( name=self.name, @@ -389,7 +419,8 @@ class ClickHouseInstance: config_d_dir=config_d_dir, db_dir=db_dir, logs_dir=logs_dir, - depends_on=str(depends_on))) + depends_on=str(depends_on), + env_file=env_file)) def destroy_dir(self): diff --git a/dbms/tests/integration/helpers/docker_compose_kafka.yml b/dbms/tests/integration/helpers/docker_compose_kafka.yml new file mode 100644 index 00000000000..42dd154b1e8 --- /dev/null +++ b/dbms/tests/integration/helpers/docker_compose_kafka.yml @@ -0,0 +1,24 @@ +version: '2' + +services: + kafka_zookeeper: + image: zookeeper:3.4.9 + hostname: kafka_zookeeper + environment: + ZOO_MY_ID: 1 + ZOO_PORT: 2181 + ZOO_SERVERS: server.1=kafka_zookeeper:2888:3888 + + kafka1: + image: confluentinc/cp-kafka:4.1.0 + hostname: kafka1 + ports: + - "9092:9092" + environment: + KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka1:9092" + KAFKA_ZOOKEEPER_CONNECT: "kafka_zookeeper:2181" + KAFKA_BROKER_ID: 1 + KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + depends_on: + - kafka_zookeeper diff --git a/dbms/tests/integration/test_cluster_copier/test.py b/dbms/tests/integration/test_cluster_copier/test.py index a19fa8231cf..3f3c5f31741 100644 --- a/dbms/tests/integration/test_cluster_copier/test.py +++ b/dbms/tests/integration/test_cluster_copier/test.py @@ -58,7 +58,7 @@ def started_cluster(): name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name) cluster.add_instance(name, config_dir="configs", - macroses={"cluster": cluster_name, "shard": shard_name, "replica": replica_name}, + macros={"cluster": cluster_name, "shard": shard_name, "replica": replica_name}, with_zookeeper=True) cluster.start() diff --git 
a/dbms/tests/integration/test_config_substitutions/__init__.py b/dbms/tests/integration/test_config_substitutions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_env.xml b/dbms/tests/integration/test_config_substitutions/configs/config_env.xml new file mode 100644 index 00000000000..712855c47c0 --- /dev/null +++ b/dbms/tests/integration/test_config_substitutions/configs/config_env.xml @@ -0,0 +1,14 @@ + + + + + + + + + + default + default + + + diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_incl.xml b/dbms/tests/integration/test_config_substitutions/configs/config_incl.xml new file mode 100644 index 00000000000..383a23af1ff --- /dev/null +++ b/dbms/tests/integration/test_config_substitutions/configs/config_incl.xml @@ -0,0 +1,15 @@ + + /etc/clickhouse-server/config.d/max_query_size.xml + + + + + + + + + default + default + + + diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml b/dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml new file mode 100644 index 00000000000..ea72e332626 --- /dev/null +++ b/dbms/tests/integration/test_config_substitutions/configs/config_no_substs.xml @@ -0,0 +1,14 @@ + + + + 33333 + + + + + + default + default + + + diff --git a/dbms/tests/integration/test_config_substitutions/configs/config_zk.xml b/dbms/tests/integration/test_config_substitutions/configs/config_zk.xml new file mode 100644 index 00000000000..aa589e9f9d3 --- /dev/null +++ b/dbms/tests/integration/test_config_substitutions/configs/config_zk.xml @@ -0,0 +1,14 @@ + + + + + + + + + + default + default + + + diff --git a/dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml b/dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml new file mode 100644 index 00000000000..9ec61368be9 --- /dev/null +++ b/dbms/tests/integration/test_config_substitutions/configs/max_query_size.xml @@ -0,0 +1,3 @@ + + 99999 + diff --git a/dbms/tests/integration/test_config_substitutions/test.py b/dbms/tests/integration/test_config_substitutions/test.py new file mode 100644 index 00000000000..8e8a2d0971b --- /dev/null +++ b/dbms/tests/integration/test_config_substitutions/test.py @@ -0,0 +1,28 @@ +import time +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', main_configs=['configs/config_no_substs.xml']) # hardcoded value 33333 +node2 = cluster.add_instance('node2', main_configs=['configs/config_env.xml'], env_variables={"MAX_QUERY_SIZE": "55555"}) +node3 = cluster.add_instance('node3', main_configs=['configs/config_zk.xml'], with_zookeeper=True) +node4 = cluster.add_instance('node4', main_configs=['configs/config_incl.xml', 'configs/max_query_size.xml']) # include value 77777 + +@pytest.fixture(scope="module") +def start_cluster(): + try: + def create_zk_roots(zk): + zk.create(path="/setting/max_query_size", value="77777", makepath=True) + cluster.add_zookeeper_startup_command(create_zk_roots) + + cluster.start() + yield cluster + finally: + cluster.shutdown() + +def test_config(start_cluster): + assert node1.query("select value from system.settings where name = 'max_query_size'") == "33333\n" + assert node2.query("select value from system.settings where name = 'max_query_size'") == "55555\n" + assert node3.query("select value from system.settings where name = 'max_query_size'") == "77777\n" + assert 
node4.query("select value from system.settings where name = 'max_query_size'") == "99999\n" diff --git a/dbms/tests/integration/test_distributed_ddl/test.py b/dbms/tests/integration/test_distributed_ddl/test.py index 8621f723ac1..c2851438c00 100755 --- a/dbms/tests/integration/test_distributed_ddl/test.py +++ b/dbms/tests/integration/test_distributed_ddl/test.py @@ -72,7 +72,7 @@ def init_cluster(cluster): cluster.add_instance( 'ch{}'.format(i+1), config_dir="configs", - macroses={"layer": 0, "shard": i/2 + 1, "replica": i%2 + 1}, + macros={"layer": 0, "shard": i/2 + 1, "replica": i%2 + 1}, with_zookeeper=True) cluster.start() @@ -332,6 +332,26 @@ def test_allowed_databases(started_cluster): instance.query("DROP DATABASE db1 ON CLUSTER cluster", settings={"user" : "restricted_user"}) +def test_kill_query(started_cluster): + instance = cluster.instances['ch3'] + + ddl_check_query(instance, "KILL QUERY ON CLUSTER 'cluster' WHERE NOT elapsed FORMAT TSV") + +def test_detach_query(started_cluster): + instance = cluster.instances['ch3'] + + ddl_check_query(instance, "DROP TABLE IF EXISTS test_attach ON CLUSTER cluster FORMAT TSV") + ddl_check_query(instance, "CREATE TABLE test_attach ON CLUSTER cluster (i Int8)ENGINE = Log") + ddl_check_query(instance, "DETACH TABLE test_attach ON CLUSTER cluster FORMAT TSV") + ddl_check_query(instance, "ATTACH TABLE test_attach ON CLUSTER cluster") + + +def test_optimize_query(started_cluster): + instance = cluster.instances['ch3'] + + ddl_check_query(instance, "DROP TABLE IF EXISTS test_optimize ON CLUSTER cluster FORMAT TSV") + ddl_check_query(instance, "CREATE TABLE test_optimize ON CLUSTER cluster (p Date, i Int32) ENGINE = MergeTree(p, p, 8192)") + ddl_check_query(instance, "OPTIMIZE TABLE test_optimize ON CLUSTER cluster FORMAT TSV") if __name__ == '__main__': with contextmanager(started_cluster)() as cluster: diff --git a/dbms/tests/integration/test_extreme_deduplication/test.py b/dbms/tests/integration/test_extreme_deduplication/test.py index d1a19dc1c60..f8043632ba6 100644 --- a/dbms/tests/integration/test_extreme_deduplication/test.py +++ b/dbms/tests/integration/test_extreme_deduplication/test.py @@ -12,8 +12,8 @@ from helpers.client import QueryTimeoutExceedException cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 1}) -node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 2}) +node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 1}) +node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 2}) nodes = [node1, node2] @pytest.fixture(scope="module") diff --git a/dbms/tests/integration/test_https_replication/__init__.py b/dbms/tests/integration/test_https_replication/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_https_replication/configs/config.xml b/dbms/tests/integration/test_https_replication/configs/config.xml new file mode 100644 index 00000000000..35a43b2fc54 --- /dev/null +++ b/dbms/tests/integration/test_https_replication/configs/config.xml @@ -0,0 +1,364 @@ + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + + + 8123 + 9000 + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + 4096 + 3 + + + 100 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + /var/lib/clickhouse/user_files/ + + + users.xml + + + default + + + + + + default + + + + + + + + + + + + + + localhost + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + + + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + 7500 +
+ + + + + + + + + + + + + + + + *_dictionary.xml + + + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + +
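The interserver configs added next give a node either a TLS replication endpoint (ssl_conf.xml) or a plain one (no_ssl_conf.xml), both on port 9010, which is what the mixed-protocol test further down exercises. A minimal sketch of assembling such a pair with the integration helpers from this patch (instance names are illustrative):

```python
# Sketch only; mirrors the fixtures in test_https_replication/test.py below.
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)

# Same interserver port (9010) on both nodes, but different protocols:
# replicas with mismatched protocols are expected to fail to exchange parts.
tls_node = cluster.add_instance('tls_node', config_dir='configs',
                                main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml'],
                                with_zookeeper=True)
plain_node = cluster.add_instance('plain_node', config_dir='configs',
                                  main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'],
                                  with_zookeeper=True)
```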
diff --git a/dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml b/dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml new file mode 100644 index 00000000000..db43fd59e99 --- /dev/null +++ b/dbms/tests/integration/test_https_replication/configs/no_ssl_conf.xml @@ -0,0 +1,3 @@ + + 9010 + diff --git a/dbms/tests/integration/test_https_replication/configs/remote_servers.xml b/dbms/tests/integration/test_https_replication/configs/remote_servers.xml new file mode 100644 index 00000000000..ce36da06e9a --- /dev/null +++ b/dbms/tests/integration/test_https_replication/configs/remote_servers.xml @@ -0,0 +1,49 @@ + + + + + true + + test + node1 + 9000 + + + test + node2 + 9000 + + + + + + true + + test + node3 + 9000 + + + test + node4 + 9000 + + + + + + true + + test + node5 + 9000 + + + test + node6 + 9000 + + + + + diff --git a/dbms/tests/integration/test_https_replication/configs/server.crt b/dbms/tests/integration/test_https_replication/configs/server.crt new file mode 100644 index 00000000000..7ade2d96273 --- /dev/null +++ b/dbms/tests/integration/test_https_replication/configs/server.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/TCCAeWgAwIBAgIJANjx1QSR77HBMA0GCSqGSIb3DQEBCwUAMBQxEjAQBgNV +BAMMCWxvY2FsaG9zdDAgFw0xODA3MzAxODE2MDhaGA8yMjkyMDUxNDE4MTYwOFow +FDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAs9uSo6lJG8o8pw0fbVGVu0tPOljSWcVSXH9uiJBwlZLQnhN4SFSFohfI +4K8U1tBDTnxPLUo/V1K9yzoLiRDGMkwVj6+4+hE2udS2ePTQv5oaMeJ9wrs+5c9T +4pOtlq3pLAdm04ZMB1nbrEysceVudHRkQbGHzHp6VG29Fw7Ga6YpqyHQihRmEkTU +7UCYNA+Vk7aDPdMS/khweyTpXYZimaK9f0ECU3/VOeG3fH6Sp2X6FN4tUj/aFXEj +sRmU5G2TlYiSIUMF2JPdhSihfk1hJVALrHPTU38SOL+GyyBRWdNcrIwVwbpvsvPg +pryMSNxnpr0AK0dFhjwnupIv5hJIOQIDAQABo1AwTjAdBgNVHQ4EFgQUjPLb3uYC +kcamyZHK4/EV8jAP0wQwHwYDVR0jBBgwFoAUjPLb3uYCkcamyZHK4/EV8jAP0wQw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAM/ocuDvfPus/KpMVD51j +4IdlU8R0vmnYLQ+ygzOAo7+hUWP5j0yvq4ILWNmQX6HNvUggCgFv9bjwDFhb/5Vr +85ieWfTd9+LTjrOzTw4avdGwpX9G+6jJJSSq15tw5ElOIFb/qNA9O4dBiu8vn03C +L/zRSXrARhSqTW5w/tZkUcSTT+M5h28+Lgn9ysx4Ff5vi44LJ1NnrbJbEAIYsAAD ++UA+4MBFKx1r6hHINULev8+lCfkpwIaeS8RL+op4fr6kQPxnULw8wT8gkuc8I4+L +P9gg/xDHB44T3ADGZ5Ib6O0DJaNiToO6rnoaaxs0KkotbvDWvRoxEytSbXKoYjYp +0g== +-----END CERTIFICATE----- diff --git a/dbms/tests/integration/test_https_replication/configs/server.key b/dbms/tests/integration/test_https_replication/configs/server.key new file mode 100644 index 00000000000..f0fb61ac443 --- /dev/null +++ b/dbms/tests/integration/test_https_replication/configs/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz25KjqUkbyjyn +DR9tUZW7S086WNJZxVJcf26IkHCVktCeE3hIVIWiF8jgrxTW0ENOfE8tSj9XUr3L +OguJEMYyTBWPr7j6ETa51LZ49NC/mhox4n3Cuz7lz1Pik62WreksB2bThkwHWdus +TKxx5W50dGRBsYfMenpUbb0XDsZrpimrIdCKFGYSRNTtQJg0D5WTtoM90xL+SHB7 +JOldhmKZor1/QQJTf9U54bd8fpKnZfoU3i1SP9oVcSOxGZTkbZOViJIhQwXYk92F +KKF+TWElUAusc9NTfxI4v4bLIFFZ01ysjBXBum+y8+CmvIxI3GemvQArR0WGPCe6 +ki/mEkg5AgMBAAECggEATrbIBIxwDJOD2/BoUqWkDCY3dGevF8697vFuZKIiQ7PP +TX9j4vPq0DfsmDjHvAPFkTHiTQXzlroFik3LAp+uvhCCVzImmHq0IrwvZ9xtB43f +7Pkc5P6h1l3Ybo8HJ6zRIY3TuLtLxuPSuiOMTQSGRL0zq3SQ5DKuGwkz+kVjHXUN +MR2TECFwMHKQ5VLrC+7PMpsJYyOMlDAWhRfUalxC55xOXTpaN8TxNnwQ8K2ISVY5 +212Jz/a4hn4LdwxSz3Tiu95PN072K87HLWx3EdT6vW4Ge5P/A3y+smIuNAlanMnu +plHBRtpATLiTxZt/n6npyrfQVbYjSH7KWhB8hBHtaQKBgQDh9Cq1c/KtqDtE0Ccr +/r9tZNTUwBE6VP+3OJeKdEdtsfuxjOCkS1oAjgBJiSDOiWPh1DdoDeVZjPKq6pIu +Mq12OE3Doa8znfCXGbkSzEKOb2unKZMJxzrz99kXt40W5DtrqKPNb24CNqTiY8Aa 
+CjtcX+3weat82VRXvph6U8ltMwKBgQDLxjiQQzNoY7qvg7CwJCjf9qq8jmLK766g +1FHXopqS+dTxDLM8eJSRrpmxGWJvNeNc1uPhsKsKgotqAMdBUQTf7rSTbt4MyoH5 +bUcRLtr+0QTK9hDWMOOvleqNXha68vATkohWYfCueNsC60qD44o8RZAS6UNy3ENq +cM1cxqe84wKBgQDKkHutWnooJtajlTxY27O/nZKT/HA1bDgniMuKaz4R4Gr1PIez +on3YW3V0d0P7BP6PWRIm7bY79vkiMtLEKdiKUGWeyZdo3eHvhDb/3DCawtau8L2K +GZsHVp2//mS1Lfz7Qh8/L/NedqCQ+L4iWiPnZ3THjjwn3CoZ05ucpvrAMwKBgB54 +nay039MUVq44Owub3KDg+dcIU62U+cAC/9oG7qZbxYPmKkc4oL7IJSNecGHA5SbU +2268RFdl/gLz6tfRjbEOuOHzCjFPdvAdbysanpTMHLNc6FefJ+zxtgk9sJh0C4Jh +vxFrw9nTKKzfEl12gQ1SOaEaUIO0fEBGbe8ZpauRAoGAMAlGV+2/K4ebvAJKOVTa +dKAzQ+TD2SJmeR1HZmKDYddNqwtZlzg3v4ZhCk4eaUmGeC1Bdh8MDuB3QQvXz4Dr +vOIP4UVaOr+uM+7TgAgVnP4/K6IeJGzUDhX93pmpWhODfdu/oojEKVcpCojmEmS1 +KCBtmIrQLqzMpnBpLNuSY+Q= +-----END PRIVATE KEY----- diff --git a/dbms/tests/integration/test_https_replication/configs/ssl_conf.xml b/dbms/tests/integration/test_https_replication/configs/ssl_conf.xml new file mode 100644 index 00000000000..237bbc6af1c --- /dev/null +++ b/dbms/tests/integration/test_https_replication/configs/ssl_conf.xml @@ -0,0 +1,18 @@ + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + none + true + + + true + none + + AcceptCertificateHandler + + + + 9010 + diff --git a/dbms/tests/integration/test_https_replication/test.py b/dbms/tests/integration/test_https_replication/test.py new file mode 100644 index 00000000000..ba0a4de9164 --- /dev/null +++ b/dbms/tests/integration/test_https_replication/test.py @@ -0,0 +1,103 @@ +import time +import pytest + +from helpers.cluster import ClickHouseCluster + +""" +Both ssl_conf.xml and no_ssl_conf.xml have the same port +""" + +def _fill_nodes(nodes, shard): + for node in nodes: + node.query( + ''' + CREATE DATABASE test; + + CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); + '''.format(shard=shard, replica=node.name)) + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml'], with_zookeeper=True) +node2 = cluster.add_instance('node2', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def both_https_cluster(): + try: + cluster.start() + + _fill_nodes([node1, node2], 1) + + yield cluster + + finally: + cluster.shutdown() + +def test_both_https(both_https_cluster): + node1.query("insert into test_table values ('2017-06-16', 111, 0)") + time.sleep(1) + + assert node1.query("SELECT id FROM test_table order by id") == '111\n' + assert node2.query("SELECT id FROM test_table order by id") == '111\n' + + node2.query("insert into test_table values ('2017-06-17', 222, 1)") + time.sleep(1) + + assert node1.query("SELECT id FROM test_table order by id") == '111\n222\n' + assert node2.query("SELECT id FROM test_table order by id") == '111\n222\n' + +node3 = cluster.add_instance('node3', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'], with_zookeeper=True) +node4 = cluster.add_instance('node4', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def both_http_cluster(): + try: + cluster.start() + + _fill_nodes([node3, node4], 2) + + yield cluster + + finally: + cluster.shutdown() + +def test_both_http(both_http_cluster): + node3.query("insert 
into test_table values ('2017-06-16', 111, 0)") + time.sleep(1) + + assert node3.query("SELECT id FROM test_table order by id") == '111\n' + assert node4.query("SELECT id FROM test_table order by id") == '111\n' + + node4.query("insert into test_table values ('2017-06-17', 222, 1)") + time.sleep(1) + + assert node3.query("SELECT id FROM test_table order by id") == '111\n222\n' + assert node4.query("SELECT id FROM test_table order by id") == '111\n222\n' + +node5 = cluster.add_instance('node5', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/ssl_conf.xml'], with_zookeeper=True) +node6 = cluster.add_instance('node6', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/no_ssl_conf.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def mixed_protocol_cluster(): + try: + cluster.start() + + _fill_nodes([node5, node6], 3) + + yield cluster + + finally: + cluster.shutdown() + +def test_mixed_protocol(mixed_protocol_cluster): + node5.query("insert into test_table values ('2017-06-16', 111, 0)") + time.sleep(1) + + assert node5.query("SELECT id FROM test_table order by id") == '111\n' + assert node6.query("SELECT id FROM test_table order by id") == '' + + node6.query("insert into test_table values ('2017-06-17', 222, 1)") + time.sleep(1) + + assert node5.query("SELECT id FROM test_table order by id") == '111\n' + assert node6.query("SELECT id FROM test_table order by id") == '222\n' diff --git a/dbms/tests/integration/test_random_inserts/test.py b/dbms/tests/integration/test_random_inserts/test.py index 88abd762504..9e5029c5b64 100644 --- a/dbms/tests/integration/test_random_inserts/test.py +++ b/dbms/tests/integration/test_random_inserts/test.py @@ -14,8 +14,8 @@ from helpers.client import CommandRequest cluster = ClickHouseCluster(__file__) -node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 1}) -node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macroses={"layer": 0, "shard": 0, "replica": 2}) +node1 = cluster.add_instance('node1', config_dir='configs', with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 1}) +node2 = cluster.add_instance('node2', config_dir='configs', with_zookeeper=True, macros={"layer": 0, "shard": 0, "replica": 2}) nodes = [node1, node2] @pytest.fixture(scope="module") diff --git a/dbms/tests/integration/test_replace_partition/__init__.py b/dbms/tests/integration/test_replace_partition/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_replace_partition/configs/remote_servers.xml b/dbms/tests/integration/test_replace_partition/configs/remote_servers.xml new file mode 100644 index 00000000000..732a7228ea4 --- /dev/null +++ b/dbms/tests/integration/test_replace_partition/configs/remote_servers.xml @@ -0,0 +1,43 @@ + + + + + true + + node1 + 9000 + + + node2 + 9000 + + + + + + true + + node3 + 9000 + + + node4 + 9000 + + + + + + true + + node5 + 9000 + + + node6 + 9000 + + + + + diff --git a/dbms/tests/integration/test_replace_partition/test.py b/dbms/tests/integration/test_replace_partition/test.py new file mode 100644 index 00000000000..cb02d73211c --- /dev/null +++ b/dbms/tests/integration/test_replace_partition/test.py @@ -0,0 +1,163 @@ +import pytest +import time +import sys + +from helpers.cluster import ClickHouseCluster +from helpers.network import PartitionManager + +cluster = ClickHouseCluster(__file__) + +def _fill_nodes(nodes, shard): + 
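+    '''Create real_table, other_table and the replicated test_table on every node of the shard.'''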
for node in nodes: + node.query( + ''' + CREATE DATABASE test; + + CREATE TABLE real_table(date Date, id UInt32, dummy UInt32) + ENGINE = MergeTree(date, id, 8192); + + CREATE TABLE other_table(date Date, id UInt32, dummy UInt32) + ENGINE = MergeTree(date, id, 8192); + + CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); + '''.format(shard=shard, replica=node.name)) + + +node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def normal_work(): + try: + cluster.start() + + _fill_nodes([node1, node2], 1) + + yield cluster + + finally: + cluster.shutdown() + +def test_normal_work(normal_work): + node1.query("insert into test_table values ('2017-06-16', 111, 0)") + node1.query("insert into real_table values ('2017-06-16', 222, 0)") + time.sleep(1) + + assert node1.query("SELECT id FROM test_table order by id") == '111\n' + assert node1.query("SELECT id FROM real_table order by id") == '222\n' + assert node2.query("SELECT id FROM test_table order by id") == '111\n' + + node1.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table") + time.sleep(1) + + assert node1.query("SELECT id FROM test_table order by id") == '222\n' + assert node2.query("SELECT id FROM test_table order by id") == '222\n' + +node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def drop_failover(): + try: + cluster.start() + + _fill_nodes([node3, node4], 2) + + yield cluster + + finally: + cluster.shutdown() + +def test_drop_failover(drop_failover): + node3.query("insert into test_table values ('2017-06-16', 111, 0)") + node3.query("insert into real_table values ('2017-06-16', 222, 0)") + time.sleep(1) + + assert node3.query("SELECT id FROM test_table order by id") == '111\n' + assert node3.query("SELECT id FROM real_table order by id") == '222\n' + assert node4.query("SELECT id FROM test_table order by id") == '111\n' + + + with PartitionManager() as pm: + # Hinder replication between replicas + pm.partition_instances(node3, node4, port=9009) + # Disconnect Node4 from zookeper + pm.drop_instance_zk_connections(node4) + + node3.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table") + + # Node3 replace is ok + assert node3.query("SELECT id FROM test_table order by id") == '222\n' + # Network interrupted -- replace is not ok, but it's ok + assert node4.query("SELECT id FROM test_table order by id") == '111\n' + + #Drop partition on source node + node3.query("ALTER TABLE test_table DROP PARTITION 201706") + + time.sleep(1) + # connection restored + counter = 0 + while counter < 10: # will lasts forever + if 'Not found part' not in node4.query("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'"): + break + time.sleep(1) + counter += 1 + assert 'Not found part' not in node4.query("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'") + assert node4.query("SELECT id FROM test_table order by id") == '' + +node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node6 = cluster.add_instance('node6', 
+node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) +node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def replace_after_replace_failover(): + try: + cluster.start() + + _fill_nodes([node5, node6], 3) + + yield cluster + + finally: + cluster.shutdown() + +def test_replace_after_replace_failover(replace_after_replace_failover): + node5.query("insert into test_table values ('2017-06-16', 111, 0)") + node5.query("insert into real_table values ('2017-06-16', 222, 0)") + node5.query("insert into other_table values ('2017-06-16', 333, 0)") + time.sleep(1) + + assert node5.query("SELECT id FROM test_table order by id") == '111\n' + assert node5.query("SELECT id FROM real_table order by id") == '222\n' + assert node5.query("SELECT id FROM other_table order by id") == '333\n' + assert node6.query("SELECT id FROM test_table order by id") == '111\n' + + + with PartitionManager() as pm: + # Block replication between the replicas + pm.partition_instances(node5, node6, port=9009) + # Disconnect node6 from ZooKeeper + pm.drop_instance_zk_connections(node6) + + node5.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table") + + # The replace succeeds on node5 + assert node5.query("SELECT id FROM test_table order by id") == '222\n' + # The network is interrupted, so the replace has not reached node6 yet -- this is expected + assert node6.query("SELECT id FROM test_table order by id") == '111\n' + + # Replace the partition on the source node again + node5.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM other_table") + + assert node5.query("SELECT id FROM test_table order by id") == '333\n' + + time.sleep(1) + # Connections are restored; wait for node6 to recover + counter = 0 + while counter < 10: # bounded retry loop, will not wait forever + if 'Not found part' not in node6.query("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'"): + break + time.sleep(1) + counter += 1 + assert 'Not found part' not in node6.query("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'") + assert node6.query("SELECT id FROM test_table order by id") == '333\n' diff --git a/dbms/tests/integration/test_replication_credentials/__init__.py b/dbms/tests/integration/test_replication_credentials/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_replication_credentials/configs/credentials1.xml b/dbms/tests/integration/test_replication_credentials/configs/credentials1.xml new file mode 100644 index 00000000000..1a5fbd393d5 --- /dev/null +++ b/dbms/tests/integration/test_replication_credentials/configs/credentials1.xml @@ -0,0 +1,7 @@ +<yandex> +<interserver_http_port>9009</interserver_http_port> +<interserver_http_credentials> +<user>admin</user> +<password>222</password> +</interserver_http_credentials> +</yandex> diff --git a/dbms/tests/integration/test_replication_credentials/configs/credentials2.xml b/dbms/tests/integration/test_replication_credentials/configs/credentials2.xml new file mode 100644 index 00000000000..cf846e7a53d --- /dev/null +++ b/dbms/tests/integration/test_replication_credentials/configs/credentials2.xml @@ -0,0 +1,7 @@ +<yandex> +<interserver_http_port>9009</interserver_http_port> +<interserver_http_credentials> +<user>root</user> +<password>111</password> +</interserver_http_credentials> +</yandex> diff --git a/dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml b/dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml new file mode 100644 index 00000000000..9822058811e --- /dev/null +++ b/dbms/tests/integration/test_replication_credentials/configs/no_credentials.xml @@ -0,0 +1,3 @@ +<yandex> +<interserver_http_port>9009</interserver_http_port> +</yandex>
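The two credentials files above enable HTTP Basic authentication for the part-exchange protocol that replicas run over the interserver port (9009): replication only proceeds when both replicas present the same user/password pair. As a rough sketch of what that means at the HTTP level (a hypothetical probe run from inside the cluster network; the endpoint and status codes are assumptions, not something these tests assert):

    import requests

    # A replica configured with credentials1.xml should reject an
    # unauthenticated fetch (assumed to surface as a non-200 response)...
    resp = requests.get('http://node1:9009/')
    print(resp.status_code)

    # ...and accept the same request with the matching credential pair.
    resp = requests.get('http://node1:9009/', auth=('admin', '222'))
    print(resp.status_code)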
diff --git a/dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml b/dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml new file mode 100644 index 00000000000..d8b384a6392 --- /dev/null +++ b/dbms/tests/integration/test_replication_credentials/configs/remote_servers.xml @@ -0,0 +1,58 @@ +<yandex> +<remote_servers> +<test_cluster> +<shard> +<internal_replication>true</internal_replication> +<replica> +<default_database>test</default_database> +<host>node1</host> +<port>9000</port> +</replica> +<replica> +<default_database>test</default_database> +<host>node2</host> +<port>9000</port> +</replica> +</shard> +<shard> +<internal_replication>true</internal_replication> +<replica> +<default_database>test</default_database> +<host>node3</host> +<port>9000</port> +</replica> +<replica> +<default_database>test</default_database> +<host>node4</host> +<port>9000</port> +</replica> +</shard> +<shard> +<internal_replication>true</internal_replication> +<replica> +<default_database>test</default_database> +<host>node5</host> +<port>9000</port> +</replica> +<replica> +<default_database>test</default_database> +<host>node7</host> +<port>9000</port> +</replica> +</shard> +<shard> +<internal_replication>true</internal_replication> +<replica> +<default_database>test</default_database> +<host>node7</host> +<port>9000</port> +</replica> +<replica> +<default_database>test</default_database> +<host>node8</host> +<port>9000</port> +</replica> +</shard> +</test_cluster> +</remote_servers> +</yandex> diff --git a/dbms/tests/integration/test_replication_credentials/test.py b/dbms/tests/integration/test_replication_credentials/test.py new file mode 100644 index 00000000000..ad5f05e04d9 --- /dev/null +++ b/dbms/tests/integration/test_replication_credentials/test.py @@ -0,0 +1,132 @@ +import time +import pytest + +from helpers.cluster import ClickHouseCluster + + +def _fill_nodes(nodes, shard): + for node in nodes: + node.query( + ''' + CREATE DATABASE test; + + CREATE TABLE test_table(date Date, id UInt32, dummy UInt32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192); + '''.format(shard=shard, replica=node.name)) + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True) +node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True) + + +@pytest.fixture(scope="module") +def same_credentials_cluster(): + try: + cluster.start() + + _fill_nodes([node1, node2], 1) + + yield cluster + + finally: + cluster.shutdown() + +def test_same_credentials(same_credentials_cluster): + node1.query("insert into test_table values ('2017-06-16', 111, 0)") + time.sleep(1) + + assert node1.query("SELECT id FROM test_table order by id") == '111\n' + assert node2.query("SELECT id FROM test_table order by id") == '111\n' + + node2.query("insert into test_table values ('2017-06-17', 222, 1)") + time.sleep(1) + + assert node1.query("SELECT id FROM test_table order by id") == '111\n222\n' + assert node2.query("SELECT id FROM test_table order by id") == '111\n222\n' + + +node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], with_zookeeper=True) +node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def no_credentials_cluster(): + try: + cluster.start() + + _fill_nodes([node3, node4], 2) + + yield cluster + + finally: + cluster.shutdown() + + +def test_no_credentials(no_credentials_cluster): + node3.query("insert into test_table values ('2017-06-18', 111, 0)") + time.sleep(1) + + assert node3.query("SELECT id FROM test_table order by id") == '111\n' + assert node4.query("SELECT id FROM test_table order by id") == '111\n' + + node4.query("insert into test_table values ('2017-06-19', 222, 1)") + time.sleep(1) + + assert node3.query("SELECT id FROM test_table order by id") == '111\n222\n' + assert node4.query("SELECT id FROM test_table order by id") == '111\n222\n' + +node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True) +node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml', 'configs/credentials2.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def different_credentials_cluster(): + try: + cluster.start() + + _fill_nodes([node5, node6], 3) + + yield cluster + + finally: + cluster.shutdown() + +def 
test_different_credentials(different_credentials_cluster): + node5.query("insert into test_table values ('2017-06-20', 111, 0)") + time.sleep(1) + + assert node5.query("SELECT id FROM test_table order by id") == '111\n' + assert node6.query("SELECT id FROM test_table order by id") == '' + + node6.query("insert into test_table values ('2017-06-21', 222, 1)") + time.sleep(1) + + assert node5.query("SELECT id FROM test_table order by id") == '111\n' + assert node6.query("SELECT id FROM test_table order by id") == '222\n' + +node7 = cluster.add_instance('node7', main_configs=['configs/remote_servers.xml', 'configs/credentials1.xml'], with_zookeeper=True) +node8 = cluster.add_instance('node8', main_configs=['configs/remote_servers.xml', 'configs/no_credentials.xml'], with_zookeeper=True) + +@pytest.fixture(scope="module") +def credentials_and_no_credentials_cluster(): + try: + cluster.start() + + _fill_nodes([node7, node8], 4) + + yield cluster + + finally: + cluster.shutdown() + +def test_credentials_and_no_credentials(credentials_and_no_credentials_cluster): + node7.query("insert into test_table values ('2017-06-21', 111, 0)") + time.sleep(1) + + assert node7.query("SELECT id FROM test_table order by id") == '111\n' + assert node8.query("SELECT id FROM test_table order by id") == '' + + node8.query("insert into test_table values ('2017-06-22', 222, 1)") + time.sleep(1) + + assert node7.query("SELECT id FROM test_table order by id") == '111\n' + assert node8.query("SELECT id FROM test_table order by id") == '222\n' + diff --git a/dbms/tests/integration/test_storage_kafka/__init__.py b/dbms/tests/integration/test_storage_kafka/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/integration/test_storage_kafka/configs/kafka.xml b/dbms/tests/integration/test_storage_kafka/configs/kafka.xml new file mode 100644 index 00000000000..e5c07881e06 --- /dev/null +++ b/dbms/tests/integration/test_storage_kafka/configs/kafka.xml @@ -0,0 +1,5 @@ +<yandex> +<kafka> +<auto_offset_reset>earliest</auto_offset_reset> +</kafka> +</yandex> diff --git a/dbms/tests/integration/test_storage_kafka/test.py b/dbms/tests/integration/test_storage_kafka/test.py new file mode 100644 index 00000000000..41076ac78c4 --- /dev/null +++ b/dbms/tests/integration/test_storage_kafka/test.py @@ -0,0 +1,110 @@ +import os.path as p +import time +import pytest + +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +import json +import subprocess + + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance', + main_configs=['configs/kafka.xml'], + with_kafka=True) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + instance.query('CREATE DATABASE test') + + yield cluster + + finally: + cluster.shutdown() + + +def kafka_is_available(started_cluster): + p = subprocess.Popen(('docker', + 'exec', + '-i', + started_cluster.kafka_docker_id, + '/usr/bin/kafka-broker-api-versions', + '--bootstrap-server', + 'PLAINTEXT://localhost:9092'), + stdout=subprocess.PIPE) + p.communicate()[0] + return p.returncode == 0 + + +def kafka_produce(started_cluster, topic, messages): + p = subprocess.Popen(('docker', + 'exec', + '-i', + started_cluster.kafka_docker_id, + '/usr/bin/kafka-console-producer', + '--broker-list', + 'localhost:9092', + '--topic', + topic), + stdin=subprocess.PIPE) + p.communicate(messages) + p.stdin.close() + +
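+# Illustrative use of the helper above (hypothetical values, not executed here):
+# kafka_produce(cluster, 'json', '\n'.join(json.dumps({'key': i, 'value': i}) for i in range(2)))
+# pushes two JSONEachRow messages onto the 'json' topic via the broker container.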
+def kafka_check_json_numbers(instance): + # Use the module-level cluster object; the started_cluster fixture is not in scope here. + retries = 0 + while True: + if kafka_is_available(cluster): + break + else: + retries += 1 + if retries > 50: + raise Exception('Cannot connect to kafka.') + print("Waiting for kafka to be available...") + time.sleep(1) + messages = '' + for i in xrange(50): + messages += json.dumps({'key': i, 'value': i}) + '\n' + kafka_produce(cluster, 'json', messages) + time.sleep(3) + result = instance.query('SELECT * FROM test.kafka;') + file = p.join(p.dirname(__file__), 'test_kafka_json.reference') + with open(file) as reference: + assert TSV(result) == TSV(reference) + + +def test_kafka_json(started_cluster): + instance.query(''' + DROP TABLE IF EXISTS test.kafka; + CREATE TABLE test.kafka (key UInt64, value UInt64) + ENGINE = Kafka('kafka1:9092', 'json', 'json', + 'JSONEachRow', '\\n'); + ''') + kafka_check_json_numbers(instance) + instance.query('DROP TABLE test.kafka') + + +def test_kafka_json_settings(started_cluster): + instance.query(''' + DROP TABLE IF EXISTS test.kafka; + CREATE TABLE test.kafka (key UInt64, value UInt64) + ENGINE = Kafka + SETTINGS + kafka_broker_list = 'kafka1:9092', + kafka_topic_list = 'json', + kafka_group_name = 'json', + kafka_format = 'JSONEachRow', + kafka_row_delimiter = '\\n'; + ''') + kafka_check_json_numbers(instance) + instance.query('DROP TABLE test.kafka') + + +if __name__ == '__main__': + cluster.start() + raw_input("Cluster created, press any key to destroy...") + cluster.shutdown() diff --git a/dbms/tests/integration/test_storage_kafka/test_kafka_json.reference b/dbms/tests/integration/test_storage_kafka/test_kafka_json.reference new file mode 100644 index 00000000000..959bb2aad74 --- /dev/null +++ b/dbms/tests/integration/test_storage_kafka/test_kafka_json.reference @@ -0,0 +1,50 @@ +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 diff --git a/dbms/tests/queries/0_stateless/00002_system_numbers.sql b/dbms/tests/queries/0_stateless/00002_system_numbers.sql index bc9269495bc..d6ebf8e89ed 100644 --- a/dbms/tests/queries/0_stateless/00002_system_numbers.sql +++ b/dbms/tests/queries/0_stateless/00002_system_numbers.sql @@ -1 +1,12 @@ -SELECT * FROM system.numbers LIMIT 10 +SELECT * FROM system.numbers LIMIT 3; +SELECT sys_num.number FROM system.numbers AS sys_num WHERE number > 2 LIMIT 2; +SELECT number FROM system.numbers WHERE number >= 5 LIMIT 2; +SELECT * FROM system.numbers WHERE number == 7 LIMIT 1; +SELECT number AS n FROM system.numbers WHERE number IN(8, 9) LIMIT 2; +select number from system.numbers limit 0; +select x from system.numbers limit 1; -- { clientError 0 serverError 47 } +SELECT x, number FROM system.numbers LIMIT 1; -- { serverError 47 } +SELECT * FROM system.number LIMIT 1; -- { serverError 60 } +SELECT * FROM system LIMIT 1; -- { serverError 60 } +SELECT * FROM numbers LIMIT 1; -- { serverError 60 } +SELECT sys.number FROM system.numbers AS sys_num LIMIT 1; -- { serverError 47 } diff --git a/dbms/tests/queries/0_stateless/00007_array.reference b/dbms/tests/queries/0_stateless/00007_array.reference index 2a64c8ea7b2..62a24ffe3bf 100644 --- a/dbms/tests/queries/0_stateless/00007_array.reference +++ b/dbms/tests/queries/0_stateless/00007_array.reference @@ -1 +1,3 @@ ['Hello','Goodbye'] +['Hello'] ['Goodbye'] +[] diff --git a/dbms/tests/queries/0_stateless/00007_array.sql b/dbms/tests/queries/0_stateless/00007_array.sql index 7c1f27f1978..cf53e8f78a3 100644 --- 
a/dbms/tests/queries/0_stateless/00007_array.sql +++ b/dbms/tests/queries/0_stateless/00007_array.sql @@ -1 +1,3 @@ -SELECT ['Hello', 'Goodbye'] +SELECT ['Hello', 'Goodbye']; +SELECT ['Hello'], ['Goodbye']; +SELECT []; diff --git a/dbms/tests/queries/0_stateless/00043_summing_empty_part.reference b/dbms/tests/queries/0_stateless/00043_summing_empty_part.reference index d2a7d002464..6f86bbba109 100644 --- a/dbms/tests/queries/0_stateless/00043_summing_empty_part.reference +++ b/dbms/tests/queries/0_stateless/00043_summing_empty_part.reference @@ -1,2 +1 @@ -2015-01-01 1 0 2015-01-01 2 -9 diff --git a/dbms/tests/queries/0_stateless/00098_1_union_all.reference b/dbms/tests/queries/0_stateless/00098_1_union_all.reference index 8fc0e85e8fb..5927fc33d20 100644 --- a/dbms/tests/queries/0_stateless/00098_1_union_all.reference +++ b/dbms/tests/queries/0_stateless/00098_1_union_all.reference @@ -1,2 +1,4 @@ 1000 2000 +1000 Alice +2000 Alice diff --git a/dbms/tests/queries/0_stateless/00098_1_union_all.sql b/dbms/tests/queries/0_stateless/00098_1_union_all.sql index 7c05af6de98..6f96b710985 100644 --- a/dbms/tests/queries/0_stateless/00098_1_union_all.sql +++ b/dbms/tests/queries/0_stateless/00098_1_union_all.sql @@ -23,3 +23,9 @@ UNION ALL SELECT value AS val FROM data2014 WHERE name = 'Alice') ORDER BY val ASC; +SELECT val, name FROM +(SELECT value AS val, value AS val_1, name FROM data2013 WHERE name = 'Alice' +UNION ALL +SELECT value AS val, value, name FROM data2014 WHERE name = 'Alice') +ORDER BY val ASC; + diff --git a/dbms/tests/queries/0_stateless/00433_ifnull.reference b/dbms/tests/queries/0_stateless/00433_ifnull.reference index c1a59dec60e..4cf982b2116 100644 --- a/dbms/tests/queries/0_stateless/00433_ifnull.reference +++ b/dbms/tests/queries/0_stateless/00433_ifnull.reference @@ -28,3 +28,8 @@ x Nullable(String) 1 UInt8 1 UInt8 \N Nullable(Nothing) +0 Nullable(String) +-1 Nullable(String) +2 Nullable(String) +3 Nullable(String) +4 Nullable(String) diff --git a/dbms/tests/queries/0_stateless/00433_ifnull.sql b/dbms/tests/queries/0_stateless/00433_ifnull.sql index cad3eeba534..9071305a87a 100644 --- a/dbms/tests/queries/0_stateless/00433_ifnull.sql +++ b/dbms/tests/queries/0_stateless/00433_ifnull.sql @@ -17,3 +17,5 @@ SELECT ifNull(nullIf(toString(number), '1'), nullIf(toString(-number), '-3')) AS SELECT ifNull(NULL, 1) AS res, toTypeName(res); SELECT ifNull(1, NULL) AS res, toTypeName(res); SELECT ifNull(NULL, NULL) AS res, toTypeName(res); + +SELECT IFNULL(NULLIF(toString(number), '1'), NULLIF(toString(-number), '-3')) AS res, toTypeName(res) FROM system.numbers LIMIT 5; diff --git a/dbms/tests/queries/0_stateless/00435_coalesce.reference b/dbms/tests/queries/0_stateless/00435_coalesce.reference index 20b8288b09c..b236c89c865 100644 --- a/dbms/tests/queries/0_stateless/00435_coalesce.reference +++ b/dbms/tests/queries/0_stateless/00435_coalesce.reference @@ -1,4 +1,5 @@ \N \N \N 1 1 1 1 +\N \N 1 0 Nullable(UInt64) \N Nullable(UInt64) 2 Nullable(UInt64) diff --git a/dbms/tests/queries/0_stateless/00435_coalesce.sql b/dbms/tests/queries/0_stateless/00435_coalesce.sql index 7c1c519bcb2..3744f5ff290 100644 --- a/dbms/tests/queries/0_stateless/00435_coalesce.sql +++ b/dbms/tests/queries/0_stateless/00435_coalesce.sql @@ -1,6 +1,8 @@ SELECT coalesce(), coalesce(NULL), coalesce(NULL, NULL), coalesce(1), coalesce(1, NULL), coalesce(NULL, 1), coalesce(NULL, 1, NULL); +SELECT COALESCE(), COALESCE(NULL), COALESCE(1, NULL); + SELECT coalesce(number % 2 = 0 ? number : NULL, number % 3 = 0 ? 
number : NULL, number % 5 = 0 ? number : NULL) AS res, toTypeName(res) FROM system.numbers LIMIT 15; SELECT coalesce(number % 2 = 0 ? number : NULL, number % 3 = 0 ? number : NULL, number) AS res, toTypeName(res) FROM system.numbers LIMIT 15; SELECT coalesce(number % 2 = 0 ? number : NULL, number % 3 = 0 ? number : NULL, 100) AS res, toTypeName(res) FROM system.numbers LIMIT 15; diff --git a/dbms/tests/queries/0_stateless/00534_filimonov.data b/dbms/tests/queries/0_stateless/00534_filimonov.data index 2dd470403c0..b4c15b01ef4 100644 --- a/dbms/tests/queries/0_stateless/00534_filimonov.data +++ b/dbms/tests/queries/0_stateless/00534_filimonov.data @@ -428,5 +428,7 @@ SELECT COVAR_SAMPArray([CAST( 0 AS Int8)],arrayPopBack([CAST( 0 AS Int8)])); SELECT medianTimingWeightedArray([CAST( 0 AS Int8)],arrayPopBack([CAST( 0 AS Int8)])); SELECT quantilesDeterministicArray([CAST( 0 AS Int8)],arrayPopBack([CAST( 0 AS Int32)])); -SELECT maxIntersections([], []) -SELECT sumMap([], []) +SELECT maxIntersections([], []); +SELECT sumMap([], []); + +SELECT countArray(); diff --git a/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference b/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference index 408617f86e5..2b11238a118 100644 --- a/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference +++ b/dbms/tests/queries/0_stateless/00564_versioned_collapsing_merge_tree.reference @@ -1,7 +1,5 @@ table with 2 blocks final table with 2 blocks optimized -2018-01-31 str_8 0 -1 -2018-01-31 str_9 0 1 ------------------------- table with 2 blocks final 2018-01-31 str_0 0 -1 @@ -48,8 +46,6 @@ table with 2 blocks optimized ------------------------- table with 2 blocks final table with 2 blocks optimized -2018-01-31 str_9 0 1 -2018-01-31 str_9 0 -1 ------------------------- table with 2 blocks final 2018-01-31 str_0 0 -1 @@ -96,8 +92,6 @@ table with 2 blocks optimized ------------------------- table with 4 blocks final table with 4 blocks optimized -2018-01-31 str_9 0 1 -2018-01-31 str_9 0 -1 ------------------------- table with 5 blocks final 2018-01-31 str_0 1 -1 @@ -124,8 +118,6 @@ table with 5 blocks optimized ------------------------- table with 2 blocks final table with 2 blocks optimized -2018-01-31 str_999999 0 1 -2018-01-31 str_999999 0 -1 ------------------------- table with 2 blocks final 2018-01-31 0 0 1 @@ -261,15 +253,11 @@ table with 2 blocks final 2018-01-31 126 0 1 2018-01-31 127 0 1 table with 2 blocks optimized -2018-01-31 0 0 -1 -2018-01-31 127 0 1 ------------------------- Vertival merge ------------------------- table with 2 blocks final table with 2 blocks optimized -2018-01-31 str_8 0 -1 -2018-01-31 str_9 0 1 ------------------------- table with 2 blocks final 2018-01-31 str_0 0 -1 @@ -316,8 +304,6 @@ table with 2 blocks optimized ------------------------- table with 2 blocks final table with 2 blocks optimized -2018-01-31 str_9 0 1 -2018-01-31 str_9 0 -1 ------------------------- table with 2 blocks final 2018-01-31 str_0 0 -1 @@ -364,8 +350,6 @@ table with 2 blocks optimized ------------------------- table with 4 blocks final table with 4 blocks optimized -2018-01-31 str_9 0 1 -2018-01-31 str_9 0 -1 ------------------------- table with 5 blocks final 2018-01-31 str_0 1 -1 @@ -392,8 +376,6 @@ table with 5 blocks optimized ------------------------- table with 2 blocks final table with 2 blocks optimized -2018-01-31 str_999999 0 1 -2018-01-31 str_999999 0 -1 ------------------------- table with 2 blocks final 2018-01-31 0 0 1 @@ 
-529,5 +511,3 @@ table with 2 blocks final 2018-01-31 126 0 1 2018-01-31 127 0 1 table with 2 blocks optimized -2018-01-31 0 0 -1 -2018-01-31 127 0 1 diff --git a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh b/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh index 9a841c8d75d..8fad8b96100 100755 --- a/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh +++ b/dbms/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh @@ -3,4 +3,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . $CURDIR/../shell_config.sh -${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "USE test; DROP TABLE IF EXISTS tab; CREATE TABLE tab (val UInt64) engine = Memory; SHOW CREATE TABLE tab format abcd; DESC tab; DROP TABLE tab;" 2> /dev/null +${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "USE test; DROP TABLE IF EXISTS tab; CREATE TABLE tab (val UInt64) engine = Memory; SHOW CREATE TABLE tab format abcd; DESC tab; DROP TABLE tab;" ||: 2> /dev/null diff --git a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference b/dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference index ebddd8d85fd..5341d7b49aa 100644 --- a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference +++ b/dbms/tests/queries/0_stateless/00652_mergetree_mutations.reference @@ -8,3 +8,6 @@ Query involving aliases should fail on submission mutation_1.txt DELETE WHERE x = 1 [''] [1] 0 1 mutation_5.txt DELETE WHERE (x % 2) = 1 [''] [5] 0 1 mutation_6.txt DELETE WHERE s = \'d\' [''] [6] 0 1 +*** Test mutations cleaner *** +mutation_3.txt DELETE WHERE x = 2 1 +mutation_4.txt DELETE WHERE x = 3 1 diff --git a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh b/dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh index 90a3bb6b659..3df1fe014a4 100755 --- a/dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh +++ b/dbms/tests/queries/0_stateless/00652_mergetree_mutations.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . $CURDIR/../shell_config.sh +. $CURDIR/mergetree_mutations.lib + ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mutations" ${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.mutations(d Date, x UInt32, s String, a UInt32 ALIAS x + 1) ENGINE MergeTree(d, intDiv(x, 10), 8192)" @@ -31,18 +33,8 @@ ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations DELETE WHERE s = 'd'" ${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mutations(d, x, s) VALUES \ ('2000-01-01', 5, 'e'), ('2000-02-01', 5, 'e')" -# Wait until all mutations are done. -for i in {1..100} -do - sleep 0.1 - if [[ $(${CLICKHOUSE_CLIENT} --query="SELECT sum(is_done) FROM system.mutations WHERE table='mutations'") -eq 3 ]]; then - break - fi - - if [[ $i -eq 100 ]]; then - echo "Timed out while waiting for mutations to execute!" - fi -done +# Wait until the last mutation is done. +wait_for_mutation "mutations" "mutation_6.txt" # Check that the table contains only the data that should not be deleted. 
${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.mutations ORDER BY d, x" @@ -50,4 +42,31 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.mutations ORDER BY d, x" ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, block_numbers.partition_id, block_numbers.number, parts_to_do, is_done \ FROM system.mutations WHERE table = 'mutations' ORDER BY mutation_id" + +${CLICKHOUSE_CLIENT} --query="SELECT '*** Test mutations cleaner ***'" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mutations_cleaner" + +# Create a table with finished_mutations_to_keep = 2 +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.mutations_cleaner(x UInt32) ENGINE MergeTree ORDER BY x SETTINGS finished_mutations_to_keep = 2" + +# Insert some data +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mutations_cleaner(x) VALUES (1), (2), (3), (4)" + +# Add some mutations and wait for their execution +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner DELETE WHERE x = 1" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner DELETE WHERE x = 2" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner DELETE WHERE x = 3" + +wait_for_mutation "mutations_cleaner" "mutation_4.txt" + +# Sleep and then do an INSERT to wake up the background task that will clean up the old mutations +sleep 1 +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mutations_cleaner(x) VALUES (4)" +sleep 0.1 + +# Check that the first mutation is cleaned +${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, is_done FROM system.mutations WHERE table = 'mutations_cleaner' ORDER BY mutation_id" + ${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mutations" +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mutations_cleaner" diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference b/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference index a82c2571055..cb5a52cb905 100644 --- a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference +++ b/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.reference @@ -7,3 +7,7 @@ Query should fail 2 0000000000 DELETE WHERE x = 1 [] [] 0 1 0000000001 DELETE WHERE (x % 2) = 1 ['200001','200002'] [2,1] 0 1 0000000002 DELETE WHERE s = \'d\' ['200001','200002'] [3,2] 0 1 +*** Test mutations cleaner *** +0000000001 DELETE WHERE x = 2 1 +0000000002 DELETE WHERE x = 3 1 +0000000003 DELETE WHERE x = 4 0 diff --git a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh index c652596cd1a..327b1221482 100755 --- a/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh +++ b/dbms/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh @@ -3,6 +3,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . $CURDIR/../shell_config.sh +. $CURDIR/mergetree_mutations.lib + ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mutations_r1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mutations_r2" @@ -31,18 +33,8 @@ ${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_r1 DELETE WHERE s = 'd' ${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mutations_r1(d, x, s) VALUES \ ('2000-01-01', 5, 'e'), ('2000-02-01', 5, 'e')" -# Wait until all mutations are done. 
-for i in {1..100} -do - sleep 0.1 - if [[ $(${CLICKHOUSE_CLIENT} --query="SELECT sum(is_done) FROM system.mutations WHERE table='mutations_r2'") -eq 3 ]]; then - break - fi - - if [[ $i -eq 100 ]]; then - echo "Timed out while waiting for mutations to execute!" - fi -done +# Wait until the last mutation is done. +wait_for_mutation "mutations_r2" "0000000002" # Check that the table contains only the data that should not be deleted. ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.mutations_r2 ORDER BY d, x" @@ -50,5 +42,44 @@ ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.mutations_r2 ORDER BY d, x" ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, block_numbers.partition_id, block_numbers.number, parts_to_do, is_done \ FROM system.mutations WHERE table = 'mutations_r2' ORDER BY mutation_id" + +${CLICKHOUSE_CLIENT} --query="SELECT '*** Test mutations cleaner ***'" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mutations_cleaner_r1" +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.mutations_cleaner_r2" + +# Create 2 replicas with finished_mutations_to_keep = 2 +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.mutations_cleaner_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/mutations_cleaner', 'r1') ORDER BY x SETTINGS \ + finished_mutations_to_keep = 2, + cleanup_delay_period = 1, + cleanup_delay_period_random_add = 0" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.mutations_cleaner_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/mutations_cleaner', 'r2') ORDER BY x SETTINGS \ + finished_mutations_to_keep = 2, + cleanup_delay_period = 1, + cleanup_delay_period_random_add = 0" + +# Insert some data +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.mutations_cleaner_r1(x) VALUES (1), (2), (3), (4)" + +# Add some mutations and wait for their execution +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner_r1 DELETE WHERE x = 1" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner_r1 DELETE WHERE x = 2" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner_r1 DELETE WHERE x = 3" + +wait_for_mutation "mutations_cleaner_r2" "0000000002" + +# Add another mutation and prevent its execution on the second replica +${CLICKHOUSE_CLIENT} --query="SYSTEM STOP REPLICATION QUEUES test.mutations_cleaner_r2" +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.mutations_cleaner_r1 DELETE WHERE x = 4" + +# Sleep for more than cleanup_delay_period +sleep 1.5 + +# Check that the first mutation is cleaned +${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, is_done FROM system.mutations WHERE table = 'mutations_cleaner_r2' ORDER BY mutation_id" + ${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mutations_r1" ${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mutations_r2" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mutations_cleaner_r1" +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.mutations_cleaner_r2" diff --git a/dbms/tests/queries/0_stateless/00674_has_array_enum.reference b/dbms/tests/queries/0_stateless/00674_has_array_enum.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00674_has_array_enum.reference @@ -0,0 +1 @@ +1 diff --git a/dbms/tests/queries/0_stateless/00674_has_array_enum.sql b/dbms/tests/queries/0_stateless/00674_has_array_enum.sql new file mode 100644 index 00000000000..b8baf602216 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00674_has_array_enum.sql @@ -0,0 +1 @@ +SELECT has([x], 10) FROM (SELECT CAST(10 AS Enum8('hello' = 
1, 'world' = 2, 'abc' = 10)) AS x); diff --git a/dbms/tests/queries/0_stateless/00674_join_on_syntax.reference b/dbms/tests/queries/0_stateless/00674_join_on_syntax.reference new file mode 100644 index 00000000000..7a41a90c03f --- /dev/null +++ b/dbms/tests/queries/0_stateless/00674_join_on_syntax.reference @@ -0,0 +1,53 @@ +joined columns from right table +1 +1 2 +1 2 +1 3 +1 2 3 +join on expression +2 +2 2 +2 3 +1 +2 +1 2 2 3 +1 2 2 3 3 +1 2 2 3 +join on and chain +2 3 +2 3 +2 3 2 3 +1 +1 3 +1 2 2 3 +2 4 +join on aliases +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +1 2 2 3 +join on complex expression +2 3 +2 3 +2 3 +2 3 +2 3 +duplicate column names +{"a1":1,"test.tab1_copy.a1":2} +{"a1":1,"test.tab1_copy.a1":2} +{"a1":1,"copy.a1":2} +{"a1":1,"copy.a1":2} +{"a1":1,"copy.a1":2} diff --git a/dbms/tests/queries/0_stateless/00674_join_on_syntax.sql b/dbms/tests/queries/0_stateless/00674_join_on_syntax.sql new file mode 100644 index 00000000000..7fb60e64d04 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00674_join_on_syntax.sql @@ -0,0 +1,84 @@ +drop table if exists test.tab1; +drop table if exists test.tab2; +drop table if exists test.tab3; +drop table if exists test.tab1_copy; + +create table test.tab1 (a1 Int32, b1 Int32) engine = MergeTree order by a1; +create table test.tab2 (a2 Int32, b2 Int32) engine = MergeTree order by a2; +create table test.tab3 (a3 Int32, b3 Int32) engine = MergeTree order by a3; +create table test.tab1_copy (a1 Int32, b1 Int32) engine = MergeTree order by a1; + +insert into test.tab1 values (1, 2); +insert into test.tab2 values (2, 3); +insert into test.tab3 values (2, 3); +insert into test.tab1_copy values (2, 3); + + +select 'joined columns from right table'; +select a1 from test.tab1 any left join test.tab2 on b1 = a2; +select a1, b1 from test.tab1 any left join test.tab2 on b1 = a2; +select a1, a2 from test.tab1 any left join test.tab2 on b1 = a2; +select a1, b2 from test.tab1 any left join test.tab2 on b1 = a2; +select a1, a2, b2 from test.tab1 any left join test.tab2 on b1 = a2; + + +select 'join on expression'; +select b1 from test.tab1 any left join test.tab2 on toInt32(a1 + 1) = a2; +select b1, a2 from test.tab1 any left join test.tab2 on toInt32(a1 + 1) = a2; +select b1, b2 from test.tab1 any left join test.tab2 on toInt32(a1 + 1) = a2; +select a1 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1; +select a2 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1; +select a1, b1, a2, b2 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1; +select a1, b1, a2, b2, a2 + 1 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1; +select a1, b1, a2, b2 from test.tab1 any left join test.tab2 on a1 + 4 = b2 + 2; + + +select 'join on and chain'; +select a2, b2 from test.tab2 any left join test.tab3 on a2 = a3 and b2 = b3; +select a3, b3 from test.tab2 any left join test.tab3 on a2 = a3 and b2 = b3; +select a2, b2, a3, b3 from test.tab2 any left join test.tab3 on a2 = a3 and b2 = b3; +select a1 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2; +select a1, b2 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2; +select a1, b1, a2, b2 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2; +select a2, b2 + 1 from test.tab1 any left join test.tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2; + + +select 'join on aliases'; +select a1, a2, b1, b2 from test.tab1 first any left 
join test.tab2 second on first.b1 = second.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on second.a2 = first.b1; + +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on tab1.b1 = tab2.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on tab2.a2 = tab1.b1; + +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on test.tab1.b1 = test.tab2.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on test.tab2.a2 = test.tab1.b1; + +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on first.b1 = tab2.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on tab2.a2 = first.b1; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on first.b1 = test.tab2.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on test.tab2.a2 = first.b1; + +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on tab1.b1 = second.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on second.a2 = tab1.b1; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on test.tab1.b1 = second.a2; +select a1, a2, b1, b2 from test.tab1 first any left join test.tab2 second on second.a2 = test.tab1.b1; + +select a1, a2, first.b1, second.b2 from test.tab1 first any left join test.tab2 second on b1 = a2; +select a1, a2, tab1.b1, tab2.b2 from test.tab1 first any left join test.tab2 second on b1 = a2; +select a1, a2, test.tab1.b1, test.tab2.b2 from test.tab1 first any left join test.tab2 second on b1 = a2; + + +select 'join on complex expression'; +select a2, b2 from test.tab2 any left join test.tab3 on a2 + b2 = a3 + b3; +select a2, b2 from test.tab2 any left join test.tab3 on a3 + tab3.b3 = a2 + b2; +select a2, b2 from test.tab2 second any left join test.tab3 on a3 + b3 = a2 + second.b2; +select a2, b2 from test.tab2 second any left join test.tab3 third on third.a3 + tab3.b3 = tab2.a2 + second.b2; +select a2, b2 from test.tab2 second any left join test.tab3 third on third.a3 + test.tab3.b3 = test.tab2.a2 + second.b2; + +select 'duplicate column names'; +select a1, tab1_copy.a1 from test.tab1 any left join test.tab1_copy on tab1.b1 + 3 = b1 + 2 FORMAT JSONEachRow; +select a1, test.tab1_copy.a1 from test.tab1 any left join test.tab1_copy on tab1.b1 + 3 = b1 + 2 FORMAT JSONEachRow; +select a1, copy.a1 from test.tab1 any left join test.tab1_copy copy on tab1.b1 + 3 = b1 + 2 FORMAT JSONEachRow; +select a1, tab1_copy.a1 from test.tab1 any left join test.tab1_copy copy on tab1.b1 + 3 = b1 + 2 FORMAT JSONEachRow; +select a1, test.tab1_copy.a1 from test.tab1 any left join test.tab1_copy copy on tab1.b1 + 3 = b1 + 2 FORMAT JSONEachRow; + diff --git a/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference b/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference new file mode 100644 index 00000000000..de78180725a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.reference @@ -0,0 +1,2 @@ +4 +8 diff --git a/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql b/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql new file mode 100644 index 00000000000..f10ceeaa646 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00675_shard_remote_with_table_function.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test.remote_test; +CREATE TABLE 
test.remote_test(a1 UInt8) ENGINE=Memory; +INSERT INTO FUNCTION remote('127.0.0.1', test.remote_test) VALUES(1); +INSERT INTO FUNCTION remote('127.0.0.1', test.remote_test) VALUES(2); +INSERT INTO FUNCTION remote('127.0.0.1', test.remote_test) VALUES(3); +INSERT INTO FUNCTION remote('127.0.0.1', test.remote_test) VALUES(4); +SELECT COUNT(*) FROM remote('127.0.0.1', test.remote_test); +SELECT count(*) FROM remote('127.0.0.{1,2}', merge(test, '^remote_test')); +DROP TABLE test.remote_test; diff --git a/dbms/tests/queries/0_stateless/00676_group_by_in.reference b/dbms/tests/queries/0_stateless/00676_group_by_in.reference new file mode 100644 index 00000000000..a8a294d868d --- /dev/null +++ b/dbms/tests/queries/0_stateless/00676_group_by_in.reference @@ -0,0 +1,7 @@ +1 1 +0 1 +1 1 +0 1 +1 1 +0 8 +1 2 diff --git a/dbms/tests/queries/0_stateless/00676_group_by_in.sql b/dbms/tests/queries/0_stateless/00676_group_by_in.sql new file mode 100644 index 00000000000..9296458dfa3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00676_group_by_in.sql @@ -0,0 +1,12 @@ +SELECT dummy IN (0) AS x, count() GROUP BY x; + +SELECT 1 IN (0) AS x, count() GROUP BY x; +SELECT 0 IN (0) AS x, count() GROUP BY x; +SELECT materialize(1) IN (0) AS x, count() GROUP BY x; +SELECT materialize(0) IN (0) AS x, count() GROUP BY x; + +SELECT + number IN (1, 2) AS x, + count() +FROM numbers(10) +GROUP BY x; diff --git a/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference b/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference new file mode 100644 index 00000000000..ec20747d6cd --- /dev/null +++ b/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.reference @@ -0,0 +1 @@ +4 ['hello','world'] hello diff --git a/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql b/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql new file mode 100644 index 00000000000..f6fac23a155 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00677_shard_any_heavy_merge.sql @@ -0,0 +1 @@ +WITH arrayJoin(['hello', 'world']) AS s SELECT count(), arraySort(groupUniqArray(s)), anyHeavy(s) FROM remote('127.0.0.{2,3}', system.one); diff --git a/dbms/tests/queries/0_stateless/00678_murmurhash.reference b/dbms/tests/queries/0_stateless/00678_murmurhash.reference new file mode 100644 index 00000000000..548c5c1cae6 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00678_murmurhash.reference @@ -0,0 +1,26 @@ +623211862 +3533626746 +2388617433 +2708309598 +2414502773 +670491991 +0 +0 +0 +0 +0 +0 +0 +1 +1 +14834356025302342401 +12725806677685968135 +12725806677685968135 +4138058784 +3831157163 +3831157163 +11303473983767132390 +956517343494314387 +956517343494314387 +6145F501578671E2877DBA2BE487AF7E +16FE7483905CCE7A85670E43E4678877 diff --git a/dbms/tests/queries/0_stateless/00678_murmurhash.sql b/dbms/tests/queries/0_stateless/00678_murmurhash.sql new file mode 100644 index 00000000000..9d20b56aa93 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00678_murmurhash.sql @@ -0,0 +1,32 @@ +SELECT murmurHash2_32(123456); +SELECT murmurHash2_32(CAST(3 AS UInt8)); +SELECT murmurHash2_32(CAST(1.2684 AS Float32)); +SELECT murmurHash2_32(CAST(-154477 AS Int64)); +SELECT murmurHash2_32('foo'); +SELECT murmurHash2_32(CAST('bar' AS FixedString(3))); +SELECT murmurHash2_32(x) FROM (SELECT CAST(1 AS Enum8('a' = 1, 'b' = 2)) as x); + +SELECT murmurHash2_32(''); +SELECT murmurHash2_32('\x01'); +SELECT murmurHash2_32('\x02\0'); +SELECT murmurHash2_32('\x03\0\0'); +SELECT murmurHash2_32(1); +SELECT 
murmurHash2_32(toUInt16(2)); + +SELECT murmurHash2_32(2) = bitXor(toUInt32(0x5bd1e995 * bitXor(toUInt32(3 * 0x5bd1e995) AS a, bitShiftRight(a, 13))) AS b, bitShiftRight(b, 15)); +SELECT murmurHash2_32('\x02') = bitXor(toUInt32(0x5bd1e995 * bitXor(toUInt32(3 * 0x5bd1e995) AS a, bitShiftRight(a, 13))) AS b, bitShiftRight(b, 15)); + +SELECT murmurHash2_64('foo'); +SELECT murmurHash2_64('\x01'); +SELECT murmurHash2_64(1); + +SELECT murmurHash3_32('foo'); +SELECT murmurHash3_32('\x01'); +SELECT murmurHash3_32(1); + +SELECT murmurHash3_64('foo'); +SELECT murmurHash3_64('\x01'); +SELECT murmurHash3_64(1); + +SELECT hex(murmurHash3_128('foo')); +SELECT hex(murmurHash3_128('\x01')); diff --git a/dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference b/dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference new file mode 100644 index 00000000000..80e5f185810 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00678_shard_funnel_window.reference @@ -0,0 +1,3 @@ +1 10000 +1 10000 +1 10000 diff --git a/dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql b/dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql new file mode 100644 index 00000000000..e2405bcbc92 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00678_shard_funnel_window.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS test.remote_test; +CREATE TABLE test.remote_test(uid String, its UInt32, action_code String, day Date) ENGINE = MergeTree(day, (uid, its), 8192); +INSERT INTO test.remote_test SELECT toString(number) AS uid, number % 3 AS its, toString(number % 3) AS action_code, '2000-01-01' FROM system.numbers LIMIT 10000; +SELECT level, COUNT() FROM (SELECT uid, windowFunnel(3600)(toUInt32(its), action_code != '', action_code = '2') AS level FROM remote('127.0.0.{2,3}', test.remote_test) GROUP BY uid) GROUP BY level; +SELECT level, COUNT() FROM (SELECT uid, windowFunnel(3600)(toUInt32(its), action_code != '', action_code = '2') AS level FROM remote('127.0.0.{2,3}', test.remote_test) GROUP BY uid) GROUP BY level; +SELECT level, COUNT() FROM (SELECT uid, windowFunnel(3600)(toUInt32(its), action_code != '', action_code = '2') AS level FROM remote('127.0.0.{2,3}', test.remote_test) GROUP BY uid) GROUP BY level; +DROP TABLE IF EXISTS test.remote_test; diff --git a/dbms/tests/queries/0_stateless/00679_uuid_in_key.reference b/dbms/tests/queries/0_stateless/00679_uuid_in_key.reference new file mode 100644 index 00000000000..eb806b81202 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00679_uuid_in_key.reference @@ -0,0 +1,6 @@ +1 +0 +0 +0 +1 +1 diff --git a/dbms/tests/queries/0_stateless/00679_uuid_in_key.sql b/dbms/tests/queries/0_stateless/00679_uuid_in_key.sql new file mode 100644 index 00000000000..7a61132b4ff --- /dev/null +++ b/dbms/tests/queries/0_stateless/00679_uuid_in_key.sql @@ -0,0 +1,21 @@ +USE test; + +CREATE TABLE IF NOT EXISTS uuid +( + created_at DateTime, + id UUID +) +ENGINE = MergeTree +PARTITION BY toDate(created_at) +ORDER BY (created_at, id); + +INSERT INTO uuid (created_at, id) VALUES ('2018-01-01 01:02:03', '00000000-0000-03f8-9cb8-cb1b82fb3900'); + +SELECT count() FROM uuid WHERE id = '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id != '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id < '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id > '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id <= '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id >= 
'00000000-0000-03f8-9cb8-cb1b82fb3900'; + +DROP TABLE uuid; diff --git a/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference b/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference new file mode 100644 index 00000000000..e2ec95f0464 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.reference @@ -0,0 +1,8 @@ +1 2 +3 3 +1 2 +4 4 +1 2 +3 4 +1 2 +3 3 diff --git a/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql b/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql new file mode 100644 index 00000000000..c316df36803 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00680_duplicate_columns_inside_union_all.sql @@ -0,0 +1,4 @@ +SELECT x, y FROM (SELECT x, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT x, x FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; +SELECT x, y FROM (SELECT x, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT y, y FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; +SELECT x, y FROM (SELECT x, x, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT x, y, y FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; +SELECT x, y FROM (SELECT x, y, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT x, x, y FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; diff --git a/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference b/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference new file mode 100644 index 00000000000..3e67fe1ac4f --- /dev/null +++ b/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.reference @@ -0,0 +1,2 @@ +123 +123 diff --git a/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql b/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql new file mode 100644 index 00000000000..2c118f546c3 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00681_duplicate_columns_inside_union_all_stas_sviridov.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test.test; + +CREATE TABLE test.test(x Int32) ENGINE = Log; +INSERT INTO test.test VALUES (123); + +SELECT a1 +FROM +( + SELECT x AS a1, x AS a2 FROM test.test + UNION ALL + SELECT x, x FROM test.test +); + +DROP TABLE test.test; diff --git a/dbms/tests/queries/0_stateless/00682_empty_parts_merge.reference b/dbms/tests/queries/0_stateless/00682_empty_parts_merge.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh b/dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh new file mode 100755 index 00000000000..abc64ed06f7 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00682_empty_parts_merge.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +. 
$CURDIR/mergetree_mutations.lib + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.ordinary" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.ordinary(k UInt32) ENGINE MergeTree ORDER BY k" + +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.ordinary(k) VALUES (1)" +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.ordinary(k) VALUES (1)" + +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.ordinary DELETE WHERE k = 1" +wait_for_mutation "ordinary" "mutation_3.txt" + +${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE test.ordinary PARTITION tuple() FINAL" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.ordinary" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.ordinary" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.summing" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.summing(k UInt32, v UInt32) ENGINE SummingMergeTree ORDER BY k" + +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.summing(k, v) VALUES (1, 1)" +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.summing(k, v) VALUES (1, 2)" + +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.summing DELETE WHERE k = 1" +wait_for_mutation "summing" "mutation_3.txt" + +${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE test.summing PARTITION tuple() FINAL" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.summing" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.summing" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.aggregating" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.aggregating(k UInt32, v AggregateFunction(count)) ENGINE AggregatingMergeTree ORDER BY k" + +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.aggregating(k) VALUES (1)" +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.aggregating(k) VALUES (1)" + +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.aggregating DELETE WHERE k = 1" +wait_for_mutation "aggregating" "mutation_3.txt" + +${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE test.aggregating PARTITION tuple() FINAL" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.aggregating" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.aggregating" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.replacing" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.replacing(k UInt32, v String) ENGINE ReplacingMergeTree ORDER BY k" + +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.replacing(k, v) VALUES (1, 'a')" +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.replacing(k, v) VALUES (1, 'b')" + +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.replacing DELETE WHERE k = 1" +wait_for_mutation "replacing" "mutation_3.txt" + +${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE test.replacing PARTITION tuple() FINAL" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.replacing" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.replacing" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS test.collapsing" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.collapsing(k UInt32, v String, s Int8) ENGINE CollapsingMergeTree(s) ORDER BY k" + +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.collapsing(k, v, s) VALUES (1, 'a', 1)" +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.collapsing(k, v, s) VALUES (2, 'b', 1)" + +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.collapsing DELETE WHERE k IN (1, 2)" +wait_for_mutation "collapsing" "mutation_3.txt" + +${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE test.collapsing PARTITION tuple() FINAL" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.collapsing" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.collapsing" + + +${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS 
test.versioned_collapsing" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE test.versioned_collapsing(k UInt32, val String, ver UInt32, s Int8) ENGINE VersionedCollapsingMergeTree(s, ver) ORDER BY k" + +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.versioned_collapsing(k, val, ver, s) VALUES (1, 'a', 0, 1)" +${CLICKHOUSE_CLIENT} --query="INSERT INTO test.versioned_collapsing(k, val, ver, s) VALUES (2, 'b', 0, 1)" + +${CLICKHOUSE_CLIENT} --query="ALTER TABLE test.versioned_collapsing DELETE WHERE k IN (1, 2)" +wait_for_mutation "versioned_collapsing" "mutation_3.txt" + +${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE test.versioned_collapsing PARTITION tuple() FINAL" +${CLICKHOUSE_CLIENT} --query="SELECT * FROM test.versioned_collapsing" + +${CLICKHOUSE_CLIENT} --query="DROP TABLE test.versioned_collapsing" diff --git a/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference b/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference new file mode 100644 index 00000000000..d415e95f350 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.reference @@ -0,0 +1,2 @@ +{"url":"\/some\/cool\/url"} +{"url":"/some/cool/url"} diff --git a/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql b/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql new file mode 100644 index 00000000000..4ed0468b146 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00685_output_format_json_escape_forward_slashes.sql @@ -0,0 +1,4 @@ +SET output_format_json_escape_forward_slashes = 1; +select '/some/cool/url' as url format JSONEachRow; +SET output_format_json_escape_forward_slashes = 0; +select '/some/cool/url' as url format JSONEachRow; diff --git a/dbms/tests/queries/0_stateless/00686_client_exit_code.reference b/dbms/tests/queries/0_stateless/00686_client_exit_code.reference new file mode 100644 index 00000000000..3b20426c050 --- /dev/null +++ b/dbms/tests/queries/0_stateless/00686_client_exit_code.reference @@ -0,0 +1 @@ +108 diff --git a/dbms/tests/queries/0_stateless/00686_client_exit_code.sh b/dbms/tests/queries/0_stateless/00686_client_exit_code.sh new file mode 100755 index 00000000000..996dac1b6bc --- /dev/null +++ b/dbms/tests/queries/0_stateless/00686_client_exit_code.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +. $CURDIR/mergetree_mutations.lib + +echo "INSERT INTO test.test FORMAT CSV" | ${CLICKHOUSE_CLIENT} -n 2>/dev/null +echo $? 
diff --git a/dbms/tests/queries/0_stateless/00687_insert_into_mv.reference b/dbms/tests/queries/0_stateless/00687_insert_into_mv.reference new file mode 100644 index 00000000000..71f3536d8da --- /dev/null +++ b/dbms/tests/queries/0_stateless/00687_insert_into_mv.reference @@ -0,0 +1,6 @@ +stest +--- +stest +stest +--- +stest diff --git a/dbms/tests/queries/0_stateless/00687_insert_into_mv.sql b/dbms/tests/queries/0_stateless/00687_insert_into_mv.sql new file mode 100644 index 00000000000..62353839e9a --- /dev/null +++ b/dbms/tests/queries/0_stateless/00687_insert_into_mv.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test.test; +DROP TABLE IF EXISTS test.mv_bad; +DROP TABLE IF EXISTS test.mv_good; +DROP TABLE IF EXISTS test.mv_group; + +CREATE TABLE test.test (x String) ENGINE = Null; + +create MATERIALIZED VIEW test.mv_bad (x String) +ENGINE = MergeTree Partition by tuple() order by tuple() +AS SELECT DISTINCT x FROM test.test; + +create MATERIALIZED VIEW test.mv_good (x String) +ENGINE = MergeTree Partition by tuple() order by tuple() +AS SELECT x FROM test.test; + +create MATERIALIZED VIEW test.mv_group (x String) +ENGINE = MergeTree Partition by tuple() order by tuple() +AS SELECT x FROM test.test group by x; + +insert into test.test values ('stest'), ('stest'); + +select * from test.mv_bad; +SELECT '---'; +select * from test.mv_good; +SELECT '---'; +select * from test.mv_group; + +DROP TABLE test.mv_bad; +DROP TABLE test.mv_good; +DROP TABLE test.mv_group; +DROP TABLE test.test; diff --git a/dbms/tests/queries/0_stateless/mergetree_mutations.lib b/dbms/tests/queries/0_stateless/mergetree_mutations.lib new file mode 100644 index 00000000000..0df275092fe --- /dev/null +++ b/dbms/tests/queries/0_stateless/mergetree_mutations.lib @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +function wait_for_mutation() +{ + local table=$1 + local mutation_id=$2 + + for i in {1..100} + do + sleep 0.1 + if [[ $(${CLICKHOUSE_CLIENT} --query="SELECT is_done FROM system.mutations WHERE table='$table' AND mutation_id='$mutation_id'") -eq 1 ]]; then + break + fi + + if [[ $i -eq 100 ]]; then + echo "Timed out while waiting for mutation to execute!" 
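+ # Note: we do not abort here; the extra "Timed out" line makes the test's
+ # stdout diverge from its .reference file, which is what fails the test.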
+ fi + + done +} diff --git a/dbms/tests/queries/1_stateful/00001_count_hits.reference b/dbms/tests/queries/1_stateful/00001_count_hits.reference new file mode 100644 index 00000000000..6aa5d5ca5d6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00001_count_hits.reference @@ -0,0 +1 @@ +8873898 diff --git a/dbms/tests/queries/1_stateful/00001_count_hits.sql b/dbms/tests/queries/1_stateful/00001_count_hits.sql new file mode 100644 index 00000000000..fbad81d70ed --- /dev/null +++ b/dbms/tests/queries/1_stateful/00001_count_hits.sql @@ -0,0 +1 @@ +SELECT count() FROM test.hits diff --git a/dbms/tests/queries/1_stateful/00002_count_visits.reference b/dbms/tests/queries/1_stateful/00002_count_visits.reference new file mode 100644 index 00000000000..57f1a68bec5 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00002_count_visits.reference @@ -0,0 +1 @@ +1676581 diff --git a/dbms/tests/queries/1_stateful/00002_count_visits.sql b/dbms/tests/queries/1_stateful/00002_count_visits.sql new file mode 100644 index 00000000000..8cb9553f744 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00002_count_visits.sql @@ -0,0 +1 @@ +SELECT sum(Sign) FROM test.visits diff --git a/dbms/tests/queries/1_stateful/00003_count_mouse_clicks.reference b/dbms/tests/queries/1_stateful/00003_count_mouse_clicks.reference new file mode 100644 index 00000000000..a0f1f03d797 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00003_count_mouse_clicks.reference @@ -0,0 +1 @@ +5483925 diff --git a/dbms/tests/queries/1_stateful/00003_count_mouse_clicks.sql b/dbms/tests/queries/1_stateful/00003_count_mouse_clicks.sql new file mode 100644 index 00000000000..3ed06b0facb --- /dev/null +++ b/dbms/tests/queries/1_stateful/00003_count_mouse_clicks.sql @@ -0,0 +1 @@ +SELECT count() FROM test.mouse_clicks diff --git a/dbms/tests/queries/1_stateful/00004_top_counters.reference b/dbms/tests/queries/1_stateful/00004_top_counters.reference new file mode 100644 index 00000000000..5667f0332bd --- /dev/null +++ b/dbms/tests/queries/1_stateful/00004_top_counters.reference @@ -0,0 +1,10 @@ +1143050 523264 +731962 475698 +722545 337212 +722889 252197 +2237260 196036 +23057320 147211 +722818 90109 +48221 85379 +19762435 77807 +722884 77492 diff --git a/dbms/tests/queries/1_stateful/00004_top_counters.sql b/dbms/tests/queries/1_stateful/00004_top_counters.sql new file mode 100644 index 00000000000..045f940da42 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00004_top_counters.sql @@ -0,0 +1 @@ +SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID ORDER BY c DESC LIMIT 10 diff --git a/dbms/tests/queries/1_stateful/00005_filtering.reference b/dbms/tests/queries/1_stateful/00005_filtering.reference new file mode 100644 index 00000000000..d17d98f5402 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00005_filtering.reference @@ -0,0 +1 @@ +30641 diff --git a/dbms/tests/queries/1_stateful/00005_filtering.sql b/dbms/tests/queries/1_stateful/00005_filtering.sql new file mode 100644 index 00000000000..3384685f19f --- /dev/null +++ b/dbms/tests/queries/1_stateful/00005_filtering.sql @@ -0,0 +1,2 @@ +SELECT count() FROM test.hits WHERE AdvEngineID != 0 + diff --git a/dbms/tests/queries/1_stateful/00006_agregates.reference b/dbms/tests/queries/1_stateful/00006_agregates.reference new file mode 100644 index 00000000000..9e49d438c66 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00006_agregates.reference @@ -0,0 +1 @@ +281146 8873898 1289.4475214837944 diff --git a/dbms/tests/queries/1_stateful/00006_agregates.sql 
b/dbms/tests/queries/1_stateful/00006_agregates.sql new file mode 100644 index 00000000000..1dcd4a3ee75 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00006_agregates.sql @@ -0,0 +1 @@ +SELECT sum(AdvEngineID), count(), avg(ResolutionWidth) FROM test.hits diff --git a/dbms/tests/queries/1_stateful/00007_uniq.reference b/dbms/tests/queries/1_stateful/00007_uniq.reference new file mode 100644 index 00000000000..10e29f1e3fd --- /dev/null +++ b/dbms/tests/queries/1_stateful/00007_uniq.reference @@ -0,0 +1,10 @@ +213 28 +2 10 +225 7 +17 2 +56 2 +187 2 +4 1 +9 1 +15 1 +16 1 diff --git a/dbms/tests/queries/1_stateful/00007_uniq.sql b/dbms/tests/queries/1_stateful/00007_uniq.sql new file mode 100644 index 00000000000..b602ea63b6e --- /dev/null +++ b/dbms/tests/queries/1_stateful/00007_uniq.sql @@ -0,0 +1 @@ +SELECT RegionID, uniq(UserID) AS u FROM test.hits WHERE CounterID = 34 GROUP BY RegionID ORDER BY u DESC, RegionID LIMIT 10 diff --git a/dbms/tests/queries/1_stateful/00008_uniq.reference b/dbms/tests/queries/1_stateful/00008_uniq.reference new file mode 100644 index 00000000000..e101845cecc --- /dev/null +++ b/dbms/tests/queries/1_stateful/00008_uniq.reference @@ -0,0 +1 @@ +119497 2867 5595 diff --git a/dbms/tests/queries/1_stateful/00008_uniq.sql b/dbms/tests/queries/1_stateful/00008_uniq.sql new file mode 100644 index 00000000000..a80c3b85812 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00008_uniq.sql @@ -0,0 +1 @@ +SELECT uniq(UserID), uniqIf(UserID, CounterID = 1143050), uniqIf(FUniqID, RegionID = 213) FROM test.hits diff --git a/dbms/tests/queries/1_stateful/00009_uniq_distributed.reference b/dbms/tests/queries/1_stateful/00009_uniq_distributed.reference new file mode 100644 index 00000000000..e101845cecc --- /dev/null +++ b/dbms/tests/queries/1_stateful/00009_uniq_distributed.reference @@ -0,0 +1 @@ +119497 2867 5595 diff --git a/dbms/tests/queries/1_stateful/00009_uniq_distributed.sql b/dbms/tests/queries/1_stateful/00009_uniq_distributed.sql new file mode 100644 index 00000000000..b2b99d36d2f --- /dev/null +++ b/dbms/tests/queries/1_stateful/00009_uniq_distributed.sql @@ -0,0 +1 @@ +SELECT uniq(UserID), uniqIf(UserID, CounterID = 1143050), uniqIf(FUniqID, RegionID = 213) FROM remote('127.0.0.{1,2}', test, hits) diff --git a/dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference b/dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference new file mode 100644 index 00000000000..ae9aba98e8d --- /dev/null +++ b/dbms/tests/queries/1_stateful/00010_quantiles_segfault.reference @@ -0,0 +1,3 @@ +http://metrika.yandex.ru/list/ 744 [1995,3123,9638] + + 3762 [968,1789,9638] diff --git a/dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql b/dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql new file mode 100644 index 00000000000..952159c8996 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00010_quantiles_segfault.sql @@ -0,0 +1 @@ +SELECT URL AS `ym:ah:URL`, sum((NOT DontCountHits AND NOT Refresh)), quantilesTimingIf(0.1, 0.5, 0.9)((DOMCompleteTiming + LoadEventEndTiming), DOMCompleteTiming != -1 AND LoadEventEndTiming != -1) as t FROM remote('127.0.0.{1,2}', test, hits) WHERE (CounterID = 101500) AND (((DontCountHits = 0) OR (IsNotBounce = 1)) AND (URL != '')) GROUP BY `ym:ah:URL` WITH TOTALS HAVING (sum((NOT DontCountHits AND NOT Refresh)) > 0) AND (count() > 0) ORDER BY sum((NOT DontCountHits AND NOT Refresh)) DESC, URL LIMIT 0, 1 diff --git a/dbms/tests/queries/1_stateful/00011_sorting.reference b/dbms/tests/queries/1_stateful/00011_sorting.reference 
new file mode 100644 index 00000000000..138e8df8fe4 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00011_sorting.reference @@ -0,0 +1,10 @@ +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 diff --git a/dbms/tests/queries/1_stateful/00011_sorting.sql b/dbms/tests/queries/1_stateful/00011_sorting.sql new file mode 100644 index 00000000000..8c6ae457566 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00011_sorting.sql @@ -0,0 +1 @@ +SELECT EventTime FROM test.hits ORDER BY EventTime DESC LIMIT 10 diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.reference b/dbms/tests/queries/1_stateful/00012_sorting_distributed.reference new file mode 100644 index 00000000000..138e8df8fe4 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00012_sorting_distributed.reference @@ -0,0 +1,10 @@ +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 diff --git a/dbms/tests/queries/1_stateful/00012_sorting_distributed.sql b/dbms/tests/queries/1_stateful/00012_sorting_distributed.sql new file mode 100644 index 00000000000..51f249b3db8 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00012_sorting_distributed.sql @@ -0,0 +1 @@ +SELECT EventTime FROM remote('127.0.0.{1,2}', test, hits) ORDER BY EventTime DESC LIMIT 10 diff --git a/dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference b/dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference new file mode 100644 index 00000000000..cb1cd2eed44 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00013_sorting_of_nested.reference @@ -0,0 +1,10 @@ +['city','place'] +['city','place'] +['city','place'] +['city','place'] +['region','cat','region','cat','region','cat'] +['region','cat'] +['Логин'] +['region','cat'] +['region','cat'] +['region','cat'] diff --git a/dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql b/dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql new file mode 100644 index 00000000000..44f7684d746 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00013_sorting_of_nested.sql @@ -0,0 +1,2 @@ +SELECT ParsedParams.Key1 FROM test.visits FINAL WHERE VisitID != 0 AND notEmpty(ParsedParams.Key1) ORDER BY VisitID LIMIT 10 + diff --git a/dbms/tests/queries/1_stateful/00014_filtering_arrays.reference b/dbms/tests/queries/1_stateful/00014_filtering_arrays.reference new file mode 100644 index 00000000000..d431d8935c1 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00014_filtering_arrays.reference @@ -0,0 +1,10 @@ +[3691,2864,3579,99,3201,118,11,461,459,88] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,3281,37,3,118,2,11,8,600,38,9,2333] +[3579,2865,9,332,88,102,437,95,448,98,89] +[3579,2865,9,332,88,102,437,95,448,98,89] diff --git a/dbms/tests/queries/1_stateful/00014_filtering_arrays.sql b/dbms/tests/queries/1_stateful/00014_filtering_arrays.sql new file mode 100644 index 00000000000..ba83b124ee7 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00014_filtering_arrays.sql @@ -0,0 +1 @@ +SELECT GeneralInterests FROM test.hits WHERE AdvEngineID != 0 ORDER BY GeneralInterests DESC LIMIT 10 
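Note on the `*_distributed` test pairs above (00008/00009 and 00011/00012): each pair runs the same query once against the local table and once through the `remote()` table function, where the address pattern `127.0.0.{1,2}` brace-expands to two addresses that both resolve to the local server. This exercises the distributed execution path (merging partial results from two identical "shards") while keeping the expected output comparable to the single-node run. A minimal illustrative pairing, using the sorting query from these tests (not an additional test in this patch):

    SELECT EventTime FROM test.hits ORDER BY EventTime DESC LIMIT 10;
    SELECT EventTime FROM remote('127.0.0.{1,2}', test, hits) ORDER BY EventTime DESC LIMIT 10;

Because both "shards" hold the same data, a plain count() would come back doubled under this setup; the distributed tests therefore stick to queries such as sorting with LIMIT and `uniq`-style aggregates, whose expected output is invariant under shard duplication.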
diff --git a/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference b/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference new file mode 100644 index 00000000000..78c0c2298d3 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.reference @@ -0,0 +1,27 @@ +0 +2 +3 +7 +14 +15 +17 +18 +19 +26 +27 +33 +34 +35 +36 +37 +38 +39 +40 +41 +46 +47 +48 +49 +50 + +0 diff --git a/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql b/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql new file mode 100644 index 00000000000..f17272de57d --- /dev/null +++ b/dbms/tests/queries/1_stateful/00015_totals_and_no_aggregate_functions.sql @@ -0,0 +1 @@ +SELECT AdvEngineID FROM test.hits GROUP BY AdvEngineID WITH TOTALS ORDER BY AdvEngineID diff --git a/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference b/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.reference @@ -0,0 +1 @@ + diff --git a/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql b/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql new file mode 100644 index 00000000000..8beedd09e4e --- /dev/null +++ b/dbms/tests/queries/1_stateful/00016_any_if_distributed_cond_always_false.sql @@ -0,0 +1 @@ +SELECT anyIf(SearchPhrase, CounterID = -1) FROM remote('127.0.0.{1,2}:9000', test, hits) diff --git a/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference b/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference new file mode 100644 index 00000000000..838d0e2d7ae --- /dev/null +++ b/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.reference @@ -0,0 +1,47 @@ +goal://metrika.yandex.ru/add_counter_submit x +http://aist5.ru/ x +http://avg.nation.com/ x +http://avito.ru/ x +http://baraholka.leprosorium.ru/ x +http://bonum.spb.ru/ x +http://clck.yandex.ru/ x +http://clubs.ya.ru/ x +http://direct.yandex.ru/ x +http://facebook.com/ x +http://go.mail.ru/ x +http://google.com.kw/ x +http://google.com/ x +http://google.ru/ x +http://help.yandex.ru/ x +http://hghltd.yandex.net/ x +http://i.yandex.ru/ x +http://krdshop.ru/ x +http://lavkafreida.ru/ x +http://link.2gis.ru/ x +http://m.avito.ru/ x +http://m.vk.com/ x +http://market-preview.yandex.ru/ x +http://market.yandex.ru/ x +http://metrika.yandex.ru/ x +http://metrika.yandex.ua/ x +http://music.yandex.ru/ x +http://news.mail.ru/ x +http://news.yandex.ru/ x +http://partners.yandex.ru/ x +http://passport.yandex.ru/ x +http://postoffice.yandex.ru/ x +http://profkosmetika.net/ x +http://semalt.com/ x +http://shops.pp.ru/ x +http://site.yandex.ru/ x +http://stadium.mvc/ x +http://topas-eco.ru/ x +http://translate.googleusercontent.com/ x +http://virage24.ru/ x +http://vk.com/ x +http://webmaster.yandex.ru/ x +http://yaca.yandex.ru/ x +http://yandex.kz/ x +http://yandex.ru/ x +http://yandex.ua/ x +http://у-полины.рф/ x diff --git a/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql b/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql new file mode 100644 index 00000000000..c7cecc1e7df --- /dev/null +++ b/dbms/tests/queries/1_stateful/00017_aggregation_uninitialized_memory.sql @@ -0,0 +1,2 @@ +SELECT DISTINCT (URLHierarchy(URL)[1]) 
AS q, 'x' AS w FROM test.hits WHERE CounterID = 101500 ORDER BY URL + diff --git a/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference b/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference new file mode 100644 index 00000000000..b9e5dd760c9 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.reference @@ -0,0 +1 @@ +540 diff --git a/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql b/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql new file mode 100644 index 00000000000..c3c726e4ca7 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00020_distinct_order_by_distributed.sql @@ -0,0 +1,2 @@ +SET max_rows_to_sort = 10000; +SELECT count() FROM (SELECT DISTINCT PredLastVisit AS x FROM remote('127.0.0.{1,2}', test, visits) ORDER BY VisitID); diff --git a/dbms/tests/queries/1_stateful/00021_1_select_with_in.reference b/dbms/tests/queries/1_stateful/00021_1_select_with_in.reference new file mode 100644 index 00000000000..9e7ad722a07 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00021_1_select_with_in.reference @@ -0,0 +1 @@ +108133 diff --git a/dbms/tests/queries/1_stateful/00021_1_select_with_in.sql b/dbms/tests/queries/1_stateful/00021_1_select_with_in.sql new file mode 100644 index 00000000000..2293985b1de --- /dev/null +++ b/dbms/tests/queries/1_stateful/00021_1_select_with_in.sql @@ -0,0 +1 @@ +select sum(Sign) from test.visits where CounterID in (722545); diff --git a/dbms/tests/queries/1_stateful/00021_2_select_with_in.reference b/dbms/tests/queries/1_stateful/00021_2_select_with_in.reference new file mode 100644 index 00000000000..5ad12a6617e --- /dev/null +++ b/dbms/tests/queries/1_stateful/00021_2_select_with_in.reference @@ -0,0 +1 @@ +210797 diff --git a/dbms/tests/queries/1_stateful/00021_2_select_with_in.sql b/dbms/tests/queries/1_stateful/00021_2_select_with_in.sql new file mode 100644 index 00000000000..b4005b8f24a --- /dev/null +++ b/dbms/tests/queries/1_stateful/00021_2_select_with_in.sql @@ -0,0 +1 @@ +select sum(Sign) from test.visits where CounterID in (722545, 731962); diff --git a/dbms/tests/queries/1_stateful/00021_3_select_with_in.reference b/dbms/tests/queries/1_stateful/00021_3_select_with_in.reference new file mode 100644 index 00000000000..8dbdb2f57fd --- /dev/null +++ b/dbms/tests/queries/1_stateful/00021_3_select_with_in.reference @@ -0,0 +1,2 @@ +1 +2417 diff --git a/dbms/tests/queries/1_stateful/00021_3_select_with_in.sql b/dbms/tests/queries/1_stateful/00021_3_select_with_in.sql new file mode 100644 index 00000000000..9c473ca71a1 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00021_3_select_with_in.sql @@ -0,0 +1,3 @@ +select 1 IN (1, 2, 3); + +SELECT count() FROM remote('localhost', test, hits) WHERE CounterID IN (101500); diff --git a/dbms/tests/queries/1_stateful/00022_merge_prewhere.reference b/dbms/tests/queries/1_stateful/00022_merge_prewhere.reference new file mode 100644 index 00000000000..065392a8670 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00022_merge_prewhere.reference @@ -0,0 +1,2 @@ +22948 +22948 diff --git a/dbms/tests/queries/1_stateful/00022_merge_prewhere.sql b/dbms/tests/queries/1_stateful/00022_merge_prewhere.sql new file mode 100644 index 00000000000..74a3677b68e --- /dev/null +++ b/dbms/tests/queries/1_stateful/00022_merge_prewhere.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS test.merge_hits; +CREATE TABLE IF NOT EXISTS test.merge_hits AS test.hits ENGINE = Merge(test, '^hits$'); +SELECT count() FROM 
test.merge_hits WHERE AdvEngineID = 2; +SELECT count() FROM test.merge_hits PREWHERE AdvEngineID = 2; +DROP TABLE test.merge_hits; diff --git a/dbms/tests/queries/1_stateful/00023_totals_limit.reference b/dbms/tests/queries/1_stateful/00023_totals_limit.reference new file mode 100644 index 00000000000..0be276a358c --- /dev/null +++ b/dbms/tests/queries/1_stateful/00023_totals_limit.reference @@ -0,0 +1,24 @@ +{ + "meta": + [ + { + "name": "ym:s:goalDimension", + "type": "UInt32" + }, + { + "name": "uniqIf(UserID, and(notEquals(UserID, 0), equals(_uniq_Goals, 1)))", + "type": "UInt64" + } + ], + + "data": + [ + [3176497, "3"] + ], + + "totals": [0,"3"], + + "rows": 1, + + "rows_before_limit_at_least": 2 +} diff --git a/dbms/tests/queries/1_stateful/00023_totals_limit.sql b/dbms/tests/queries/1_stateful/00023_totals_limit.sql new file mode 100644 index 00000000000..849b39b2782 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00023_totals_limit.sql @@ -0,0 +1,2 @@ +SET output_format_write_statistics = 0; +SELECT goals_alias.ID AS `ym:s:goalDimension`, uniqIf(UserID, (UserID != 0) AND (`_uniq_Goals` = 1)) FROM test.visits ARRAY JOIN Goals AS goals_alias, arrayEnumerateUniq(Goals.ID) AS `_uniq_Goals` WHERE (CounterID = 101024) GROUP BY `ym:s:goalDimension` WITH TOTALS ORDER BY `ym:s:goalDimension` LIMIT 0, 1 FORMAT JSONCompact; diff --git a/dbms/tests/queries/1_stateful/00024_random_counters.reference b/dbms/tests/queries/1_stateful/00024_random_counters.reference new file mode 100644 index 00000000000..799513a7112 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00024_random_counters.reference @@ -0,0 +1,992 @@ +1 3 5 51 +5 5 6 37 +2 2 2 30 +1 1 7 207 +5 5 21 1049 +3 4 13 598 +2 3 5 1059 +4 5 11 460 +3 11 11 30 +2 3 34 1930 +1 1 1 0 +52 192 268 34081 +1 1 1 0 +4 5 12 2256 +2 2 3 176 +3 4 29 2078 +1 1 1 0 +3 5 12 566 +1 1 1 15 +1 2 2 48 +1 1 30 2054 +1 1 1 15 +255 406 596 42463 +2 2 2 32 +1 1 1 15 +1 1 2 7 +1 1 7 146 +1 1 1 27 +1 1 4 373 +1 1 1 20 +1 2 2 0 +1 1 1 0 +1 1 1 0 +1 1 4 31 +1 2 2 0 +1 1 2 204 +1 1 5 212 +5 5 6 233 +1 1 1 0 +1 1 1 16 +1 1 2 1 +1 2 6 40 +2 2 2 18 +1 1 1 17 +1 1 1 15 +2 2 2 16 +1 1 1 15 +1 1 1 15 +1 1 1 0 +15 34 83 2850 +1 1 1 15 +4 4 19 1806 +1 1 4 177 +41 62 119 12064 +1 1 15 1522 +1 1 1 0 +1 1 1 14 +2 3 10 665 +3 4 5 62 +5 6 35 1790 +1 1 5 33 +1 1 1 16 +1 3 5 36 +1 1 1 15 +2 2 4 407 +25 56 138 7687 +4 5 8 2880 +1 1 1 15 +1 1 1 0 +1 1 1 136 +2 2 2 0 +3 3 6 94 +1 1 1 15 +4 4 5 86 +1 1 1 0 +2 10 21 884 +11 11 11 0 +1 1 1 15 +10 12 17 242 +1 1 2 9 +1 1 1 15 +1 1 10 198 +1 1 5 514 +2 3 4 170 +1 1 1 0 +2 2 3 64 +6 6 19 256 +1 1 1 0 +3 3 3 0 +1 2 12 742 +1 1 1 15 +1 1 2 7 +1 1 1 15 +2 2 19 213 +1 2 8 610 +1 1 1 15 +1 1 5 159 +1 1 1 0 +1 1 1 0 +2 2 2 23 +6 7 16 3197 +1 1 7 421 +1 1 1 0 +1 1 6 101 +1 1 1 15 +2 6 6 0 +3 3 4 92 +1 1 1 0 +2 2 2 0 +1 1 1 0 +1 1 1 0 +1 1 1 16 +3 3 5 364 +21 26 29 1670 +29 57 137 7881 +1 1 2 87 +1 1 1 0 +4 4 4 0 +5 8 10 1376 +2 2 3 9 +1 1 1 15 +1 1 2 39 +2 3 3 0 +1 1 1 16 +2 2 2 0 +1 1 1 82 +3 3 3 17 +1 1 1 0 +1 1 4 39 +2 2 2 22 +1 1 16 134 +3 3 5 19 +6 6 6 78 +2 2 5 504 +1 1 1 0 +2 2 2 15 +1 1 1 15 +8 8 11 16 +2 2 2 15 +1 1 1 0 +1 1 1 0 +1 1 1 0 +2 2 2 20 +4 4 5 630 +1 1 1 0 +1 1 15 756 +5 7 12 695 +2 2 2 0 +1 1 5 109 +1 2 3 37 +1 1 1 15 +2 3 7 161 +2 2 3 147 +2 2 2 0 +1 1 1 96 +6 8 15 668 +1 2 3 186 +1 1 6 272 +13 14 130 4927 +2 2 2 0 +1 3 7 1139 +2 2 2 0 +2 2 23 2344 +2 2 2 15 +1 1 1 0 +2 2 2 0 +3 3 3 30 +13 14 54 4248 +2 2 2 12 +2 2 4 80 +1 1 1 0 +1 1 4 1582 +1 1 1 15 +62 149 663 34873 +2 3 5 37 +20 30 44 2679 +1 1 1 21 +1 1 3 55 +12 14 81 
8433 +1 1 1 0 +579 935 2082 129790 +3 3 3 0 +1 1 1 15 +2 2 2 16 +1 1 2 13 +10 11 43 975 +1 1 2 37 +11 15 50 4375 +1 1 1 0 +1 1 1 15 +1 1 1 15 +1 1 1 0 +3 3 3 0 +17 24 74 7612 +3 5 7 888 +3 3 3 0 +1 1 1 13 +1 1 1 0 +27 28 85 6098 +1 2 9 379 +1 1 6 411 +1 1 1 0 +1 1 1 14 +1 1 11 385 +1 1 3 93 +1 2 5 163 +1 1 5 201 +1 2 8 222 +1 1 1 0 +1 1 1 0 +1 1 7 288 +1 1 4 129 +2 2 5 448 +1 1 1 0 +32 32 89 11357 +1 1 4 150 +1 1 2 13 +10 10 32 2553 +1 1 1 0 +1 1 1 15 +2 2 2 0 +2 2 4 25 +5 7 11 225 +1 1 1 0 +2 2 7 349 +4 4 4 0 +10 12 22 1981 +1 4 4 56 +1 1 1 15 +6 6 6 78 +1 1 3 41 +1 1 3 41 +5 5 7 224 +1 1 2 390 +1 1 1 0 +8 9 9 302 +1 1 1 15 +1 1 1 0 +1 1 2 878 +1 1 2 208 +9 10 15 1911 +1 1 1 12 +1 1 5 116 +6 7 11 283 +5 5 10 215 +1 1 1 0 +3 3 13 278 +2 3 3 44 +4 4 22 480 +7 7 10 332 +1 1 1 0 +3 3 6 404 +1 1 1 0 +1 1 1 0 +1 1 1 15 +2 2 2 0 +1 1 1 0 +2 2 2 0 +1 1 1 0 +2 7 12 339 +32 118 610 104334 +1 7 10 478 +1 1 1 0 +1 1 1 14 +1 1 1 0 +1 1 1 39 +2 3 4 141 +1 4 13 2158 +2 2 5 127 +1 1 4 260 +20 35 61 3086 +1 1 1 0 +1 1 1 15 +2 2 3 15 +1 1 1 15 +2 2 2 30 +5 6 14 3683 +42 47 53 914 +13 15 19 1156 +2 2 4 23 +1 1 2 39 +1 1 1 15 +1 1 33 677 +4 7 27 2015 +1 2 3 1438 +3 3 4 264 +3 3 16 441 +1 1 1 0 +1 1 1 0 +1 1 2 124 +1 1 1 14 +2 2 4 131 +1 1 1 15 +3 3 3 45 +1 1 2 24 +1 1 1 0 +1 1 1 0 +3 3 8 357 +1 1 1 0 +5 10 29 1548 +9 10 16 476 +1 1 2 8 +10 10 11 217 +1 1 1 23 +2 2 2 15 +1 1 1 16 +3 7 110 8681 +2 2 2 15 +1 1 1 13 +1 1 4 186 +96 132 319 18933 +8 9 12 125 +4 9 23 1490 +1 1 7 232 +2 2 2 29 +5 6 14 1130 +1 1 1 0 +1 1 1 15 +4 4 14 472 +1 1 1 15 +1 1 1 0 +1 1 19 224 +7 8 9 747 +1 1 4 1448 +1 1 1 15 +1 1 1 14 +10 10 16 474 +1 1 1 0 +1 1 4 51 +2 2 4 279 +4 8 82 6058 +1 1 1 0 +1 1 1 15 +3 3 3 15 +1 1 1 0 +1 1 1 16 +2 2 6 112 +1 1 2 17 +1 1 3 66 +5 5 11 429 +1 1 1 0 +1 1 1 130 +2 2 2 0 +2 3 77 1397 +2 2 9 458 +1 1 1 15 +2 3 3 15 +1 3 49 3825 +1 2 2 0 +3 3 3 0 +2 2 8 99 +2 2 2 31 +1 1 2 19 +3 3 3 15 +2 2 6 1218 +1 1 1 0 +2 2 11 108 +3 3 13 468 +1 1 2 7 +18 19 33 1067 +1 1 1 0 +1 1 4 10 +1 1 1 0 +5 5 6 15 +3 3 18 365 +1 1 3 74 +1 1 1 1528 +1 1 1 16 +1 4 5 249 +1 1 1 0 +2 3 36 1742 +2 2 2 44 +1 1 2 15 +1 1 2 14 +1 1 1 0 +6 10 50 1045 +1 1 1 0 +2 2 8 477 +1 1 2 150 +1 1 1 0 +1 1 1 0 +1 2 2 31 +1 1 44 2438 +1 1 6 525 +22 26 27 1709 +69 90 109 10068 +3 4 4 0 +1 1 4 101 +1 1 3 20 +1 1 1 14 +2 2 2 30 +1 1 1 0 +3 3 10 431 +2 2 15 1358 +1 1 1 0 +1 1 3 30 +1 1 1 0 +1 1 1 0 +126 137 259 23690 +1 1 3 161 +165 394 478 16574 +12 33 158 26562 +9 11 13 1543 +1 1 1 11 +5 5 38 463 +55 109 818 86084 +1 1 1 0 +1 1 7 148 +22 25 66 2914 +1 1 2 19 +1 1 1 0 +1 1 1 10 +1 1 1 17 +1 2 2 29 +1 1 1 0 +3 3 4 178 +1 1 2 99 +38 77 204 27681 +1 1 4 66 +1 1 3 499 +1 1 1 15 +7 8 18 272 +1 1 1 0 +5 5 7 73 +41 45 102 3500 +1 1 2 15 +1 1 1 0 +1 1 2 52 +1 2 34 1480 +1 1 1 0 +2 4 65 3948 +1 1 6 232 +1 1 2 54 +3 3 3 5 +1 1 1 16 +3 3 8 132 +4 4 8 466 +1 1 2 52 +7 14 49 2326 +1 1 1 0 +1 1 1 15 +1 1 3 421 +1 1 10 800 +1 1 1 0 +577 690 749 28625 +21 25 81 4843 +1 1 1 15 +2 2 3 89 +1 2 2 0 +9 11 13 656 +1 1 1 0 +3 3 5 39 +1 1 1 0 +1 1 4 76 +2 2 8 257 +21 21 41 593 +1 1 2 142 +16 16 25 726 +3 3 3 28 +8 8 13 400 +1 1 1 15 +11 11 55 1184 +1 1 1 0 +1 1 1 15 +3 3 7 269 +1 1 3 246 +10 11 36 3737 +1 1 3 211 +1 1 1 0 +1 1 5 223 +1 1 2 82 +1 1 8 320 +1 1 1 15 +1 1 1 0 +1 1 3 62 +2 2 11 82 +6 6 10 155 +1 1 2 15 +1 1 1 0 +1 1 6 182 +2 3 5 114 +1 1 1 15 +1 1 1 0 +1 1 1 0 +1 1 1 0 +1 1 2 132 +1 1 2 166 +1 1 2 46 +2 4 15 4149 +1 1 1 15 +2 3 26 518 +2 12 179 10619 +1 1 10 94 +1 1 1 15 +1 1 1 0 +1 1 9 385 +11 12 42 4487 +1 1 3 68 +5 5 21 1970 +2 2 13 374 +1 1 1 15 +1 1 11 218 +1 1 
2 43 +1 1 1 0 +1 5 22 408 +4 23 61 22041 +1 2 2 0 +1 3 3 45 +1 1 1 15 +1 1 2 75 +1 1 1 14 +2 4 4 203 +8 8 9 71 +3 3 22 1545 +1 1 1 0 +12 13 30 1733 +1 1 1 15 +1 1 2 29 +42 48 69 3275 +7 8 8 146 +1 1 1 0 +1 1 1 16 +2 2 2 141 +1 2 20 320 +1 1 1 25 +1 1 1 15 +1 2 12 2196 +2 2 2 30 +1 1 6 245 +3 3 11 167 +22 34 283 11177 +2 2 2 32 +1 1 2 1460 +4 4 15 1200 +1 1 1 0 +1 1 1 15 +2 2 3 195 +3 21 38 9010 +1 1 1 15 +1 1 1 15 +2 2 2 0 +1 1 1 0 +3 3 7 365 +1 1 16 308 +15 34 212 7055 +1 1 1 0 +2 3 8 115 +1 1 1 15 +3 4 6 40 +1 1 1 15 +1 1 1 15 +1 1 2 23 +3 3 3 30 +1 1 1 14 +2 2 5 53 +3 3 8 1205 +5 5 5 60 +1 1 1 0 +7 8 18 2770 +2 2 9 192 +8 8 15 1420 +1 1 1 16 +18 19 20 28 +1 1 2 1575 +2 2 2 0 +1 1 1 7 +1 1 1 0 +1 1 1 0 +1 1 1 15 +26 28 35 3975 +1 1 2 22 +2 2 2 107 +2 2 4 28 +67 89 147 8602 +1 1 1 26 +1 1 17 923 +4 4 4 45 +1 1 1 0 +1 1 1 15 +6 8 19 646 +1 1 1 0 +1 1 3 319 +1 1 1 0 +12 48 137 10340 +1 1 2 416 +4 4 6 643 +1 1 14 388 +1 1 1 15 +1 1 2 13 +2 2 2 30 +2 2 3 68 +1 1 3 44 +1 1 42 1214 +1 1 1 26 +18 22 136 5471 +1 1 1 16 +1 1 17 3200 +1 1 1 0 +1 1 1 15 +1 1 1 14 +2 2 3 17 +1 1 4 119 +1 1 2 33 +1 1 3 143 +1 1 1 0 +37 42 106 55516 +1 1 1 0 +3 3 3 0 +1 1 1 15 +2 4 6 25 +4 8 34 1243 +12 16 52 2089 +25 26 29 983 +2 5 7 179 +3 4 9 1067 +5 5 8 89 +1 1 4 47 +4 4 4 0 +1 1 1 22 +1 1 7 462 +1 1 5 39 +1 1 1 25 +1 1 1 25 +2 2 9 242 +1 1 1 15 +2 7 8 109 +1 1 9 992 +2 3 3 38 +1 1 1 0 +21 37 48 7793 +2 2 2 33 +7 11 16 1730 +2 3 10 3518 +1 1 1 0 +2 2 2 15 +1 1 1 15 +1 1 1 14 +2 2 3 28 +2 2 2 15 +2 2 2 15 +2 6 27 2204 +1 1 7 275 +1 1 1 16 +14 15 72 2898 +2 2 3 162 +1 1 2 24 +1 1 2 58 +4 10 19 1103 +2 2 13 271 +1 1 3 120 +1 1 1 16 +1 1 1 0 +1 1 1 15 +1 1 1 0 +1 1 1 0 +1 1 1 15 +1 1 1 0 +2 2 2 0 +3 3 7 236 +1 1 3 885 +4 5 7 135 +2 2 14 535 +3 5 15 2132 +3 3 4 48 +1 1 6 1685 +2 2 2 0 +2 2 13 2105 +1 1 5 75 +4 4 4 0 +1 1 1 16 +1 1 1 0 +2 2 3 837 +2 2 6 342 +1 2 27 2847 +1 1 2 6 +2 2 4 69 +2 2 2 0 +22 24 39 3421 +1 1 1 15 +1 1 1 15 +1 1 1 19 +4 6 6 62 +11 16 56 3186 +2 2 9 193 +1 1 1 0 +1 1 1 0 +1 2 3 138 +2 2 14 744 +1 1 1 15 +1 5 7 733 +1 1 1 0 +1 1 1 0 +4 7 18 260 +1 1 1 0 +1 1 1 14 +4 4 13 876 +19 62 248 15699 +2 3 10 100 +1 1 6 350 +1 3 3 46 +1 1 1 0 +1 1 1 0 +1 5 6 968 +3 3 3 0 +1 1 2 12 +1 1 1 0 +1 1 3 56 +1 4 20 355 +1 1 2 60 +1 1 1 14 +1 1 1 18 +2 2 7 260 +1 1 1 16 +4 4 7 240 +1 1 1 0 +1 1 1 0 +1 1 2 23 +3 3 4 827 +1 1 2 292 +1 1 2 19 +2 2 6 118 +1 1 1 0 +40 42 51 976 +96 106 146 10671 +1 1 1 0 +1 1 5 26 +1 1 3 93 +1 1 1 0 +1 1 1 15 +2 2 2 0 +3 3 3 29 +2 2 7 23 +1 1 1 15 +3 3 3 0 +1 1 1 0 +1 1 4 91 +2 3 3 0 +2 2 2 0 +1 1 1 0 +1 1 1 15 +2 2 4 83 +1 1 2 6 +1 1 1 15 +3 8 8 0 +4 6 13 1691 +1 2 60 2808 +9 9 11 638 +2 3 4 877 +2 2 4 101 +2 2 3 108 +1 23 31 313 +1 1 1 15 +1 1 1 0 +1 1 5 434 +1 1 1 0 +1 1 1 0 +2 2 2 0 +6 6 8 511 +1 1 1 0 +1 1 1 15 +4 4 6 145 +1 1 1 0 +1 1 1 15 +2 2 3 39 +1 1 1 0 +65 68 117 8892 +3 3 6 2113 +3 4 7 1121 +1 1 4 595 +1 2 10 420 +2 2 6 374 +1 2 2 0 +3 3 3 38 +2 2 3 166 +1 2 6 75 +1 1 2 5 +1 1 8 406 +1 1 4 817 +2 2 2 0 +1 1 1 15 +2 2 2 0 +1 1 1 0 +1 2 3 65 +1 1 2 11 +2 2 6 54 +3 3 3 44 +1 1 1 15 +1 1 2 133 +2 2 2 0 +6 6 7 580 +1 1 1 15 +3 3 3 43 +1 1 1 15 +1 1 16 1746 +17 19 37 3130 +1 2 7 116 +1 1 1 15 +1 1 1 520 +47 54 66 2628 +2 2 2 15 +3 4 18 1455 +1 1 1 15 +1 1 1 0 +1 1 5 104 +3 5 7 12 +1 1 1 0 +1 1 1 0 +1 1 1 0 +1 1 1 17 +1 1 2 48 +5 5 5 0 +1 1 1 16 +2 2 3 85 +1 1 1 15 +1 1 3 259 +1 1 1 15 +1 1 1 15 +1 1 1 0 +1 1 1 0 +1 1 1 0 +1 1 1 16 +1 1 1 29 +2 13 16 1090 +1 1 1 15 +2 2 2 0 +1 1 10 670 +1 1 6 75 +16 38 171 22052 +1 1 2 18 +1 2 3 127 +1 5 10 709 +21 22 23 935 +5 9 15 1779 +12 14 28 
1721 +2 2 3 99 +1 1 1 0 +2 2 6 54 +1 1 1 15 +1 2 3 55 +1 1 1 0 +1 1 1 0 +3 4 13 1175 +1 1 7 363 +32 55 74 4937 +1 1 1 0 +2 3 3 0 +1 1 1 0 +1 2 2 30 +1 1 1 0 +1 1 1 0 +2 2 7 62 +1 1 2 40 +1 1 1 0 +2 3 35 2802 +1 1 1 0 +2 2 3 847 +1 2 2 0 +1 3 6 1448 +1 1 2 122 +1 1 6 2020 +2 2 2 0 +3 3 5 1168 +10 21 29 1050 +55 59 69 2209 +1 1 1 0 +1 1 3 51 +1 1 23 952 +27 32 326 11127 +3 3 6 646 +4 4 5 39 +5 5 10 668 +2 2 4 234 +2 2 2 0 +1 1 1 0 +1 1 2 141 +1 1 1 0 +1 1 1 0 +2 2 2 0 +2 4 6 517 +1 1 5 97 +1 1 1 0 +9 10 28 654 +2 2 2 0 +3 9 9 0 +2 2 4 223 +1 1 1 15 +1 1 2 14 +1 1 1 0 +1 2 2 0 +1 1 1 0 +1 2 2 15 +4 4 6 25 +1 1 1 16 +1 1 1 24 +1 1 1 18 +1 1 1 16 +1 1 1 0 +4 4 5 230 +2 3 3 30 +1 1 3 78 +1 1 1 15 +1 1 4 501 +1 1 3 99 +2 2 4 81 +2 2 9 750 +1 1 2 14 +1 1 1 14 +6 6 27 686 +1 1 1 0 +1 1 4 107 +1 1 1 20 +1 1 2 90 +1 1 61 827 +26 37 94 9174 +1 0 2 590 +1 1 1 0 +2 2 2 15 +1 2 4 11 +2 2 3 34 +2 2 2 51 +1 1 5 190 +1 1 1 0 +1 1 1 16 +1 1 1 0 +5 5 7 30 +1 1 1 15 +1 1 2 15 +10 10 15 1749 +1 1 2 62 +6 6 6 0 +1 1 1 15 +3 3 3 0 +1 1 1 0 +1 1 1 0 +5 5 5 0 +2 4 4 0 +2 2 3 128 +1 1 1 14 +3 5 46 1821 +1 1 3 21 +13 13 31 1959 +7 7 9 110 +42 44 56 5763 +1 1 1 17 +1 1 1 0 +1 1 18 481 +1 3 13 739 +1 1 1 0 +1 1 6 82 +2 2 11 449 +1 1 1 15 +1 1 1 0 +3 3 8 1286 +19 19 27 2044 +1 1 1 15 +1 1 3 276 +1 1 1 0 +1 1 2 182 +1 1 8 205 +1 1 2 142 +1 1 2 77 +47 50 92 2135 +1 1 1 15 +7 21 81 4814 +2 2 2 30 +1 1 2 77 +1 1 1 15 +1 1 1 0 +3 3 3 0 +5 20 103 7059 +1 1 1 0 +8 8 10 46 +1 1 1 0 +1 1 1 0 +1 1 1 0 +1 1 1 4 +28 58 130 7134 +1 1 1 15 +1 1 1 0 +1 1 1 0 +4 4 39 2004 +1 1 2 416 +4 4 4 60 +1 1 1 0 +1 1 1 13 +1 1 1 0 +4 4 4 0 +1 1 4 1578 +1 1 1 0 +6 18 37 1378 +1 4 4 60 +1 1 2 19 diff --git a/dbms/tests/queries/1_stateful/00024_random_counters.sql b/dbms/tests/queries/1_stateful/00024_random_counters.sql new file mode 100644 index 00000000000..9411c106afe --- /dev/null +++ b/dbms/tests/queries/1_stateful/00024_random_counters.sql @@ -0,0 +1,992 @@ +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16447300; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 311849; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7227367; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21519412; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1010958; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6272416; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21888502; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21990949; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20331580; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 231276; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 100674; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20830528; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21099526; +SELECT uniq(UserID), sum(Sign), 
sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 162896; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22424788; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20626921; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22734214; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21279544; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17870599; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18751324; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18264283; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19475803; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20477941; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22929382; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11299489; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16570483; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18391414; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21000451; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11369977; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9886051; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22617568; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 98054; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 970516; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21260029; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22622557; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14762728; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5482534; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10250275; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23344813; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23856562; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13223461; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1064845; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), 
sum(Sign * Duration) FROM test.visits WHERE CounterID = 23453899; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22384789; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23982805; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15075358; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 322187; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23953330; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19325545; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21385060; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19965796; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18780277; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19454293; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2755; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4255870; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13723486; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20179597; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23510062; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16567711; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24067630; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10373758; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15871777; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21612955; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23441950; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5361274; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22348093; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23802799; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12903265; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24184564; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14005723; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 509416; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM 
test.visits WHERE CounterID = 12879952; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6731659; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 148030; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21090781; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18679972; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18965725; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18882277; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20902840; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8542948; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14997619; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21554698; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21069571; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12146479; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1205378; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 771107; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22281868; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22178384; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17838109; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14603353; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10563076; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 724840; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21519640; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23695144; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16192327; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19444645; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10318822; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18910300; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1572259; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6401680; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID 
= 35054; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17775658; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23704582; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19154980; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21021124; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9070591; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1340709; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21384403; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19945789; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10049827; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 113943; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20827714; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20361523; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7068424; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5935387; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14371921; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9424240; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1683293; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22602985; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 244688; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12584464; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14582767; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11167036; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22451221; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12978052; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19790521; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15742261; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16908067; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2495578; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4380979; +SELECT 
uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1633467; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19866526; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21271384; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22486705; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21202006; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21070687; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11091565; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17985739; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22445365; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23772880; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20778388; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 230179; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16679578; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22065736; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3924838; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7212070; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22983808; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7384096; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2604; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 650932; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10900831; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24037393; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22596106; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22180217; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18547225; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1743991; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 956997; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22961593; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7082143; +SELECT uniq(UserID), sum(Sign), 
sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15972304; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11266375; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 35774; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22151192; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 330149; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18494080; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 983810; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21804988; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23189848; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11064361; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1902205; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19899028; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20726233; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22751884; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2132557; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22594396; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 296158; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9946492; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19451524; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7763785; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16341391; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3796804; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1839; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23462068; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18318652; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11669965; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10501075; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20027482; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11583685; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * 
Duration) FROM test.visits WHERE CounterID = 42586; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4131496; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6068563; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 70659; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1286379; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20402113; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16842985; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16461736; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14211247; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 183742; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16037572; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23579041; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6611935; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23591458; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24392791; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22161892; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13974904; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 160848; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7918660; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20085736; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1602419; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16923829; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11986633; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18207637; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12885928; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6375496; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10795105; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18557131; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17450179; +SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE 
CounterID = 17800144;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15354772;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23144515;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15549385;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22716352;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21860068;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1940077;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23150329;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8049691;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11386126;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21803221;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21633664;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23955877;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20122396;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23727913;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13983208;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22711375;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13466764;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19050478;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23175136;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22450234;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1384335;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18792373;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7746310;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17369587;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12874756;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18735907;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12260821;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18635233;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17963329;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22934281;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17201476;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22418647;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13094539;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 752731;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18160555;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12757027;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18597361;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7433614;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5690881;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21118276;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20661916;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21953854;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20837995;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23306071;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 727709;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24061714;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 43861;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23931547;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12957628;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23807914;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 781569;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13607117;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20642545;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19101886;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7752361;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20350945;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22312993;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22495489;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1293793;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6601483;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11552092;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 864913;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3979828;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20662477;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7407445;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17868073;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21155629;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12989809;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 76719;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19830214;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17709220;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14661577;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23264026;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22911616;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6463993;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16677091;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21631570;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21111193;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5899678;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18159811;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1450043;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23534269;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1126596;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13574801;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12634069;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23823433;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14910490;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19860319;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 68737;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21617599;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21634810;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11430829;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 430818;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14317369;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22336622;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22912675;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18436540;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11961628;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 950406;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7178023;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23262754;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16829410;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 88731;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1606607;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14457415;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17923123;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13992169;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23736148;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20923675;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 142203;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20361103;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8205184;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22002982;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17587633;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20991970;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13051300;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19243744;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21625381;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1402053;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13104817;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5270176;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10227019;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 502139;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24065761;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23648725;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10145665;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14614057;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11614093;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21999106;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2040193;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11369239;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 153901;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6798544;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16963450;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21370864;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19999834;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11247409;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22176314;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18207316;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9458011;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21261418;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14765356;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23618191;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23444548;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13084939;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 115226;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22111729;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20570326;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23281543;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21768016;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17586049;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9347143;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23251951;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9837085;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22189843;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23836445;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1750735;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 48974;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 82604;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23429713;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 965907;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7941676;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1057301;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23599927;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18250675;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24033715;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20667007;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9721663;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13107145;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23406805;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15822511;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22922992;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10548637;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2437834;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16796794;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12040550;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4238398;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10793239;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21486094;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 180393;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17748835;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11223940;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1048199;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 404917;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1227383;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 150422;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22563523;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12328705;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13166551;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19248274;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20380597;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 207901;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20994013;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20533000;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21254380;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6531613;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 87499;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13607531;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12998860;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22963318;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24130984;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22186279;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6217882;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23694580;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11542936;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18403198;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20501191;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1839229;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13587707;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 337148;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 237922;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6695203;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23766385;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21893869;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22044565;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18356998;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22856353;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1940974;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21064438;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22491703;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10853794;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19208605;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7763506;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15633052;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12926980;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15619141;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21637108;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21421138;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16675555;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23824468;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23111200;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 977212;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1529177;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1757487;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17069044;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20982994;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15248224;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2958436;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20438797;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 241692;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 726991;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 187304;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22278727;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20699560;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1161170;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17143366;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3936901;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23147602;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 942211;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6360139;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22845019;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22262671;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21277948;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 658955;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21823405;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22222549;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 138960;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11868505;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7570210;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14717959;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4520284;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21945835;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20676250;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14875207;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20030020;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11187883;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 160366;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21047998;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16827436;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5243680;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1246330;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21091438;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17759953;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23344696;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4914007;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24238993;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19353595;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18776962;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11816812;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12775981;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 443080;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12073216;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21723667;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10503499;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23835697;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22324529;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9956137;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23427451;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23787157;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21645604;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12783328;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15502978;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24361354;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21571144;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18269953;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22764370;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23332939;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17378284;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24127903;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1659835;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 387614;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1813771;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24186424;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11454142;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 153960;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22881175;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16404439;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22564360;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21138574;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23913517;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10573612;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18990535;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16654072;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20622772;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18536539;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20430181;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15758722;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11805436;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15469597;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23249362;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16311706;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19907104;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 58364;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21300892;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16706377;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22226776;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11186215;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5629135;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8035720;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12074653;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12855724;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18727279;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23688403;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22348117;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22437676;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21731815;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22160845;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23529592;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21760885;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12896245;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 114634;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23317843;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23082139;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24280942;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23247967;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22473928;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23060461;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23778226;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15046105;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3236848;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7631659;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10507519;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17615887;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13046434;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20554606;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24049246;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18408787;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1497109;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17905573;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21754438;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5538790;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10829980;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13992391;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 66504;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22512982;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15652897;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21276235;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18770644;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15662662;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10161970;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18641509;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22280074;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21379552;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21264430;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23068549;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10772845;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2038669;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20626948;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 104064;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13087750;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4433956;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21344539;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16071850;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24005692;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20648488;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10638034;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 486867;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22998298;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13243405;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21667987;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13190524;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8192197;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21771235;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17734588;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23111911;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17761390;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12888601;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15711313;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21179218;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11486290;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18691054;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20880718;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21637201;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 169649;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21040945;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5734555;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21137548;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24005935;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20993179;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12570466;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10187791;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20212057;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19626166;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 81078;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17776366;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17087953;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20242894;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18246898;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23578141;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20801083;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20841418;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23750032;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12102745;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 201007;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21402886;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3063307;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23086156;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22628254;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21579151;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22474825;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22146962;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18094117;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9650656;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23760628;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24305014;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11900293;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18012577;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23059279;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17079028;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12899989;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21689104;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22357813;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17844745;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3243655;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16392643;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23500396;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11521879;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 794328;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6331864;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20151244;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 70595;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21229069;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10210294;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 461137;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20300068;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23946133;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21738727;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10631026;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 795837;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21329731;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6521590;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22407961;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22230415;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11333239;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21675262;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24131203;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22526434;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19025272;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21344743;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10787479;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14535805;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21879637;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5692288;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 105589;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20571772;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11376784;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14908795;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15896320;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1599005;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12751282;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16631110;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20863162;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20590756;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17304466;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11337502;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12787234;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20975842;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1067283;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15276505;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19962931;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 162300;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6444100;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19411474;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9790033;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22141253;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18325648;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23809642;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1354799;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23919961;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 644217;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20771050;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20695744;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6470599;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19296766;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1010887;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3025;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20735257;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22412722;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24300940;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22731619;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14886760;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18884518;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 159137;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7506880;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24278878;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15465016;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3170437;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11018566;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 65976;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1712285;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11839687;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24275806;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23469325;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21247126;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13037131;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 810429;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21335863;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24066841;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22365193;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24136081;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7518508;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24186001;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 168287;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9688087;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17368204;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9316975;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10008478;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9939709;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13130248;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 485384;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10588285;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13344565;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 755766;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17206873;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21464908;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10552819;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 513165;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 60844;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 193578;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23509153;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19839511;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21190726;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 145604;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4825291;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12910402;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17079070;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13436371;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 195125;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10275760;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21999427;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1284975;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23621008;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 285178;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13220617;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6796894;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17023966;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8123776;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17449006;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21668662;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20096968;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16708717;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10760635;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22733080;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23401273;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23510617;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14476834;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11308495;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24268282;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18114196;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20086945;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22009009;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6842632;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 550606;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22984000;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6651847;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23181460;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1073005;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24287764;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22624171;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13252402;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10718590;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9286879;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 143202;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12203728;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 211231;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11864926;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19677487;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21810544;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17221114;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10764424;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17899918;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 229129;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1098537;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 875109;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24175219;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19913062;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22563961;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19100509;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10032397;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 14467660;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22418446;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19683055;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23531476;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1598765;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10355422;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 3670951;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8048485;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5203336;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22183948;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23813230;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9813811;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10296184;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13266838;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23520373;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23705500;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23761111;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5693914;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24276121;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21962428;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16279855;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23125432;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22129157;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17607211;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13073128;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16297969;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1868509;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6990250;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 8084032;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21613498;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6359830;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22080205;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20358655;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19484734;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2243911;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20661463;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22811053;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10448905;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21427147;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17219197;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18216478;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23375059;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16199908;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9936940;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22433392;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15615931;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17751211;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17921125;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21772687;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12905191;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5748610;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15028942;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17452492;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 934261;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20769670;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4256095;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9615136;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22405135;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5826757;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19455331;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11221384;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23065603;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1464787;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23255512;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21046240;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11152147;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19340260;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20790796;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22751983;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10663036;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12671212;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16979245;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16826713;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21908368;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 215363;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12887305;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 740471;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2006116;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24022195;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11873545;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23613532;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22024711;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15808192;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22510834;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10158082;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20062666;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2155900;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10996243;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23732275;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17657869;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12938056;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21366607;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23533687;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16908964;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21581755;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13146670;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13200580;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18423466;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22459213;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13949116;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17158843;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20406778;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21648643;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22846792;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20046997;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13194094;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19874278;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21553033;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20714647;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11354182;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10487221;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1618385;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 50359;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18610975;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20180476;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20858191;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15064975;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20795134;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24270307;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15478012;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22397041;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21325057;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 11193616;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19598311;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19724464;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 15102916;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16182112;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16384213;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 22145798;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 1367921;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 256297;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21791194;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2228362;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6101386;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20889169;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 17157937;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19942327;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23165935;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 7650367;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 16051609;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23190688;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 12277228;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 18669307;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 13831414;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21223684;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 9615727;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23473615;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5967373;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 4269664;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20641135;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24255571;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 6999850;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 5422;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23999812;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 19762219;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21861691;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 20138260;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 24344110;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 698117;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 21801520;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 23957683;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 2109130;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 214548;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10387462;
+SELECT uniq(UserID), sum(Sign), sum(Sign * PageViews), sum(Sign * Duration) FROM test.visits WHERE CounterID = 10972597;
diff --git a/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference b/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference
new file mode 100644
index 00000000000..ec635144f60
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.reference
@@ -0,0 +1 @@
+9
diff --git a/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql b/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql
new file mode 100644
index 00000000000..bfe3842335c
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00030_array_enumerate_uniq.sql
@@ -0,0 +1 @@
+SELECT max(arrayJoin(arrayEnumerateUniq(arrayMap(x -> intDiv(x, 10), URLCategories)))) FROM test.hits
diff --git a/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference b/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference
new file mode 100644
index 00000000000..80d8ffb01ea
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.reference
@@ -0,0 +1,20 @@
+9423131355037180 [1,1,1,1,2,1]
+99069201391534782 [1,1]
+126427371393908088 [1,1,1,2,1]
+129406171394116372 [1,2]
+137604511395271977 [1,1]
+187815321395062055 [1,1,1]
+194264491393666756 [1,1]
+263245441387901769 [1,2,3,1,4,5,6,2,3,4,7,5,8]
+312403081390726793 [1,1,1,1,1,1,1,1,1,1,1,1]
+318522451358917158 [1,2]
+324118291394008684 [1,1,2,3,4,5,6]
+355608731360247322 [1,1,1]
+408811531393490978 [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
+410895631386491691 [1,1,1]
+465280661362859686 [1,2,3,4,5]
+536058931389612176 [1,1,1]
+604260701390284018 [1,2]
+644639891379925126 [1,1]
+658636461391365106 [1,1]
+693047471394026191 [1,2,1]
diff --git a/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql b/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql
new file mode 100644
index 00000000000..158095c4333
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00031_array_enumerate_uniq.sql
@@ -0,0 +1,20 @@
+SELECT UserID, arrayEnumerateUniq(groupArray(SearchPhrase)) AS arr
+FROM
+(
+    SELECT UserID, SearchPhrase
+    FROM test.hits
+    WHERE CounterID = 731962 AND UserID IN
+    (
+        SELECT UserID
+        FROM test.hits
+        WHERE notEmpty(SearchPhrase) AND CounterID = 731962
+        GROUP BY UserID
+        HAVING count() > 1
+    )
+    ORDER BY UserID, WatchID
+)
+WHERE notEmpty(SearchPhrase)
+GROUP BY UserID
+HAVING length(arr) > 1
+ORDER BY UserID
+LIMIT 20
diff --git a/dbms/tests/queries/1_stateful/00032_aggregate_key64.reference b/dbms/tests/queries/1_stateful/00032_aggregate_key64.reference
new file mode 100644
index 00000000000..dc47fead232
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00032_aggregate_key64.reference
@@ -0,0 +1,10 @@
+0 8040919
+2 498635
+3 229872
+6 38617
+13 20490
+181 4786
+68 4735
+10 4675
+85 4494
+1 3741
diff --git a/dbms/tests/queries/1_stateful/00032_aggregate_key64.sql b/dbms/tests/queries/1_stateful/00032_aggregate_key64.sql
new file mode 100644
index 00000000000..56ee5080ed0
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00032_aggregate_key64.sql
@@ -0,0 +1 @@
+SELECT SearchEngineID AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10
diff --git a/dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference b/dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference
new file mode 100644
index 00000000000..1de4721d6e1
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00033_aggregate_key_string.reference
@@ -0,0 +1,10 @@
+ 8267016
+интерьер ванной комнаты 2166
+яндекс 1655
+весна 2014 мода 1549
+фриформ фото 1480
+анджелина джоли 1245
+омск 1112
+фото собак разных пород 1091
+дизайн штор 1064
+баку 1000
diff --git a/dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql b/dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql
new file mode 100644
index 00000000000..22c2817a00d
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00033_aggregate_key_string.sql
@@ -0,0 +1 @@
+SELECT SearchPhrase AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10
diff --git a/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference b/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference
new file mode 100644
index 00000000000..8672453b66a
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.reference
@@ -0,0 +1,10 @@
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 8267016
+картинки 6148
+смотреть 5055
+интерьер 2417
+игры для 2284
+красивые 1756
+яндекс\0\0\0\0\0 1655
+весна 2014 1549
+фриформ ф 1480
+свадебны 1432
diff --git a/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql b/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql
new file mode 100644
index 00000000000..ae281816f6f
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00034_aggregate_key_fixed_string.sql
@@ -0,0 +1 @@
+SELECT toFixedString(substring(SearchPhrase, 1, 17), 17) AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10
diff --git a/dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference b/dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference
new file mode 100644
index 00000000000..fe6adc72c92
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00035_aggregate_keys128.reference
@@ -0,0 +1,10 @@
+0 0 8019359
+2 0 498608
+3 0 220903
+6 0 38616
+13 0 20489
+0 2 13924
+3 2 8944
+181 0 4786
+68 0 4700
+10 0 4673
diff --git a/dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql b/dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql
new file mode 100644
index 00000000000..6dfa7454c4b
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00035_aggregate_keys128.sql
@@ -0,0 +1 @@
+SELECT SearchEngineID AS k1, AdvEngineID AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC, k1, k2 LIMIT 10
diff --git a/dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference b/dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference
new file mode 100644
index 00000000000..7bd3aef3079
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00036_aggregate_hashed.reference
@@ -0,0 +1,10 @@
+0 8040919
+3 194175
+2 22288
+125 3327
+126 2188
+2 интерьер ванной комнаты 2166
+12 1934
+2 весна 2014 мода 1549
+2 фриформ фото 1442
+2 анджелина джоли 1245
diff --git a/dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql b/dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql
new file mode 100644
index 00000000000..dacf9b96821
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00036_aggregate_hashed.sql
@@ -0,0 +1 @@
+SELECT SearchEngineID AS k1, SearchPhrase AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC, k1, k2 LIMIT 10
diff --git a/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference b/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference
new file mode 100644
index 00000000000..b456459c189
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.reference
@@ -0,0 +1,100 @@
+yandex.ru 23162 23162
+e.mail.ru 7460 7460
+rutube.ru 5567 5567
+images.yandex.ru 4158 4158
+news.yandex.ru 4099 4099
+mail.yandex.ru 3571 3571
+avito.ru 3334 3334
+ 3281 3281
+yabs.yandex.ru 3274 3274
+news.mail.ru 3255 3255
+hurpass.com 3066 3066
+youtube.com 2845 2845
+pogoda.yandex.ru 2816 2816
+yandex.ua 2641 2641
+maps.yandex.ru 2489 2489
+yandex.com.tr 2301 2301
+clck.yandex.ru 2238 2238
+my.mail.ru 2186 2186
+ria.ru 2155 2155
+rambler.ru 2082 2082
+tvigle.ru 1980 1980
+hugesex.tv 1930 1930
+mynet.com 1920 1920
+top.rbc.ru 1790 1790
+images.rambler.ru 1746 1746
+megogo.net 1717 1717
+kinopoisk.ru 1669 1669
+gismeteo.ru 1655 1655
+fucked-tube.com 1631 1631
+vesti.ru 1611 1611
+warthunder.ru 1562 1562
+wotlauncher.exe 1513 1513
+content.directadvert.ru 1454 1454
+tubecup.com 1418 1418
+market.yandex.ru 1415 1415
+sozcu.com.tr 1300 1300
+an.yandex.ru 1279 1279
+otvet.mail.ru 1255 1255
+code.directadvert.ru 1220 1220
+searcher.takataka.coccoc.com 1202 1202
+loginza.ru 1190 1190
+rg.ru 1183 1183
+liveinternet.ru 1173 1173
+tabs.ultimate-guitar.com 1155 1155
+news.rambler.ru 1147 1147
+vk.com 1146 1146
+gazeta.ru 1132 1132
+kinogo.net 1125 1125
+turbobit.net 1119 1119
+tv.yandex.ru 1080 1080
+bigcinema.tv 1070 1070
+itar-tass.com 1042 1042
+www.avito.ru 1036 1036
+sprashivai.ru 1034 1034
+eksisozluk.com 1018 1018
+coccoc.com 1010 1010
+haberler.com 1010 1010
+money.yandex.ru 1008 1008
+online.sberbank.ru 1008 1008
+utro.ru 999 999
+woman.ru 999 999
+wildberries.ru 959 959
+afisha.mail.ru 954 954
+regnum.ru 949 949
+kp.ru 948 948
+privatehomeclips.com 946 946
+znanija.com 946 946
+browser.yandex.ru 943 943
+marketgid.com 936 936
+lenta.ru 912 912
+video.yandex.ru 900 900
+ivi.ru 849 849
+segodnya.ua 831 831
+video.mail.ru 821 821
+interfax.ru 819 819
+worldoftanks.ru 816 816
+games.mail.ru 802 802
+disk.yandex.ru 801 801
+docviewer.yandex.ru 796 796
+cum.mobi 782 782
+market-click2.yandex.ru 781 781
+syndication.exoclick.com 778 778
+odnoklassniki.ru 772 772
+echo.msk.ru 769 769
+deti.mail.ru 765 765
+censor.net.ua 757 757
+sberbank.ru 753 753
+passport.yandex.ru 750 750
+spor.mynet.com 749 749
+bolshoyvopros.ru 732 732
+yandex.by 724 724
+korrespondent.net 723 723
+zoomby.ru 723 723
+ntv.ru 716 716
+1tv.ru 714 714
+music.yandex.ru 714 714
+yobt.com 713 713
+seasonvar.ru 704 704
+prntscr.com 699 699
+rbc.ru 697 697
diff --git a/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql b/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql
new file mode 100644
index 00000000000..c941a14b571
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00037_uniq_state_merge1.sql
@@ -0,0 +1 @@
+SELECT k, any(u) AS u, uniqMerge(us) AS us FROM (SELECT domain(URL) AS k, uniq(UserID) AS u, uniqState(UserID) AS us FROM test.hits GROUP BY k) GROUP BY k ORDER BY u DESC, k ASC LIMIT 100
diff --git a/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference b/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference
new file mode 100644
index 00000000000..5fc3b5a6fdb
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.reference
@@ -0,0 +1,100 @@
+ru 458921 60255
+com 138290 60492
+ua 57079 12203
+net 40851 20955
+tr 14141 7851
+tv 13603 9771
+org 12599 9089
+by 10983 2885
+kz 6594 2571
+ 6570 5540
+info 6255 4811
+su 4418 3643
+mobi 3125 2387
+biz 3008 2189
+me 2180 1829
+exe 1516 1516
+ws 1079 995
+to 988 860
+eu 979 853
+cc 920 836
+pl 819 673
+fm 745 702
+pro 692 644
+am 641 367
+vc 600 599
+lv 593 475
+az 591 390
+in 498 473
+md 467 256
+vn 459 132
+hu 426 342
+co 422 419
+de 409 390
+uz 406 256
+sx 386 277
+name 378 373
+im 361 300
+asia 359 352
+br 355 317
+kg 343 122
+us 327 203
+il 262 193
+travel 243 214
+sk 194 193
+cu 188 188
+cz 188 162
+ee 174 144
+uk 169 165
+ge 160 114
+tk 159 136
+my 138 138
+cn 131 121
+nu 130 128
+fr 110 105
+es 107 95
+sg 102 102
+io 92 59
+tl 92 92
+ro 91 87
+ca 87 86
+tj 87 44
+xxx 84 76
+be 81 80
+nl 78 63
+ly 76 76
+lt 75 71
+bg 73 66
+ae 70 53
+gl 59 59
+bz 57 49
+it 56 52
+li 55 55
+dj 51 39
+fi 50 44
+mx 41 41
+gr 40 34
+pw 37 37
+edu 36 34
+ch 35 35
+ec 35 35
+ar 34 33
+se 34 33
+so 33 29
+aero 30 25
+gov 29 28
+cl 27 27
+au 26 26
+at 25 24
+kr 24 20
+re 23 17
+hr 22 22
+pt 22 19
+tc 22 22
+za 21 21
+jp 20 20
+guru 17 17
+tm 16 15
+rs 15 14
+ms 14 14
+gg 12 12
diff --git a/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql b/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql
new file mode 100644
index 00000000000..677458daeda
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00038_uniq_state_merge2.sql
@@ -0,0 +1 @@
+SELECT topLevelDomain(concat('http://', k)) AS tld, sum(u) AS u, uniqMerge(us) AS us FROM (SELECT domain(URL) AS k, uniq(UserID) AS u, uniqState(UserID) AS us FROM test.hits GROUP BY k) GROUP BY tld ORDER BY u DESC, tld ASC LIMIT 100
diff --git a/dbms/tests/queries/1_stateful/00039_primary_key.reference b/dbms/tests/queries/1_stateful/00039_primary_key.reference
new file mode 100644
index 00000000000..cf9f11d0a25
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00039_primary_key.reference
@@ -0,0 +1,2 @@
+32670
+32670
diff --git a/dbms/tests/queries/1_stateful/00039_primary_key.sql b/dbms/tests/queries/1_stateful/00039_primary_key.sql
new file mode 100644
index 00000000000..1cfb117ef14
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00039_primary_key.sql
@@ -0,0 +1,2 @@
+SELECT count() FROM test.hits WHERE CounterID < 10000;
+SELECT count() FROM test.hits WHERE 10000 > CounterID;
diff --git a/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference b/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference
new file mode 100644
index 00000000000..61c98c6a49d
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.reference
@@ -0,0 +1,21 @@
+2014-03-17 265108 36201
+2014-03-18 258723 36085
+2014-03-19 261624 36479
+2014-03-20 255328 36065
+2014-03-21 236232 35120
+2014-03-22 197354 31256
+2014-03-23 202212 31075
+2014-03-17 15797 7121
+2014-03-18 15704 7099
+2014-03-19 15599 7082
+2014-03-20 15081 6962
+2014-03-21 14409 6691
+2014-03-22 12875 5682
+2014-03-23 13199 5767
+2014-03-17 15797 7121
+2014-03-18 15704 7099
+2014-03-19 15599 7082
+2014-03-20 15081 6962
+2014-03-21 14409 6691
+2014-03-22 12875 5682
+2014-03-23 13199 5767
diff --git a/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql b/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql
new file mode 100644
index 00000000000..460e5f62371
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00040_aggregating_materialized_view.sql
@@ -0,0 +1,44 @@
+DROP TABLE IF EXISTS test.basic;
+
+CREATE MATERIALIZED VIEW test.basic
+ENGINE = AggregatingMergeTree(StartDate, (CounterID, StartDate), 8192)
+POPULATE AS
+SELECT
+    CounterID,
+    StartDate,
+    sumState(Sign) AS Visits,
+    uniqState(UserID) AS Users
+FROM test.visits
+GROUP BY CounterID, StartDate;
+
+
+SELECT
+    StartDate,
+    sumMerge(Visits) AS Visits,
+    uniqMerge(Users) AS Users
+FROM test.basic
+GROUP BY StartDate
+ORDER BY StartDate;
+
+
+SELECT
+    StartDate,
+    sumMerge(Visits) AS Visits,
+    uniqMerge(Users) AS Users
+FROM test.basic
+WHERE CounterID = 731962
+GROUP BY StartDate
+ORDER BY StartDate;
+
+
+SELECT
+    StartDate,
+    sum(Sign) AS Visits,
+    uniq(UserID) AS Users
+FROM test.visits
+WHERE CounterID = 731962
+GROUP BY StartDate
+ORDER BY StartDate;
+
+
+DROP TABLE test.basic;
diff --git a/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference b/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference
new file mode 100644
index 00000000000..61c98c6a49d
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.reference
@@ -0,0 +1,21 @@
+2014-03-17 265108 36201
+2014-03-18 258723 36085
+2014-03-19 261624 36479
+2014-03-20 255328 36065
+2014-03-21 236232 35120
+2014-03-22 197354 31256
+2014-03-23 202212 31075
+2014-03-17 15797 7121
+2014-03-18 15704 7099
+2014-03-19 15599 7082
+2014-03-20 15081 6962
+2014-03-21 14409 6691
+2014-03-22 12875 5682
+2014-03-23 13199 5767
+2014-03-17 15797 7121
+2014-03-18 15704 7099
+2014-03-19 15599 7082
+2014-03-20 15081 6962
+2014-03-21 14409 6691
+2014-03-22 12875 5682
+2014-03-23 13199 5767
diff --git a/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql b/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql
new file mode 100644
index 00000000000..2aa97332e49
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00041_aggregating_materialized_view.sql
@@ -0,0 +1,73 @@
+DROP TABLE IF EXISTS test.basic;
+DROP TABLE IF EXISTS test.visits_null;
+
+CREATE TABLE test.visits_null
+(
+    CounterID UInt32,
+    StartDate Date,
+    Sign Int8,
+    UserID UInt64
+) ENGINE = Null;
+
+CREATE MATERIALIZED VIEW test.basic
+ENGINE = AggregatingMergeTree(StartDate, (CounterID, StartDate), 8192)
+AS SELECT
+    CounterID,
+    StartDate,
+    sumState(Sign) AS Visits,
+    uniqState(UserID) AS Users
+FROM test.visits_null
+GROUP BY CounterID, StartDate;
+
+INSERT INTO test.visits_null
+SELECT
+    CounterID,
+    StartDate,
+    Sign,
+    UserID
+FROM test.visits;
+
+
+SELECT
+    StartDate,
+    sumMerge(Visits) AS Visits,
+    uniqMerge(Users) AS Users
+FROM test.basic
+GROUP BY StartDate
+ORDER BY StartDate;
+
+
+SELECT
+    StartDate,
+    sumMerge(Visits) AS Visits,
+    uniqMerge(Users) AS Users
+FROM test.basic
+WHERE CounterID = 731962
+GROUP BY StartDate
+ORDER BY StartDate;
+
+
+SELECT
+    StartDate,
+    sum(Sign) AS Visits,
+    uniq(UserID) AS Users
+FROM test.visits
+WHERE CounterID = 731962
+GROUP BY StartDate
+ORDER BY StartDate;
+
+
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+OPTIMIZE TABLE test.basic;
+
+
+DROP TABLE test.visits_null;
+DROP TABLE test.basic;
diff --git a/dbms/tests/queries/1_stateful/00042_any_left_join.reference b/dbms/tests/queries/1_stateful/00042_any_left_join.reference
new file mode 100644
index 00000000000..ba19dab8ba9
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00042_any_left_join.reference
@@ -0,0 +1,10 @@
+1143050 523264 13665
+731962 475698 102664
+722545 337212 108133
+722889 252197 10545
+2237260 196036 9522
+23057320 147211 7685
+722818 90109 17837
+48221 85379 4652
+19762435 77807 7024
+722884 77492 11050
diff --git a/dbms/tests/queries/1_stateful/00042_any_left_join.sql b/dbms/tests/queries/1_stateful/00042_any_left_join.sql
new file mode 100644
index 00000000000..6ff0c5d4feb
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00042_any_left_join.sql
@@ -0,0 +1,21 @@
+SELECT
+    CounterID,
+    hits,
+    visits
+FROM
+(
+    SELECT
+        CounterID,
+        count() AS hits
+    FROM test.hits
+    GROUP BY CounterID
+) ANY LEFT JOIN
+(
+    SELECT
+        CounterID,
+        sum(Sign) AS visits
+    FROM test.visits
+    GROUP BY CounterID
+) USING CounterID
+ORDER BY hits DESC
+LIMIT 10;
diff --git a/dbms/tests/queries/1_stateful/00043_any_left_join.reference b/dbms/tests/queries/1_stateful/00043_any_left_join.reference
new file mode 100644
index 00000000000..ba19dab8ba9
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00043_any_left_join.reference
@@ -0,0 +1,10 @@
+1143050 523264 13665
+731962 475698 102664
+722545 337212 108133
+722889 252197 10545
+2237260 196036 9522
+23057320 147211 7685
+722818 90109 17837
+48221 85379 4652
+19762435 77807 7024
+722884 77492 11050
diff --git a/dbms/tests/queries/1_stateful/00043_any_left_join.sql b/dbms/tests/queries/1_stateful/00043_any_left_join.sql
new file mode 100644
index 00000000000..9e61e69c547
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00043_any_left_join.sql
@@ -0,0 +1,15 @@
+SELECT
+    CounterID,
+    count() AS hits,
+    any(visits)
+FROM test.hits ANY LEFT JOIN
+(
+    SELECT
+        CounterID,
+        sum(Sign) AS visits
+    FROM test.visits
+    GROUP BY CounterID
+) USING CounterID
+GROUP BY CounterID
+ORDER BY hits DESC
+LIMIT 10;
diff --git a/dbms/tests/queries/1_stateful/00044_any_left_join_string.reference b/dbms/tests/queries/1_stateful/00044_any_left_join_string.reference
new file mode 100644
index 00000000000..f346dda0f2a
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00044_any_left_join_string.reference
@@ -0,0 +1,10 @@
+yandex.ru 346544 84669
+yandex.ua 46242 11768
+yandex.by 14989 3861
+images.yandex.ru 11635 0
+yabs.yandex.ru 10469 0
+ 9754 169
+yandex.kz 8527 2085
+clck.yandex.ru 2605 0
+maps.yandex.ru 1809 0
+images.yandex.ua 1476 0
diff --git a/dbms/tests/queries/1_stateful/00044_any_left_join_string.sql b/dbms/tests/queries/1_stateful/00044_any_left_join_string.sql
new file mode 100644
index 00000000000..06e2873c2cb
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00044_any_left_join_string.sql
@@ -0,0 +1,23 @@
+SELECT
+    domain,
+    hits,
+    visits
+FROM
+(
+    SELECT
+        domain(URL) AS domain,
+        count() AS hits
+    FROM test.hits
+    WHERE CounterID = 731962
+    GROUP BY domain
+) ANY LEFT JOIN
+(
+    SELECT
+        domain(StartURL) AS domain,
+        sum(Sign) AS visits
+    FROM test.visits
+    WHERE CounterID = 731962
+    GROUP BY domain
+) USING domain
+ORDER BY hits DESC
+LIMIT 10
diff --git a/dbms/tests/queries/1_stateful/00045_uniq_upto.reference b/dbms/tests/queries/1_stateful/00045_uniq_upto.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dbms/tests/queries/1_stateful/00045_uniq_upto.sql b/dbms/tests/queries/1_stateful/00045_uniq_upto.sql
new file mode 100644
index 00000000000..d7322c12561
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00045_uniq_upto.sql
@@ -0,0 +1 @@
+SELECT RegionID, uniqExact(UserID) AS u1, uniqUpTo(10)(UserID) AS u2 FROM test.visits GROUP BY RegionID HAVING u1 <= 11 AND u1 != u2
\ No newline at end of file
diff --git a/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.reference b/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql b/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql
new file mode 100644
index 00000000000..1491972b37d
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00046_uniq_upto_distributed.sql
@@ -0,0 +1 @@
+SELECT RegionID, uniqExact(UserID) AS u1, uniqUpTo(10)(UserID) AS u2 FROM remote('127.0.0.{1,2}', test, visits) GROUP BY RegionID HAVING u1 <= 11 AND u1 != u2
\ No newline at end of file
diff --git a/dbms/tests/queries/1_stateful/00047_bar.reference b/dbms/tests/queries/1_stateful/00047_bar.reference
new file mode 100644
index 00000000000..7ef3fab92d9
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00047_bar.reference
@@ -0,0 +1,100 @@
+1143050 523264 ████████████████████████████████████████████████████████████████████████████████
+731962 475698 ████████████████████████████████████████████████████████████████████████▋
+722545 337212 ███████████████████████████████████████████████████▌
+722889 252197 ██████████████████████████████████████▌
+2237260 196036 █████████████████████████████▊
+23057320 147211 ██████████████████████▌
+722818 90109 █████████████▋
+48221 85379 █████████████
+19762435 77807 ███████████▊
+722884 77492 ███████████▋
+20860117 73213 ███████████▏
+21211267 68945 ██████████▌
+22753222 67570 ██████████▎
+12725416 64174 █████████▋
+23910442 60456 █████████▏
+23414332 58389 ████████▊
+160656 57017 ████████▋
+14545480 52345 ████████
+64174 52142 ███████▊
+24142063 47758 ███████▎
+10288858 44080 ██████▋
+19765189 43395 ██████▋
+15553948 43279 ██████▌
+13837519 40581 ██████▏
+17956927 37562 █████▋
+23969905 34301 █████▏
+21137731 32776 █████
+23723584 28788 ████▍
+16443139 26603 ████
+15068284 25733 ███▊
+64539 25595 ███▊
+24201256 25585 ███▊
+16949086 25496 ███▊
+805556 25270 ███▋
+815578 24744 ███▋
+16037275 23349 ███▌
+14645857 21270 ███▎
+16443178 20825 ███▏
+13205491 20788 ███▏
+146686 20785 ███▏
+14231362 19897 ███
+94020 19724 ███
+38230 19717 ███
+15011071 19402 ██▊
+9325861 18557 ██▋
+7234936 18370 ██▋
+115080 17443 ██▋
+22131482 17390 ██▋
+15987325 17302 ██▋
+10652611 17279 ██▋
+114208 16959 ██▌
+23427556 16849 ██▌
+21407305 16175 ██▍
+21270109 16017 ██▍
+7692568 15340 ██▎
+9927988 15171 ██▎
+18746557 15146 ██▎
+20008321 15104 ██▎
+18274111 14719 ██▎
+13227769 14584 ██▏
+23474449 14540 ██▏
+91244 14199 ██▏
+17969140 13972 ██▏
+149814 13930 ██▏
+230672 13792 ██
+22663942 13615 ██
+1911064 13509 ██
+79376 13308 ██
+10065061 13181 ██
+20424475 13181 ██
+23745772 12922 █▊
+67763 12520 █▊
+15748243 12352 █▊
+1605811 12283 █▊
+11492179 12183 █▋
+24327397 12170 █▋
+23878183 12158 █▋
+7941022 12049 █▋
+10849243 11818 █▋
+14544658 11733 █▋
+21374125 11658 █▋
+12929146 11514 █▋
+1276757 11452 █▋
+960630 11444 █▋
+10538560 11358 █▋
+62180 11193 █▋
+24056266 11047 █▋
+20793439 10936 █▋
+6414910 10747 █▋
+23194783 10738 █▋
+14131513 10656 █▋
+1598595 10655 █▋
+158751 10625 █▌
+150748 10522 █▌
+3228 10503 █▌
+18520561 10128 █▌
+1252825 10098 █▌
+4308403 10022 █▌
+199955 9997 █▌
+23214754 9780 █▍
diff --git a/dbms/tests/queries/1_stateful/00047_bar.sql b/dbms/tests/queries/1_stateful/00047_bar.sql
new file mode 100644
index 00000000000..c7310763525
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00047_bar.sql
@@ -0,0 +1 @@
+SELECT CounterID, count() AS c, bar(c, 0, 523264) FROM test.hits GROUP BY CounterID ORDER BY c DESC, CounterID ASC LIMIT 100
diff --git a/dbms/tests/queries/1_stateful/00048_min_max.reference b/dbms/tests/queries/1_stateful/00048_min_max.reference
new file mode 100644
index 00000000000..cb21dc07b1c
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00048_min_max.reference
@@ -0,0 +1 @@
+2014-03-17 2014-03-23
diff --git a/dbms/tests/queries/1_stateful/00048_min_max.sql b/dbms/tests/queries/1_stateful/00048_min_max.sql
new file mode 100644
index 00000000000..a62a6edfb61
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00048_min_max.sql
@@ -0,0 +1 @@
+SELECT min(EventDate), max(EventDate) FROM test.hits
diff --git a/dbms/tests/queries/1_stateful/00049_max_string_if.reference b/dbms/tests/queries/1_stateful/00049_max_string_if.reference
new file mode 100644
index 00000000000..51f17c87649
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00049_max_string_if.reference
@@ -0,0 +1,20 @@
+1143050 523264 яндекс почта яндекс почта
+731962 475698 • ротация товара по принципу fifo;
+722545 337212 індія вавілон египет
+722889 252197 ↵ ↵ ↵ анна↵↵ ↵ ↵ ↵
+2237260 196036 ярмарка калининград авто с пробегом
+23057320 147211
+722818 90109 яровая
+48221 85379 уссурийский аграрный техникум
+19762435 77807 электромобиль купить в красноярске
+722884 77492 ян женчак лявоны
+20860117 73213 ярмарка калининград авто с пробегом
+21211267 68945 マジックかいと
+22753222 67570 ютуб видео
+12725416 64174 эдгар по фильм ворон
+23910442 60456
+23414332 58389 №18-52-857 от 19.03.2002
+160656 57017 яндекс маркет
+14545480 52345 şu polarmi apolarmi
+64174 52142 яндекс почта
+24142063 47758
diff --git a/dbms/tests/queries/1_stateful/00049_max_string_if.sql b/dbms/tests/queries/1_stateful/00049_max_string_if.sql
new file mode 100644
index 00000000000..af87123ef02
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00049_max_string_if.sql
@@ -0,0 +1 @@
+SELECT CounterID, count(), maxIf(SearchPhrase, notEmpty(SearchPhrase)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
diff --git a/dbms/tests/queries/1_stateful/00050_min_max.reference b/dbms/tests/queries/1_stateful/00050_min_max.reference
new file mode 100644
index 00000000000..83c625ab0cc
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00050_min_max.reference
@@ -0,0 +1,20 @@
+1143050 5713920003519694728 5716397076045616998
+731962 5713920015071971218 5716397257116862311
+722545 5713920015651149659 5716397256090216288
+722889 5713920017089177471 5716397260516085458
+2237260 5713920016994238167 5716397237297002364
+23057320 5713949520802533074 5716413447410800488
+722818 5713920043014670169 5716397236420603606
+48221 5713920004276864894 5716397210278713206
+19762435 5713920017181982418 5716397246466066301
+722884 5713920011998402414 5716397246767304399
+20860117 5713920017370074831 5716397208719681371
+21211267 5713920045935435469 5716397248483389135
+22753222 5713920007612184431 5716397151789805258
+12725416 5713920109650941822 5716397254591100797
+23910442 5714042239335131000 5716386006459287388
+23414332 5713920019599158998 5716397247414163312
+160656 5713921425816235729 5716396958603462522
+14545480 5713949495531890580 5716413422480215759
+64174 5713920018026575709 5716397258157885302
+24142063 5713875910905472884 5716353012118216394
diff --git a/dbms/tests/queries/1_stateful/00050_min_max.sql b/dbms/tests/queries/1_stateful/00050_min_max.sql
new file mode 100644
index 00000000000..4c45f6fffa6
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00050_min_max.sql
@@ -0,0 +1 @@
+SELECT CounterID, min(WatchID), max(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
diff --git a/dbms/tests/queries/1_stateful/00051_min_max_array.reference b/dbms/tests/queries/1_stateful/00051_min_max_array.reference
new file mode 100644
index 00000000000..fac952200a9
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00051_min_max_array.reference
@@ -0,0 +1,20 @@
+1143050 523264 [353023] [] [353023]
+731962 475698 [] [] []
+722545 337212 [] [] []
+722889 252197 [1698655] [] [1545418]
+2237260 196036 [1096990] [] [1095625]
+23057320 147211 [] [] []
+722818 90109 [4187887] [] [1418056]
+48221 85379 [] [] []
+19762435 77807 [] [] []
+722884 77492 [3440203] [] [1784959]
+20860117 73213 [] [] []
+21211267 68945 [3658087] [] [3658087]
+22753222 67570 [4249348,3287725] [] [3287395,3287425]
+12725416 64174 [2645938] [] [939124]
+23910442 60456 [] [] []
+23414332 58389 [] [] []
+160656 57017 [4118521,4067026,2509678] [] [56598,1084654,1113568,1960378]
+14545480 52345 [] [] []
+64174 52142 [] [] []
+24142063 47758 [4211836] [] [4211836]
diff --git a/dbms/tests/queries/1_stateful/00051_min_max_array.sql b/dbms/tests/queries/1_stateful/00051_min_max_array.sql
new file mode 100644
index 00000000000..1027586372d
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00051_min_max_array.sql
@@ -0,0 +1 @@
+SELECT CounterID, count(), max(GoalsReached), min(GoalsReached), minIf(GoalsReached, notEmpty(GoalsReached)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20
diff --git a/dbms/tests/queries/1_stateful/00052_group_by_in.reference b/dbms/tests/queries/1_stateful/00052_group_by_in.reference
new file mode 100644
index 00000000000..46bd3c48aa6
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00052_group_by_in.reference
@@ -0,0 +1,14 @@
+2014-03-17 other 306
+2014-03-17 type_in 52
+2014-03-18 other 307
+2014-03-18 type_in 50
+2014-03-19 other 332
+2014-03-19 type_in 42
+2014-03-20 other 276
+2014-03-20 type_in 31
+2014-03-21 other 262
+2014-03-21 type_in 48
+2014-03-22 other 232
+2014-03-22 type_in 52
+2014-03-23 other 329
+2014-03-23 type_in 34
diff --git a/dbms/tests/queries/1_stateful/00052_group_by_in.sql b/dbms/tests/queries/1_stateful/00052_group_by_in.sql
new file mode 100644
index 00000000000..17fb4acaaae
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00052_group_by_in.sql
@@ -0,0 +1,4 @@
+select StartDate, TraficSourceID in (0) ? 'type_in' : 'other' as traf_type, sum(Sign)
+from test.visits
+where CounterID = 160656
+group by StartDate, traf_type ORDER BY StartDate, traf_type
diff --git a/dbms/tests/queries/1_stateful/00053_replicate_segfault.reference b/dbms/tests/queries/1_stateful/00053_replicate_segfault.reference
new file mode 100644
index 00000000000..d00491fd7e5
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00053_replicate_segfault.reference
@@ -0,0 +1 @@
+1
diff --git a/dbms/tests/queries/1_stateful/00053_replicate_segfault.sql b/dbms/tests/queries/1_stateful/00053_replicate_segfault.sql
new file mode 100644
index 00000000000..b727ae3aad8
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00053_replicate_segfault.sql
@@ -0,0 +1 @@
+SELECT count() > 0 FROM (SELECT ParsedParams.Key1 AS p FROM test.visits WHERE arrayAll(y -> arrayExists(x -> y != x, p), p))
diff --git a/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference b/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference
new file mode 100644
index 00000000000..43960608828
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.reference
@@ -0,0 +1,12 @@
+475698
+42917
+432781
+432781
+0
+432781
+475698
+42917
+432781
+458792
+475698
+492604
diff --git a/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql b/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql
new file mode 100644
index 00000000000..8e8ea29a9db
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00054_merge_tree_partitions.sql
@@ -0,0 +1,36 @@
+DROP TABLE IF EXISTS test.partitions;
+CREATE TABLE test.partitions (EventDate Date, CounterID UInt32) ENGINE = MergeTree(EventDate, CounterID, 8192);
+INSERT INTO test.partitions SELECT EventDate + UserID % 365 AS EventDate, CounterID FROM test.hits WHERE CounterID = 731962;
+
+
+SELECT count() FROM test.partitions;
+SELECT count() FROM test.partitions WHERE EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01');
+SELECT count() FROM test.partitions WHERE EventDate < toDate('2015-01-01') OR EventDate >= toDate('2015-02-01');
+
+ALTER TABLE test.partitions DETACH PARTITION 201501;
+
+SELECT count() FROM test.partitions;
+SELECT count() FROM test.partitions WHERE EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01');
+SELECT count() FROM test.partitions WHERE EventDate < toDate('2015-01-01') OR EventDate >= toDate('2015-02-01');
+
+ALTER TABLE test.partitions ATTACH PARTITION 201501;
+
+SELECT count() FROM test.partitions;
+SELECT count() FROM test.partitions WHERE EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01');
+SELECT count() FROM test.partitions WHERE EventDate < toDate('2015-01-01') OR EventDate >= toDate('2015-02-01');
+
+
+ALTER TABLE test.partitions DETACH PARTITION 201403;
+
+SELECT count() FROM test.partitions;
+
+INSERT INTO test.partitions SELECT EventDate + UserID % 365 AS EventDate, CounterID FROM test.hits WHERE CounterID = 731962 AND toStartOfMonth(EventDate) = toDate('2014-03-01');
+
+SELECT count() FROM test.partitions;
+
+ALTER TABLE test.partitions ATTACH PARTITION 201403;
+
+SELECT count() FROM test.partitions;
+
+
+DROP TABLE test.partitions;
diff --git a/dbms/tests/queries/1_stateful/00055_index_and_not.reference b/dbms/tests/queries/1_stateful/00055_index_and_not.reference
new file mode 100644
index 00000000000..6aa5d5ca5d6
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00055_index_and_not.reference
@@ -0,0 +1 @@
+8873898
diff --git a/dbms/tests/queries/1_stateful/00055_index_and_not.sql b/dbms/tests/queries/1_stateful/00055_index_and_not.sql
new file mode 100644
index 00000000000..faf3264be18
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00055_index_and_not.sql
@@ -0,0 +1 @@
+SELECT count() FROM test.hits WHERE NOT (EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01'))
diff --git a/dbms/tests/queries/1_stateful/00056_view.reference b/dbms/tests/queries/1_stateful/00056_view.reference
new file mode 100644
index 00000000000..16cf0436b1c
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00056_view.reference
@@ -0,0 +1,31 @@
+109760
+2 23449
+1 18139
+3 9216
+4 8296
+5 5237
+6 4585
+7 3290
+8 3083
+9 2347
+10 2070
+1143050 523264
+731962 475698
+722545 337212
+722889 252197
+2237260 196036
+23057320 147211
+722818 90109
+48221 85379
+19762435 77807
+722884 77492
+1143050 523264
+731962 475698
+722545 337212
+722889 252197
+2237260 196036
+23057320 147211
+722818 90109
+48221 85379
+19762435 77807
+722884 77492
diff --git a/dbms/tests/queries/1_stateful/00056_view.sql b/dbms/tests/queries/1_stateful/00056_view.sql
new file mode 100644
index 00000000000..5701cee3e15
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00056_view.sql
@@ -0,0 +1,7 @@
+DROP TABLE IF EXISTS test.view;
+CREATE VIEW test.view AS SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID;
+SELECT count() FROM test.view;
+SELECT c, count() FROM test.view GROUP BY c ORDER BY count() DESC LIMIT 10;
+SELECT * FROM test.view ORDER BY c DESC LIMIT 10;
+SELECT * FROM test.view SAMPLE 0.1 ORDER BY c DESC LIMIT 10;
+DROP TABLE test.view;
diff --git a/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference b/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql b/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql
new file mode 100644
index 00000000000..9ff482e4b46
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00059_merge_sorting_empty_array_joined.sql
@@ -0,0 +1 @@
+SELECT CounterID FROM test.visits ARRAY JOIN Goals.ID WHERE CounterID = 731962 ORDER BY CounterID
diff --git a/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference b/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference
new file mode 100644
index 00000000000..60a2c1d1bb7
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.reference
@@ -0,0 +1 @@
+4487
diff --git a/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql b/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql
new file mode 100644
index 00000000000..f9691503385
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00060_move_to_prewhere_and_sets.sql
@@ -0,0 +1,2 @@
+SET optimize_move_to_prewhere = 1;
+SELECT uniq(URL) FROM test.hits WHERE TraficSourceID IN (7);
diff --git a/dbms/tests/queries/1_stateful/00061_storage_buffer.reference b/dbms/tests/queries/1_stateful/00061_storage_buffer.reference
new file mode 100644
index 00000000000..50a5f6923ea
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00061_storage_buffer.reference
@@ -0,0 +1,4 @@
+2417
+0
+2417
+2417
diff --git a/dbms/tests/queries/1_stateful/00061_storage_buffer.sql b/dbms/tests/queries/1_stateful/00061_storage_buffer.sql
new file mode 100644
index 00000000000..4e3f4b81394
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00061_storage_buffer.sql
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS test.hits_dst;
+DROP TABLE IF EXISTS test.hits_buffer;
+
+CREATE TABLE test.hits_dst AS test.hits;
+CREATE TABLE test.hits_buffer AS test.hits_dst ENGINE = Buffer(test, hits_dst, 8, 1, 10, 10000, 100000, 10000000, 100000000);
+
+INSERT INTO test.hits_buffer SELECT * FROM test.hits WHERE CounterID = 101500;
+SELECT count() FROM test.hits_buffer;
+SELECT count() FROM test.hits_dst;
+
+OPTIMIZE TABLE test.hits_buffer;
+SELECT count() FROM test.hits_buffer;
+SELECT count() FROM test.hits_dst;
+
+DROP TABLE test.hits_dst;
+DROP TABLE test.hits_buffer;
diff --git a/dbms/tests/queries/1_stateful/00062_loyalty.reference b/dbms/tests/queries/1_stateful/00062_loyalty.reference
new file mode 100644
index 00000000000..266295d996d
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00062_loyalty.reference
@@ -0,0 +1,12 @@
+-10 4291 ████████████████████████████████████████████████████████████████████████████▊
+-9 222 █████████████████████████████████████████████████▋
+-8 156 ██████████████████████████████████████████████▍
+-7 102 ██████████████████████████████████████████▌
+-6 91 █████████████████████████████████████████▌
+-5 100 ██████████████████████████████████████████▍
+5 87 █████████████████████████████████████████▏
+6 123 ████████████████████████████████████████████▎
+7 133 █████████████████████████████████████████████
+8 236 ██████████████████████████████████████████████████▎
+9 603 ██████████████████████████████████████████████████████████▊
+10 5604 ███████████████████████████████████████████████████████████████████████████████▎
diff --git a/dbms/tests/queries/1_stateful/00062_loyalty.sql b/dbms/tests/queries/1_stateful/00062_loyalty.sql
new file mode 100644
index 00000000000..f57a303a15a
--- /dev/null
+++ b/dbms/tests/queries/1_stateful/00062_loyalty.sql
@@ -0,0 +1 @@
+SELECT loyalty, count() AS c, bar(log(c + 1) * 1000, 0, log(6000) * 1000, 80) FROM (SELECT UserID, toInt8((yandex > google ?
yandex / (yandex + google) : -google / (yandex + google)) * 10) AS loyalty FROM (SELECT UserID, sum(SearchEngineID = 2) AS yandex, sum(SearchEngineID = 3) AS google FROM test.hits WHERE SearchEngineID = 2 OR SearchEngineID = 3 GROUP BY UserID HAVING yandex + google > 10)) GROUP BY loyalty ORDER BY loyalty \ No newline at end of file diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.reference b/dbms/tests/queries/1_stateful/00063_loyalty_joins.reference new file mode 100644 index 00000000000..f43569bbd7a --- /dev/null +++ b/dbms/tests/queries/1_stateful/00063_loyalty_joins.reference @@ -0,0 +1,51 @@ +-10 1244506 +-9 145771 +-8 74083 +-7 52819 +-6 32860 +-5 54350 +0 3550514 +5 48423 +6 56766 +7 73688 +8 136048 +9 472052 +10 2932018 +-10 1244506 +-9 145771 +-8 74083 +-7 52819 +-6 32860 +-5 54350 +0 3550514 +5 48423 +6 56766 +7 73688 +8 136048 +9 472052 +10 2932018 +-10 1244506 +-9 145771 +-8 74083 +-7 52819 +-6 32860 +-5 54350 +0 3550514 +5 48423 +6 56766 +7 73688 +8 136048 +9 472052 +10 2932018 +-10 1244506 ███████████████████████████████████████████████████████████████████████████▎ +-9 145771 ███████████████████████████████████████████████████████████████▋ +-8 74083 ████████████████████████████████████████████████████████████▏ +-7 52819 ██████████████████████████████████████████████████████████▎ +-6 32860 ███████████████████████████████████████████████████████▋ +-5 54350 ██████████████████████████████████████████████████████████▍ +5 48423 █████████████████████████████████████████████████████████▋ +6 56766 ██████████████████████████████████████████████████████████▋ +7 73688 ████████████████████████████████████████████████████████████ +8 136048 ███████████████████████████████████████████████████████████████▍ +9 472052 ██████████████████████████████████████████████████████████████████████ +10 2932018 ███████████████████████████████████████████████████████████████████████████████▊ diff --git a/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql b/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql new file mode 100644 index 00000000000..f24a4221779 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00063_loyalty_joins.sql @@ -0,0 +1,94 @@ +SELECT + loyalty, + count() +FROM test.hits ANY LEFT JOIN +( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 +) USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + + +SELECT + loyalty, + count() +FROM +( + SELECT UserID + FROM test.hits +) ANY LEFT JOIN +( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 +) USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + + +SELECT + loyalty, + count() +FROM +( + SELECT + loyalty, + UserID + FROM + ( + SELECT UserID + FROM test.hits + ) ANY LEFT JOIN + ( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 + ) USING 
UserID +) +GROUP BY loyalty +ORDER BY loyalty ASC; + + +SELECT + loyalty, + count() AS c, + bar(log(c + 1) * 1000, 0, log(3000000) * 1000, 80) +FROM test.hits ANY INNER JOIN +( + SELECT + UserID, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM + ( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 + ) +) USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference b/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference new file mode 100644 index 00000000000..6ce4ca04eea --- /dev/null +++ b/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.reference @@ -0,0 +1,24 @@ +-10 1244506 +-9 145771 +-8 74083 +-7 52819 +-6 32860 +-5 54350 +5 48423 +6 56766 +7 73688 +8 136048 +9 472052 +10 2932018 +-10 1244506 +-9 145771 +-8 74083 +-7 52819 +-6 32860 +-5 54350 +5 48423 +6 56766 +7 73688 +8 136048 +9 472052 +10 2932018 diff --git a/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql b/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql new file mode 100644 index 00000000000..f009a856ba9 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00065_loyalty_with_storage_join.sql @@ -0,0 +1,34 @@ +USE test; + +DROP TABLE IF EXISTS join; +CREATE TABLE join (UserID UInt64, loyalty Int8) ENGINE = Join(ANY, INNER, UserID); + +INSERT INTO join +SELECT + UserID, + toInt8(if((sum(SearchEngineID = 2) AS yandex) > (sum(SearchEngineID = 3) AS google), + yandex / (yandex + google), + -google / (yandex + google)) * 10) AS loyalty +FROM hits +WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) +GROUP BY UserID +HAVING (yandex + google) > 10; + +SELECT + loyalty, + count() +FROM hits ANY INNER JOIN join USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + +DETACH TABLE join; +ATTACH TABLE join (UserID UInt64, loyalty Int8) ENGINE = Join(ANY, INNER, UserID); + +SELECT + loyalty, + count() +FROM hits ANY INNER JOIN join USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + +DROP TABLE join; diff --git a/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference b/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference new file mode 100644 index 00000000000..138e8df8fe4 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.reference @@ -0,0 +1,10 @@ +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 +2014-03-23 23:59:59 diff --git a/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql b/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql new file mode 100644 index 00000000000..d0636133186 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00066_sorting_distributed_many_replicas.sql @@ -0,0 +1,2 @@ +SET max_parallel_replicas = 2; +SELECT EventTime FROM remote('127.0.0.{1|2}', test, hits) ORDER BY EventTime DESC LIMIT 10 diff --git a/dbms/tests/queries/1_stateful/00067_union_all.reference b/dbms/tests/queries/1_stateful/00067_union_all.reference new file mode 100644 index 00000000000..0a5bc0f84e0 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00067_union_all.reference @@ -0,0 +1,20 @@ +5994089094820940230 2 
+5994089094820940230 2 +5994090037056589214 2 +5994090037056589214 2 +5994090037056589214 2 +5994090037056589214 2 +5994090037056589214 2 +5994090037056589214 2 +5994090526165207301 2 +5994090526165207301 2 +18442957653752051221 1 +18442957653752051221 1 +18442957653752051221 1 +18442957653752051221 1 +18443101109984791817 1 +18443101109984791817 1 +18443101109984791817 1 +18443101109984791817 1 +18443935406854847385 1 +18443935406854847385 1 diff --git a/dbms/tests/queries/1_stateful/00067_union_all.sql b/dbms/tests/queries/1_stateful/00067_union_all.sql new file mode 100644 index 00000000000..2a1d00e975d --- /dev/null +++ b/dbms/tests/queries/1_stateful/00067_union_all.sql @@ -0,0 +1,13 @@ +SELECT * FROM +( + SELECT UserID AS id, 1 AS event + FROM remote('127.0.0.{1,2}', test, hits) + ORDER BY id DESC + LIMIT 10 +UNION ALL + SELECT FUniqID AS id, 2 AS event + FROM remote('127.0.0.{1,2}', test, hits) + ORDER BY id DESC + LIMIT 10 +) +ORDER BY id, event; diff --git a/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference b/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference new file mode 100644 index 00000000000..90f1ab13794 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.reference @@ -0,0 +1 @@ +66498 diff --git a/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql b/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql new file mode 100644 index 00000000000..d84bacbf692 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00068_subquery_in_prewhere.sql @@ -0,0 +1 @@ +SELECT count() FROM test.hits PREWHERE UserID IN (SELECT UserID FROM test.hits WHERE CounterID = 101500); diff --git a/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference b/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference new file mode 100644 index 00000000000..4fee49b4716 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.reference @@ -0,0 +1,52 @@ +http://images.yandex.ru/yandsearch?text=картинки ракеты в космосе&stype=image&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=картинки ракеты в космосе&stype=image&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=планеты картинки&img_url=http:%2F%2Fcomments.ua%2Fimg%2F20110328143805.jpg&pos=7&rpt=simage&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=планеты картинки&img_url=http:%2F%2Fcomments.ua%2Fimg%2F20110328143805.jpg&pos=7&rpt=simage&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=планеты картинки&stype=image&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=планеты картинки&stype=image&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=раскраски для мальчиков&stype=image&lr=35&noreask=1&source=wiz 2014-03-20 http://images.yandex.ru/yandsearch?text=раскраски для мальчиков&stype=image&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=русская народная ярмарка картинки&img_url=http:%2F%2Fcs405226.userapi.com%2Fv405226882%2F2629%2Fr7DF4nIozfo.jpg&pos=1&rpt=simage&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=русская народная ярмарка картинки&img_url=http:%2F%2Fcs405226.userapi.com%2Fv405226882%2F2629%2Fr7DF4nIozfo.jpg&pos=1&rpt=simage&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=русская народная ярмарка картинки&stype=image&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=русская народная 
ярмарка картинки&stype=image&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=русская народная ярмарка рисунки&stype=image&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=русская народная ярмарка рисунки&stype=image&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=ярмарка картинки рисунки&stype=image&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=ярмарка картинки рисунки&stype=image&lr=35&noreask=1&source=wiz +http://images.yandex.ru/yandsearch?text=ярмарка картинки&stype=image&lr=35&noreask=1&source=wiz 2014-03-18 http://images.yandex.ru/yandsearch?text=ярмарка картинки&stype=image&lr=35&noreask=1&source=wiz +http://yabs.yandex.ru/count/9Fi4y9pf9Uu40000ZhiZrp85KfK1cm9kGxS198Yrp0QG1Oco2fLM0PYCAPshshu6fbYAfsmLrBs-gZmA0Qe1fQoMomAD0P6tm7fG0O-vvR6F0f-yMKpz2fCZcGL2Z90r3A2Gc3gla3KCb9229AUUNYoee5W86AIm0000WQx-3nB9IZGmzGIn0RA04Bchshu6k_cm1WlFsxvC0duH 2014-03-22 http://yabs.yandex.ru/count/9Fi4y9pf9Uu40000ZhiZrp85KfK1cm9kGxS198Yrp0QG1Oco2fLM0PYCAPshshu6fbYAfsmLrBs-gZmA0Qe1fQoMomAD0P6tm7fG0O-vvR6F0f-yMKpz2fCZcGL2Z90r3A2Gc3gla3KCb9229AUUNYoee5W86AIm0000WQx-3nB9IZGmzGIn0RA04Bchshu6k_cm1WlFsxvC0duH +http://yandex.ru/sitesearch?searchid=1887792&100n=ru&reqenc=&text=&Submit=Поиск 2014-03-22 http://yandex.ru/sitesearch?searchid=1887792&100n=ru&reqenc=&text=&Submit=Поиск +http://yandex.ru/yandsearch?lr=35&msid=20933.19395.1395338687.75786&text=гдз 2014-03-20 http://yandex.ru/yandsearch?lr=35&msid=20933.19395.1395338687.75786&text=гдз +http://yandex.ru/yandsearch?lr=35&msid=20943.9035.1395477291.50774&text=шуточная клятва пенсионера женщины 2014-03-22 http://yandex.ru/yandsearch?lr=35&msid=20943.9035.1395477291.50774&text=шуточная клятва пенсионера женщины +http://yandex.ru/yandsearch?lr=35&msid=20953.3223.1395159223.1527&text=смайлики вк скрытые 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=20953.3223.1395159223.1527&text=смайлики вк скрытые +http://yandex.ru/yandsearch?lr=35&msid=22871.10212.1395165538.52533&text=русская народная ярмарка картинки 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=22871.10212.1395165538.52533&text=русская народная ярмарка картинки +http://yandex.ru/yandsearch?lr=35&msid=22876.29908.1395165501.19017&text=русская народная ярмарка картинки 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=22876.29908.1395165501.19017&text=русская народная ярмарка картинки +http://yandex.ru/yandsearch?lr=35&msid=22878.24291.1395333478.42148&text=7лепестков 2014-03-20 http://yandex.ru/yandsearch?lr=35&msid=22878.24291.1395333478.42148&text=7лепестков +http://yandex.ru/yandsearch?lr=35&msid=22885.24948.1395159598.71259&text=пессимист 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=22885.24948.1395159598.71259&text=пессимист +http://yandex.ru/yandsearch?lr=35&msid=22889.4244.1395162598.65317&text=планеты картинки 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=22889.4244.1395162598.65317&text=планеты картинки +http://yandex.ru/yandsearch?lr=35&msid=22889.4252.1395165264.65698&text=ярмарка картинки 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=22889.4252.1395165264.65698&text=ярмарка картинки +http://yandex.ru/yandsearch?lr=35&msid=22894.2811.1395171040.46029&text=планеты картинки 2014-03-18 http://yandex.ru/yandsearch?lr=35&msid=22894.2811.1395171040.46029&text=планеты картинки +http://yandex.ru/yandsearch?lr=35&msid=22895.12503.1395323042.95018&text=раскраски для мальчиков 2014-03-20 http://yandex.ru/yandsearch?lr=35&msid=22895.12503.1395323042.95018&text=раскраски для мальчиков 
+http://yandex.ru/yandsearch?p=1&text=прикольные тосты с выходом на пенсию женщине в прозе&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=1&text=прикольные тосты с выходом на пенсию женщине в прозе&lr=35 +http://yandex.ru/yandsearch?p=1&text=сценарий проводов на пенсию женщины&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=1&text=сценарий проводов на пенсию женщины&lr=35 +http://yandex.ru/yandsearch?p=1&text=шуточные вопросы и ответы для гостей&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=1&text=шуточные вопросы и ответы для гостей&lr=35 +http://yandex.ru/yandsearch?p=1&text=шуточные песни к выходу на пенсию&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=1&text=шуточные песни к выходу на пенсию&lr=35 +http://yandex.ru/yandsearch?p=1&text=шуточные песни-переделки к выходу на пенсию женщине скачать&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=1&text=шуточные песни-переделки к выходу на пенсию женщине скачать&lr=35 +http://yandex.ru/yandsearch?p=1&text=шуточные тосты с выходом на пенсию женщине&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=1&text=шуточные тосты с выходом на пенсию женщине&lr=35 +http://yandex.ru/yandsearch?p=2&text=прикольные тосты с выходом на пенсию женщине в прозе&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=2&text=прикольные тосты с выходом на пенсию женщине в прозе&lr=35 +http://yandex.ru/yandsearch?p=2&text=сценарий проводов на пенсию женщины&lr=35 2014-03-22 http://yandex.ru/yandsearch?p=2&text=сценарий проводов на пенсию женщины&lr=35 +http://yandex.ru/yandsearch?text=Seemon – Играли нервы, летели ножи текст песни&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=Seemon – Играли нервы, летели ножи текст песни&lr=35 +http://yandex.ru/yandsearch?text=Seemon – Играли нервы, летели ножи&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=Seemon – Играли нервы, летели ножи&lr=35 +http://yandex.ru/yandsearch?text=картинки ракеты в космосе&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=картинки ракеты в космосе&lr=35 +http://yandex.ru/yandsearch?text=картинки ракеты для детей&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=картинки ракеты для детей&lr=35 +http://yandex.ru/yandsearch?text=пессимизм это&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=пессимизм это&lr=35 +http://yandex.ru/yandsearch?text=пессимист и оптимист&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=пессимист и оптимист&lr=35 +http://yandex.ru/yandsearch?text=прикольные тосты с выходом на пенсию женщине в прозе&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=прикольные тосты с выходом на пенсию женщине в прозе&lr=35 +http://yandex.ru/yandsearch?text=прикольные тосты с выходом на пенсию женщине&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=прикольные тосты с выходом на пенсию женщине&lr=35 +http://yandex.ru/yandsearch?text=русская народная ярмарка картинки&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=русская народная ярмарка картинки&lr=35 +http://yandex.ru/yandsearch?text=русская народная ярмарка рисунки &lr=35 2014-03-18 http://yandex.ru/yandsearch?text=русская народная ярмарка рисунки &lr=35 +http://yandex.ru/yandsearch?text=смайлики вк скрытые&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=смайлики вк скрытые&lr=35 +http://yandex.ru/yandsearch?text=сценарий проводов на пенсию женщины от подруг дома&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=сценарий проводов на пенсию женщины от подруг дома&lr=35 +http://yandex.ru/yandsearch?text=сценарий проводов на пенсию женщины&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=сценарий проводов на пенсию женщины&lr=35 
+http://yandex.ru/yandsearch?text=шуточные вопросы и ответы для гостей&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные вопросы и ответы для гостей&lr=35 +http://yandex.ru/yandsearch?text=шуточные дипломы к выходу на пенсию женщине&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные дипломы к выходу на пенсию женщине&lr=35 +http://yandex.ru/yandsearch?text=шуточные дипломы к выходу на пенсию&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные дипломы к выходу на пенсию&lr=35 +http://yandex.ru/yandsearch?text=шуточные песни к выходу на пенсию женщине скачать&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные песни к выходу на пенсию женщине скачать&lr=35 +http://yandex.ru/yandsearch?text=шуточные песни к выходу на пенсию&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные песни к выходу на пенсию&lr=35 +http://yandex.ru/yandsearch?text=шуточные песни-переделки к выходу на пенсию женщине скачать&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные песни-переделки к выходу на пенсию женщине скачать&lr=35 +http://yandex.ru/yandsearch?text=шуточные поздравления с выходом на пенсию женщине&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные поздравления с выходом на пенсию женщине&lr=35 +http://yandex.ru/yandsearch?text=шуточные тосты с выходом на пенсию женщине&lr=35 2014-03-22 http://yandex.ru/yandsearch?text=шуточные тосты с выходом на пенсию женщине&lr=35 +http://yandex.ru/yandsearch?text=ярмарка картинки рисунки&lr=35 2014-03-18 http://yandex.ru/yandsearch?text=ярмарка картинки рисунки&lr=35 diff --git a/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql b/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql new file mode 100644 index 00000000000..ec92d4a8ace --- /dev/null +++ b/dbms/tests/queries/1_stateful/00069_duplicate_aggregation_keys.sql @@ -0,0 +1 @@ +SELECT URL, EventDate, max(URL) FROM test.hits WHERE CounterID = 731962 AND UserID = 2651474201385397001 GROUP BY URL, EventDate, EventDate ORDER BY URL, EventDate; diff --git a/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference b/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference new file mode 100644 index 00000000000..d366a28c867 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.reference @@ -0,0 +1,100 @@ +2014-03-18 00:00:00 2014-03-18 152905 1149766421370435682 http://podrobnosti.ua/kaleidoscope/2014/03/17/965156.html http://podrobnosti.ua/society/2014/02/19/959834.html +2014-03-18 00:00:00 2014-03-18 303268 5927163351345602343 http://5-shagov.ru/grippe/ http://5-shagov.ru/parents/hospitalization.php +2014-03-18 00:00:00 2014-03-18 722884 8841128461379701592 http://youtube.com/watch?v=JeOfJi4ZPXw http://yandex.ru/video/search?text=Girlicious - Stupid Shit танцы&filmId=JrZrradeUXI +2014-03-18 00:00:00 2014-03-18 722889 712457281393857753 http://images.yandex.ua/yandsearch?text=тату для мужчин&img_url=http:%2F%2Fi1031.photobucket.com%2Falbums%2Fy379%2Fviki_loves_me%2Fpic_355.jpg&pos=5&rpt=simage&lr=10363&noreask=1&source=wiz http://yandex.ua/yandsearch?text=тату для мужщин&lr=10363 +2014-03-18 00:00:00 2014-03-18 731962 6127057901301120896 http://yandex.ru/yandsearch?text=как правильно посадить лилию в грунт&lr=213 http://yandex.ru/yandsearch?lr=213&oprnd=5603136039&text=как правильно посадить лилию в горшок +2014-03-18 00:00:00 2014-03-18 4308403 260966421390077298 http://lenta.ru/rubrics/ussr/ +2014-03-18 00:00:00 2014-03-18 10095472 3034723561394017307 
http://kolesa.kz/a/show/15015981 http://auto.yandex.ru/volkswagen/passat/6391673?state=USED +2014-03-18 00:00:00 2014-03-18 11229106 799814451394901442 http://prntscr.com/31iiin +2014-03-18 00:00:00 2014-03-18 21263557 1158837751395057041 http://hugesex.tv/ar/newest.html http://hugesex.tv/ar/خولات.html +2014-03-18 00:00:00 2014-03-18 23785906 47041921395093581 http://mcums.com/videos/licking-and-fucking-shaved-pussy-of-brunette-gf/ http://mcums.com/tags/pussy-licking/ +2014-03-18 00:00:01 2014-03-18 132528 7556707501378763475 http://eva.ru/jsf/forum/frame-content-post-message.jsp?topicId=3236928&boardId=131&messageId=84758330 http://eva.ru/jsf/forum/frame-content-tree-topic-messages.jsp?topicId=3236928&reload=&showAll=false&reloadTo= +2014-03-18 00:00:01 2014-03-18 722889 1211651541357579377 http://images.yandex.ru/yandsearch?source=wiz&fp=0&text=хайрюнниса гюль&noreask=1&pos=18&lr=101084&rpt=simage&uinfo=ww-980-wh-598-fw-938-fh-448-pd-2&img_url=http:%2F%2Fimgnews.hurriyet.com.tr%2FLiveImages\\photonews\\Today\'s News in Pictures - Feb. 13, 2009\\5.jpg http://yandex.ru/yandsearch?lr=101084&text=хайрюнниса гюль +2014-03-18 00:00:01 2014-03-18 731962 1996577731349080058 http://yandex.by/yandsearch?text=каскад&clid=9582&lr=157 http://yandex.by/yandsearch?rdrnd=764250&text=01100011&clid=9582&lr=157&redircnt=1395086250.1 +2014-03-18 00:00:01 2014-03-18 922978 2402137561319480416 http://segodnya.ua/allnews/p3.html http://segodnya.ua/allnews/p2.html +2014-03-18 00:00:01 2014-03-18 1143050 2007287821344978095 https://mail.yandex.ru/for/webprofiters.ru/neo2/#folder/2090000160054569829 https://mail.yandex.ru/for/webprofiters.ru/neo2/#inbox/thread/2090000004677772251 +2014-03-18 00:00:01 2014-03-18 5503465 3099908391394899634 http://lamoda.ru/c/563/bags-sumki-chehli/?genders=women&sitelink=topmenu http://lamoda.ru/?ef_id=UyYS-wAABWroAQsl:20140317195941:s +2014-03-18 00:00:01 2014-03-18 9927757 1886498191394574261 http://yandex.com.tr/ +2014-03-18 00:00:01 2014-03-18 10193245 5187318751332874613 https://parimatch.by/ https://parimatch.by/?login=1 +2014-03-18 00:00:01 2014-03-18 10849243 712457281393857753 http://images.yandex.ua/yandsearch?text=тату для мужчин&img_url=http:%2F%2Fi1031.photobucket.com%2Falbums%2Fy379%2Fviki_loves_me%2Fpic_355.jpg&pos=5&rpt=simage&lr=10363&noreask=1&source=wiz http://yandex.ua/yandsearch?text=тату для мужщин&lr=10363 +2014-03-18 00:00:01 2014-03-18 12725416 6312826621394013737 goal://megogo.net/playtime http://megogo.net/ru/view/44131-zakrytaya-shkola-sezon-2-seriya-24.html +2014-03-18 00:00:01 2014-03-18 16436437 9135489181394970660 http://naitimp3.com/search/?query=Сектор Газа Роковой год 1999 http://go.mail.ru/search?q=сектор газа в роковой год можете мне не верить 1999 слушать +2014-03-18 00:00:01 2014-03-18 23414332 303872061330707283 http://blognews.am/arm/press/141257/ https://facebook.com/ +2014-03-18 00:00:01 2014-03-18 23544181 303872061330707283 http://blognews.am/arm/press/141257/ https://facebook.com/ +2014-03-18 00:00:01 2014-03-18 24129763 2090186401395086374 http://flvto.com/es/ +2014-03-18 00:00:02 2014-03-18 731962 2415724631390139276 http://yandex.ru/yandsearch?text=смотреть аниме очень приятно, бог&lr=213 http://yandex.ru/yandsearch?lr=213&text=смотреть аниме удар крови +2014-03-18 00:00:02 2014-03-18 1143050 2007287821344978095 https://mail.yandex.ru/for/webprofiters.ru/neo2/#folder/2090000160054569829/thread/2090000001841781953 +2014-03-18 00:00:02 2014-03-18 1143050 2007287821344978095 
https://mail.yandex.ru/for/webprofiters.ru/neo2/#folder/2090000160054569829/thread/2090000001841781953 https://mail.yandex.ru/for/webprofiters.ru/neo2/#folder/2090000160054569829 +2014-03-18 00:00:02 2014-03-18 10041976 4249766291378661688 http://magazin-gobelenov.ru/catalog/elizabet/ http://magazin-gobelenov.ru/catalog/angelina/ +2014-03-18 00:00:02 2014-03-18 10740559 5234980771391796639 http://ludivteme.com/user/contacts/ http://ludivteme.com/user/post/?uId=2617253&postId=2713054&ref=fav +2014-03-18 00:00:02 2014-03-18 11492179 690778681385387852 http://ru.tsn.ua/politika +2014-03-18 00:00:02 2014-03-18 12539611 1949138091341424951 http://pup-sik.ru/main/8-tanki-onlajn.html +2014-03-18 00:00:02 2014-03-18 13814323 48915321355515668 http://lentaporno.com/minet/page/2/ +2014-03-18 00:00:02 2014-03-18 15044245 4249766291378661688 http://magazin-gobelenov.ru/catalog/elizabet/ http://magazin-gobelenov.ru/catalog/angelina/ +2014-03-18 00:00:02 2014-03-18 16137184 1595224551373963471 http://seria-online.ru/100579-v-lesah-i-na-gorah-1-sezon-20-serija.html#pInfoData http://seria-online.ru/100578-v-lesah-i-na-gorah-1-sezon-19-serija.html +2014-03-18 00:00:02 2014-03-18 19957570 355266201395086276 http://photos.wowgirls.com/aa73789d/MTY1MjY6NDQ6MzI/ http://milkmanbook.com/ +2014-03-18 00:00:02 2014-03-18 23194813 2630182581386173797 http://posta.com.tr/magazin/GaleriHaber/Cicekciyi-gorunce---.htm?ArticleID=220836&PageIndex=3 http://posta.com.tr/magazin/GaleriHaber/Cicekciyi-gorunce---.htm?ArticleID=220836&PageIndex=2 +2014-03-18 00:00:02 2014-03-18 23414332 1949138091341424951 http://pup-sik.ru/main/8-tanki-onlajn.html +2014-03-18 00:00:02 2014-03-18 23427556 1167178261394699416 goal://debilizator.tv/Online http://debilizator.tv/tnt/ +2014-03-18 00:00:02 2014-03-18 24322408 531864411394811824 http://sefan.mobi/yo.php?id=6&place=main http://sefan.ru/ +2014-03-18 00:00:03 2014-03-18 115931 62014561357385318 http://aukro.ua/videoregistratory-111986?a_enum[695][1]=1&change_view=Найти >&listing_interval=7&listing_sel=2&order=qd&offerTypeBuyNow=1&ap=1&aid=17390505 +2014-03-18 00:00:03 2014-03-18 731962 4598420951330592755 http://yandex.ru/yandsearch?lr=43&msid=22881.14658.1395086392.12925&oprnd=9814174567&text=авито http://yandex.ru/ +2014-03-18 00:00:03 2014-03-18 942065 728339451390597173 http://utkonos.ru/cat/catalogue/41/page/3?property[]=480:230849 http://utkonos.ru/cat/catalogue/41/page/3?property[]=56:409&property[]=480:230849 +2014-03-18 00:00:03 2014-03-18 1143050 2007287821344978095 https://mail.yandex.ru/for/webprofiters.ru/neo2/#folder/2090000160054569829/thread/2090000001841781953 +2014-03-18 00:00:03 2014-03-18 6599752 1618423011295641161 http://pornoload.com/video/gestkoe http://pornoload.com/video/orgii +2014-03-18 00:00:03 2014-03-18 23609092 303872061330707283 http://blognews.am/arm/press/141257/ https://facebook.com/ +2014-03-18 00:00:03 2014-03-18 24142063 1833987961394132913 http://searcher.takataka.coccoc.com/searcher/frame/default?abid=fdbb8847fde149930234e02fc55dc6f0 http://tinngoisao.vn/tin-tuc/lo-dien-thi-sinh-cua-hoa-hau-phu-nhan-tai-my-2014 +2014-03-18 00:00:04 2014-03-18 62180 773991171388847630 http://rutube.ru/video/9049a252a077b229303b4cbe3fd08cd1/ http://rutube.ru/popup_http://rutube.ru/video/9049a252a077b229303b4cbe3fd08cd1/ +2014-03-18 00:00:04 2014-03-18 62180 773991171388847630 http://rutube.ru/video/9049a252a077b229303b4cbe3fd08cd1/ http://rutube.ru/popup_http://rutube.ru/video/9049a252a077b229303b4cbe3fd08cd1/ +2014-03-18 00:00:04 2014-03-18 722545 
913874051395084276 http://yandex.ru/ +2014-03-18 00:00:04 2014-03-18 722889 824625261393690464 http://images.yandex.ua/yandsearch?tld=ua&p=5&text=ржачные картинки&fp=5&pos=167&uinfo=ww-1905-wh-936-fw-1680-fh-598-pd-1&rpt=simage&img_url=http:%2F%2Fcs7002.userapi.com%2Fv7002416%2F30%2FyjhueCmnUIc.jpg http://yandex.ua/ +2014-03-19 00:00:00 2014-03-19 722545 695702261356209942 http://yandex.by/ +2014-03-19 00:00:00 2014-03-19 722545 7181768941378219775 http://yandex.ru/ +2014-03-19 00:00:00 2014-03-19 722545 9484883841392744913 http://yandex.ru/ +2014-03-19 00:00:00 2014-03-19 731962 7190331241393769781 http://yandex.ua/yandsearch?text=.Народная Солянка + Поиски чернобыльского Шахматиста прохождение.контейнер калмыка&clid=2070746&lr=143 +2014-03-19 00:00:00 2014-03-19 2237260 4222828211330284262 http://avito.ru/kamensk-shahtinskiy?p=2 http://avito.ru/kamensk-shahtinskiy +2014-03-19 00:00:00 2014-03-19 12901042 2297304011387025366 http://radario.ru/vk/app?appKey=685c02962351452f9dc5497cad23fa9d&api_url=http://api.vk.com/api.php&api_id=3786618&api_settings=0&viewer_id=1528921&viewer_type=0&sid=4527ab66148ec89be4b6f51b9b8236e8caec812dcf6aded81d0908ed794b8ec513824a549e7bbad3bd2ef&secret=d54dbc6532&access_token=e619ee54a2d5687ac4147c0d6b35adbd36827847a05983c5e7f407288f02281b0708e1be5dc4f1da03c5c&user_id=0&group_id=0&is_app_user=0&auth_key=0bc4451e889647c1d615673e5fe39422&language=0&parent_language=0&ad_info=ElsdCQBZQ11tBwVNRARQBHR/FAsmMQxVUUZGNgBQbwYfQyQrWQA=&is_secure=0&ads_app_id=3786618_47e214d0ab09e5d822&api_result={"response":[{"uid":1528921,"first_name":"Doctor","last_name":"Banan","sex":2,"nickname":"","bdate":"18.2.1990","photo_big":"http:\\%2F\\%2Fcs425527.vk.me\\%2Fv425527921\\%2F879f\\%2F0Ka3g6CM2a0.jpg"}]}&referrer=unknown&lc_name=ae2044e2&hash= http://vk.com/msessions +2014-03-19 00:00:00 2014-03-19 15003622 61132721390316282 http://acunn.com/survivor#anket http://acunn.com/survivor +2014-03-19 00:00:01 2014-03-19 109993 1578933551394054044 http://molotok.ru/myaccount/smanager.php?page=auctions&type=not_sold&p=87 http://molotok.ru/myaccount/smanager.php?page=auctions&type=not_sold +2014-03-19 00:00:01 2014-03-19 722545 2436598271392110542 http://yandex.ru/ +2014-03-19 00:00:01 2014-03-19 11514781 2111148871395066201 http://intellect-video.com/5345/BBC--Horizon--Prizrak-v-moikh-genakh-online/ http://intellect-video.com/natural-sciences-biology/ +2014-03-19 00:00:01 2014-03-19 12040240 2040458821375390840 http://gazetaby.com/cont/list.php?sn_arx=1&sn_cat=32 +2014-03-19 00:00:01 2014-03-19 15003622 61132721390316282 http://acunn.com/survivor +2014-03-19 00:00:01 2014-03-19 21270109 781274971395130639 http://fotostrana.ru/user/71384319/ http://e.mail.ru/cgi-bin/link?check=1&refresh=1&cnf=3506c8&url=http:%2F%2Ffotostrana.ru%2Fuser%2Fautologin%2F%3Fu%3D71384319%26h%3Ddb59c4c43dfb7f3%26eRf%3D1301%26t%3D1301%26v%3D2%26time%3D1395130362%26to_url%3D%2Fuser%2F74646240%2F%26utm_campaign%3Demail_notify%26utm_content%3Dlink%26utm_medium%3Demail_1301%26utm_source%3Devent_0&msgid=13951308140000000339;0,1&x-email=anderson_66@mail.ru&js=1&redir=1 +2014-03-19 00:00:01 2014-03-19 21279787 7255079541391233211 http://diary.ru/~Tahgira/p196293215.htm?oam#more1 http://mylostharem.diary.ru/?favorite&from=40 +2014-03-19 00:00:01 2014-03-19 21879736 2581102761372914766 http://cocok.mobi/video/listing/3/3 http://cocok.mobi/video/listing/3/2 +2014-03-19 00:00:01 2014-03-19 22063525 2634772471395187200 http://porniac.de/free-porn-teen-german-deutsch-creampie-inpussy 
http://google.de/url?sa=t&rct=j&q=&esrc=s&source=web&cd=3&ved=0CEAQFjAC&url=http:%2F%2Fwww.porniac.de%2Ffree-porn-teen-german-deutsch-creampie-inpussy&ei=utwoU9OBOcig4gTt0IH4Bg&usg=AFQjCNG__41cA_2JKqf2AOYlxK8Vg5-hHA&bvm=bv.62922401,d.bGE +2014-03-19 00:00:01 2014-03-19 22273222 7971532421394017800 http://video.nur.kz/serial/14-velikolepnyy-vek/sezon-4-seria-19&ref=search_redirect&autoplay=1 +2014-03-19 00:00:02 2014-03-19 73874 5271033891286095023 http://livetv.sx/eventinfo/218436_chelsea_galatasaray/ http://livetv.sx/allupcomingsports/1/ +2014-03-19 00:00:02 2014-03-19 187695 6517393831394014299 http://malls.ru/upload/resize_cache/iblock/ea6/800_800_1bb65d3589fb69dd4a17961e4a75772f4/ea6b77961089460feaf0260d7d28345f.jpg http://malls.ru/rus/rent/demand/id45961.shtml +2014-03-19 00:00:02 2014-03-19 9927988 1968640351383680744 http://yandex.com.tr/yandsearch?lr=11503&text=anne yeşil örgü modelleri http://yandex.com.tr/ +2014-03-19 00:00:02 2014-03-19 23075158 1856338591385061414 http://1.newtop1.ru/?tid=999364201 http://proligtb.com/news.php?tz=2873580 +2014-03-19 00:00:02 2014-03-19 23711410 683272311395171829 https://betcruise.com/signin/expire/ https://betcruise.com/signup/success/ +2014-03-19 00:00:03 2014-03-19 48221 939058481380477704 http://love.mail.ru/search.phtml?ia=F&lf=M&af=26&at=35&wp=1&wv=0&wvc=0&ni=1&wr=0&sz=b&s_c=1012_25547957_25552720_0&geo=0&s_tg=&geo=0&t=o http://love.mail.ru/search.phtml?t=&sz=b&ia=F&lf=M&af=26&at=35&s_c=1012_25547957_25552720_0&target=&wp=1 +2014-03-19 00:00:03 2014-03-19 106966 3556373091312196176 http://rosbalt.ru/tests/gibdd +2014-03-19 00:00:03 2014-03-19 722545 149923051366137669 http://yandex.ru/ +2014-03-19 00:00:03 2014-03-19 731962 7407418091393711524 http://hghltd.yandex.net/yandbtm?fmode=inject&url=http:%2F%2Fweb-ulitka.ru%2Fshowthread.php%3Ft%3D955&tld=ru&lang=ru&text=Advanced System Protector ключ&l10n=ru&mime=html&sign=cc8ef02d6794cf571f8e88f8c4e5db2b&keyno=0 http://yandex.ru/yandsearch?clid=1955452&lr=11116&text=Advanced System Protector ключ +2014-03-19 00:00:03 2014-03-19 7970125 1852607591356602753 http://casio.prommag.ru/watches/protrek/protrek.html http://casio.prommag.ru/watches/protrek/protrek.html +2014-03-19 00:00:03 2014-03-19 10935790 9734083581388238977 http://domexpo.ru/company_info.htm?id=520 +2014-03-19 00:00:03 2014-03-19 22273222 7971532421394017800 goal://video.nur.kz/UPPOD_PLAY http://video.nur.kz/serial/14-velikolepnyy-vek/sezon-4-seria-19&ref=search_redirect&autoplay=1 +2014-03-19 00:00:03 2014-03-19 22273222 7971532421394017800 http://video.nur.kz/serial/14-velikolepnyy-vek/sezon-4-seria-19&ref=search_redirect&autoplay=1#UPPOD_PLAY_HIT http://video.nur.kz/serial/14-velikolepnyy-vek/sezon-4-seria-19&ref=search_redirect&autoplay=1 +2014-03-19 00:00:03 2014-03-19 23723584 7524871931395144801 http://mamba.ru/my/messages.phtml http://mamba.ru/ +2014-03-19 00:00:04 2014-03-19 48221 1173254791395172208 http://love.mail.ru/search.phtml?ia=M&lf=F&af=18&at=80&wp=1&wv=0&wvc=0&ni=1&wr=0&gid=1395172789&t=o&sz=b&s_c=3159_4052_0_0&geo=0&s_tg=&target=Sex&offset=0&nchanged=1395092224&noid=1243669604 http://love.mail.ru/search.phtml?ia=M&lf=F&af=18&at=80&wp=1&wv=0&wvc=0&ni=1&wr=0&gid=1395172383&sz=b&s_c=3159_4052_0_0&geo=0&s_tg=&target=Sex&geo=0&t=o +2014-03-19 00:00:04 2014-03-19 48221 7524871931395144801 http://mamba.ru/my/messages.phtml http://mamba.ru/ +2014-03-19 00:00:04 2014-03-19 79376 781274971395130639 http://my.mail.ru/?from=email http://my.mail.ru/?from=email +2014-03-19 00:00:04 2014-03-19 731962 1265434081376817556 
http://yandex.ru/yandsearch?text=dj paroff слушать онлайн бесплатно&lr=2 http://yandex.ru/yandsearch?lr=2&text=depeche mode +2014-03-19 00:00:04 2014-03-19 2199583 1471157941382120170 http://litres.ru/?td +2014-03-19 00:00:04 2014-03-19 2237260 5917401071394958225 http://avito.ru/sankt-peterburg/avtomobili_s_probegom/ford_transit_1992_270304651 http://avito.ru/sankt-peterburg/avtomobili_s_probegom/ford/transit?pmax=150000&pmin=0 +2014-03-19 00:00:04 2014-03-19 2344120 8729055551317074933 http://proelectro.ru/cabinet/notices/list http://proelectro.ru/cabinet/notices/view/id/46749 +2014-03-19 00:00:04 2014-03-19 6969847 8729055551317074933 http://proelectro.ru/cabinet/notices/list http://proelectro.ru/cabinet/notices/view/id/46749 +2014-03-19 00:00:04 2014-03-19 7234936 2683079681355752489 http://korrespondent.net/ http://korrespondent.net/ukraine/politics/3320139-yarosh-staranyiamy-putyna-myr-stoyt-na-porohe-tretei-myrovoi-voiny +2014-03-19 00:00:04 2014-03-19 7604263 948490331394925267 http://small-games.info/?go=game&c=12&i=3968 https://google.com.ua/ +2014-03-19 00:00:04 2014-03-19 9422815 22123591394214537 http://audiopoisk.com/?q=зара http://audiopoisk.com/?q=Соундтреки из кино филыма можно звать мама +2014-03-19 00:00:04 2014-03-19 12725416 963351201395165694 goal://megogo.net/playtime http://megogo.net/ru/view/14391-mamy.html +2014-03-19 00:00:04 2014-03-19 12725416 1837943761391284585 http://megogo.net/ru/view/152541-dumay-kak-zhenshchina-seriya-1.html +2014-03-19 00:00:04 2014-03-19 12725416 6312826621394013737 goal://megogo.net/pause http://megogo.net/ru/view/44021-zakrytaya-shkola-sezon-2-seriya-13.html +2014-03-19 00:00:04 2014-03-19 12787931 2355954411395172413 http://sumo.ua/ https://google.com.ua/ +2014-03-19 00:00:04 2014-03-19 12908773 1558109441368022235 http://adultmanga.ru/sadistic_boy/vol1/1?mature=1 http://adultmanga.ru/sadistic_boy/vol1/1 +2014-03-19 00:00:04 2014-03-19 13375903 2246101281391512061 http://dota2.starladder.tv/tournament/6360 +2014-03-19 00:00:04 2014-03-19 19762435 5917401071394958225 http://avito.ru/sankt-peterburg/avtomobili_s_probegom/ford_transit_1992_270304651 http://avito.ru/sankt-peterburg/avtomobili_s_probegom/ford/transit?pmax=150000&pmin=0 +2014-03-19 00:00:04 2014-03-19 19765189 2088033481395179889 http://amkspor.com/2014/03/18/derin-futbolda-buyuk-kavga-274576/ http://sozcu.com.tr/ diff --git a/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql b/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql new file mode 100644 index 00000000000..1891cd63555 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00071_merge_tree_optimize_aio.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test.hits_snippet; + +CREATE TABLE test.hits_snippet(EventTime DateTime, EventDate Date, CounterID UInt32, UserID UInt64, URL String, Referer String) ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 4096; + +INSERT INTO test.hits_snippet(EventTime, EventDate, CounterID, UserID, URL, Referer) SELECT EventTime, EventDate, CounterID, UserID, URL, Referer FROM test.hits WHERE EventDate = toDate('2014-03-18') ORDER BY EventTime, EventDate, CounterID, UserID, URL, Referer ASC LIMIT 50; +INSERT INTO test.hits_snippet(EventTime, EventDate, CounterID, UserID, URL, Referer) SELECT EventTime, EventDate, CounterID, UserID, URL, Referer FROM test.hits WHERE EventDate = toDate('2014-03-19') ORDER BY EventTime, 
EventDate, CounterID, UserID, URL, Referer ASC LIMIT 50; + +SET min_bytes_to_use_direct_io = 8192; + +OPTIMIZE TABLE test.hits_snippet; + +SELECT EventTime, EventDate, CounterID, UserID, URL, Referer FROM test.hits_snippet ORDER BY EventTime, EventDate, CounterID, UserID, URL, Referer ASC; + +DROP TABLE test.hits_snippet; diff --git a/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference b/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference new file mode 100644 index 00000000000..97db25771ee --- /dev/null +++ b/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.reference @@ -0,0 +1,26 @@ +1383658 +1406958 +6083282 +2790616 +7466940 +2789455 +1383658 +1406958 +6083282 +2790616 +7466940 +2789455 +1383658 +9 +1448371 +7425518 +1448380 +7425527 +11 +9 +1448371 +7425518 +1448380 +7425527 +11 +9 diff --git a/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql b/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql new file mode 100644 index 00000000000..90f1c875acd --- /dev/null +++ b/dbms/tests/queries/1_stateful/00072_compare_date_and_string_index.sql @@ -0,0 +1,37 @@ +SELECT count() FROM test.hits WHERE EventDate = '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate < '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate > '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate <= '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate >= '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate IN ('2014-03-18', '2014-03-19'); + +SELECT count() FROM test.hits WHERE EventDate = toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate < toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate > toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate <= toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate >= toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate IN (toDate('2014-03-18'), toDate('2014-03-19')); + +SELECT count() FROM test.hits WHERE EventDate = concat('2014-0', '3-18'); + +DROP TABLE IF EXISTS test.hits_indexed_by_time; +CREATE TABLE test.hits_indexed_by_time (EventDate Date, EventTime DateTime) ENGINE = MergeTree(EventDate, EventTime, 8192); +INSERT INTO test.hits_indexed_by_time SELECT EventDate, EventTime FROM test.hits; + +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime = '2014-03-18 01:02:03'; +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime < '2014-03-18 01:02:03'; +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime > '2014-03-18 01:02:03'; +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime <= '2014-03-18 01:02:03'; +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime >= '2014-03-18 01:02:03'; +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime IN ('2014-03-18 01:02:03', '2014-03-19 04:05:06'); + +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime = toDateTime('2014-03-18 01:02:03'); +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime < toDateTime('2014-03-18 01:02:03'); +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime > toDateTime('2014-03-18 01:02:03'); +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime <= toDateTime('2014-03-18 01:02:03'); +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime >= toDateTime('2014-03-18 01:02:03'); +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime IN (toDateTime('2014-03-18 01:02:03'), toDateTime('2014-03-19 
04:05:06')); + +SELECT count() FROM test.hits_indexed_by_time WHERE EventTime = concat('2014-03-18 ', '01:02:03'); + +DROP TABLE test.hits_indexed_by_time; diff --git a/dbms/tests/queries/1_stateful/00073_uniq_array.reference b/dbms/tests/queries/1_stateful/00073_uniq_array.reference new file mode 100644 index 00000000000..d40e6f39073 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00073_uniq_array.reference @@ -0,0 +1,7 @@ +2014-03-17 7158 7158 7158 +2014-03-18 7122 7122 7122 +2014-03-19 7109 7109 7109 +2014-03-20 6997 6997 6997 +2014-03-21 6718 6718 6718 +2014-03-22 5716 5716 5716 +2014-03-23 5799 5799 5799 diff --git a/dbms/tests/queries/1_stateful/00073_uniq_array.sql b/dbms/tests/queries/1_stateful/00073_uniq_array.sql new file mode 100644 index 00000000000..74c031ee7a6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00073_uniq_array.sql @@ -0,0 +1 @@ +SELECT EventDate, uniqExact(UserID), length(groupUniqArray(UserID)), arrayUniq(groupArray(UserID)) FROM test.hits WHERE CounterID = 731962 GROUP BY EventDate ORDER BY EventDate; diff --git a/dbms/tests/queries/1_stateful/00074_full_join.reference b/dbms/tests/queries/1_stateful/00074_full_join.reference new file mode 100644 index 00000000000..0a4ec891a33 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00074_full_join.reference @@ -0,0 +1,60 @@ +22564972 1331 0 +21423112 357 0 +99165 72 0 +9409582 37 0 +93428 33 0 +50615 21 0 +21191842 21 0 +10274467 11 0 +14013802 0 1 +14013940 0 1 +15049483 0 1 +15834589 0 1 +16635889 0 1 +16837024 0 1 +16971091 0 1 +17757130 0 1 +18428878 0 1 +19738795 0 1 +20137870 0 1 +20338870 0 1 +22564972 1331 0 +21423112 357 0 +99165 72 0 +9409582 37 0 +93428 33 0 +50615 21 0 +21191842 21 0 +10274467 11 0 +54047 9 0 +24327391 9 0 +12133420 8 0 +22382245 8 0 +23498743 7 0 +23498989 7 0 +23799985 7 0 +84367 6 0 +1811350 5 0 +7490101 5 0 +22157924 5 0 +22334527 5 0 +14013802 0 1 +14013940 0 1 +15049483 0 1 +15834589 0 1 +16635889 0 1 +16837024 0 1 +16971091 0 1 +17757130 0 1 +18428878 0 1 +19738795 0 1 +20137870 0 1 +20338870 0 1 +20897719 0 1 +21165355 0 1 +21764326 0 1 +22101154 0 1 +22669492 0 1 +23218363 0 1 +23578141 0 1 +23712970 0 1 diff --git a/dbms/tests/queries/1_stateful/00074_full_join.sql b/dbms/tests/queries/1_stateful/00074_full_join.sql new file mode 100644 index 00000000000..19550006baa --- /dev/null +++ b/dbms/tests/queries/1_stateful/00074_full_join.sql @@ -0,0 +1,106 @@ +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY FULL OUTER JOIN +( + SELECT + CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; + + +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY LEFT JOIN +( + SELECT + CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; + + +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY RIGHT JOIN +( + SELECT + CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; + + +SELECT + CounterID, + hits, + 
visits +FROM +( + SELECT + CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY INNER JOIN +( + SELECT + CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; diff --git a/dbms/tests/queries/1_stateful/00075_left_array_join.reference b/dbms/tests/queries/1_stateful/00075_left_array_join.reference new file mode 100644 index 00000000000..52579b4a5ea --- /dev/null +++ b/dbms/tests/queries/1_stateful/00075_left_array_join.reference @@ -0,0 +1,200 @@ +4187429269810 2014-03-19 10:02:28 theme ['theme'] +4187429269810 2014-03-19 10:02:28 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +4187429269810 2014-03-19 10:02:28 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +4187429269810 2014-03-19 10:02:28 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +4187429269810 2014-03-19 10:02:28 Цифровой логин ['Цифровой логин'] +4187429269810 2014-03-19 10:02:28 Эксперимент про отрыв счетчика у папки СПАМ Счетчик есть ['Эксперимент про отрыв счетчика у папки СПАМ'] +4187429269810 2014-03-19 10:02:29 attach ['attach'] +4187429269810 2014-03-19 10:02:29 attach onefile ['attach'] +4187429269810 2014-03-19 10:02:29 Просмотр письма ['Просмотр письма'] +4187429269810 2014-03-19 10:02:30 Правая колонка Вкладка «Треды» ['Правая колонка'] +4187429269810 2014-03-19 10:02:30 Эксперимент про отрыв счетчика у папки СПАМ Счетчик есть ['Эксперимент про отрыв счетчика у папки СПАМ'] +4187429269810 2014-03-19 10:02:35 attach body ['attach'] +4187429269810 2014-03-19 10:03:47 Toolbar Входящие ['Toolbar'] +4187429269810 2014-03-19 10:03:48 Эксперимент про отрыв счетчика у папки СПАМ Счетчик есть ['Эксперимент про отрыв счетчика у папки СПАМ'] +4187429269810 2014-03-19 10:04:21 attach onefile ['attach'] +4187429269810 2014-03-19 10:04:22 attach ['attach'] +4187429269810 2014-03-19 10:04:22 Правая колонка Вкладка «Треды» ['Правая колонка'] +4187429269810 2014-03-19 10:04:22 Просмотр письма ['Просмотр письма'] +4187429269810 2014-03-19 10:04:28 attach body ['attach'] +4187429269810 2014-03-19 10:08:03 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +4187429269810 2014-03-19 10:08:04 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +142097207152117 2014-03-22 08:10:15 theme blue ['theme'] +142097207152117 2014-03-22 08:10:15 Залогиновая шапка. Правая часть Уголок ['Залогиновая шапка. 
Правая часть'] +142097207152117 2014-03-22 08:10:15 Промо сборщиков На всех ['Промо сборщиков'] +142097207152117 2014-03-22 08:10:17 registerProtocolHandler https: ['registerProtocolHandler'] +142097207152117 2014-03-22 08:10:30 Промо сборщиков На всех ['Промо сборщиков'] +142097207152117 2014-03-22 08:10:32 Правая колонка Вкладка «Треды» ['Правая колонка'] +142097207152117 2014-03-22 08:10:33 Просмотр письма показ ['Просмотр письма'] +340670786804313 2014-03-18 01:52:20 theme ['theme'] +340670786804313 2014-03-18 01:52:21 Цифровой логин ['Цифровой логин'] +340670786804313 2014-03-18 01:52:25 Правая колонка Вкладка «Треды» ['Правая колонка'] +340670786804313 2014-03-18 01:52:25 Просмотр письма ['Просмотр письма'] +340670786804313 2014-03-19 16:01:13 lcn ['lcn'] +340670786804313 2014-03-19 16:01:13 theme ['theme'] +340670786804313 2014-03-19 16:01:13 Погодная тема Показ города ['Погодная тема'] +340670786804313 2014-03-19 16:01:15 Правая колонка Вкладка «Треды» ['Правая колонка'] +340670786804313 2014-03-19 16:01:15 Просмотр письма ['Просмотр письма'] +523951316868678 2014-03-18 15:03:08 theme ['theme'] +523951316868678 2014-03-18 15:03:09 Цифровой логин ['Цифровой логин'] +523951316868678 2014-03-18 15:03:13 Просмотр письма ['Просмотр письма'] +523951316868678 2014-03-18 15:03:14 attach ['attach'] +523951316868678 2014-03-18 15:03:14 attach onefile ['attach'] +523951316868678 2014-03-18 15:03:14 Правая колонка Вкладка «Треды» ['Правая колонка'] +523951316868678 2014-03-18 17:19:31 theme ['theme'] +523951316868678 2014-03-18 17:19:31 Цифровой логин ['Цифровой логин'] +523951316868678 2014-03-18 17:19:32 lcn ['lcn'] +680431331227081 2014-03-18 08:15:14 lcn ['lcn'] +680431331227081 2014-03-18 08:15:14 theme ['theme'] +680431331227081 2014-03-18 08:15:15 Цифровой логин ['Цифровой логин'] +680431331227081 2014-03-18 08:15:17 Правая колонка Вкладка «Треды» ['Правая колонка'] +680431331227081 2014-03-18 08:15:17 Просмотр письма ['Просмотр письма'] +680431331227081 2014-03-18 08:16:10 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +680431331227081 2014-03-18 08:16:13 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +711535488569826 2014-03-17 09:07:22 theme ['theme'] +711535488569826 2014-03-17 09:07:22 Залогиновая шапка. Правая часть Уголок ['Залогиновая шапка. 
Правая часть'] +1385143435049415 2014-03-17 16:32:19 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:32:19 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:39:06 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:39:07 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:39:07 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:30:25 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:39:19 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:39:21 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:39:21 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:43:50 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:50 lcn ['lcn'] +1385143435049415 2014-03-17 20:43:50 theme ['theme'] +1385143435049415 2014-03-17 20:43:50 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:43:50 Просмотр письма ['Просмотр письма'] +1385143435049415 2014-03-17 20:43:51 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:51 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:51 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:51 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:51 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:51 Toolbar Письмо ['Toolbar'] +1385143435049415 2014-03-17 20:43:51 Правая колонка Вкладка «Треды» ['Правая колонка'] +1385143435049415 2014-03-17 20:43:51 Правая колонка Вкладка «Треды» ['Правая колонка'] +1385143435049415 2014-03-17 20:43:51 События в письмах ПДД ['События в письмах'] +1385143435049415 2014-03-17 20:44:07 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:44:12 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-17 20:44:12 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-17 20:44:12 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-17 20:44:12 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-17 20:44:12 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-17 20:44:17 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-17 20:44:18 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Toolbar Входящие ['Toolbar'] +1385143435049415 2014-03-18 11:36:47 lcn ['lcn'] +1385143435049415 2014-03-18 11:36:47 theme ['theme'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:36:47 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-18 11:45:10 Toolbar Входящие 
['Toolbar'] +1385143435049415 2014-03-18 11:45:10 lcn ['lcn'] +4187429269810 2014-03-19 10:02:28 [] +4187429269810 2014-03-19 10:02:28 theme ['theme'] +4187429269810 2014-03-19 10:02:28 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +4187429269810 2014-03-19 10:02:28 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +4187429269810 2014-03-19 10:02:28 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +4187429269810 2014-03-19 10:02:28 Цифровой логин ['Цифровой логин'] +4187429269810 2014-03-19 10:02:28 Эксперимент про отрыв счетчика у папки СПАМ Счетчик есть ['Эксперимент про отрыв счетчика у папки СПАМ'] +4187429269810 2014-03-19 10:02:29 attach ['attach'] +4187429269810 2014-03-19 10:02:29 attach onefile ['attach'] +4187429269810 2014-03-19 10:02:29 Просмотр письма ['Просмотр письма'] +4187429269810 2014-03-19 10:02:30 [] +4187429269810 2014-03-19 10:02:30 Правая колонка Вкладка «Треды» ['Правая колонка'] +4187429269810 2014-03-19 10:02:30 Эксперимент про отрыв счетчика у папки СПАМ Счетчик есть ['Эксперимент про отрыв счетчика у папки СПАМ'] +4187429269810 2014-03-19 10:02:35 [] +4187429269810 2014-03-19 10:02:35 attach body ['attach'] +4187429269810 2014-03-19 10:03:07 [] +4187429269810 2014-03-19 10:03:47 Toolbar Входящие ['Toolbar'] +4187429269810 2014-03-19 10:03:48 Эксперимент про отрыв счетчика у папки СПАМ Счетчик есть ['Эксперимент про отрыв счетчика у папки СПАМ'] +4187429269810 2014-03-19 10:04:21 attach onefile ['attach'] +4187429269810 2014-03-19 10:04:22 [] +4187429269810 2014-03-19 10:04:22 attach ['attach'] +4187429269810 2014-03-19 10:04:22 Правая колонка Вкладка «Треды» ['Правая колонка'] +4187429269810 2014-03-19 10:04:22 Просмотр письма ['Просмотр письма'] +4187429269810 2014-03-19 10:04:28 [] +4187429269810 2014-03-19 10:04:28 attach body ['attach'] +4187429269810 2014-03-19 10:08:03 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +4187429269810 2014-03-19 10:08:04 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +142097207152117 2014-03-22 08:10:15 [] +142097207152117 2014-03-22 08:10:15 [] +142097207152117 2014-03-22 08:10:15 theme blue ['theme'] +142097207152117 2014-03-22 08:10:15 Залогиновая шапка. Правая часть Уголок ['Залогиновая шапка. 
Правая часть'] +142097207152117 2014-03-22 08:10:15 Промо сборщиков На всех ['Промо сборщиков'] +142097207152117 2014-03-22 08:10:17 registerProtocolHandler https: ['registerProtocolHandler'] +142097207152117 2014-03-22 08:10:30 Промо сборщиков На всех ['Промо сборщиков'] +142097207152117 2014-03-22 08:10:32 Правая колонка Вкладка «Треды» ['Правая колонка'] +142097207152117 2014-03-22 08:10:33 [] +142097207152117 2014-03-22 08:10:33 Просмотр письма показ ['Просмотр письма'] +340670786804313 2014-03-18 01:52:20 [] +340670786804313 2014-03-18 01:52:20 theme ['theme'] +340670786804313 2014-03-18 01:52:21 Цифровой логин ['Цифровой логин'] +340670786804313 2014-03-18 01:52:25 Правая колонка Вкладка «Треды» ['Правая колонка'] +340670786804313 2014-03-18 01:52:25 Просмотр письма ['Просмотр письма'] +340670786804313 2014-03-18 01:52:26 [] +340670786804313 2014-03-19 16:01:13 [] +340670786804313 2014-03-19 16:01:13 lcn ['lcn'] +340670786804313 2014-03-19 16:01:13 theme ['theme'] +340670786804313 2014-03-19 16:01:13 Погодная тема Показ города ['Погодная тема'] +340670786804313 2014-03-19 16:01:15 [] +340670786804313 2014-03-19 16:01:15 Правая колонка Вкладка «Треды» ['Правая колонка'] +340670786804313 2014-03-19 16:01:15 Просмотр письма ['Просмотр письма'] +523951316868678 2014-03-18 15:03:08 [] +523951316868678 2014-03-18 15:03:08 theme ['theme'] +523951316868678 2014-03-18 15:03:09 Цифровой логин ['Цифровой логин'] +523951316868678 2014-03-18 15:03:13 Просмотр письма ['Просмотр письма'] +523951316868678 2014-03-18 15:03:14 [] +523951316868678 2014-03-18 15:03:14 attach ['attach'] +523951316868678 2014-03-18 15:03:14 attach onefile ['attach'] +523951316868678 2014-03-18 15:03:14 Правая колонка Вкладка «Треды» ['Правая колонка'] +523951316868678 2014-03-18 15:03:20 [] +523951316868678 2014-03-18 15:03:20 [] +523951316868678 2014-03-18 17:19:31 [] +523951316868678 2014-03-18 17:19:31 theme ['theme'] +523951316868678 2014-03-18 17:19:31 Цифровой логин ['Цифровой логин'] +523951316868678 2014-03-18 17:19:32 lcn ['lcn'] +610121304480129 2014-03-17 05:35:14 [] +610121304480129 2014-03-18 10:22:47 [] +610121304480129 2014-03-19 05:26:31 [] +610121304480129 2014-03-20 05:32:35 [] +610121304480129 2014-03-20 05:41:56 [] +610121304480129 2014-03-20 10:17:05 [] +610121304480129 2014-03-21 08:46:30 [] +610121304480129 2014-03-22 06:06:53 [] +610121304480129 2014-03-22 09:54:27 [] +680431331227081 2014-03-18 08:15:14 [] +680431331227081 2014-03-18 08:15:14 lcn ['lcn'] +680431331227081 2014-03-18 08:15:14 theme ['theme'] +680431331227081 2014-03-18 08:15:15 Цифровой логин ['Цифровой логин'] +680431331227081 2014-03-18 08:15:17 Правая колонка Вкладка «Треды» ['Правая колонка'] +680431331227081 2014-03-18 08:15:17 Просмотр письма ['Просмотр письма'] +680431331227081 2014-03-18 08:15:18 [] +680431331227081 2014-03-18 08:16:10 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +680431331227081 2014-03-18 08:16:13 [] +680431331227081 2014-03-18 08:16:13 Залогиновая шапка. Правая часть Меню за логином ['Залогиновая шапка. Правая часть'] +711535488569826 2014-03-17 09:07:22 [] +711535488569826 2014-03-17 09:07:22 [] +711535488569826 2014-03-17 09:07:22 theme ['theme'] +711535488569826 2014-03-17 09:07:22 Залогиновая шапка. Правая часть Уголок ['Залогиновая шапка. 
Правая часть'] +711535488569826 2014-03-17 09:07:23 [] +711535488569826 2014-03-17 09:07:24 [] +1385143435049415 2014-03-17 16:32:19 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:32:19 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:39:06 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:39:07 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 16:39:07 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:30:25 [] +1385143435049415 2014-03-17 20:30:25 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:39:19 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:39:21 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:39:21 Аттачи в списке писем Превьюшки ['Аттачи в списке писем'] +1385143435049415 2014-03-17 20:43:50 [] diff --git a/dbms/tests/queries/1_stateful/00075_left_array_join.sql b/dbms/tests/queries/1_stateful/00075_left_array_join.sql new file mode 100644 index 00000000000..0de215ceb36 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00075_left_array_join.sql @@ -0,0 +1,2 @@ +SELECT UserID, EventTime, pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1143050 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; +SELECT UserID, EventTime, pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits LEFT ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1143050 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; diff --git a/dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference b/dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference new file mode 100644 index 00000000000..72749c905a3 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00076_system_columns_bytes.reference @@ -0,0 +1 @@ +1 1 1 diff --git a/dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql b/dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql new file mode 100644 index 00000000000..434054b6980 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00076_system_columns_bytes.sql @@ -0,0 +1 @@ +SELECT sum(data_compressed_bytes) > 0, sum(data_uncompressed_bytes) > 0, sum(marks_bytes) > 0 FROM system.columns; diff --git a/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference b/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference new file mode 100644 index 00000000000..1dcb023e190 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.reference @@ -0,0 +1,10 @@ +1 +1 +1 +8873898 5148628775290158164 +8873898 5148628775290158164 +8873898 5148628775290158164 +8873898 5148628775290158164 +1 +1 +1 diff --git a/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql b/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql new file mode 100644 index 00000000000..d745203ea6b --- /dev/null +++ b/dbms/tests/queries/1_stateful/00077_log_tinylog_stripelog.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS test.hits_log; +DROP TABLE IF EXISTS test.hits_tinylog; +DROP TABLE IF EXISTS test.hits_stripelog; + +CREATE TABLE test.hits_log (CounterID UInt32, AdvEngineID UInt8, RegionID UInt32, SearchPhrase String, UserID UInt64) ENGINE = Log; +CREATE TABLE test.hits_tinylog (CounterID UInt32, AdvEngineID UInt8, RegionID UInt32, SearchPhrase String, UserID UInt64) ENGINE = TinyLog; +CREATE TABLE 
test.hits_stripelog (CounterID UInt32, AdvEngineID UInt8, RegionID UInt32, SearchPhrase String, UserID UInt64) ENGINE = StripeLog; + +CHECK TABLE test.hits_log; +CHECK TABLE test.hits_tinylog; +CHECK TABLE test.hits_stripelog; + +INSERT INTO test.hits_log SELECT CounterID, AdvEngineID, RegionID, SearchPhrase, UserID FROM test.hits; +INSERT INTO test.hits_tinylog SELECT CounterID, AdvEngineID, RegionID, SearchPhrase, UserID FROM test.hits; +INSERT INTO test.hits_stripelog SELECT CounterID, AdvEngineID, RegionID, SearchPhrase, UserID FROM test.hits; + +SELECT count(), sum(cityHash64(CounterID, AdvEngineID, RegionID, SearchPhrase, UserID)) FROM test.hits; +SELECT count(), sum(cityHash64(*)) FROM test.hits_log; +SELECT count(), sum(cityHash64(*)) FROM test.hits_tinylog; +SELECT count(), sum(cityHash64(*)) FROM test.hits_stripelog; + +CHECK TABLE test.hits_log; +CHECK TABLE test.hits_tinylog; +CHECK TABLE test.hits_stripelog; + +DROP TABLE test.hits_log; +DROP TABLE test.hits_tinylog; +DROP TABLE test.hits_stripelog; diff --git a/dbms/tests/queries/1_stateful/00078_group_by_arrays.reference b/dbms/tests/queries/1_stateful/00078_group_by_arrays.reference new file mode 100644 index 00000000000..5f932f4649f --- /dev/null +++ b/dbms/tests/queries/1_stateful/00078_group_by_arrays.reference @@ -0,0 +1,40 @@ +[] 8001320 +[1698655] 218383 +[3288004] 26325 +[2793757] 26259 +[3308677,2622979,2532754,2532649,2532640,2532631,2532622,2532613,2623111,2532604,2532448,2532439,2532430,2532718,2532568,2532421,2532709,2532412,2532403,2532520,2532727,2532577,2532658,2532367,2532736,2532586,2532376,2532745,2532595,2532385,2532394,2532457,2532763,2532475,2532772,2532484,2532781,2532493,2532502,2532511,2532529] 24078 +[2459053] 15706 +[3658087] 14326 +[3288145] 11558 +[3875836] 9599 +[4208059] 8938 +[] [] 3929946 +[] [1698655] 88062 +[9] [] 21709 +[6] [] 18019 +[90] [] 13936 +[1] [] 12855 +[99] [] 12368 +[20,1] [] 11949 +[4,5] [] 11550 +[4] [] 11530 +[] [] 3970023 +['gen_time'] [] 74637 +[] [9] 22059 +['Эксперимент про отрыв счетчика у папки СПАМ'] [] 18814 +['Toolbar'] [] 18168 +['Правая колонка'] [] 18132 +['Аттачи в списке писем'] [] 16735 +['Драгндроп папок'] [] 14652 +[] [6] 14350 +[] [90] 14183 +['Banner'] [5,4,3,118,11,1,37,38,3281,3201,3579,15,2] 1472 +['Аттачи в списке писем'] [7,6,82,9,3135,1,105,96,333,437,95,19,76] 1226 +['Аттачи в списке писем'] [2873,2866,9,96,635,3,39,26,12,2,8,11,37] 1173 +['registration_month','user_login','is_registred'] [4,90,5,601,2894,2893,6,68,399,11,88,117] 937 +['gen_time'] [9] 881 +['Аттачи в списке писем'] [7,6,82,9,3135,1,105,333,96,437,95,19,76] 859 +['Аттачи в списке писем'] [2873,2866,9,96,635,39,3,26,12,8,2,11,118] 837 +['Banner'] [635,1,9,90,96,19,2,3281,3349,8,5,3579] 777 +['Banner'] [90,1,635,19,9,96,2,3579,8] 766 +['gallery'] [7,76,636,460,6,99,4] 750 diff --git a/dbms/tests/queries/1_stateful/00078_group_by_arrays.sql b/dbms/tests/queries/1_stateful/00078_group_by_arrays.sql new file mode 100644 index 00000000000..eb47be0ec22 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00078_group_by_arrays.sql @@ -0,0 +1,4 @@ +SELECT GoalsReached AS k, count() AS c FROM test.hits GROUP BY k ORDER BY c DESC LIMIT 10; +SELECT GeneralInterests AS k1, GoalsReached AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; +SELECT ParsedParams.Key1 AS k1, GeneralInterests AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; +SELECT ParsedParams.Key1 AS k1, GeneralInterests AS k2, count() AS c FROM test.hits WHERE notEmpty(k1) AND 
notEmpty(k2) GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; diff --git a/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference b/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference new file mode 100644 index 00000000000..1b1d514856f --- /dev/null +++ b/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.reference @@ -0,0 +1,104 @@ +"Пригласи друга" в inbox — Беларусь 1 +"Пригласи друга" на done 0 +Click in compose 0 +Compose-Translate 4699 +Done 4032 +Done-Promo 2 +Hotels 0 +Money 0 +Search 0 +TinyMCE 15 +Todo 4 +Toolbar 0 +User Button 0 +attach 8010 +aviaeticket 0 +hot-keys 0 +lcn 14387 +popular_suggest 1855 +registerProtocolHandler 76 +theme 15777 +Аттачи в списке писем 0 +Аттачи из Диска 3401 +Баббл 0 +Башкортостан 2 +Виджет валидации (внутри письма) 0 +Визард (эксперимент) 0 +ГБ за ДР 0 +Директ над письмами 1 +Драгндроп папок 24211 +Живые письма в почте 1 +Залогиновая шапка. Правая часть 0 +Как делалась пластилиновая тема 2 +Карточка контакта 71 +Кнопка Прикрепить 7149 +МЯП->Валидация 0 +Марка 2389 +Настроение 5 +Настройки 0 +Неработающий сборщик 695 +Нижегородская 2 +Новосибирская 2 +Новый поп-ап фильтров 0 +Ответить всем 0 +Пейджер по датам 2986 +Переход к недоступному письму 23 +Плашка про старый браузер 840 +Погодная тема 0 +Подписи 0 +Поиск 5264 +Поменять пароль-2 11 +Поп-ап создания папки 26 +Попап для цифровых логинов 0 +Правая колонка 1418 +Предложение фильтровать групоны 0 +Промо Языков 0 +Промо меток 0 +Промо полоска элементов 0 +Промо сборщиков 0 +Просмотр письма 25023 +Просмотрщик картинок 10920 +Ревалидация 90 +Самарская 4 +События в письмах 0 +Сообщение о непоказе картинок и ссылок в спаме 456 +Сообщение о пересылке 2 +Статуслайн для фильтров 0 +Статуслайн игнорирования треда 1 +Татарстан 0 +Тема КХЛ 4 +Тема мишки 425 +Тема: region_primorie 1 +Тизер социальных новостей 6 +Томск 0 +Установка браузера 12 +Фильтр по типам писем 0 +Цифровой логин 3878 +Что нового 1 +Шаблоны 0 +Эксперимент про отрыв счетчика у папки СПАМ 0 +отправить адресату SMS-уведомление после отправки письма 0 +промо SMS 0 +промо-полоска Волгограда 1 +промо-полоска Торпедо 0 +промо-полоска Трактора 2 +"Пригласи друга" в inbox — Беларусь [''] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +"Пригласи друга" на done ['Новая простая'] +['черный'] +['черный'] +['черн'] +['черн'] +['черн'] +['черн'] +['черн'] +['фото автора и из архива редакции'] +['фото Георгий Садков'] +['фото Александр Кульнев'] diff --git a/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql b/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql new file mode 100644 index 00000000000..6474c44c0b6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00079_array_join_not_used_joined_column.sql @@ -0,0 +1,3 @@ +SELECT PP.Key1 AS `ym:s:paramsLevel1`, sum(arrayAll(`x_1` -> `x_1`= '', ParsedParams.Key2)) AS `ym:s:visits` FROM test.hits ARRAY JOIN ParsedParams AS `PP` WHERE CounterID = 1143050 GROUP BY `ym:s:paramsLevel1` ORDER BY PP.Key1, `ym:s:visits` LIMIT 0, 100; +SELECT PP.Key1 AS x1, ParsedParams.Key2 AS x2 FROM test.hits ARRAY JOIN ParsedParams AS PP WHERE CounterID = 1143050 ORDER BY x1, x2 LIMIT 10; +SELECT ParsedParams.Key2 AS x FROM 
test.hits ARRAY JOIN ParsedParams AS PP ORDER BY x DESC LIMIT 10; diff --git a/dbms/tests/queries/1_stateful/00080_array_join_and_union.reference b/dbms/tests/queries/1_stateful/00080_array_join_and_union.reference new file mode 100644 index 00000000000..209e3ef4b62 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00080_array_join_and_union.reference @@ -0,0 +1 @@ +20 diff --git a/dbms/tests/queries/1_stateful/00080_array_join_and_union.sql b/dbms/tests/queries/1_stateful/00080_array_join_and_union.sql new file mode 100644 index 00000000000..eab92ad99ed --- /dev/null +++ b/dbms/tests/queries/1_stateful/00080_array_join_and_union.sql @@ -0,0 +1 @@ +SELECT count() FROM (SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 101500 LIMIT 10 UNION ALL SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 101500 LIMIT 10); diff --git a/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference b/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference new file mode 100644 index 00000000000..a29f53e89c6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.reference @@ -0,0 +1,60 @@ +475698 + +475698 +475698 + +475698 +475698 + +475698 +475698 + +475698 +1 475698 + +1 475698 +1 475698 + +1 475698 +1 475698 + +1 475698 +1 475698 + +1 475698 +-1 237160 +0 182238 +1 3594 +2 704 +5 2357 +6 49622 +8 23 + +0 475698 +-1 237160 +0 182238 +1 3594 +2 704 +5 2357 +6 49622 +8 23 + +0 475698 +-1 237160 +0 182238 +1 3594 +2 704 +5 2357 +6 49622 +8 23 + +0 475698 +-1 237160 +0 182238 +1 3594 +2 704 +5 2357 +6 49622 +8 23 + +0 475698 diff --git a/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql b/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql new file mode 100644 index 00000000000..ec5bdca5d85 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00081_group_by_without_key_and_totals.sql @@ -0,0 +1,15 @@ +SELECT count() AS c FROM test.hits WHERE CounterID = 731962 WITH TOTALS SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT count() AS c FROM test.hits WHERE CounterID = 731962 WITH TOTALS SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT count() AS c FROM test.hits WHERE CounterID = 731962 WITH TOTALS SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT count() AS c FROM test.hits WHERE CounterID = 731962 WITH TOTALS SETTINGS totals_mode = 'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; + +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; + +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE 
CounterID = 731962 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 731962 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; + diff --git a/dbms/tests/queries/1_stateful/00082_quantiles.reference b/dbms/tests/queries/1_stateful/00082_quantiles.reference new file mode 100644 index 00000000000..5a841b948ea --- /dev/null +++ b/dbms/tests/queries/1_stateful/00082_quantiles.reference @@ -0,0 +1,80 @@ +1143050 1366 +731962 1366 +722545 1366 +722889 1366 +2237260 1366 +23057320 1366 +722818 1366 +48221 1280 +19762435 1366 +722884 1366 +1143050 [1024,1366,1920,1920,2560] +731962 [1024,1366,1920,1920,2560] +722545 [1024,1366,1920,1920,2259] +722889 [1152,1366,1920,1920,2560] +2237260 [1024,1366,1680,1920,1920] +23057320 [914,1366,1680,1920,2560] +722818 [1024,1366,1920,1920,2560] +48221 [320,1280,1600,1920,2000] +19762435 [1024,1366,1680,1920,1920] +722884 [1024,1366,1920,1920,1920] +1143050 1353 +731962 1353 +722545 1353 +722889 1353 +2237260 1353 +23057320 1353 +722818 1353 +48221 1283 +19762435 1353 +722884 1353 +1143050 [1016,1353,1916,1916,2559] +731962 [1016,1353,1916,1916,2559] +722545 [1016,1353,1916,1916,2258] +722889 [1149,1353,1916,1916,2559] +2237260 [1016,1353,1687,1916,1916] +23057320 [914,1353,1687,1916,2559] +722818 [1016,1353,1916,1916,2559] +48221 [320,1283,1606,1916,2006] +19762435 [1016,1353,1687,1916,1916] +722884 [1016,1353,1916,1916,1916] +1143050 1366 +731962 1366 +722545 1366 +722889 1366 +2237260 1366 +23057320 1366 +722818 1366 +48221 1280 +19762435 1366 +722884 1366 +1143050 [1024,1366,1920,1920,2560] +731962 [1024,1366,1920,1920,2560] +722545 [1024,1366,1920,1920,2259] +722889 [1152,1366,1920,1920,2560] +2237260 [1024,1366,1680,1920,1920] +23057320 [914,1366,1680,1920,2560] +722818 [1024,1366,1920,1920,2560] +48221 [320,1280,1600,1920,2000] +19762435 [1024,1366,1680,1920,1920] +722884 [1024,1366,1920,1920,1920] +1143050 1353 +731962 1353 +722545 1353 +722889 1353 +2237260 1353 +23057320 1353 +722818 1353 +48221 1283 +19762435 1353 +722884 1353 +1143050 [1016,1353,1916,1916,2559] +731962 [1016,1353,1916,1916,2559] +722545 [1016,1353,1916,1916,2258] +722889 [1149,1353,1916,1916,2559] +2237260 [1016,1353,1687,1916,1916] +23057320 [914,1353,1687,1916,2559] +722818 [1016,1353,1916,1916,2559] +48221 [320,1283,1606,1916,2006] +19762435 [1016,1353,1687,1916,1916] +722884 [1016,1353,1916,1916,1916] diff --git a/dbms/tests/queries/1_stateful/00082_quantiles.sql b/dbms/tests/queries/1_stateful/00082_quantiles.sql new file mode 100644 index 00000000000..3c42b43f3f9 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00082_quantiles.sql @@ -0,0 +1,12 @@ +SELECT CounterID AS k, quantileExact(0.5)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesExact(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM test.hits GROUP BY k 
ORDER BY count() DESC, CounterID LIMIT 10; + +SELECT CounterID AS k, quantileTiming(0.5)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesTiming(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; + + +SELECT CounterID AS k, quantileExact(0.5)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesExact(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; + +SELECT CounterID AS k, quantileTiming(0.5)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesTiming(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; diff --git a/dbms/tests/queries/1_stateful/00083_array_filter.reference b/dbms/tests/queries/1_stateful/00083_array_filter.reference new file mode 100644 index 00000000000..388e32b8920 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00083_array_filter.reference @@ -0,0 +1,2 @@ +1025826 +1025826 diff --git a/dbms/tests/queries/1_stateful/00083_array_filter.sql b/dbms/tests/queries/1_stateful/00083_array_filter.sql new file mode 100644 index 00000000000..07793846713 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00083_array_filter.sql @@ -0,0 +1,2 @@ +SELECT sum(length(ParsedParams.Key1)) FROM test.hits WHERE notEmpty(ParsedParams.Key1); +SELECT sum(length(ParsedParams.ValueDouble)) FROM test.hits WHERE notEmpty(ParsedParams.ValueDouble); diff --git a/dbms/tests/queries/1_stateful/00084_external_aggregation.reference b/dbms/tests/queries/1_stateful/00084_external_aggregation.reference new file mode 100644 index 00000000000..89c454decf6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00084_external_aggregation.reference @@ -0,0 +1,20 @@ +https://mail.yandex.ru/neo2/#inbox 594 +http://yandex.ru/ 318 +http://onlain-film.com/ym.php 235 +https://mail.yandex.ru/neo2/ 220 +http://pluginplus.net/plugins/statistic/statistic_all.html 186 +http://m.kporno.com/ 163 +http://err.hc.ru/cgierr/26/ 160 +http://pluginplus.net/plugins/statistic/statistic_im.html 160 +http://znanija.com/ 141 +https://e.mail.ru/messages/inbox/ 135 +https://mail.yandex.ru/neo2/#inbox 594 +http://yandex.ru/ 318 +http://onlain-film.com/ym.php 235 +https://mail.yandex.ru/neo2/ 220 +http://pluginplus.net/plugins/statistic/statistic_all.html 186 +http://m.kporno.com/ 163 +http://err.hc.ru/cgierr/26/ 160 +http://pluginplus.net/plugins/statistic/statistic_im.html 160 +http://znanija.com/ 141 +https://e.mail.ru/messages/inbox/ 135 diff --git a/dbms/tests/queries/1_stateful/00084_external_aggregation.sql b/dbms/tests/queries/1_stateful/00084_external_aggregation.sql new file mode 100644 index 00000000000..1423f026bbb --- /dev/null +++ b/dbms/tests/queries/1_stateful/00084_external_aggregation.sql @@ -0,0 +1,10 @@ +SET max_bytes_before_external_group_by = 200000000; + +SET max_memory_usage = 1000000000; +SET max_threads = 12; +SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u DESC, URL LIMIT 10; + +SET max_memory_usage = 300000000; +SET max_threads = 2; +SET aggregation_memory_efficient_merge_threads = 1; +SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u DESC, URL LIMIT 10; diff --git 
a/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference b/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.reference @@ -0,0 +1 @@ +0 diff --git a/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql b/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql new file mode 100644 index 00000000000..db6c24b8b81 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00085_monotonic_evaluation_segfault.sql @@ -0,0 +1 @@ +SELECT any(0) FROM test.visits WHERE (toInt32(toDateTime(StartDate))) > 1000000000; diff --git a/dbms/tests/queries/1_stateful/00086_array_reduce.reference b/dbms/tests/queries/1_stateful/00086_array_reduce.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00086_array_reduce.sql b/dbms/tests/queries/1_stateful/00086_array_reduce.sql new file mode 100644 index 00000000000..dfe2e63aa1e --- /dev/null +++ b/dbms/tests/queries/1_stateful/00086_array_reduce.sql @@ -0,0 +1 @@ +SELECT arrayFilter(x -> x != 1, arrayMap((a, b) -> a = b, GeneralInterests, arrayReduce('groupArray', GeneralInterests))) AS res FROM test.hits WHERE length(res) != 0; diff --git a/dbms/tests/queries/1_stateful/00087_where_0.reference b/dbms/tests/queries/1_stateful/00087_where_0.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dbms/tests/queries/1_stateful/00087_where_0.sql b/dbms/tests/queries/1_stateful/00087_where_0.sql new file mode 100644 index 00000000000..c8fe05225fa --- /dev/null +++ b/dbms/tests/queries/1_stateful/00087_where_0.sql @@ -0,0 +1,3 @@ +SET max_rows_to_read = 1000; +SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 != 0 GROUP BY CounterID; +SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 AND CounterID = 34 GROUP BY CounterID; diff --git a/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference b/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference new file mode 100644 index 00000000000..deafee64641 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.reference @@ -0,0 +1,26 @@ +{ + "meta": + [ + { + "name": "EventDate", + "type": "Date" + }, + { + "name": "count()", + "type": "UInt64" + } + ], + + "data": + [ + ["2014-03-17", "1406958"], + ["2014-03-18", "1383658"], + ["2014-03-19", "1405797"], + ["2014-03-20", "1353623"], + ["2014-03-21", "1245779"] + ], + + "rows": 5, + + "rows_before_limit_at_least": 7 +} diff --git a/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql b/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql new file mode 100644 index 00000000000..60dbec2e9bc --- /dev/null +++ b/dbms/tests/queries/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql @@ -0,0 +1,2 @@ +SET output_format_write_statistics = 0; +SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact; diff --git a/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference b/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference new file mode 100644 index 00000000000..f0db728c5e7 --- /dev/null +++ 
b/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.reference @@ -0,0 +1,9 @@ +0 +0 +0 +0 +samsungapps://productdetail/com.umojo.irr.android +ts3server://31.135.72.55?nickname=kridQl +http://ru/rus/index.php ru 15 +http://ex.ru/ ex.ru 12 +http://dex.ru/ dex.ru 11 diff --git a/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql b/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql new file mode 100644 index 00000000000..ecfd500ae6a --- /dev/null +++ b/dbms/tests/queries/1_stateful/00089_position_functions_with_non_constant_arg.sql @@ -0,0 +1,8 @@ +SELECT count() FROM test.hits WHERE position(URL, 'metrika') != position(URL, materialize('metrika')); +SELECT count() FROM test.hits WHERE positionCaseInsensitive(URL, 'metrika') != positionCaseInsensitive(URL, materialize('metrika')); +SELECT count() FROM test.hits WHERE positionUTF8(Title, 'новости') != positionUTF8(Title, materialize('новости')); +SELECT count() FROM test.hits WHERE positionCaseInsensitiveUTF8(Title, 'новости') != positionCaseInsensitiveUTF8(Title, materialize('новости')); + +SELECT position(URL, URLDomain) AS x FROM test.hits WHERE x = 0 AND URL NOT LIKE '%yandex.ru%' LIMIT 100; +SELECT URL FROM test.hits WHERE x > 10 ORDER BY position(URL, URLDomain) AS x DESC, URL LIMIT 2; +SELECT DISTINCT URL, URLDomain, position('http://yandex.ru/', URLDomain) AS x FROM test.hits WHERE x > 8 ORDER BY position('http://yandex.ru/', URLDomain) DESC, URL LIMIT 3; diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference b/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference new file mode 100644 index 00000000000..f00c965d830 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.reference @@ -0,0 +1,10 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 diff --git a/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh b/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh new file mode 100755 index 00000000000..c30f137e8a6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00090_thread_pool_deadlock.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +echo '1'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '2'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '3'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '4'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '5'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6}', test.hits) GROUP BY k ORDER BY c DESC 
LIMIT 10" --format=Null 2>/dev/null; +echo '6'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '7'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '8'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '9'; +clickhouse-client --distributed_aggregation_memory_efficient=1 --group_by_two_level_threshold=1 --max_execution_time=1 --query="SELECT SearchPhrase AS k, count() AS c FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) GROUP BY k ORDER BY c DESC LIMIT 10" --format=Null 2>/dev/null; +echo '10'; diff --git a/dbms/tests/queries/1_stateful/00139_like.reference b/dbms/tests/queries/1_stateful/00139_like.reference new file mode 100644 index 00000000000..2cfe3231ff6 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00139_like.reference @@ -0,0 +1,4 @@ +79628 +0 +79628 +102851 diff --git a/dbms/tests/queries/1_stateful/00139_like.sql b/dbms/tests/queries/1_stateful/00139_like.sql new file mode 100644 index 00000000000..ccc195bc81d --- /dev/null +++ b/dbms/tests/queries/1_stateful/00139_like.sql @@ -0,0 +1,5 @@ +/* Заметим, что запросы написаны так, как будто пользователь не понимает смысл символа _ в LIKE выражении. 
*/ +SELECT count() FROM test.hits WHERE URL LIKE '%/avtomobili_s_probegom/_%__%__%__%'; +SELECT count() FROM test.hits WHERE URL LIKE '/avtomobili_s_probegom/_%__%__%__%'; +SELECT count() FROM test.hits WHERE URL LIKE '%_/avtomobili_s_probegom/_%__%__%__%'; +SELECT count() FROM test.hits WHERE URL LIKE '%avtomobili%'; diff --git a/dbms/tests/queries/1_stateful/00140_rename.reference b/dbms/tests/queries/1_stateful/00140_rename.reference new file mode 100644 index 00000000000..80776d85438 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00140_rename.reference @@ -0,0 +1,12 @@ +286 +1962 +286 +1962 +1962 +286 +1962 +1962 +286 +1962 +1962 +286 diff --git a/dbms/tests/queries/1_stateful/00140_rename.sql b/dbms/tests/queries/1_stateful/00140_rename.sql new file mode 100644 index 00000000000..33b7b1ad720 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00140_rename.sql @@ -0,0 +1,32 @@ +RENAME TABLE test.hits TO test.visits_tmp, test.visits TO test.hits, test.visits_tmp TO test.visits; + +SELECT sum(Sign) FROM test.hits WHERE CounterID = 34; +SELECT count() FROM test.visits WHERE CounterID = 34; + +RENAME TABLE test.hits TO test.hits_tmp, test.hits_tmp TO test.hits; + +SELECT sum(Sign) FROM test.hits WHERE CounterID = 34; +SELECT count() FROM test.visits WHERE CounterID = 34; + +RENAME TABLE test.hits TO test.visits_tmp, test.visits TO test.hits, test.visits_tmp TO test.visits; + +SELECT count() FROM test.hits WHERE CounterID = 34; +SELECT sum(Sign) FROM test.visits WHERE CounterID = 34; + +RENAME TABLE test.hits TO test.hits2, test.hits2 TO test.hits3, test.hits3 TO test.hits4, test.hits4 TO test.hits5, test.hits5 TO test.hits6, test.hits6 TO test.hits7, test.hits7 TO test.hits8, test.hits8 TO test.hits9, test.hits9 TO test.hits10; + +SELECT count() FROM test.hits10 WHERE CounterID = 34; + +RENAME TABLE test.hits10 TO test.hits; + +SELECT count() FROM test.hits WHERE CounterID = 34; + +RENAME TABLE test.hits TO hits, test.visits TO test.hits; + +SELECT sum(Sign) FROM test.hits WHERE CounterID = 34; +SELECT count() FROM hits WHERE CounterID = 34; + +RENAME TABLE test.hits TO test.visits, hits TO test.hits; + +SELECT count() FROM test.hits WHERE CounterID = 34; +SELECT sum(Sign) FROM test.visits WHERE CounterID = 34; diff --git a/dbms/tests/queries/1_stateful/00141_transform.reference b/dbms/tests/queries/1_stateful/00141_transform.reference new file mode 100644 index 00000000000..9cd12dac687 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00141_transform.reference @@ -0,0 +1,3 @@ +Яндекс 498635 +Google 229872 +Остальные 104472 diff --git a/dbms/tests/queries/1_stateful/00141_transform.sql b/dbms/tests/queries/1_stateful/00141_transform.sql new file mode 100644 index 00000000000..0ec27826747 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00141_transform.sql @@ -0,0 +1 @@ +SELECT transform(SearchEngineID, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title, count() AS c FROM test.hits WHERE SearchEngineID != 0 GROUP BY title HAVING c > 0 ORDER BY c DESC LIMIT 10; diff --git a/dbms/tests/queries/1_stateful/00142_system_columns.reference b/dbms/tests/queries/1_stateful/00142_system_columns.reference new file mode 100644 index 00000000000..2cc224dccaf --- /dev/null +++ b/dbms/tests/queries/1_stateful/00142_system_columns.reference @@ -0,0 +1,133 @@ +hits WatchID UInt64 +hits JavaEnable UInt8 +hits Title String +hits GoodEvent Int16 +hits EventTime DateTime +hits EventDate Date +hits CounterID UInt32 +hits ClientIP UInt32 +hits ClientIP6 FixedString(16) +hits RegionID UInt32 +hits UserID UInt64 
+hits CounterClass Int8 +hits OS UInt8 +hits UserAgent UInt8 +hits URL String +hits Referer String +hits URLDomain String +hits RefererDomain String +hits Refresh UInt8 +hits IsRobot UInt8 +hits RefererCategories Array(UInt16) +hits URLCategories Array(UInt16) +hits URLRegions Array(UInt32) +hits RefererRegions Array(UInt32) +hits ResolutionWidth UInt16 +hits ResolutionHeight UInt16 +hits ResolutionDepth UInt8 +hits FlashMajor UInt8 +hits FlashMinor UInt8 +hits FlashMinor2 String +hits NetMajor UInt8 +hits NetMinor UInt8 +hits UserAgentMajor UInt16 +hits UserAgentMinor FixedString(2) +hits CookieEnable UInt8 +hits JavascriptEnable UInt8 +hits IsMobile UInt8 +hits MobilePhone UInt8 +hits MobilePhoneModel String +hits Params String +hits IPNetworkID UInt32 +hits TraficSourceID Int8 +hits SearchEngineID UInt16 +hits SearchPhrase String +hits AdvEngineID UInt8 +hits IsArtifical UInt8 +hits WindowClientWidth UInt16 +hits WindowClientHeight UInt16 +hits ClientTimeZone Int16 +hits ClientEventTime DateTime +hits SilverlightVersion1 UInt8 +hits SilverlightVersion2 UInt8 +hits SilverlightVersion3 UInt32 +hits SilverlightVersion4 UInt16 +hits PageCharset String +hits CodeVersion UInt32 +hits IsLink UInt8 +hits IsDownload UInt8 +hits IsNotBounce UInt8 +hits FUniqID UInt64 +hits HID UInt32 +hits IsOldCounter UInt8 +hits IsEvent UInt8 +hits IsParameter UInt8 +hits DontCountHits UInt8 +hits WithHash UInt8 +hits HitColor FixedString(1) +hits UTCEventTime DateTime +hits Age UInt8 +hits Sex UInt8 +hits Income UInt8 +hits Interests UInt16 +hits Robotness UInt8 +hits GeneralInterests Array(UInt16) +hits RemoteIP UInt32 +hits RemoteIP6 FixedString(16) +hits WindowName Int32 +hits OpenerName Int32 +hits HistoryLength Int16 +hits BrowserLanguage FixedString(2) +hits BrowserCountry FixedString(2) +hits SocialNetwork String +hits SocialAction String +hits HTTPError UInt16 +hits SendTiming Int32 +hits DNSTiming Int32 +hits ConnectTiming Int32 +hits ResponseStartTiming Int32 +hits ResponseEndTiming Int32 +hits FetchTiming Int32 +hits RedirectTiming Int32 +hits DOMInteractiveTiming Int32 +hits DOMContentLoadedTiming Int32 +hits DOMCompleteTiming Int32 +hits LoadEventStartTiming Int32 +hits LoadEventEndTiming Int32 +hits NSToDOMContentLoadedTiming Int32 +hits FirstPaintTiming Int32 +hits RedirectCount Int8 +hits SocialSourceNetworkID UInt8 +hits SocialSourcePage String +hits ParamPrice Int64 +hits ParamOrderID String +hits ParamCurrency FixedString(3) +hits ParamCurrencyID UInt16 +hits GoalsReached Array(UInt32) +hits OpenstatServiceName String +hits OpenstatCampaignID String +hits OpenstatAdID String +hits OpenstatSourceID String +hits UTMSource String +hits UTMMedium String +hits UTMCampaign String +hits UTMContent String +hits UTMTerm String +hits FromTag String +hits HasGCLID UInt8 +hits RefererHash UInt64 +hits URLHash UInt64 +hits CLID UInt32 +hits YCLID UInt64 +hits ShareService String +hits ShareURL String +hits ShareTitle String +hits ParsedParams.Key1 Array(String) +hits ParsedParams.Key2 Array(String) +hits ParsedParams.Key3 Array(String) +hits ParsedParams.Key4 Array(String) +hits ParsedParams.Key5 Array(String) +hits ParsedParams.ValueDouble Array(Float64) +hits IslandID FixedString(16) +hits RequestNum UInt32 +hits RequestTry UInt8 diff --git a/dbms/tests/queries/1_stateful/00142_system_columns.sql b/dbms/tests/queries/1_stateful/00142_system_columns.sql new file mode 100644 index 00000000000..f6b5e7ed89f --- /dev/null +++ b/dbms/tests/queries/1_stateful/00142_system_columns.sql @@ -0,0 +1 @@ +SELECT 
table, name, type, default_kind, default_expression FROM system.columns WHERE database = 'test' AND table = 'hits' diff --git a/dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference b/dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference new file mode 100644 index 00000000000..b9772df780b --- /dev/null +++ b/dbms/tests/queries/1_stateful/00143_transform_non_const_default.reference @@ -0,0 +1,10 @@ +Яндекс 498635 +Google 229872 +utf-8 73842 +windows-1251 28664 + 1284 +koi8-r 165 +windows-1252 148 +windows-1254 126 +iso-8859-1 102 +iso-8859-9 86 diff --git a/dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql b/dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql new file mode 100644 index 00000000000..68d2bc52ab0 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00143_transform_non_const_default.sql @@ -0,0 +1 @@ +SELECT transform(SearchEngineID, [2, 3], ['Яндекс', 'Google'], PageCharset) AS title, count() AS c FROM test.hits WHERE SearchEngineID != 0 GROUP BY title HAVING c > 0 ORDER BY c DESC LIMIT 10; diff --git a/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference b/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference new file mode 100644 index 00000000000..72b11764a12 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.reference @@ -0,0 +1,7 @@ +2014-03-17 36613 36613 +2014-03-18 36531 54710 +2014-03-19 36940 69954 +2014-03-20 36462 83923 +2014-03-21 35447 96824 +2014-03-22 31555 108565 +2014-03-23 31200 119497 diff --git a/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql b/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql new file mode 100644 index 00000000000..ff05ff86799 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00144_functions_of_aggregation_states.sql @@ -0,0 +1 @@ +SELECT EventDate, finalizeAggregation(state), runningAccumulate(state) FROM (SELECT EventDate, uniqState(UserID) AS state FROM test.hits GROUP BY EventDate ORDER BY EventDate); diff --git a/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference b/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference new file mode 100644 index 00000000000..39980910bfc --- /dev/null +++ b/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.reference @@ -0,0 +1,21 @@ +nan +nan +159323.735527 +nan +nan +399.153774 +nan +0 +159323.717573 +nan +0 +399.153752 +nan +nan +48783.944394 +nan +0 +48783.938897 +nan +nan +0.451491 diff --git a/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql b/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql new file mode 100644 index 00000000000..07dd63d723d --- /dev/null +++ b/dbms/tests/queries/1_stateful/00145_aggregate_functions_statistics.sql @@ -0,0 +1,28 @@ +SELECT varSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT varSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(varSamp(ResolutionWidth), 6) FROM test.hits; + +SELECT stddevSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT stddevSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(stddevSamp(ResolutionWidth), 6) FROM test.hits; + +SELECT varPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT varPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); 
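-- varPop of a single row is 0 (population variance divides by n), while varSamp of a single row is nan (sample variance divides by n - 1, which is zero here); see the nan/0 pairs in the reference file.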
+SELECT round(varPop(ResolutionWidth), 6) FROM test.hits; + +SELECT stddevPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT stddevPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(stddevPop(ResolutionWidth), 6) FROM test.hits; + +SELECT covarSamp(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 0); +SELECT covarSamp(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 1); +SELECT round(covarSamp(ResolutionWidth, ResolutionHeight), 6) FROM test.hits; + +SELECT covarPop(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 0); +SELECT covarPop(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 1); +SELECT round(covarPop(ResolutionWidth, ResolutionHeight), 6) FROM test.hits; + +SELECT corr(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 0); +SELECT corr(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 1); +SELECT round(corr(ResolutionWidth, ResolutionHeight), 6) FROM test.hits; + diff --git a/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference b/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference new file mode 100644 index 00000000000..3e5f4b9e260 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.reference @@ -0,0 +1,19 @@ +2 428064 +54 105908 +143 190223 +157 161971 +187 288270 +213 1324424 +225 318611 +11503 194698 +11508 199656 +2 428333 +54 106848 +143 187608 +157 164665 +187 284689 +213 1315011 +225 319575 +11503 197005 +11508 196642 +12247 diff --git a/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql b/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql new file mode 100644 index 00000000000..fd3fde7636d --- /dev/null +++ b/dbms/tests/queries/1_stateful/00146_aggregate_function_uniq.sql @@ -0,0 +1,3 @@ +SELECT RegionID, uniqHLL12(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; +SELECT RegionID, uniqCombined(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; +SELECT abs(uniq(WatchID) - uniqExact(WatchID)) FROM test.hits; diff --git a/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference b/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference new file mode 100644 index 00000000000..9c49da1ab8a --- /dev/null +++ b/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.reference @@ -0,0 +1,2 @@ +17747796 +17747796 diff --git a/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql b/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql new file mode 100644 index 00000000000..8276f4c1b16 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00147_global_in_aggregate_function.sql @@ -0,0 +1,2 @@ +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); diff --git a/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference b/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference new file mode 100644 index 
00000000000..3f8f6c523ed --- /dev/null +++ b/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.reference @@ -0,0 +1,39 @@ +1962 +1962 +1962 +1962 +1962 +1962 +1999 +17669 +1999 +409 1 17669 +409 1 17669 +3 1 1999 +3 1 1999 +1 1962 +479124 +481541 +475681 +476843 +478098 +479260 +479124 +481541 +475681 +476843 +478098 +479260 +475681 +476843 +478098 +479260 +1353623 +1353623 +1353623 +1353623 +1353623 +1353623 +0 +0 +0 diff --git a/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql b/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql new file mode 100644 index 00000000000..84faa6e059a --- /dev/null +++ b/dbms/tests/queries/1_stateful/00148_monotonic_functions_and_index.sql @@ -0,0 +1,59 @@ +SET max_rows_to_read = 50000; + +SELECT count() FROM test.hits WHERE -CounterID = -34; +SELECT count() FROM test.hits WHERE abs(-CounterID) = 34; +SELECT count() FROM test.hits WHERE -abs(CounterID) = -34; +SELECT count() FROM test.hits WHERE toUInt32(CounterID) = 34; +SELECT count() FROM test.hits WHERE toInt32(CounterID) = 34; +SELECT count() FROM test.hits WHERE toFloat32(CounterID) = 34; + +SET max_rows_to_read = 0; + +SELECT count() FROM test.hits WHERE toInt16(CounterID) = 34; +SELECT count() FROM test.hits WHERE toInt8(CounterID) = 34; + +SELECT count() FROM test.hits WHERE toDate(toUInt16(CounterID)) = toDate(34); + +SELECT uniq(CounterID), uniqUpTo(5)(toInt8(CounterID)), count() FROM test.hits WHERE toInt8(CounterID + 1 - 1) = 34; +SELECT uniq(CounterID), uniqUpTo(5)(toInt8(CounterID)), count() FROM test.hits WHERE toInt8(CounterID) = 34; + +SELECT uniq(CounterID), uniqUpTo(5)(toInt16(CounterID)), count() FROM test.hits WHERE toInt16(CounterID + 1 - 1) = 34; +SELECT uniq(CounterID), uniqUpTo(5)(toInt16(CounterID)), count() FROM test.hits WHERE toInt16(CounterID) = 34; + +SET max_rows_to_read = 500000; + +SELECT uniq(CounterID), count() FROM test.hits WHERE toString(CounterID) = '34'; + +SET max_rows_to_read = 2000000; + +SELECT count() FROM test.hits WHERE CounterID < 101500; +SELECT count() FROM test.hits WHERE CounterID <= 101500; +SELECT count() FROM test.hits WHERE CounterID < 101500 AND CounterID > 42; +SELECT count() FROM test.hits WHERE CounterID < 101500 AND CounterID >= 42; +SELECT count() FROM test.hits WHERE CounterID <= 101500 AND CounterID > 42; +SELECT count() FROM test.hits WHERE CounterID <= 101500 AND CounterID >= 42; +SELECT count() FROM test.hits WHERE -CounterID > -101500; +SELECT count() FROM test.hits WHERE -CounterID >= -101500; +SELECT count() FROM test.hits WHERE -CounterID > -101500 AND CounterID > 42; +SELECT count() FROM test.hits WHERE -CounterID > -101500 AND CounterID >= 42; +SELECT count() FROM test.hits WHERE -CounterID >= -101500 AND CounterID > 42; +SELECT count() FROM test.hits WHERE -CounterID >= -101500 AND CounterID >= 42; +SELECT count() FROM test.hits WHERE CounterID < 101500 AND -CounterID < -42; +SELECT count() FROM test.hits WHERE CounterID < 101500 AND -CounterID <= -42; +SELECT count() FROM test.hits WHERE CounterID <= 101500 AND -CounterID < -42; +SELECT count() FROM test.hits WHERE CounterID <= 101500 AND -CounterID <= -42; + +SET max_rows_to_read = 0; + +SELECT count() FROM test.hits WHERE EventDate = '2014-03-20'; +SELECT count() FROM test.hits WHERE toDayOfMonth(EventDate) = 20; +SELECT count() FROM test.hits WHERE toDayOfWeek(EventDate) = 4; +SELECT count() FROM test.hits WHERE toUInt16(EventDate) = toUInt16(toDate('2014-03-20')); +SELECT count() FROM test.hits WHERE toInt64(EventDate) = 
toInt64(toDate('2014-03-20')); +SELECT count() FROM test.hits WHERE toDateTime(EventDate) = '2014-03-20 00:00:00'; + +SET max_rows_to_read = 50000; + +SELECT count() FROM test.hits WHERE toMonth(EventDate) != 3; +SELECT count() FROM test.hits WHERE toYear(EventDate) != 2014; +SELECT count() FROM test.hits WHERE toDayOfMonth(EventDate) > 23 OR toDayOfMonth(EventDate) < 17; diff --git a/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference b/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference new file mode 100644 index 00000000000..3183819b008 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.reference @@ -0,0 +1 @@ +4306270979949069156 diff --git a/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql b/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql new file mode 100644 index 00000000000..b195518e1e7 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00149_quantiles_timing_distributed.sql @@ -0,0 +1 @@ +SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID); diff --git a/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference b/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference new file mode 100644 index 00000000000..5c047df92c4 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.reference @@ -0,0 +1 @@ +7234936 1824 1829 5755 0.003 diff --git a/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql b/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql new file mode 100644 index 00000000000..7d5b27fafd3 --- /dev/null +++ b/dbms/tests/queries/1_stateful/00150_quantiles_timing_precision.sql @@ -0,0 +1 @@ +SELECT CounterID, quantileTiming(0.5)(SendTiming) AS qt, least(30000, quantileExact(0.5)(SendTiming)) AS qe, count() AS c, round(abs(qt - qe) / greatest(qt, qe) AS diff, 3) AS rounded_diff FROM test.hits WHERE SendTiming != -1 GROUP BY CounterID HAVING diff != 0 ORDER BY diff DESC; diff --git a/debian/changelog b/debian/changelog index 28ae11ca4c8..1b7b9165ed5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,5 @@ -clickhouse (18.1.0) unstable; urgency=low +clickhouse (18.9.0) unstable; urgency=low * Modified source code - -- Fri, 20 Jul 2018 04:00:20 +0300 + -- Alexey Milovidov Fri, 03 Aug 2018 19:17:05 +0300 diff --git a/debian/clickhouse-server.postinst b/debian/clickhouse-server.postinst index 54fc03f4a36..57882006d78 100644 --- a/debian/clickhouse-server.postinst +++ b/debian/clickhouse-server.postinst @@ -3,6 +3,7 @@ set -e CLICKHOUSE_USER=${CLICKHOUSE_USER=clickhouse} CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP=${CLICKHOUSE_USER}} +CLICKHOUSE_CONFDIR=${CLICKHOUSE_CONFDIR=/etc/clickhouse-server} CLICKHOUSE_DATADIR=${CLICKHOUSE_DATADIR=/var/lib/clickhouse} CLICKHOUSE_LOGDIR=${CLICKHOUSE_LOGDIR=/var/log/clickhouse-server} OS=${OS=`lsb_release -is 2>/dev/null || uname -s || true`} @@ -64,6 +65,10 @@ Please fix this and reinstall this package." >&2 exit 1 fi + if [ -d ${CLICKHOUSE_CONFDIR} ]; then + su -s /bin/sh ${CLICKHOUSE_USER} -c "test -w ${CLICKHOUSE_CONFDIR}" || chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_CONFDIR} + fi + if [ ! 
-d ${CLICKHOUSE_DATADIR} ]; then mkdir -p ${CLICKHOUSE_DATADIR} chown ${CLICKHOUSE_USER}:${CLICKHOUSE_GROUP} ${CLICKHOUSE_DATADIR} diff --git a/debian/control b/debian/control index b690b96e915..745764fd9df 100644 --- a/debian/control +++ b/debian/control @@ -8,12 +8,8 @@ Build-Depends: debhelper (>= 9), gcc-7 [amd64 i386] | gcc-8 [amd64 i386], g++-7 [amd64 i386] | g++-8 [amd64 i386], clang-6.0 [arm64 armhf] | clang-5.0 [arm64 armhf] | clang-7 [arm64 armhf], libc6-dev, - libmariadbclient-dev | default-libmysqlclient-dev | libmysqlclient-dev, libicu-dev, - libltdl-dev, - libreadline-dev, - libssl1.0-dev | libssl-dev, - unixodbc-dev + libreadline-dev Standards-Version: 3.9.8 Package: clickhouse-client @@ -21,7 +17,7 @@ Architecture: all Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}) | clickhouse-server-base (= ${binary:Version}) Replaces: clickhouse-compressor Conflicts: clickhouse-compressor -Description: Client binary for clickhouse +Description: Client binary for ClickHouse Yandex ClickHouse is a column-oriented database management system that allows generating analytical data reports in real time. . @@ -32,7 +28,7 @@ Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends}, tzdata Replaces: clickhouse-server-base Provides: clickhouse-server-base -Description: Common files for clickhouse +Description: Common files for ClickHouse Yandex ClickHouse is a column-oriented database management system that allows generating analytical data reports in real time. . @@ -43,7 +39,7 @@ Architecture: all Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-common-static (= ${binary:Version}), adduser Replaces: clickhouse-server-common, clickhouse-server-base Provides: clickhouse-server-common -Description: Server binary for clickhouse +Description: Server binary for ClickHouse Yandex ClickHouse is a column-oriented database management system that allows generating analytical data reports in real time. . @@ -63,7 +59,7 @@ Package: clickhouse-test Priority: optional Architecture: all Depends: ${shlibs:Depends}, ${misc:Depends}, clickhouse-client, bash, expect, python, python-lxml, python-termcolor, python-requests, curl, perl, sudo, openssl, netcat-openbsd, telnet -Description: Clickhouse tests +Description: ClickHouse tests diff --git a/debian/rules b/debian/rules index edece55c89e..c9ff7635350 100755 --- a/debian/rules +++ b/debian/rules @@ -70,8 +70,8 @@ override_dh_auto_configure: override_dh_auto_build: # Fix for ninja. Do not add -O. - #cd $(BUILDDIR) && $(MAKE) -j$(THREADS_COUNT) - cd $(BUILDDIR) && cmake --build . -- -j$(THREADS_COUNT) + cd $(BUILDDIR) && $(MAKE) -j$(THREADS_COUNT) + #cd $(BUILDDIR) && cmake --build . 
-- -j$(THREADS_COUNT) # cmake return true on error override_dh_auto_test: #TODO, use ENABLE_TESTS=1 diff --git a/docker/client/Dockerfile b/docker/client/Dockerfile index 3393dd0e697..5f3b6ad9d42 100644 --- a/docker/client/Dockerfile +++ b/docker/client/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=\* +ARG version=18.9.0 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/server/Dockerfile b/docker/server/Dockerfile index e1ba52b0989..af1c42c85e7 100644 --- a/docker/server/Dockerfile +++ b/docker/server/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=\* +ARG version=18.9.0 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docker/test/Dockerfile b/docker/test/Dockerfile index 3f7755c58dd..f03e21e082b 100644 --- a/docker/test/Dockerfile +++ b/docker/test/Dockerfile @@ -1,7 +1,7 @@ FROM ubuntu:18.04 ARG repository="deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" -ARG version=\* +ARG version=18.9.0 RUN apt-get update && \ apt-get install -y apt-transport-https dirmngr && \ diff --git a/docs/en/development/build.md b/docs/en/development/build.md index 98e25c5ab4a..1901425020c 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -1,6 +1,28 @@ -# How to build ClickHouse on Linux +# How to build ClickHouse release package -Build should work on Linux Ubuntu 12.04, 14.04 or newer. +## Install Git and pbuilder + +```bash +sudo apt-get update +sudo apt-get install git pbuilder debhelper lsb-release fakeroot sudo debian-archive-keyring debian-keyring +``` + +## Checkout ClickHouse sources + +```bash +git clone --recursive --branch stable https://github.com/yandex/ClickHouse.git +cd ClickHouse +``` + +## Run release script + +```bash +./release +``` + +# How to build ClickHouse for development + +Build should work on Ubuntu Linux. With appropriate changes, it should also work on any other Linux distribution. The build process is not intended to work on Mac OS X. Only x86_64 with SSE 4.2 is supported. Support for AArch64 is experimental. @@ -46,69 +68,22 @@ export CXX=g++-7 ## Install required libraries from packages ```bash -sudo apt-get install libicu-dev libreadline-dev libmysqlclient-dev libssl-dev unixodbc-dev ninja-build +sudo apt-get install libicu-dev libreadline-dev ``` ## Checkout ClickHouse sources -To get the latest stable version: - ```bash -git clone -b stable --recursive git@github.com:yandex/ClickHouse.git -# or: git clone -b stable --recursive https://github.com/yandex/ClickHouse.git +git clone --recursive git@github.com:yandex/ClickHouse.git +# or: git clone --recursive https://github.com/yandex/ClickHouse.git cd ClickHouse ``` -For development, switch to the `master` branch. -For the latest release candidate, switch to the `testing` branch. +For the latest stable version, switch to the `stable` branch. ## Build ClickHouse -There are two build variants. - -### Build release package - -Install prerequisites to build Debian packages. - -```bash -sudo apt-get install devscripts dupload fakeroot debhelper -``` - -Install the most recent version of Clang. - -Clang is embedded into the ClickHouse package and used at runtime. The minimum version is 5.0. It is optional. 
- -To install clang, see [ci/build-clang-from-sources.sh](https://github.com/yandex/ClickHouse/blob/master/ci/build-clang-from-sources.sh) - -You may also build ClickHouse with Clang for development purposes. -For production releases, GCC is used. - -Run the release script: - -```bash -rm -f ../clickhouse*.deb -./release -``` - -You will find built packages in the parent directory: - -```bash -ls -l ../clickhouse*.deb -``` - -Note that usage of debian packages is not required. -ClickHouse has no runtime dependencies except libc, so it could work on almost any Linux. - -Installing freshly built packages on a development server: - -```bash -sudo dpkg -i ../clickhouse*.deb -sudo service clickhouse-server start -``` - -### Build to work with code - ```bash mkdir build cd build diff --git a/docs/en/development/build_osx.md b/docs/en/development/build_osx.md index d29301a4b2b..732e95ca55b 100644 --- a/docs/en/development/build_osx.md +++ b/docs/en/development/build_osx.md @@ -12,22 +12,19 @@ With appropriate changes, it should also work on any other Linux distribution. ## Install required compilers, tools, and libraries ```bash -brew install cmake ninja gcc icu4c mysql openssl unixodbc libtool gettext readline +brew install cmake ninja gcc icu4c mariadb-connector-c openssl libtool gettext readline ``` ## Checkout ClickHouse sources -To get the latest stable version: - ```bash -git clone -b stable --recursive --depth=10 git@github.com:yandex/ClickHouse.git -# or: git clone -b stable --recursive --depth=10 https://github.com/yandex/ClickHouse.git +git clone --recursive --depth=10 git@github.com:yandex/ClickHouse.git +# or: git clone --recursive --depth=10 https://github.com/yandex/ClickHouse.git cd ClickHouse ``` -For development, switch to the `master` branch. -For the latest release candidate, switch to the `testing` branch. +For the latest stable version, switch to the `stable` branch. ## Build ClickHouse @@ -43,9 +40,8 @@ cd .. If you intend to run clickhouse-server, make sure to increase the system's maxfiles variable. -
<div class="admonition info"> -Note: you'll need to use sudo. -</div>
+!!! info "Note" + You'll need to use sudo. To do so, create the following file: diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index d6c92cf622e..ee2402aedc7 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -32,8 +32,6 @@ See `dbms/tests/integration/README.md` on how to run these tests. Note that integration of ClickHouse with third-party drivers is not tested. Also we currently don't have integration tests with our JDBC and ODBC drivers. -We don't have integration tests for `Kafka` table engine that is developed by community - this is one of the most anticipated tests (otherwise there is almost no way to be confident with `Kafka` tables). - ## Unit Tests diff --git a/docs/en/faq/general.md b/docs/en/faq/general.md new file mode 100644 index 00000000000..64267c12625 --- /dev/null +++ b/docs/en/faq/general.md @@ -0,0 +1,12 @@ +# General questions + +## Why not use something like MapReduce? + +We can refer to systems like MapReduce as distributed computing systems in which the reduce operation is based on a distributed sort. The most common opensource solution of this kind is [Apache Hadoop](http://hadoop.apache.org), while Yandex internally uses it's own MapReduce implementation — YT. + +The systems of this kind are not suitable for online queries due to their high latency. In other words, they can't be used as the back-end for a web interface. + +Distributed sorting isn't the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is the optimal way to perform reduce operations. A common approach to optimizing MapReduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. +Distributed sorting is one of the main causes of reduced performance when running simple MapReduce tasks. + +Most MapReduce implementations allow executing any code on the cluster. But a declarative query language is better suited to OLAP in order to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala, Shark (outdated) for Spark, and Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface. 
diff --git a/docs/en/getting_started/example_datasets/nyc_taxi.md b/docs/en/getting_started/example_datasets/nyc_taxi.md index 04bb31cc7a6..c44670476aa 100644 --- a/docs/en/getting_started/example_datasets/nyc_taxi.md +++ b/docs/en/getting_started/example_datasets/nyc_taxi.md @@ -360,9 +360,8 @@ We ran queries using a client located in a Yandex datacenter in Finland on a clu ## Summary -```text -nodes Q1 Q2 Q3 Q4 - 1 0.490 1.224 2.104 3.593 - 3 0.212 0.438 0.733 1.241 -140 0.028 0.043 0.051 0.072 -``` +| nodes | Q1 | Q2 | Q3 | Q4 | +| ----- | ----- | ----- | ----- | ----- | +| 1 | 0.490 | 1.224 | 2.104 | 3.593 | +| 3 | 0.212 | 0.438 | 0.733 | 1.241 | +| 140 | 0.028 | 0.043 | 0.051 | 0.072 | diff --git a/docs/en/getting_started/example_datasets/ontime.md b/docs/en/getting_started/example_datasets/ontime.md index 150fc8bb5bd..14c7af3f876 100644 --- a/docs/en/getting_started/example_datasets/ontime.md +++ b/docs/en/getting_started/example_datasets/ontime.md @@ -2,15 +2,6 @@ # OnTime -This performance test was created by Vadim Tkachenko. See: - -- -- -- -- -- -- - Downloading data: ```bash @@ -316,3 +307,12 @@ SELECT OriginCityName, DestCityName, count() AS c FROM ontime GROUP BY OriginCit SELECT OriginCityName, count() AS c FROM ontime GROUP BY OriginCityName ORDER BY c DESC LIMIT 10; ``` + +This performance test was created by Vadim Tkachenko. For more details see: + +- +- +- +- +- +- diff --git a/docs/en/images/column_oriented.gif b/docs/en/images/column_oriented.gif new file mode 100644 index 00000000000..15f4b12e697 Binary files /dev/null and b/docs/en/images/column_oriented.gif differ diff --git a/docs/en/images/row_oriented.gif b/docs/en/images/row_oriented.gif new file mode 100644 index 00000000000..53daa20f322 Binary files /dev/null and b/docs/en/images/row_oriented.gif differ diff --git a/docs/en/index.md b/docs/en/index.md index 72efa70802b..b793b0e148e 100644 --- a/docs/en/index.md +++ b/docs/en/index.md @@ -1,69 +1,87 @@ # What is ClickHouse? -ClickHouse is a columnar DBMS for OLAP. +ClickHouse is a columnar database management system (DBMS) for online analytical processing (OLAP). In a "normal" row-oriented DBMS, data is stored in this order: -```text -5123456789123456789 1 Eurobasket - Greece - Bosnia and Herzegovina - example.com 1 2011-09-01 01:03:02 6274717 1294101174 11409 612345678912345678 0 33 6 http://www.example.com/basketball/team/123/match/456789.html http://www.example.com/basketball/team/123/match/987654.html 0 1366 768 32 10 3183 0 0 13 0\0 1 1 0 0 2011142 -1 0 0 01321 613 660 2011-09-01 08:01:17 0 0 0 0 utf-8 1466 0 0 0 5678901234567890123 277789954 0 0 0 0 0 -5234985259563631958 0 Consulting, Tax assessment, Accounting, Law 1 2011-09-01 01:03:02 6320881 2111222333 213 6458937489576391093 0 3 2 http://www.example.ru/ 0 800 600 16 10 2 153.1 0 0 10 63 1 1 0 0 2111678 000 0 588 368 240 2011-09-01 01:03:17 4 0 60310 0 windows-1251 1466 0 000 778899001 0 0 0 0 0 -... -``` +| Row | WatchID | JavaEnable | Title | GoodEvent | EventTime | | --- | ----------- | ---------- | ------------------ | --------- | ------------------- | | #0 | 89354350662 | 1 | Investor Relations | 1 | 2016-05-18 05:19:20 | | #1 | 90329509958 | 0 | Contact us | 1 | 2016-05-18 08:10:20 | | #2 | 89953706054 | 1 | Mission | 1 | 2016-05-18 07:38:00 | | #N | ... | ... | ... | ... | ... | -In order words, all the values related to a row are stored next to each other. -Examples of a row-oriented DBMS are MySQL, Postgres, and MS SQL Server.
+In other words, all the values related to a row are physically stored next to each other. + +Examples of row-oriented DBMSs are MySQL, Postgres and MS SQL Server. +{: .grey } In a column-oriented DBMS, data is stored like this: -```text -WatchID: 5385521489354350662 5385521490329509958 5385521489953706054 5385521490476781638 5385521490583269446 5385521490218868806 5385521491437850694 5385521491090174022 5385521490792669254 5385521490420695110 5385521491532181574 5385521491559694406 5385521491459625030 5385521492275175494 5385521492781318214 5385521492710027334 5385521492955615302 5385521493708759110 5385521494506434630 5385521493104611398 -JavaEnable: 1 0 1 0 0 0 1 0 1 1 1 1 1 1 0 1 0 0 1 1 -Title: Yandex Announcements - Investor Relations - Yandex Yandex — Contact us — Moscow Yandex — Mission Ru Yandex — History — History of Yandex Yandex Financial Releases - Investor Relations - Yandex Yandex — Locations Yandex Board of Directors - Corporate Governance - Yandex Yandex — Technologies -GoodEvent: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -EventTime: 2016-05-18 05:19:20 2016-05-18 08:10:20 2016-05-18 07:38:00 2016-05-18 01:13:08 2016-05-18 00:04:06 2016-05-18 04:21:30 2016-05-18 00:34:16 2016-05-18 07:35:49 2016-05-18 11:41:59 2016-05-18 01:13:32 -``` + +| Row: | #0 | #1 | #2 | #N | | ----------- | ------------------- | ------------------- | ------------------- | ------------------- | | WatchID: | 89354350662 | 90329509958 | 89953706054 | ... | | JavaEnable: | 1 | 0 | 1 | ... | | Title: | Investor Relations | Contact us | Mission | ... | | GoodEvent: | 1 | 1 | 1 | ... | | EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | ... | + These examples only show the order that data is arranged in. The values from different columns are stored separately, and data from the same column is stored together. -Examples of column-oriented DBMSs: `Vertica`, `Paraccel (Actian Matrix) (Amazon Redshift)`, `Sybase IQ`, `Exasol`, `Infobright`, `InfiniDB`, `MonetDB (VectorWise) (Actian Vector)`, `LucidDB`, `SAP HANA`, `Google Dremel`, `Google PowerDrill`, `Druid`, `kdb+`, and so on. +Examples of column-oriented DBMSs: Vertica, Paraccel (Actian Matrix, Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise, Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, kdb+. +{: .grey } -Different orders for storing data are better suited to different scenarios. -The data access scenario refers to what queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and updating data; the working size of the data and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on. +Different orders for storing data are better suited to different scenarios. The data access scenario refers to which queries are made, how often, and in what proportion; how much data is read for each type of query – rows, columns, and bytes; the relationship between reading and writing data; the size of the actively used dataset and how locally it is used; whether transactions are used, and how isolated they are; requirements for data replication and logical integrity; requirements for latency and throughput for each type of query, and so on.
-The higher the load on the system, the more important it is to customize the system to the scenario, and the more specific this customization becomes. There is no system that is equally well-suited to significantly different scenarios. If a system is adaptable to a wide set of scenarios, under a high load, the system will handle all the scenarios equally poorly, or will work well for just one of the scenarios. +The higher the load on the system, the more important it is to customize the system setup to match the requirements of the usage scenario, and the more fine-grained this customization becomes. There is no system that is equally well-suited to significantly different scenarios. If a system is adaptable to a wide set of scenarios, under a high load, the system will handle all the scenarios equally poorly, or will work well for just one or a few of the possible scenarios. -We'll say that the following is true for the OLAP (online analytical processing) scenario: +## Key properties of the OLAP scenario - The vast majority of requests are for read access. -- Data is updated in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all. +- Data is ingested in fairly large batches (> 1000 rows), not by single rows; or it is not updated at all. - Data is added to the DB but is not modified. - For reads, quite a large number of rows are extracted from the DB, but only a small subset of columns. -- Tables are "wide," meaning they contain a large number of columns. -- Queries are relatively rare (usually hundreds of queries per server or less per second). +- Tables are "wide", meaning they contain a large number of columns. +- Queries are relatively rare (usually hundreds of queries per second per server or less). - For simple queries, latencies around 50 ms are allowed. - Column values are fairly small: numbers and short strings (for example, 60 bytes per URL). - Requires high throughput when processing a single query (up to billions of rows per second per server). -- There are no transactions. +- Transactions are not necessary. - Low requirements for data consistency. - There is one large table per query. All tables are small, except for one. -- A query result is significantly smaller than the source data. In other words, data is filtered or aggregated. The result fits in a single server's RAM. +- A query result is significantly smaller than the source data. In other words, data is filtered or aggregated, so the result fits in a single server's RAM. -It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it doesn't make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Elliptics for analytics, you will get very poor performance compared to OLAP databases. +It is easy to see that the OLAP scenario is very different from other popular scenarios (such as OLTP or Key-Value access). So it doesn't make sense to try to use OLTP or a Key-Value DB for processing analytical queries if you want to get decent performance. For example, if you try to use MongoDB or Redis for analytics, you will get very poor performance compared to OLAP databases. -Columnar-oriented databases are better suited to OLAP scenarios (at least 100 times better in processing speed for most queries), for the following reasons: +## Reasons why columnar databases are better suited for the OLAP scenario -1. For I/O. -2.
For an analytical query, only a small number of table columns need to be read. In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O. -3. Since data is read in packets, it is easier to compress. Data in columns is also easier to compress. This further reduces the I/O volume. -4. Due to the reduced I/O, more data fits in the system cache. +Column-oriented databases are better suited to OLAP scenarios (at least 100 times better in processing speed for most queries). The reasons for that are explained below in detail, but they are easier to demonstrate visually: + +**Row oriented** + +![Row oriented](images/row_oriented.gif#) + +**Column oriented** + +![Column oriented](images/column_oriented.gif#) + +See the difference? Read further to learn why this happens. + +### Input/output + +1. For an analytical query, only a small number of table columns need to be read. In a column-oriented database, you can read just the data you need. For example, if you need 5 columns out of 100, you can expect a 20-fold reduction in I/O. +2. Since data is read in packets, it is easier to compress. Data in columns is also easier to compress. This further reduces the I/O volume. +3. Due to the reduced I/O, more data fits in the system cache. For example, the query "count the number of records for each advertising platform" requires reading one "advertising platform ID" column, which takes up 1 byte uncompressed. If most of the traffic was not from advertising platforms, you can expect at least 10-fold compression of this column. When using a quick compression algorithm, data decompression is possible at a speed of at least several gigabytes of uncompressed data per second. In other words, this query can be processed at a speed of approximately several billion rows per second on a single server. This speed is actually achieved in practice. -Example: - -```bash -milovidov@hostname:~$ clickhouse-client +
<details markdown="1"><summary>Example</summary>
+<p>
+
+```bash
+$ clickhouse-client
 ClickHouse client version 0.0.52053.
 Connecting to localhost:9000.
 Connected to ClickHouse server version 0.0.52053.
@@ -104,9 +122,11 @@ LIMIT 20
 20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.)
 
 :)
-```
+```
+</p>
+</details>
+
-2. For CPU. +### CPU Since executing a query requires processing a large number of rows, it helps to dispatch all operations for entire vectors instead of for separate rows, or to implement the query engine so that there is almost no dispatching cost. If you don't do this, with any half-decent disk subsystem, the query interpreter inevitably stalls the CPU. It makes sense to both store data in columns and process it, when possible, by columns. diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 7e45beb3e2c..13409866b13 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -64,7 +64,11 @@ Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)). When formatting, rows are enclosed in double quotes. A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double quotes. Numbers are output without quotes. Values are separated by a delimiter*. Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first the array is serialized to a string as in TabSeparated format, and then the resulting string is output to CSV in double quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost). -*By default — `,`. See a [format_csv_delimiter](/docs/en/operations/settings/settings/#format_csv_delimiter) setting for additional info. +``` +clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +``` + +*By default — `,`. See the [format_csv_delimiter](/operations/settings/settings/#format_csv_delimiter) setting for additional info. When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to a delimiter or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, the leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR LF) are all supported. diff --git a/docs/en/introduction/distinctive_features.md b/docs/en/introduction/distinctive_features.md index f626f13c274..ad6a7efc6e0 100644 --- a/docs/en/introduction/distinctive_features.md +++ b/docs/en/introduction/distinctive_features.md @@ -2,27 +2,27 @@ ## True column-oriented DBMS -In a true column-oriented DBMS, there isn't any "garbage" stored with the values. Among other things, this means that constant-length values must be supported, to avoid storing their length "number" next to the values. As an example, a billion UInt8-type values should actually consume around 1 GB uncompressed, or this will strongly affect the CPU use. It is very important to store data compactly (without any "garbage") even when uncompressed, since the speed of decompression (CPU usage) depends mainly on the volume of uncompressed data. +In a true column-oriented DBMS, there is no excessive data stored with the values. For example, this means that constant-length values must be supported, to avoid storing their length as an additional integer next to the values. In this case, a billion UInt8 values should actually consume around 1 GB uncompressed, otherwise this will strongly affect the CPU use. It is very important to store data compactly even when uncompressed, since the speed of decompression (CPU usage) depends mainly on the volume of uncompressed data.
-This is worth noting because there are systems that can store values of separate columns separately, but that can't effectively process analytical queries due to their optimization for other scenarios. Examples are HBase, BigTable, Cassandra, and HyperTable. In these systems, you will get throughput around a hundred thousand rows per second, but not hundreds of millions of rows per second. +This is worth noting because there are systems that can store values of different columns separately, but that can't effectively process analytical queries due to their optimization for other scenarios. Examples are HBase, BigTable, Cassandra, and HyperTable. In these systems, you will get throughput around a hundred thousand rows per second, but not hundreds of millions of rows per second. -Also note that ClickHouse is a DBMS, not a single database. ClickHouse allows creating tables and databases in runtime, loading data, and running queries without reconfiguring and restarting the server. +Also note that ClickHouse is a database management system, not a single database. ClickHouse allows creating tables and databases at runtime, loading data, and running queries without reconfiguring and restarting the server. ## Data compression -Some column-oriented DBMSs (InfiniDB CE and MonetDB) do not use data compression. However, data compression really improves performance. +Some column-oriented DBMSs (InfiniDB CE and MonetDB) do not use data compression. However, data compression is crucial for achieving excellent performance. ## Disk storage of data -Many column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. But even on thousands of servers, the RAM is too small for storing all the pageviews and sessions in Yandex.Metrica. +Many column-oriented DBMSs (such as SAP HANA and Google PowerDrill) can only work in RAM. This approach encourages allocating a larger hardware budget than is actually necessary for real-time analysis. ClickHouse is designed to work on regular hard drives, which ensures a low cost of ownership per gigabyte of data, but SSD and additional RAM are also utilized fully if available. ## Parallel processing on multiple cores -Large queries are parallelized in a natural way. +Large queries are parallelized in a natural way, utilizing all necessary resources that are available on the current server. ## Distributed processing on multiple servers -Almost none of the columnar DBMSs listed above have support for distributed processing. +Almost none of the columnar DBMSs mentioned above have support for distributed query processing. In ClickHouse, data can reside on different shards. Each shard can be a group of replicas that are used for fault tolerance. The query is processed on all the shards in parallel. This is transparent for the user. ## SQL support @@ -33,30 +33,37 @@ However, this is a declarative query language based on SQL that can't be differe JOINs are supported. Subqueries are supported in FROM, IN, and JOIN clauses, as well as scalar subqueries. Dependent subqueries are not supported. +ClickHouse supports a declarative query language based on SQL that complies with the SQL standard in many cases. +GROUP BY, ORDER BY, scalar subqueries and subqueries in FROM, IN and JOIN clauses are supported. +Correlated subqueries and window functions are not supported. + ## Vector engine -Data is not only stored by columns, but is processed by vectors (parts of columns). This allows us to achieve high CPU performance.
+Data is not only stored by columns, but is also processed by vectors (parts of columns). This makes it possible to achieve high CPU efficiency. ## Real-time data updates -ClickHouse supports primary key tables. In order to quickly perform queries on the range of the primary key, the data is sorted incrementally using the merge tree. Due to this, data can continually be added to the table. There is no locking when adding data. +ClickHouse supports tables with a primary key. In order to quickly perform queries on the range of the primary key, the data is sorted incrementally using the merge tree. Due to this, data can continually be added to the table. No locks are taken when new data is ingested. -## Indexes +## Index -Having a primary key makes it possible to extract data for specific clients (for instance, Yandex.Metrica tracking tags) for a specific time range, with low latency less than several dozen milliseconds. +Having the data physically sorted by primary key makes it possible to extract data for specific values or value ranges with low latency, less than a few dozen milliseconds. ## Suitable for online queries -This lets us use the system as the back-end for a web interface. Low latency means queries can be processed without delay, while the Yandex.Metrica interface page is loading. In other words, in online mode. +Low latency means that queries can be processed without delay and without trying to prepare an answer in advance, right at the moment the user interface page is loading. In other words, online. ## Support for approximated calculations -1. The system contains aggregate functions for approximated calculation of the number of various values, medians, and quantiles. -2. Supports running a query based on a part (sample) of data and getting an approximated result. In this case, proportionally less data is retrieved from the disk. -3. Supports running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources. +ClickHouse provides various ways to trade accuracy for performance: -## Data replication and support for data integrity on replicas +1. Aggregate functions for approximated calculation of the number of distinct values, medians, and quantiles. +2. Running a query based on a part (sample) of data and getting an approximated result. In this case, proportionally less data is retrieved from the disk. +3. Running an aggregation for a limited number of random keys, instead of for all keys. Under certain conditions for key distribution in the data, this provides a reasonably accurate result while using fewer resources. -Uses asynchronous multimaster replication. After being written to any available replica, data is distributed to all the remaining replicas. The system maintains identical data on different replicas. Data is restored automatically after a failure, or using a "button" for complex cases. -For more information, see the section [Data replication](../operations/table_engines/replication.md#table_engines-replication). +## Data replication and integrity + +ClickHouse uses asynchronous multimaster replication. After being written to any available replica, data is distributed to all the other replicas in the background. The system maintains identical data on different replicas. Data is restored automatically after most failures, or semiautomatically in complicated cases.
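To make the replication behavior described above concrete, a minimal sketch using the old-style engine syntax from this era; the ZooKeeper path, replica name, and columns are illustrative assumptions, not part of this changeset:

```sql
-- Each replica registers itself in ZooKeeper under the table's path;
-- data written to any replica is shipped to the others asynchronously.
CREATE TABLE hits_replica
(
    EventDate Date,
    CounterID UInt32,
    UserID UInt64
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/01/hits', 'replica1',
                               EventDate, (CounterID, EventDate), 8192);
```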
+ +For more information, see the [Data replication](../operations/table_engines/replication.md#table_engines-replication) section. diff --git a/docs/en/introduction/features_considered_disadvantages.md b/docs/en/introduction/features_considered_disadvantages.md index 80708c02883..010fdf7c9ec 100644 --- a/docs/en/introduction/features_considered_disadvantages.md +++ b/docs/en/introduction/features_considered_disadvantages.md @@ -1,6 +1,6 @@ # ClickHouse features that can be considered disadvantages -1. No transactions. -2. For aggregation, query results must fit in the RAM on a single server. However, the volume of source data for a query may be indefinitely large. -3. Lack of full-fledged UPDATE/DELETE implementation. +1. No full-fledged transactions. +2. Lack of the ability to modify or delete already inserted data at a high rate and with low latency. There are batch deletes available to clean up data that is not needed anymore or to comply with [GDPR](https://gdpr-info.eu). Batch updates are currently in development as of July 2018. +3. The sparse index makes ClickHouse not really suitable for point queries that retrieve single rows by their keys. diff --git a/docs/en/introduction/index.md b/docs/en/introduction/index.md deleted file mode 100644 index 3d07efe555d..00000000000 --- a/docs/en/introduction/index.md +++ /dev/null @@ -1,2 +0,0 @@ -# Introduction - diff --git a/docs/en/introduction/performance.md b/docs/en/introduction/performance.md index d8958431dfe..4050cceb642 100644 --- a/docs/en/introduction/performance.md +++ b/docs/en/introduction/performance.md @@ -1,22 +1,24 @@ # Performance -According to internal testing results, ClickHouse shows the best performance for comparable operating scenarios among systems of its class that were available for testing. This includes the highest throughput for long queries, and the lowest latency on short queries. Testing results are shown on a separate page. +According to internal testing results by Yandex, ClickHouse shows the best performance for comparable operating scenarios among systems of its class that were available for testing. This includes the highest throughput for long queries, and the lowest latency on short queries. Testing results are shown on a [separate page](https://clickhouse.yandex/benchmark.html). + +There are a lot of independent benchmarks that confirm this as well. You can look them up on your own, or start with this small [collection of independent benchmark links](https://clickhouse.yandex/#independent-benchmarks). ## Throughput for a single large query -Throughput can be measured in rows per second or in megabytes per second. If the data is placed in the page cache, a query that is not too complex is processed on modern hardware at a speed of approximately 2-10 GB/s of uncompressed data on a single server (for the simplest cases, the speed may reach 30 GB/s). If data is not placed in the page cache, the speed depends on the disk subsystem and the data compression rate. For example, if the disk subsystem allows reading data at 400 MB/s, and the data compression rate is 3, the speed will be around 1.2 GB/s. To get the speed in rows per second, divide the speed in bytes per second by the total size of the columns used in the query. For example, if 10 bytes of columns are extracted, the speed will be around 100-200 million rows per second. +Throughput can be measured in rows per second or in megabytes per second.
If the data is placed in the page cache, a query that is not too complex is processed on modern hardware at a speed of approximately 2-10 GB/s of uncompressed data on a single server (for the simplest cases, the speed may reach 30 GB/s). If data is not placed in the page cache, the speed is bound by the disk subsystem and how well the data has been compressed. For example, if the disk subsystem allows reading data at 400 MB/s, and the data compression rate is 3, the speed will be around 1.2 GB/s. To get the speed in rows per second, divide the speed in bytes per second by the total size of the columns used in the query. For example, if 10 bytes of columns are extracted, the speed will be around 100-200 million rows per second. The processing speed increases almost linearly for distributed processing, but only if the number of rows resulting from aggregation or sorting is not too large. ## Latency when processing short queries -If a query uses a primary key and does not select too many rows to process (hundreds of thousands), and does not use too many columns, we can expect less than 50 milliseconds of latency (single digits of milliseconds in the best case) if data is placed in the page cache. Otherwise, latency is calculated from the number of seeks. If you use rotating drives, for a system that is not overloaded, the latency is calculated by this formula: seek time (10 ms) \* number of columns queried \* number of data parts. +If a query uses a primary key and does not select too many rows to process (hundreds of thousands), and does not use too many columns, we can expect less than 50 milliseconds of latency (single digits of milliseconds in the best case) if data is placed in the page cache. Otherwise, latency is calculated from the number of seeks. If you use rotating drives, for a system that is not overloaded, the approximate latency can be calculated by this formula: seek time (10 ms) \* number of columns queried \* number of data parts. ## Throughput when processing a large quantity of short queries -Under the same conditions, ClickHouse can handle several hundred queries per second on a single server (up to several thousand in the best case). Since this scenario is not typical for analytical DBMSs, we recommend expecting a maximum of 100 queries per second. +Under the same circumstances, ClickHouse can handle several hundred queries per second on a single server (up to several thousand in the best case). Since this scenario is not typical for analytical DBMSs, it is better to expect a maximum of hundreds of queries per second. ## Performance when inserting data -We recommend inserting data in packets of at least 1000 rows, or no more than a single request per second. When inserting to a MergeTree table from a tab-separated dump, the insertion speed will be from 50 to 200 MB/s. If the inserted rows are around 1 Kb in size, the speed will be from 50,000 to 200,000 rows per second. If the rows are small, the performance will be higher in rows per second (on Banner System data -`>` 500,000 rows per second; on Graphite data -`>` 1,000,000 rows per second). To improve performance, you can make multiple INSERT queries in parallel, and performance will increase linearly. +It is recommended to insert data in batches of at least 1000 rows, or no more than a single request per second. When inserting to a MergeTree table from a tab-separated dump, the insertion speed will be from 50 to 200 MB/s.
If the inserted rows are around 1 Kb in size, the speed will be from 50,000 to 200,000 rows per second. If the rows are small, the performance will be higher in rows per second (on Banner System data, more than 500,000 rows per second; on Graphite data, more than 1,000,000 rows per second). To improve performance, you can make multiple INSERT queries in parallel, and performance will increase linearly. diff --git a/docs/en/introduction/possible_silly_questions.md b/docs/en/introduction/possible_silly_questions.md deleted file mode 100644 index cf7b2c48032..00000000000 --- a/docs/en/introduction/possible_silly_questions.md +++ /dev/null @@ -1,15 +0,0 @@ -# Questions you were afraid to ask - -## Why not use something like MapReduce? - -We can refer to systems like map-reduce as distributed computing systems in which the reduce operation is based on distributed sorting. In this sense, they include Hadoop, and YT (YT is developed at Yandex for internal use). - -These systems aren't appropriate for online queries due to their high latency. In other words, they can't be used as the back-end for a web interface. -These types of systems aren't useful for real-time data updates. -Distributed sorting isn't the best way to perform reduce operations if the result of the operation and all the intermediate results (if there are any) are located in the RAM of a single server, which is usually the case for online queries. In such a case, a hash table is the optimal way to perform reduce operations. A common approach to optimizing map-reduce tasks is pre-aggregation (partial reduce) using a hash table in RAM. The user performs this optimization manually. -Distributed sorting is one of the main causes of reduced performance when running simple map-reduce tasks. - -Systems like map-reduce allow executing any code on the cluster. But a declarative query language is better suited to OLAP in order to run experiments quickly. For example, Hadoop has Hive and Pig. Also consider Cloudera Impala, Shark (outdated) for Spark, and Spark SQL, Presto, and Apache Drill. Performance when running such tasks is highly sub-optimal compared to specialized systems, but relatively high latency makes it unrealistic to use these systems as the backend for a web interface. - -YT allows storing groups of columns separately. But YT can't be considered a true column-based system because it doesn't have fixed-length data types (for efficiently storing numbers without extra "garbage"), and also due to its lack of a vector engine. Tasks are performed in YT using custom code in streaming mode, so they cannot be optimized enough (up to hundreds of millions of rows per second per server). "Dynamic table sorting" is under development in YT using MergeTree, strict value typing, and a query language similar to SQL. Dynamically sorted tables are not appropriate for OLAP tasks because the data is stored by row. The YT query language is still under development, so we can't yet rely on this functionality. YT developers are considering using dynamically sorted tables in OLTP and Key-Value scenarios. - diff --git a/docs/en/introduction/ya_metrika_task.md b/docs/en/introduction/ya_metrika_task.md index 9c16b4e708b..ffc11afe8fa 100644 --- a/docs/en/introduction/ya_metrika_task.md +++ b/docs/en/introduction/ya_metrika_task.md @@ -1,9 +1,10 @@ # Yandex.Metrica use case -ClickHouse currently powers [Yandex.Metrica](https://metrica.yandex.com/), [the second largest web analytics platform in the world](http://w3techs.com/technologies/overview/traffic_analysis/all).
With more than 13 trillion records in the database and more than 20 billion events daily, ClickHouse allows you generating custom reports on the fly directly from non-aggregated data. +ClickHouse was initially developed to power [Yandex.Metrica](https://metrica.yandex.com/), [the second largest web analytics platform in the world](http://w3techs.com/technologies/overview/traffic_analysis/all), and continues to be its core component. With more than 13 trillion records in the database and more than 20 billion events daily, ClickHouse allows generating custom reports on the fly directly from non-aggregated data. This article gives a historical background on what the main goal of ClickHouse was before it became an opensource product. -We need to get custom reports based on hits and sessions, with custom segments set by the user. Data for the reports is updated in real-time. Queries must be run immediately (in online mode). We must be able to build reports for any time period. Complex aggregates must be calculated, such as the number of unique visitors. -At this time (April 2014), Yandex.Metrica receives approximately 12 billion events (pageviews and mouse clicks) daily. All these events must be stored in order to build custom reports. A single query may require scanning hundreds of millions of rows over a few seconds, or millions of rows in no more than a few hundred milliseconds. +Yandex.Metrica generates custom reports based on hits and sessions on the fly, with arbitrary segments and time periods chosen by the end user. Complex aggregates are often required, such as the number of unique visitors. New data for the reports arrives in real-time. + +As of April 2014, Yandex.Metrica received approximately 12 billion events (page views and clicks) daily. All these events must be stored in order to build those custom reports. A single query may require scanning millions of rows in no more than a few hundred milliseconds, or hundreds of millions of rows over a few seconds. ## Usage in Yandex.Metrica and other Yandex services diff --git a/docs/en/operations/configuration_files.md b/docs/en/operations/configuration_files.md index 52e9e10ffea..1551ab47952 100644 --- a/docs/en/operations/configuration_files.md +++ b/docs/en/operations/configuration_files.md @@ -4,7 +4,7 @@ The main server config file is `config.xml`. It resides in the `/etc/clickhouse-server/` directory. -Individual settings can be overridden in the `*.xml`and`*.conf` files in the `conf.d` and `config.d` directories next to the config file. +Individual settings can be overridden in the `*.xml` and `*.conf` files in the `conf.d` and `config.d` directories next to the config file. The `replace` or `remove` attributes can be specified for the elements of these config files. @@ -12,11 +12,11 @@ If neither is specified, it combines the contents of elements recursively, repla If `replace` is specified, it replaces the entire element with the specified one. -If ` remove` is specified, it deletes the element. +If `remove` is specified, it deletes the element. -The config can also define "substitutions". If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](server_settings/settings.md#server_settings-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file.
If a substitution specified in ` incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros]()server_settings/settings.md#server_settings-macros)). +The config can also define "substitutions". If an element has the `incl` attribute, the corresponding substitution from the file will be used as the value. By default, the path to the file with substitutions is `/etc/metrika.xml`. This can be changed in the [include_from](server_settings/settings.md#server_settings-include_from) element in the server config. The substitution values are specified in `/yandex/substitution_name` elements in this file. If a substitution specified in `incl` does not exist, it is recorded in the log. To prevent ClickHouse from logging missing substitutions, specify the `optional="true"` attribute (for example, settings for [macros](server_settings/settings.md#server_settings-macros)). -Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at ` /path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element. +Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk="/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element. The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the 'users_config' element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`. diff --git a/docs/en/operations/index.md b/docs/en/operations/index.md index 0ff38af8086..1450754bc0f 100644 --- a/docs/en/operations/index.md +++ b/docs/en/operations/index.md @@ -1,2 +1,2 @@ -# Usage +# Operations diff --git a/docs/en/operations/server_settings/settings.md b/docs/en/operations/server_settings/settings.md index 7745f226128..b93fdd15e62 100644 --- a/docs/en/operations/server_settings/settings.md +++ b/docs/en/operations/server_settings/settings.md @@ -22,11 +22,8 @@ Default value: 3600. Data compression settings. -<div class="admonition warning">
- -Don't use it if you have just started using ClickHouse. - -</div>
+!!! warning "Warning" + Don't use it if you have just started using ClickHouse. The configuration looks like this: @@ -334,7 +331,7 @@ Also, logging to syslog is possible. Configuration example: 1
 <address>syslog.remote:10514</address>
- <hostname>myhost.local</hostname> + <hostname>myhost.local</hostname> <facility>LOG_LOCAL6</facility> <format>syslog</format>
@@ -345,8 +342,8 @@ Keys: - user_syslog - activation key, turning on syslog logging. - address - host[:port] of syslogd. If not specified, local one would be used. - hostname - optional, source host of logs -- facility - [syslog facility](https://en.wikipedia.org/wiki/Syslog#Facility), -in uppercase, prefixed with "LOG_": (``LOG_USER``, ``LOG_DAEMON``, ``LOG_LOCAL3`` etc.). +- facility - [syslog facility](https://en.wikipedia.org/wiki/Syslog#Facility), +in uppercase, prefixed with "LOG_": (``LOG_USER``, ``LOG_DAEMON``, ``LOG_LOCAL3`` etc.). Default values: when ``address`` is specified, then ``LOG_USER``, otherwise - ``LOG_DAEMON`` - format - message format. Possible values are - ``bsd`` and ``syslog`` @@ -561,11 +558,9 @@ Use the following parameters to configure logging: The path to the directory containing data. -<div class="admonition warning">
+!!! warning "Attention" + The trailing slash is mandatory. -The end slash is mandatory. - -
**Example** @@ -651,11 +646,8 @@ Port for communicating with clients over the TCP protocol. Path to temporary data for processing large queries. -<div class="admonition warning">
- -The end slash is mandatory. - -</div>
+!!! warning "Attention" + The trailing slash is mandatory. **Example** diff --git a/docs/en/operations/table_engines/file.md b/docs/en/operations/table_engines/file.md index 400601c8d91..ea78a7ca86e 100644 --- a/docs/en/operations/table_engines/file.md +++ b/docs/en/operations/table_engines/file.md @@ -24,9 +24,8 @@ When creating table using `File(Format)` it creates empty subdirectory in that f You may manually create this subfolder and file in server filesystem and then [ATTACH](../../query_language/misc.md#queries-attach) it to table information with matching name, so you can query data from that file. -
-Be careful with this funcionality, because ClickHouse does not keep track of external changes to such files. The result of simultaneous writes via ClickHouse and outside of ClickHouse is undefined. -</div>
+!!! warning + Be careful with this functionality, because ClickHouse does not keep track of external changes to such files. The result of simultaneous writes via ClickHouse and outside of ClickHouse is undefined. **Example:** diff --git a/docs/en/operations/table_engines/kafka.md b/docs/en/operations/table_engines/kafka.md index 31616e77d25..f04c234dcd5 100644 --- a/docs/en/operations/table_engines/kafka.md +++ b/docs/en/operations/table_engines/kafka.md @@ -8,20 +8,41 @@ Kafka lets you: - Organize fault-tolerant storage. - Process streams as they become available. + +Old format: + ``` -Kafka(broker_list, topic_list, group_name, format[, schema, num_consumers]) +Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format + [, kafka_row_delimiter, kafka_schema, kafka_num_consumers]) ``` -Parameters: +New format: -- `broker_list` – A comma-separated list of brokers (`localhost:9092`). -- `topic_list` – A list of Kafka topics (`my_topic`). -- `group_name` – A group of Kafka consumers (`group1`). Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere. -- `--format` – Message format. Uses the same notation as the SQL ` FORMAT` function, such as ` JSONEachRow`. For more information, see the "Formats" section. -- `schema` – An optional parameter that must be used if the format requires a schema definition. For example, [Cap'n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. -- `num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition. +``` +Kafka SETTINGS + kafka_broker_list = 'localhost:9092', + kafka_topic_list = 'topic1,topic2', + kafka_group_name = 'group1', + kafka_format = 'JSONEachRow', + kafka_row_delimiter = '\n', + kafka_schema = '', + kafka_num_consumers = 2 ``` -Example: +Required parameters: + +- `kafka_broker_list` – A comma-separated list of brokers (`localhost:9092`). +- `kafka_topic_list` – A list of Kafka topics (`my_topic`). +- `kafka_group_name` – A group of Kafka consumers (`group1`). Reading margins are tracked for each group separately. If you don't want messages to be duplicated in the cluster, use the same group name everywhere. +- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the "Formats" section. + +Optional parameters: + +- `kafka_row_delimiter` – Delimiter character that marks the end of a record (row) within a message. +- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap'n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. +- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition.
+ +Examples: ```sql CREATE TABLE queue ( @@ -31,6 +52,24 @@ Example: ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow'); SELECT * FROM queue LIMIT 5; + + CREATE TABLE queue2 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092', + kafka_topic_list = 'topic', + kafka_group_name = 'group1', + kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; + + CREATE TABLE queue3 ( + timestamp UInt64, + level String, + message String + ) ENGINE = Kafka('localhost:9092', 'topic', 'group1') + SETTINGS kafka_format = 'JSONEachRow', + kafka_num_consumers = 4; ``` The delivered messages are tracked automatically, so each message in a group is only counted once. If you want to get the data twice, then create a copy of the table with another group name. @@ -59,7 +98,7 @@ Example: level String, total UInt64 ) ENGINE = SummingMergeTree(day, (day, level), 8192); - + CREATE MATERIALIZED VIEW consumer TO daily AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total FROM queue GROUP BY day, level; diff --git a/docs/en/operations/table_engines/summingmergetree.md b/docs/en/operations/table_engines/summingmergetree.md index 409c4160576..f19f156f9e5 100644 --- a/docs/en/operations/table_engines/summingmergetree.md +++ b/docs/en/operations/table_engines/summingmergetree.md @@ -14,7 +14,7 @@ SummingMergeTree(EventDate, (OrderID, EventDate, BannerID, ...), 8192, (Shows, C The columns to total are set explicitly (the last parameter – Shows, Clicks, Cost, ...). When merging, all rows with the same primary key value have their values totaled in the specified columns. The specified columns also must be numeric and must not be part of the primary key. -If the values were null in all of these columns, the row is deleted. (The exception is cases when the data part would not have any rows left in it.) +If the values were zero in all of these columns, the row is deleted. For the other columns that are not part of the primary key, the first value that occurs is selected when merging. But if a column is of AggregateFunction type, then it is merged according to that function, which effectively makes this engine behave like `AggregatingMergeTree`. diff --git a/docs/en/operations/table_engines/url.md b/docs/en/operations/table_engines/url.md new file mode 100644 index 00000000000..d8dec7dcabd --- /dev/null +++ b/docs/en/operations/table_engines/url.md @@ -0,0 +1,77 @@ + + +# URL(URL, Format) + +This data source operates with data on a remote HTTP/HTTPS server. The engine is +similar to [`File`](./file.md#). + +## Usage in ClickHouse server + +``` +URL(URL, Format) +``` + +`Format` should be supported for `SELECT` and/or `INSERT`. For the full list of +supported formats see [Formats](../../interfaces/formats.md#formats). + +`URL` must match the format of a Uniform Resource Locator. The specified +URL must address a server working with HTTP or HTTPS. The server shouldn't +require any additional HTTP headers. + +`INSERT` and `SELECT` queries are transformed into `POST` and `GET` requests +respectively. For correct handling of `POST` requests, the remote server should support +[Chunked transfer encoding](https://ru.wikipedia.org/wiki/Chunked_transfer_encoding).
+ +**Example:** + +**1.** Create the `url_engine_table` table: + +```sql +CREATE TABLE url_engine_table (word String, value UInt64) +ENGINE=URL('http://127.0.0.1:12345/', CSV) +``` + +**2.** Implement a simple HTTP server using Python 3: + +```python +from http.server import BaseHTTPRequestHandler, HTTPServer + +class CSVHTTPServer(BaseHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.send_header('Content-type', 'text/csv') + self.end_headers() + + self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8")) + +if __name__ == "__main__": + server_address = ('127.0.0.1', 12345) + HTTPServer(server_address, CSVHTTPServer).serve_forever() +``` + +```bash +python3 server.py +``` + +**3.** Query the data: + +```sql +SELECT * FROM url_engine_table +``` + +```text
┌─word──┬─value─┐
│ Hello │     1 │
│ World │     2 │
└───────┴───────┘
+``` + + +## Details of implementation + +- Reads and writes can be performed in parallel +- Not supported: + - `ALTER` + - `SELECT ... SAMPLE` + - Indices + - Replication diff --git a/docs/en/operations/tips.md index 9378c25fab1..4d999062a5d 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -113,6 +113,10 @@ With the default settings, ZooKeeper is a time bomb: This bomb must be defused. +If you want to move data between different ZooKeeper clusters, never move it with a hand-written script, because the result will be incorrect for sequential nodes. Never use the "zkcopy" tool for the same reason: https://github.com/ksprojects/zkcopy/issues/15 + +If you want to split a ZooKeeper cluster, the proper way is to increase the number of replicas and then reconfigure it as two independent clusters. + The ZooKeeper (3.5.1) configuration below is used in the Yandex.Metrica production environment as of May 20, 2017: zoo.cfg: diff --git a/docs/en/operations/utils/clickhouse-local.md index 1960263caaa..bfa612569f3 100644 --- a/docs/en/operations/utils/clickhouse-local.md +++ b/docs/en/operations/utils/clickhouse-local.md @@ -10,9 +10,8 @@ Accepts data that represent tables and queries them using [ClickHouse SQL dialec By default `clickhouse-local` does not have access to data on the same host, but it supports loading server configuration using `--config-file` argument. -
-It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error. -
+!!! warning + It is not recommended to load production server configuration into `clickhouse-local` because data can be damaged in case of human error. ## Usage diff --git a/docs/en/query_language/alter.md b/docs/en/query_language/alter.md index e428bf27bef..068dd9eb606 100644 --- a/docs/en/query_language/alter.md +++ b/docs/en/query_language/alter.md @@ -246,6 +246,8 @@ Mutations are totally ordered by their creation order and are applied to each pa A mutation query returns immediately after the mutation entry is added (in case of replicated tables to ZooKeeper, for nonreplicated tables - to the filesystem). The mutation itself executes asynchronously using the system profile settings. To track the progress of mutations you can use the `system.mutations` table. A mutation that was successfully submitted will continue to execute even if ClickHouse servers are restarted. There is no way to roll back the mutation once it is submitted. +Entries for finished mutations are not deleted right away (the number of preserved entries is determined by the `finished_mutations_to_keep` storage engine parameter). Older mutation entries are deleted. + #### system.mutations table The table contains information about mutations of MergeTree tables and their progress. Each mutation command is represented by a single row. The table has the following columns: diff --git a/docs/en/query_language/dicts/external_dicts.md b/docs/en/query_language/dicts/external_dicts.md index af8c280a4e6..4ab71d97826 100644 --- a/docs/en/query_language/dicts/external_dicts.md +++ b/docs/en/query_language/dicts/external_dicts.md @@ -39,8 +39,5 @@ You can [configure](external_dicts_dict.md#dicts-external_dicts_dict) any number See also "[Functions for working with external dictionaries](../functions/ext_dict_functions.md#ext_dict_functions)". -
- -You can convert values ​​for a small dictionary by describing it in a `SELECT` query (see the [transform](../functions/other_functions.md#other_functions-transform) function). This functionality is not related to external dictionaries. - -
+!!! attention + You can convert values for a small dictionary by describing it in a `SELECT` query (see the [transform](../functions/other_functions.md#other_functions-transform) function). This functionality is not related to external dictionaries. diff --git a/docs/en/query_language/dicts/external_dicts_dict_layout.md b/docs/en/query_language/dicts/external_dicts_dict_layout.md index ef59cefae7d..6e2129003d6 100644 --- a/docs/en/query_language/dicts/external_dicts_dict_layout.md +++ b/docs/en/query_language/dicts/external_dicts_dict_layout.md @@ -219,11 +219,8 @@ Set a large enough cache size. You need to experiment to select the number of ce 3. Assess memory consumption using the `system.dictionaries` table. 4. Increase or decrease the number of cells until the required memory consumption is reached. -
- -Do not use ClickHouse as a source, because it is slow to process queries with random reads. - -
+!!! warning + Do not use ClickHouse as a source, because it is slow to process queries with random reads. diff --git a/docs/en/query_language/dicts/external_dicts_dict_sources.md index 9a4cf5f0dc0..b60176c13fd 100644 --- a/docs/en/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/en/query_language/dicts/external_dicts_dict_sources.md @@ -156,34 +156,36 @@ Configuring `/etc/odbc.ini` (or `~/.odbc.ini`): The dictionary configuration in ClickHouse: ```xml
-<dictionary>
-    <name>table_name</name>
-    <source>
-        <odbc>
-            <connection_string>DSN=myconnection</connection_string>
-            <table>postgresql_table</table>
-        </odbc>
-    </source>
-    <lifetime>
-        <min>300</min>
-        <max>360</max>
-    </lifetime>
-    <layout>
-        <hashed/>
-    </layout>
-    <structure>
-        <id>
-            <name>id</name>
-        </id>
-        <attribute>
-            <name>some_column</name>
-            <type>UInt64</type>
-            <null_value>0</null_value>
-        </attribute>
-    </structure>
-</dictionary>
+<dictionaries>
+    <dictionary>
+        <name>table_name</name>
+        <source>
+            <odbc>
+                <connection_string>DSN=myconnection</connection_string>
+                <table>postgresql_table</table>
+            </odbc>
+        </source>
+        <lifetime>
+            <min>300</min>
+            <max>360</max>
+        </lifetime>
+        <layout>
+            <hashed/>
+        </layout>
+        <structure>
+            <id>
+                <name>id</name>
+            </id>
+            <attribute>
+                <name>some_column</name>
+                <type>UInt64</type>
+                <null_value>0</null_value>
+            </attribute>
+        </structure>
+    </dictionary>
+</dictionaries>
``` You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. diff --git a/docs/en/query_language/dicts/external_dicts_dict_structure.md index 869d6f16ca5..41c106957f9 100644 --- a/docs/en/query_language/dicts/external_dicts_dict_structure.md +++ b/docs/en/query_language/dicts/external_dicts_dict_structure.md @@ -39,11 +39,8 @@ ClickHouse supports the following types of keys: A structure can contain either `<id>` or `<key>`. -
- -The key doesn't need to be defined separately in attributes. - -
+!!! warning + The key doesn't need to be defined separately in attributes. ### Numeric key @@ -65,9 +62,8 @@ Configuration fields: The key can be a `tuple` of fields of any types. The [layout](external_dicts_dict_layout.md#dicts-external_dicts_dict_layout) in this case must be `complex_key_hashed` or `complex_key_cache`. -
-A composite key can consist of a single element. This makes it possible to use a string as the key, for instance. -
+!!! tip + A composite key can consist of a single element. This makes it possible to use a string as the key, for instance. The key structure is set in the element `<key>`. Key fields are specified in the same format as the dictionary [attributes](external_dicts_dict_structure.md#dicts-external_dicts_dict_structure-attributes). Example: diff --git a/docs/en/query_language/index.md index 1be92bb33d0..d6ce83bbea8 100644 --- a/docs/en/query_language/index.md +++ b/docs/en/query_language/index.md @@ -1,4 +1,4 @@ -# SQL dialect +# SQL reference * [SELECT](select.md#select) * [INSERT INTO](insert_into.md#queries-insert) diff --git a/docs/en/query_language/misc.md index 237c58902a3..99601086e70 100644 --- a/docs/en/query_language/misc.md +++ b/docs/en/query_language/misc.md @@ -11,7 +11,7 @@ After executing an ATTACH query, the server will know about the existence of the If the table was previously detached (`DETACH`), meaning that its structure is known, you can use shorthand without defining the structure. ```sql -ATTACH TABLE [IF NOT EXISTS] [db.]name +ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] ``` This query is used when starting the server. The server stores table metadata as files with `ATTACH` queries, which it simply runs at launch (with the exception of system tables, which are explicitly created on the server). @@ -39,7 +39,7 @@ If `IF EXISTS` is specified, it doesn't return an error if the table doesn't exi Deletes information about the 'name' table from the server. The server stops knowing about the table's existence. ```sql -DETACH TABLE [IF EXISTS] [db.]name +DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` This does not delete the table's data or metadata. On the next server launch, the server will read the metadata and find out about the table again. @@ -167,7 +167,7 @@ To make settings that persist after a server restart, you can only use the serve ## OPTIMIZE ```sql -OPTIMIZE TABLE [db.]name [PARTITION partition] [FINAL] +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition] [FINAL] ``` Asks the table engine to do something for optimization. @@ -175,10 +175,13 @@ Supported only by `*MergeTree` engines, in which this query initializes a non-sc If you specify a `PARTITION`, only the specified partition will be optimized. If you specify `FINAL`, optimization will be performed even when all the data is already in one part. +!!! warning + OPTIMIZE can't fix the "Too many parts" error. + ## KILL QUERY ```sql -KILL QUERY +KILL QUERY [ON CLUSTER cluster] WHERE <where expression to SELECT FROM system.processes query> [SYNC|ASYNC|TEST] [FORMAT format] diff --git a/docs/en/query_language/select.md index 1b457819342..c92ac7c9dc4 100644 --- a/docs/en/query_language/select.md +++ b/docs/en/query_language/select.md @@ -722,11 +722,8 @@ A subquery in the IN clause is always run just one time on a single server. Ther There are two options for IN-s with subqueries (similar to JOINs): normal `IN` / `JOIN` and `GLOBAL IN` / `GLOBAL JOIN`. They differ in how they are run for distributed query processing. -
- -Remember that the algorithms described below may work differently depending on the [settings](../operations/settings/settings.md#settings-distributed_product_mode) `distributed_product_mode` setting. - -
+!!! attention + Remember that the algorithms described below may work differently depending on the [distributed_product_mode](../operations/settings/settings.md#settings-distributed_product_mode) setting. When using the regular IN, the query is sent to remote servers, and each of them runs the subqueries in the `IN` or `JOIN` clause. diff --git a/docs/en/query_language/table_functions/remote.md index 8ceaa0cd659..425c6f81a7d 100644 --- a/docs/en/query_language/table_functions/remote.md +++ b/docs/en/query_language/table_functions/remote.md @@ -13,11 +13,8 @@ remote('addresses_expr', db.table[, 'user'[, 'password']]) `addresses_expr` – An expression that generates addresses of remote servers. This may be just one server address. The server address is `host:port`, or just `host`. The host can be specified as the server name, or as the IPv4 or IPv6 address. An IPv6 address is specified in square brackets. The port is the TCP port on the remote server. If the port is omitted, it uses `tcp_port` from the server's config file (by default, 9000). -
- -The port is required for an IPv6 address. - -
+!!! important + The port is required for an IPv6 address. Examples: diff --git a/docs/en/query_language/table_functions/url.md b/docs/en/query_language/table_functions/url.md new file mode 100644 index 00000000000..7e30936bd45 --- /dev/null +++ b/docs/en/query_language/table_functions/url.md @@ -0,0 +1,19 @@ + + +# url + +`url(URL, format, structure)` - returns a table created from the `URL` with given +`format` and `structure`. + +URL - HTTP or HTTPS server address, which can accept `GET` and/or `POST` requests. + +format - [format](../../interfaces/formats.md#formats) of the data. + +structure - table structure in `'UserID UInt64, Name String'` format. Determines column names and types. + +**Example** + +```sql +-- getting the first 3 lines of a table that contains columns of String and UInt32 type from an HTTP server that responds in CSV format. +SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 +``` diff --git a/docs/en/security_changelog.md new file mode 100644 index 00000000000..a876b2f9a73 --- /dev/null +++ b/docs/en/security_changelog.md @@ -0,0 +1,21 @@ +## Fixed in ClickHouse release 1.1.54388, 2018-06-28 + +### CVE-2018-14668 +"remote" table function allowed arbitrary symbols in "user", "password" and "default_database" fields which led to Cross Protocol Request Forgery Attacks. + +Credits: Andrey Krasichkov of Yandex Information Security Team + +## Fixed in ClickHouse release 1.1.54390, 2018-07-06 + +### CVE-2018-14669 +ClickHouse MySQL client had "LOAD DATA LOCAL INFILE" functionality enabled that allowed a malicious MySQL database to read arbitrary files from the connected ClickHouse server. + +Credits: Andrey Krasichkov and Evgeny Sidorov of Yandex Information Security Team + +## Fixed in ClickHouse release 1.1.54131, 2017-01-10 + +### CVE-2018-14670 + +Incorrect configuration in deb package could lead to unauthorized use of the database. + +Credits: the UK's National Cyber Security Centre (NCSC) \ No newline at end of file diff --git a/docs/redirects.txt index 3a975a5f812..f9473c69d94 100644 --- a/docs/redirects.txt +++ b/docs/redirects.txt @@ -115,3 +115,4 @@ table_functions/merge.md query_language/table_functions/merge.md table_functions/numbers.md query_language/table_functions/numbers.md table_functions/remote.md query_language/table_functions/remote.md query_language/queries.md query_language.md +introduction/possible_silly_questions.md faq/general.md diff --git a/docs/ru/data_types/array.md index 894b6c7647c..2e85a84d19e 100644 --- a/docs/ru/data_types/array.md +++ b/docs/ru/data_types/array.md @@ -1,4 +1,83 @@ + + # Array(T) -Массив из элементов типа T. Типом T может быть любой тип, в том числе, массив. -Многомерные массивы не рекомендуется использовать, так как их поддержка довольно слабая (например, многомерные массивы нельзя сохранить в таблицы с движком семейства MergeTree). +Массив из элементов типа `T`. + +`T` может быть любым, в том числе, массивом. Используйте многомерные массивы с осторожностью. ClickHouse поддерживает многомерные массивы ограниченно, например, их нельзя хранить в таблицах семейства `MergeTree`.
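Для иллюстрации ограничения на многомерные массивы ниже приведён набросок запроса (пример не из исходного документа), создающего массив массивов «на лету»:

```sql
SELECT [[1, 2], [3]] AS arr, toTypeName(arr)
```

Такой запрос вернёт тип `Array(Array(UInt8))`, однако сохранить столбец такого типа в таблицу семейства `MergeTree` не получится.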
+ +## Создание массива + +Массив можно создать с помощью функции: + +``` +array(T) +``` + +Также можно использовать квадратные скобки: + +``` +[] +``` + +Пример создания массива: +``` +:) SELECT array(1, 2) AS x, toTypeName(x) + +SELECT + [1, 2] AS x, + toTypeName(x) + 
┌─x─────┬─toTypeName(array(1, 2))─┐
│ [1,2] │ Array(UInt8)            │
└───────┴─────────────────────────┘
 + +1 rows in set. Elapsed: 0.002 sec. + +:) SELECT [1, 2] AS x, toTypeName(x) + +SELECT + [1, 2] AS x, + toTypeName(x) + 
┌─x─────┬─toTypeName([1, 2])─┐
│ [1,2] │ Array(UInt8)       │
└───────┴────────────────────┘
 + +1 rows in set. Elapsed: 0.002 sec. +``` + +## Особенности работы с типами данных + +При создании массива "на лету" ClickHouse автоматически определяет тип аргументов как наиболее узкий тип данных, в котором можно хранить все перечисленные аргументы. Если среди аргументов есть [NULL](../query_language/syntax.md#null-literal) или аргумент типа [Nullable](nullable.md#data_type-nullable), то тип элементов массива — [Nullable](nullable.md#data_type-nullable). + +Если ClickHouse не смог подобрать тип данных, то он сгенерирует исключение. Это произойдёт, например, при попытке создать массив одновременно со строками и числами `SELECT array(1, 'a')`. + +Примеры автоматического определения типа данных: + +``` +:) SELECT array(1, 2, NULL) AS x, toTypeName(x) + +SELECT + [1, 2, NULL] AS x, + toTypeName(x) + 
┌─x──────────┬─toTypeName(array(1, 2, NULL))─┐
│ [1,2,NULL] │ Array(Nullable(UInt8))        │
└────────────┴───────────────────────────────┘
 + +1 rows in set. Elapsed: 0.002 sec. +``` + +Если попытаться создать массив из несовместимых типов данных, то ClickHouse выбросит исключение: + +``` +:) SELECT array(1, 'a') + +SELECT [1, 'a'] + +Received exception from server (version 1.1.54388): +Code: 386. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: There is no supertype for types UInt8, String because some of them are String/FixedString and some of them are not. + +0 rows in set. Elapsed: 0.246 sec. +``` diff --git a/docs/ru/data_types/datetime.md index 8b0ca24d9dd..dfc54e43add 100644 --- a/docs/ru/data_types/datetime.md +++ b/docs/ru/data_types/datetime.md @@ -1,5 +1,6 @@ -# DateTime + +# DateTime Дата-с-временем. Хранится в 4 байтах, в виде (беззнакового) unix timestamp. Позволяет хранить значения в том же интервале, что и для типа Date. Минимальное значение выводится как 0000-00-00 00:00:00. Время хранится с точностью до одной секунды (без учёта секунд координации). diff --git a/docs/ru/data_types/enum.md index ff977431959..aaae8aad378 100644 --- a/docs/ru/data_types/enum.md +++ b/docs/ru/data_types/enum.md @@ -1,18 +1,100 @@ -# Enum + -Enum8 или Enum16. Представляет собой конечное множество строковых значений, сохраняемых более эффективно, чем это делает тип данных `String`. +# Enum8, Enum16 -Пример: +Включает в себя типы `Enum8` и `Enum16`. `Enum` сохраняет конечный набор пар `'строка' = целое число`. Все операции с данными типа `Enum` ClickHouse выполняет как с числами, однако пользователь при этом работает со строковыми константами. Это более эффективно с точки зрения производительности, чем работа с типом данных `String`. -```text -Enum8('hello' = 1, 'world' = 2) +- `Enum8` описывается парами `'String' = Int8`. +- `Enum16` описывается парами `'String' = Int16`. + +## Примеры применения + +Создадим таблицу со столбцом типа `Enum8('hello' = 1, 'world' = 2)`.
+ +``` +CREATE TABLE t_enum +( + x Enum8('hello' = 1, 'world' = 2) +) +ENGINE = TinyLog ``` -- тип данных с двумя возможными значениями - 'hello' и 'world'. +В столбец `x` можно сохранять только значения, перечисленные при определении типа, т.е. `'hello'` или `'world'`. Если попытаться сохранить другое значение, ClickHouse сгенерирует исключение. + +``` +:) INSERT INTO t_enum Values('hello'),('world'),('hello') + +INSERT INTO t_enum VALUES + +Ok. + +3 rows in set. Elapsed: 0.002 sec. + +:) insert into t_enum values('a') + +INSERT INTO t_enum VALUES + + +Exception on client: +Code: 49. DB::Exception: Unknown element 'a' for type Enum8('hello' = 1, 'world' = 2) +``` + +При запросе данных из таблицы ClickHouse выдаст строковые значения из `Enum`. + +``` +SELECT * FROM t_enum + 
┌─x─────┐
│ hello │
│ world │
│ hello │
└───────┘
+``` +Если необходимо увидеть числовые эквиваленты строк, то необходимо привести тип. + +``` +SELECT CAST(x, 'Int8') FROM t_enum + 
┌─CAST(x, 'Int8')─┐
│               1 │
│               2 │
│               1 │
└─────────────────┘
+``` + +Чтобы создать значение типа Enum в запросе, также необходима функция `CAST`. + +``` +SELECT toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)')) + 
┌─toTypeName(CAST('a', 'Enum8(\'a\' = 1, \'b\' = 2)'))─┐
│ Enum8('a' = 1, 'b' = 2)                              │
└──────────────────────────────────────────────────────┘
+``` + +## Общие правила и особенности использования Для каждого из значений прописывается число в диапазоне `-128 .. 127` для `Enum8` или в диапазоне `-32768 .. 32767` для `Enum16`. Все строки должны быть разными, числа - тоже. Разрешена пустая строка. При указании такого типа (в определении таблицы), числа могут идти не подряд и в произвольном порядке. При этом, порядок не имеет значения. -В оперативке столбец такого типа представлен так же, как `Int8` или `Int16` соответствующими числовыми значениями. +Ни строка, ни числовое значение в `Enum` не могут быть [NULL](../query_language/syntax.md#null-literal). + +`Enum` может быть обёрнут в тип [Nullable](nullable.md#data_type-nullable). Таким образом, если создать таблицу запросом + +``` +CREATE TABLE t_enum_nullable +( + x Nullable( Enum8('hello' = 1, 'world' = 2) ) +) +ENGINE = TinyLog +``` + +, то в ней можно будет хранить не только `'hello'` и `'world'`, но и `NULL`. + +``` +INSERT INTO t_enum_nullable Values('hello'),('world'),(NULL) +``` + +В оперативке столбец типа `Enum` представлен так же, как `Int8` или `Int16` соответствующими числовыми значениями. При чтении в текстовом виде, парсит значение как строку и ищет соответствующую строку из множества значений Enum-а. Если не находит - кидается исключение. При записи в текстовом виде, записывает значение как соответствующую строку. Если в данных столбца есть мусор - числа не из допустимого множества, то кидается исключение. При чтении и записи в бинарном виде, оно осуществляется так же, как для типов данных Int8, Int16. Неявное значение по умолчанию - это значение с минимальным номером. diff --git a/docs/ru/data_types/int_uint.md index d79ce7326bc..49b7bbbbcf8 100644 --- a/docs/ru/data_types/int_uint.md +++ b/docs/ru/data_types/int_uint.md @@ -1,3 +1,5 @@ + + # UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64 Целые числа фиксированной длины, без знака или со знаком.
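Набросок примера (не из исходного документа): значение, не помещающееся в целочисленный тип, при явном преобразовании усекается по модулю разрядности типа:

```sql
SELECT toUInt8(256)  -- вернёт 0, так как 256 не помещается в UInt8
```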
@@ -9,8 +11,8 @@ - Int32 - [ -2147483648 : 2147483647 ] - Int64 - [ -9223372036854775808 : 9223372036854775807 ] - - + + ## Диапазоны Uint - UInt8 - [ 0 : 255 ] diff --git a/docs/ru/data_types/nullable.md b/docs/ru/data_types/nullable.md new file mode 100644 index 00000000000..f098cac2f76 --- /dev/null +++ b/docs/ru/data_types/nullable.md @@ -0,0 +1,63 @@ + + +# Nullable(TypeName) + +Позволяет работать как со значением типа `TypeName`, так и с отсутствием этого значения ([NULL](../query_language/syntax.md#null-literal)) в одной и той же переменной, в том числе хранить `NULL` в таблицах вместе со значениями типа `TypeName`. Например, в столбце типа `Nullable(Int8)` можно хранить значения типа `Int8`, а в тех строках, где значения нет, будет храниться `NULL`. + +В качестве `TypeName` нельзя использовать составные типы данных [Array](array.md#data_type-array) и [Tuple](tuple.md#data_type-tuple). Составные типы данных могут содержать значения типа `Nullable`, например `Array(Nullable(Int8))`. + +Поле типа `Nullable` нельзя включать в индексы. + +`NULL` — значение по умолчанию для типа `Nullable`, если в конфигурации сервера ClickHouse не указано иное. + +## Особенности хранения + +Для хранения значения типа `Nullable` ClickHouse использует: + +- Отдельный файл с масками `NULL` (далее маска). +- Непосредственно файл со значениями. + +Маска определяет, что лежит в ячейке данных: `NULL` или значение. + +В случае, когда маска указывает, что в ячейке хранится `NULL`, в файле значений хранится значение по умолчанию для типа данных. Т.е. если, например, поле имеет тип `Nullable(Int8)`, то ячейка будет хранить значение по умолчанию для `Int8`. Эта особенность увеличивает размер хранилища. + +!!! info + Почти всегда использование `Nullable` снижает производительность, учитывайте это при проектировании своих баз. + +## Пример использования + +``` +:) CREATE TABLE t_null(x Int8, y Nullable(Int8)) ENGINE TinyLog + +CREATE TABLE t_null +( + x Int8, + y Nullable(Int8) +) +ENGINE = TinyLog + +Ok. + +0 rows in set. Elapsed: 0.012 sec. + +:) INSERT INTO t_null VALUES (1, NULL), (2, 3) + +INSERT INTO t_null VALUES + +Ok. + +1 rows in set. Elapsed: 0.007 sec. + +:) SELECT x + y from t_null + +SELECT x + y +FROM t_null + 
┌─plus(x, y)─┐
│       ᴺᵁᴸᴸ │
│          5 │
└────────────┘
 + +2 rows in set. Elapsed: 0.144 sec. + +``` diff --git a/docs/ru/data_types/special_data_types/nothing.md new file mode 100644 index 00000000000..6b83c354d5e --- /dev/null +++ b/docs/ru/data_types/special_data_types/nothing.md @@ -0,0 +1,19 @@ + + +# Nothing + +Этот тип данных предназначен только для того, чтобы представлять [NULL](../../query_language/syntax.md#null-literal), т.е. отсутствие значения. + +Невозможно создать значение типа `Nothing`, поэтому он используется там, где значение не подразумевается. Например, `NULL` записывается как `Nullable(Nothing)` ([Nullable](../../data_types/nullable.md#data_type-nullable) — это тип данных, позволяющий хранить `NULL` в таблицах). Также тип `Nothing` используется для обозначения пустых массивов: + +```bash +:) SELECT toTypeName(Array()) + +SELECT toTypeName([]) + 
┌─toTypeName(array())─┐
│ Array(Nothing)      │
└─────────────────────┘
 + +1 rows in set. Elapsed: 0.062 sec. +``` diff --git a/docs/ru/data_types/string.md index e35fa6a892d..f4a100470c7 100644 --- a/docs/ru/data_types/string.md +++ b/docs/ru/data_types/string.md @@ -1,3 +1,5 @@ + + # String Строки произвольной длины. Длина не ограничена.
Значение может содержать произвольный набор байт, включая нулевые байты. diff --git a/docs/ru/data_types/tuple.md index abac42bc4b7..66aca46d939 100644 --- a/docs/ru/data_types/tuple.md +++ b/docs/ru/data_types/tuple.md @@ -1,5 +1,53 @@ + + # Tuple(T1, T2, ...) -Кортежи не могут быть записаны в таблицы (кроме таблиц типа Memory). Они используется для временной группировки столбцов. Столбцы могут группироваться при использовании выражения IN в запросе, а также для указания нескольких формальных параметров лямбда-функций. Подробнее смотрите раздел "Операторы IN", "Функции высшего порядка". +Кортеж из элементов любого [типа](index.md#data_types). Элементы кортежа могут быть одного или разных типов. -Кортежи могут быть выведены в результате выполнения запроса. В этом случае, в текстовых форматах кроме JSON\*, значения выводятся в круглых скобках через запятую. В форматах JSON\*, кортежи выводятся в виде массивов (в квадратных скобках). +Кортежи нельзя хранить в таблицах (кроме таблиц типа Memory). Они используются для временной группировки столбцов. Столбцы могут группироваться при использовании выражения IN в запросе, а также для указания нескольких формальных параметров лямбда-функций. Подробнее смотрите разделы [Операторы IN](../query_language/select.md#in_operators), [Функции высшего порядка](../query_language/functions/higher_order_functions.md#higher_order_functions). + +Кортежи могут быть результатом запроса. В этом случае, в текстовых форматах кроме JSON, значения выводятся в круглых скобках через запятую. В форматах JSON, кортежи выводятся в виде массивов (в квадратных скобках). + +## Создание кортежа + +Кортеж можно создать с помощью функции + +``` +tuple(T1, T2, ...) +``` + +Пример создания кортежа: + +``` +:) SELECT tuple(1,'a') AS x, toTypeName(x) + +SELECT + (1, 'a') AS x, + toTypeName(x) + 
┌─x───────┬─toTypeName(tuple(1, 'a'))─┐
│ (1,'a') │ Tuple(UInt8, String)      │
└─────────┴───────────────────────────┘
 + +1 rows in set. Elapsed: 0.021 sec. +``` + +## Особенности работы с типами данных + +При создании кортежа "на лету" ClickHouse автоматически определяет тип каждого аргумента как минимальный из типов, который может сохранить значение аргумента. Если аргумент — [NULL](../query_language/syntax.md#null-literal), то тип элемента кортежа — [Nullable](nullable.md#data_type-nullable). + +Пример автоматического определения типа данных: + +``` +SELECT tuple(1,NULL) AS x, toTypeName(x) + +SELECT + (1, NULL) AS x, + toTypeName(x) + 
┌─x────────┬─toTypeName(tuple(1, NULL))──────┐
│ (1,NULL) │ Tuple(UInt8, Nullable(Nothing)) │
└──────────┴─────────────────────────────────┘
 + +1 rows in set. Elapsed: 0.002 sec. +``` diff --git a/docs/ru/faq/general.md new file mode 100644 index 00000000000..15a19bbeb85 --- /dev/null +++ b/docs/ru/faq/general.md @@ -0,0 +1,12 @@ +# Общие вопросы + +## Почему бы не использовать системы типа MapReduce? + +Системами типа MapReduce будем называть системы распределённых вычислений, в которых операция reduce сделана на основе распределённой сортировки. Наиболее распространённым opensource решением данного класса является [Apache Hadoop](http://hadoop.apache.org), а в Яндексе используется внутренняя разработка — YT. + +Такие системы не подходят для онлайн запросов в силу слишком большой latency. То есть, не могут быть использованы в качестве бэкенда для веб-интерфейса. +Такие системы не подходят для обновления данных в реальном времени.
+Распределённая сортировка не является оптимальным способом выполнения операции reduce, если результат выполнения операции и все промежуточные результаты, при их наличии, помещаются в оперативку на одном сервере, как обычно бывает в запросах, выполняющихся в режиме онлайн. В таком случае, оптимальным способом выполнения операции reduce является хэш-таблица. Частым способом оптимизации map-reduce задач является предагрегация (частичный reduce) с использованием хэш-таблицы в оперативной памяти. Эта оптимизация делается пользователем в ручном режиме. +Распределённая сортировка является основной причиной тормозов при выполнении несложных map-reduce задач. + +Большинство реализаций MapReduce позволяют выполнять произвольный код на кластере. Но для OLAP задач лучше подходит декларативный язык запросов, который позволяет быстро проводить исследования. Для примера, для Hadoop существует Hive и Pig. Также смотрите Cloudera Impala, Shark (устаревший) для Spark, а также Spark SQL, Presto, Apache Drill. Впрочем, производительность при выполнении таких задач является сильно неоптимальной по сравнению со специализированными системами, а сравнительно высокая latency не позволяет использовать эти системы в качестве бэкенда для веб-интерфейса. \ No newline at end of file diff --git a/docs/ru/getting_started/example_datasets/nyc_taxi.md b/docs/ru/getting_started/example_datasets/nyc_taxi.md index 859f77c6ef4..f66f37de89a 100644 --- a/docs/ru/getting_started/example_datasets/nyc_taxi.md +++ b/docs/ru/getting_started/example_datasets/nyc_taxi.md @@ -361,9 +361,8 @@ Q4: 0.072 sec. ## Резюме -```text -nodes Q1 Q2 Q3 Q4 - 1 0.490 1.224 2.104 3.593 - 3 0.212 0.438 0.733 1.241 -140 0.028 0.043 0.051 0.072 -``` +| серверов| Q1 | Q2 | Q3 | Q4 | +| ------- | ----- | ----- | ----- | ----- | +| 1 | 0.490 | 1.224 | 2.104 | 3.593 | +| 3 | 0.212 | 0.438 | 0.733 | 1.241 | +| 140 | 0.028 | 0.043 | 0.051 | 0.072 | diff --git a/docs/ru/getting_started/example_datasets/ontime.md b/docs/ru/getting_started/example_datasets/ontime.md index 923c33befa1..afd4de06893 100644 --- a/docs/ru/getting_started/example_datasets/ontime.md +++ b/docs/ru/getting_started/example_datasets/ontime.md @@ -2,15 +2,6 @@ # OnTime -Данный тест производительности был создан Вадимом Ткаченко, см: - -- -- -- -- -- -- - Скачивание данных: ```bash @@ -316,3 +307,12 @@ SELECT OriginCityName, DestCityName, count() AS c FROM ontime GROUP BY OriginCit SELECT OriginCityName, count() AS c FROM ontime GROUP BY OriginCityName ORDER BY c DESC LIMIT 10; ``` + +Данный тест производительности был создан Вадимом Ткаченко, статьи по теме: + +- +- +- +- +- +- diff --git a/docs/ru/images/column_oriented.gif b/docs/ru/images/column_oriented.gif new file mode 100644 index 00000000000..15f4b12e697 Binary files /dev/null and b/docs/ru/images/column_oriented.gif differ diff --git a/docs/ru/images/row_oriented.gif b/docs/ru/images/row_oriented.gif new file mode 100644 index 00000000000..53daa20f322 Binary files /dev/null and b/docs/ru/images/row_oriented.gif differ diff --git a/docs/ru/index.md b/docs/ru/index.md index 035bae20639..cd83b410f55 100644 --- a/docs/ru/index.md +++ b/docs/ru/index.md @@ -1,39 +1,44 @@ # Что такое ClickHouse -ClickHouse - столбцовая СУБД для OLAP (Columnar DBMS). +ClickHouse - столбцовая система управления базами данных (СУБД) для онлайн обработки аналитических запросов (OLAP). 
В обычной, "строковой" СУБД, данные хранятся в таком порядке: -```text -5123456789123456789 1 Eurobasket - Greece - Bosnia and Herzegovina - example.com 1 2011-09-01 01:03:02 6274717 1294101174 11409 612345678912345678 0 33 6 http://www.example.com/basketball/team/123/match/456789.html http://www.example.com/basketball/team/123/match/987654.html 0 1366 768 32 10 3183 0 0 13 0\0 1 1 0 0 2011142 -1 0 0 01321 613 660 2011-09-01 08:01:17 0 0 0 0 utf-8 1466 0 0 0 5678901234567890123 277789954 0 0 0 0 0 -5234985259563631958 0 Consulting, Tax assessment, Accounting, Law 1 2011-09-01 01:03:02 6320881 2111222333 213 6458937489576391093 0 3 2 http://www.example.ru/ 0 800 600 16 10 2 153.1 0 0 10 63 1 1 0 0 2111678 000 0 588 368 240 2011-09-01 01:03:17 4 0 60310 0 windows-1251 1466 0 000 778899001 0 0 0 0 0 -... -``` +| Строка | WatchID | JavaEnable | Title | GoodEvent | EventTime | +| ------ | ------------------- | ---------- | ------------------ | --------- | ------------------- | +| #0 | 5385521489354350662 | 1 | Investor Relations | 1 | 2016-05-18 05:19:20 | +| #1 | 5385521490329509958 | 0 | Contact us | 1 | 2016-05-18 08:10:20 | +| #2 | 5385521489953706054 | 1 | Mission | 1 | 2016-05-18 07:38:00 | +| #N | ... | ... | ... | ... | ... | -То есть, значения, относящиеся к одной строке, хранятся рядом. -Примеры строковых СУБД: MySQL, Postgres, MS SQL Server и т. п. +То есть, значения, относящиеся к одной строке, физически хранятся рядом. + +Примеры строковых СУБД: MySQL, Postgres, MS SQL Server. +{: .grey } В столбцовых СУБД, данные хранятся в таком порядке: -```text -WatchID: 5385521489354350662 5385521490329509958 5385521489953706054 5385521490476781638 5385521490583269446 5385521490218868806 5385521491437850694 5385521491090174022 5385521490792669254 5385521490420695110 5385521491532181574 5385521491559694406 5385521491459625030 5385521492275175494 5385521492781318214 5385521492710027334 5385521492955615302 5385521493708759110 5385521494506434630 5385521493104611398 -JavaEnable: 1 0 1 0 0 0 1 0 1 1 1 1 1 1 0 1 0 0 1 1 -Title: Yandex Announcements - Investor Relations - Yandex Yandex — Contact us — Moscow Yandex — Mission Ru Yandex — History — History of Yandex Yandex Financial Releases - Investor Relations - Yandex Yandex — Locations Yandex Board of Directors - Corporate Governance - Yandex Yandex — Technologies -GoodEvent: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 -EventTime: 2016-05-18 05:19:20 2016-05-18 08:10:20 2016-05-18 07:38:00 2016-05-18 01:13:08 2016-05-18 00:04:06 2016-05-18 04:21:30 2016-05-18 00:34:16 2016-05-18 07:35:49 2016-05-18 11:41:59 2016-05-18 01:13:32 -``` +| Строка: | #0 | #1 | #2 | #N | +| ----------- | ------------------- | ------------------- | ------------------- | ------------------- | +| WatchID: | 5385521489354350662 | 5385521490329509958 | 5385521489953706054 | ... | +| JavaEnable: | 1 | 0 | 1 | ... | +| Title: | Investor Relations | Contact us | Mission | ... | +| GoodEvent: | 1 | 1 | 1 | ... | +| EventTime: | 2016-05-18 05:19:20 | 2016-05-18 08:10:20 | 2016-05-18 07:38:00 | ... | + В примерах изображён только порядок расположения данных. То есть, значения из разных столбцов хранятся отдельно, а данные одного столбца - вместе. -Примеры столбцовых СУБД: `Vertica`, `Paraccel (Actian Matrix) (Amazon Redshift)`, `Sybase IQ`, `Exasol`, `Infobright`, `InfiniDB`, `MonetDB (VectorWise) (Actian Vector)`, `LucidDB`, `SAP HANA`, `Google Dremel`, `Google PowerDrill`, `Druid`, `kdb+` и т. п. 
+Примеры столбцовых СУБД: Vertica, Paraccel (Actian Matrix, Amazon Redshift), Sybase IQ, Exasol, Infobright, InfiniDB, MonetDB (VectorWise, Actian Vector), LucidDB, SAP HANA, Google Dremel, Google PowerDrill, Druid, kdb+. +{: .grey } Разный порядок хранения данных лучше подходит для разных сценариев работы. Сценарий работы с данными - это то, какие производятся запросы, как часто и в каком соотношении; сколько читается данных на запросы каждого вида - строк, столбцов, байт; как соотносятся чтения и обновления данных; какой рабочий размер данных и насколько локально он используется; используются ли транзакции и с какой изолированностью; какие требования к дублированию данных и логической целостности; требования к задержкам на выполнение и пропускной способности запросов каждого вида и т. п. Чем больше нагрузка на систему, тем более важной становится специализация под сценарий работы, и тем более конкретной становится эта специализация. Не существует системы, одинаково хорошо подходящей под существенно различные сценарии работы. Если система подходит под широкое множество сценариев работы, то при достаточно большой нагрузке, система будет справляться со всеми сценариями работы плохо, или справляться хорошо только с одним из сценариев работы. -Будем говорить, что OLAP (онлайн обработка аналитических запросов) сценарий работы - это: +## Ключевые особенности OLAP сценария работы - подавляющее большинство запросов - на чтение; - данные обновляются достаточно большими пачками (> 1000 строк), а не по одной строке, или не обновляются вообще; @@ -49,21 +54,34 @@ EventTime: 2016-05-18 05:19:20 2016-05-18 08:10:20 2016-05-18 07:38:00 - в запросе одна большая таблица, все таблицы кроме одной маленькие; - результат выполнения запроса существенно меньше исходных данных - то есть, данные фильтруются или агрегируются; результат выполнения помещается в оперативку на одном сервере; -Легко видеть, что OLAP сценарий работы существенно отличается от других распространённых сценариев работы (например, OLTP или Key-Value сценариев работы). Таким образом, не имеет никакого смысла пытаться использовать OLTP или Key-Value БД для обработки аналитических запросов, если вы хотите получить приличную производительность ("выше плинтуса"). Например, если вы попытаетесь использовать для аналитики MongoDB или Elliptics - вы получите анекдотически низкую производительность по сравнению с OLAP-СУБД. +Легко видеть, что OLAP сценарий работы существенно отличается от других распространённых сценариев работы (например, OLTP или Key-Value сценариев работы). Таким образом, не имеет никакого смысла пытаться использовать OLTP или Key-Value БД для обработки аналитических запросов, если вы хотите получить приличную производительность ("выше плинтуса"). Например, если вы попытаетесь использовать для аналитики MongoDB или Redis - вы получите анекдотически низкую производительность по сравнению с OLAP-СУБД. -Столбцовые СУБД лучше (от 100 раз по скорости обработки большинства запросов) подходят для OLAP сценария работы по следующим причинам: +## Причины, по которым столбцовые СУБД лучше подходят для OLAP сценария -1. По I/O. -2. Для выполнения аналитического запроса, требуется прочитать небольшое количество столбцов таблицы. В столбцовой БД для этого можно читать только нужные данные. Например, если вам требуется только 5 столбцов из 100, то следует рассчитывать на 20-кратное уменьшение ввода-вывода. -3. Так как данные читаются пачками, то их проще сжимать. Данные, лежащие по столбцам также лучше сжимаются. 
За счёт этого, дополнительно уменьшается объём ввода-вывода. -4. За счёт уменьшения ввода-вывода, больше данных влезает в системный кэш. +Столбцовые СУБД лучше (от 100 раз по скорости обработки большинства запросов) подходят для OLAP сценария работы. Причины в деталях будут разъяснены ниже, а сам факт проще продемонстрировать визуально: + +**Строковые СУБД** + +![Строковые](images/row_oriented.gif#) + +**Столбцовые СУБД** + +![Столбцовые](images/column_oriented.gif#) + +Видите разницу? + +### По вводу-выводу + +1. Для выполнения аналитического запроса, требуется прочитать небольшое количество столбцов таблицы. В столбцовой БД для этого можно читать только нужные данные. Например, если вам требуется только 5 столбцов из 100, то следует рассчитывать на 20-кратное уменьшение ввода-вывода. +2. Так как данные читаются пачками, то их проще сжимать. Данные, лежащие по столбцам также лучше сжимаются. За счёт этого, дополнительно уменьшается объём ввода-вывода. +3. За счёт уменьшения ввода-вывода, больше данных влезает в системный кэш. Для примера, для запроса "посчитать количество записей для каждой рекламной системы", требуется прочитать один столбец "идентификатор рекламной системы", который занимает 1 байт в несжатом виде. Если большинство переходов было не с рекламных систем, то можно рассчитывать хотя бы на десятикратное сжатие этого столбца. При использовании быстрого алгоритма сжатия, возможно разжатие данных со скоростью более нескольких гигабайт несжатых данных в секунду. То есть, такой запрос может выполняться со скоростью около нескольких миллиардов строк в секунду на одном сервере. На практике, такая скорость действительно достигается.
-Пример:
-
-```bash
-milovidov@hostname:~$ clickhouse-client
+<details markdown="1"><summary>Пример</summary>
+<p>
+<pre>
+$ clickhouse-client
 ClickHouse client version 0.0.52053.
 Connecting to localhost:9000.
 Connected to ClickHouse server version 0.0.52053.
@@ -104,9 +122,11 @@ LIMIT 20
 20 rows in set. Elapsed: 0.153 sec. Processed 1.00 billion rows, 4.00 GB (6.53 billion rows/s., 26.10 GB/s.)
 
 :)
-```
+</pre>
+</p>
+</details>
-2. По CPU. +### По вычислениям Так как для выполнения запроса надо обработать достаточно большое количество строк, становится актуальным диспетчеризовывать все операции не для отдельных строк, а для целых векторов, или реализовать движок выполнения запроса так, чтобы издержки на диспетчеризацию были примерно нулевыми. Если этого не делать, то при любой не слишком плохой дисковой подсистеме, интерпретатор запроса неизбежно упрётся в CPU. Имеет смысл не только хранить данные по столбцам, но и обрабатывать их, по возможности, тоже по столбцам. diff --git a/docs/ru/interfaces/formats.md b/docs/ru/interfaces/formats.md index ab6f8591f4b..7854b832ea6 100644 --- a/docs/ru/interfaces/formats.md +++ b/docs/ru/interfaces/formats.md @@ -8,29 +8,29 @@ ClickHouse может принимать (`INSERT`) и отдавать (`SELECT Формат | INSERT | SELECT -------|--------|-------- -[TabSeparated](formats.md#tabseparated) | ✔ | ✔ | -[TabSeparatedRaw](formats.md#tabseparatedraw) | ✗ | ✔ | -[TabSeparatedWithNames](formats.md#tabseparatedwithnames) | ✔ | ✔ | -[TabSeparatedWithNamesAndTypes](formats.md#tabseparatedwithnamesandtypes) | ✔ | ✔ | -[CSV](formats.md#csv) | ✔ | ✔ | -[CSVWithNames](formats.md#csvwithnames) | ✔ | ✔ | -[Values](formats.md#values) | ✔ | ✔ | -[Vertical](formats.md#vertical) | ✗ | ✔ | -[VerticalRaw](formats.md#verticalraw) | ✗ | ✔ | -[JSON](formats.md#json) | ✗ | ✔ | -[JSONCompact](formats.md#jsoncompact) | ✗ | ✔ | -[JSONEachRow](formats.md#jsoneachrow) | ✔ | ✔ | -[TSKV](formats.md#tskv) | ✔ | ✔ | -[Pretty](formats.md#pretty) | ✗ | ✔ | -[PrettyCompact](formats.md#prettycompact) | ✗ | ✔ | -[PrettyCompactMonoBlock](formats.md#prettycompactmonoblock) | ✗ | ✔ | -[PrettyNoEscapes](formats.md#prettynoescapes) | ✗ | ✔ | -[PrettySpace](formats.md#prettyspace) | ✗ | ✔ | -[RowBinary](formats.md#rowbinary) | ✔ | ✔ | -[Native](formats.md#native) | ✔ | ✔ | -[Null](formats.md#null) | ✗ | ✔ | -[XML](formats.md#xml) | ✗ | ✔ | -[CapnProto](formats.md#capnproto) | ✔ | ✔ | +[TabSeparated](#tabseparated) | ✔ | ✔ | +[TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ | +[TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | +[TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | +[CSV](#csv) | ✔ | ✔ | +[CSVWithNames](#csvwithnames) | ✔ | ✔ | +[Values](#values) | ✔ | ✔ | +[Vertical](#vertical) | ✗ | ✔ | +[VerticalRaw](#verticalraw) | ✗ | ✔ | +[JSON](#json) | ✗ | ✔ | +[JSONCompact](#jsoncompact) | ✗ | ✔ | +[JSONEachRow](#jsoneachrow) | ✔ | ✔ | +[TSKV](#tskv) | ✔ | ✔ | +[Pretty](#pretty) | ✗ | ✔ | +[PrettyCompact](#prettycompact) | ✗ | ✔ | +[PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | +[PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | +[PrettySpace](#prettyspace) | ✗ | ✔ | +[RowBinary](#rowbinary) | ✔ | ✔ | +[Native](#native) | ✔ | ✔ | +[Null](#null) | ✗ | ✔ | +[XML](#xml) | ✗ | ✔ | +[CapnProto](#capnproto) | ✔ | ✔ | @@ -64,14 +64,19 @@ struct Message { Формат comma separated values ([RFC](https://tools.ietf.org/html/rfc4180)). -При форматировании, строки выводятся в двойных кавычках. Двойная кавычка внутри строки выводится как две двойные кавычки подряд. Других правил экранирования нет. Даты и даты-с-временем выводятся в двойных кавычках. Числа выводятся без кавычек. Значения разделяются символом-разделителем*. Строки разделяются unix переводом строки (LF). Массивы сериализуются в CSV следующим образом: сначала массив сериализуется в строку, как в формате TabSeparated, а затем полученная строка выводится в CSV в двойных кавычках. 
Кортежи в формате CSV сериализуются, как отдельные столбцы (то есть, теряется их вложенность в кортеж). +При форматировании, строки выводятся в двойных кавычках. Двойная кавычка внутри строки выводится как две двойные кавычки подряд. Других правил экранирования нет. Даты и даты-с-временем выводятся в двойных кавычках. Числа выводятся без кавычек. Значения разделяются символом-разделителем, по умолчанию — `,`. Символ-разделитель определяется настройкой [format_csv_delimiter](../operations/settings/settings.md#format_csv_delimiter). Строки разделяются unix переводом строки (LF). Массивы сериализуются в CSV следующим образом: сначала массив сериализуется в строку, как в формате TabSeparated, а затем полученная строка выводится в CSV в двойных кавычках. Кортежи в формате CSV сериализуются, как отдельные столбцы (то есть, теряется их вложенность в кортеж). -*По умолчанию — `,`. См. настройку [format_csv_delimiter](/docs/ru/operations/settings/settings/#format_csv_delimiter) для дополнительной информации. + +```bash +clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +``` При парсинге, все значения могут парситься как в кавычках, так и без кавычек. Поддерживаются как двойные, так и одинарные кавычки. В том числе, строки могут быть расположены без кавычек - тогда они парсятся до символа-разделителя или перевода строки (CR или LF). В нарушение RFC, в случае парсинга строк не в кавычках, начальные и конечные пробелы и табы игнорируются. В качестве перевода строки, поддерживаются как Unix (LF), так и Windows (CR LF) и Mac OS Classic (LF CR) варианты. + +`NULL` форматируется в виде `\N`. + Формат CSV поддерживает вывод totals и extremes аналогично `TabSeparated`. - ## CSVWithNames @@ -162,7 +167,11 @@ JSON совместим с JavaScript. Для этого, дополнитель `extremes` - экстремальные значения (при настройке extremes, выставленной в 1). Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). + +ClickHouse поддерживает [NULL](../query_language/syntax.md#null-literal), который при выводе JSON будет отображен как `null`. + Смотрите также формат JSONEachRow. + ## JSONCompact @@ -252,6 +261,18 @@ JSON совместим с JavaScript. Для этого, дополнитель Выводит данные в виде Unicode-art табличек, также используя ANSI-escape последовательности для установки цветов в терминале. Рисуется полная сетка таблицы и, таким образом, каждая строчка занимает две строки в терминале. Каждый блок результата выводится в виде отдельной таблицы. Это нужно, чтобы можно было выводить блоки без буферизации результата (буферизация потребовалась бы, чтобы заранее вычислить видимую ширину всех значений.) + +[NULL](../query_language/syntax.md#null-literal) выводится как `ᴺᵁᴸᴸ`. + +```sql +SELECT * FROM t_null +``` +```
+┌─x─┬────y─┐
+│ 1 │ ᴺᵁᴸᴸ │
+└───┴──────┘
+``` + Для защиты от вываливания слишком большого количества данных в терминал, выводятся только первые 10 000 строк. Если строк больше или равно 10 000, то будет написано "Showed first 10 000." Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу).
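Например, набросок запроса с явным указанием формата вывода (таблица `system.numbers` взята только для иллюстрации; формат `PrettyCompact` описан ниже):

```sql
SELECT number FROM system.numbers LIMIT 3 FORMAT PrettyCompact
```

```
┌─number─┐
│      0 │
│      1 │
│      2 │
└────────┘
```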
@@ -283,6 +304,7 @@ Extremes: │ 2014-03-23 │ 1406958 │ └────────────┴─────────┘ ``` + ## PrettyCompact @@ -293,7 +315,7 @@ Extremes: ## PrettyCompactMonoBlock -Отличается от [PrettyCompact](formats.md#prettycompact) тем, что строки (до 10 000 штук) буферизуются и затем выводятся в виде одной таблицы, а не по блокам. +Отличается от [PrettyCompact](#prettycompact) тем, что строки (до 10 000 штук) буферизуются и затем выводятся в виде одной таблицы, а не по блокам. ## PrettyNoEscapes @@ -319,7 +341,7 @@ watch -n1 "clickhouse-client --query='SELECT * FROM system.events FORMAT PrettyC ## PrettySpace -Отличается от [PrettyCompact](formats.md#prettycompact) тем, что вместо сетки используется пустое пространство (пробелы). +Отличается от [PrettyCompact](#prettycompact) тем, что вместо сетки используется пустое пространство (пробелы). ## RowBinary @@ -334,12 +356,44 @@ String представлены как длина в формате varint (unsi FixedString представлены просто как последовательность байт. Array представлены как длина в формате varint (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), а затем элементы массива, подряд. + +Для поддержки [NULL](../query_language/syntax.md#null-literal) перед каждым значением типа [Nullable](../data_types/nullable.md#data_type-nullable) в строке добавлен дополнительный байт, который содержит 1 или 0. Если 1, то значение — `NULL` и этот байт трактуется как отдельное значение. Если 0, то после байта идёт не `NULL`-значение. + ## TabSeparated В TabSeparated формате данные пишутся по строкам. Каждая строчка содержит значения, разделённые табами. После каждого значения идёт таб, кроме последнего значения в строке, после которого идёт перевод строки. Везде подразумеваются исключительно unix-переводы строк. Последняя строка также обязана содержать перевод строки на конце. Значения пишутся в текстовом виде, без обрамляющих кавычек, с экранированием служебных символов. +Этот формат также доступен под именем `TSV`. + +Формат `TabSeparated` удобен для обработки данных произвольными программами и скриптами. Он используется по умолчанию в HTTP-интерфейсе, а также в batch-режиме клиента командной строки. Также формат позволяет переносить данные между разными СУБД. Например, вы можете получить дамп из MySQL и загрузить его в ClickHouse, или наоборот. + +Формат `TabSeparated` поддерживает вывод тотальных значений (при использовании WITH TOTALS) и экстремальных значений (при настройке extremes выставленной в 1). В этих случаях, после основных данных выводятся тотальные значения, и экстремальные значения. Основной результат, тотальные значения и экстремальные значения, отделяются друг от друга пустой строкой. Пример: + +```sql +SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated +``` + +```text +2014-03-17 1406958 +2014-03-18 1383658 +2014-03-19 1405797 +2014-03-20 1353623 +2014-03-21 1245779 +2014-03-22 1031592 +2014-03-23 1046491 + +0000-00-00 8873898 + +2014-03-17 1031592 +2014-03-23 1406958 +``` + + + +## Форматирование данных + Целые числа пишутся в десятичной форме. Числа могут содержать лишний символ "+" в начале (игнорируется при парсинге, а при форматировании не пишется). Неотрицательные числа не могут содержать знак отрицания. При чтении допустим парсинг пустой строки, как числа ноль, или (для знаковых типов) строки, состоящей из одного минуса, как числа ноль. Числа, не помещающиеся в соответствующий тип данных, могут парситься, как некоторое другое число, без сообщения об ошибке.
Числа с плавающей запятой пишутся в десятичной форме. При этом, десятичный разделитель - точка. Поддерживается экспоненциальная запись, а также inf, +inf, -inf, nan. Запись числа с плавающей запятой может начинаться или заканчиваться на десятичную точку. @@ -370,30 +424,8 @@ world Массивы форматируются в виде списка значений через запятую в квадратных скобках. Элементы массива - числа форматируются как обычно, а даты, даты-с-временем и строки - в одинарных кавычках с такими же правилами экранирования, как указано выше. -Формат TabSeparated удобен для обработки данных произвольными программами и скриптами. Он используется по умолчанию в HTTP-интерфейсе, а также в batch-режиме клиента командной строки. Также формат позволяет переносить данные между разными СУБД. Например, вы можете получить дамп из MySQL и загрузить его в ClickHouse, или наоборот. +[NULL](../query_language/syntax.md#null-literal) форматируется в виде `\N`. -Формат TabSeparated поддерживает вывод тотальных значений (при использовании WITH TOTALS) и экстремальных значений (при настройке extremes выставленной в 1). В этих случаях, после основных данных выводятся тотальные значения, и экстремальные значения. Основной результат, тотальные значения и экстремальные значения, отделяются друг от друга пустой строкой. Пример: - -```sql -SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated`` -``` - -```text -2014-03-17 1406958 -2014-03-18 1383658 -2014-03-19 1405797 -2014-03-20 1353623 -2014-03-21 1245779 -2014-03-22 1031592 -2014-03-23 1046491 - -0000-00-00 8873898 - -2014-03-17 1031592 -2014-03-23 1406958 -``` - -Этот формат также доступен под именем `TSV`. ## TabSeparatedRaw @@ -438,26 +470,52 @@ SearchPhrase=дизайн штор count()=1064 SearchPhrase=баку count()=1000 ``` +[NULL](../query_language/syntax.md#null-literal) форматируется в виде `\N`. + +```sql +SELECT * FROM t_null FORMAT TSKV +``` +``` +x=1 y=\N +``` + При большом количестве маленьких столбцов, этот формат существенно неэффективен, и обычно нет причин его использовать. Он реализован, так как используется в некоторых отделах Яндекса. Поддерживается как вывод, так и парсинг данных в этом формате. При парсинге, поддерживается расположение значений разных столбцов в произвольном порядке. Допустимо отсутствие некоторых значений - тогда они воспринимаются как равные значениям по умолчанию. При этом, в качестве значений по умолчанию используются нули, пустые строки и не поддерживаются сложные значения по умолчанию, которые могут быть заданы в таблице. При парсинге, в качестве дополнительного поля, может присутствовать `tskv` без знака равенства и без значения. Это поле игнорируется. - + ## Values -Выводит каждую строку в скобках. Строки разделены запятыми. После последней строки запятой нет. Значения внутри скобок также разделены запятыми. Числа выводятся в десятичном виде без кавычек. Массивы выводятся в квадратных скобках. Строки, даты, даты-с-временем выводятся в кавычках. Правила экранирования и особенности парсинга аналогичны формату TabSeparated. При форматировании, лишние пробелы не ставятся, а при парсинге - допустимы и пропускаются (за исключением пробелов внутри значений типа массив, которые недопустимы). +Выводит каждую строку в скобках. Строки разделены запятыми. После последней строки запятой нет. Значения внутри скобок также разделены запятыми. Числа выводятся в десятичном виде без кавычек. Массивы выводятся в квадратных скобках. Строки, даты, даты-с-временем выводятся в кавычках. 
Правила экранирования и особенности парсинга аналогичны формату [TabSeparated](#tabseparated). При форматировании, лишние пробелы не ставятся, а при парсинге - допустимы и пропускаются (за исключением пробелов внутри значений типа массив, которые недопустимы). [NULL](../query_language/syntax.md#null-literal) представляется как `NULL`. Минимальный набор символов, которые вам необходимо экранировать при передаче в Values формате: одинарная кавычка и обратный слеш. Именно этот формат используется в запросе `INSERT INTO t VALUES ...`, но вы также можете использовать его для форматирования результатов запросов. + ## Vertical Выводит каждое значение на отдельной строке, с указанием имени столбца. Формат удобно использовать для вывода одной-нескольких строк, если каждая строка состоит из большого количества столбцов. + +[NULL](../query_language/syntax.md#null-literal) выводится как `ᴺᵁᴸᴸ`. + +Пример: + +```sql +SELECT * FROM t_null FORMAT Vertical +``` +```
+Row 1:
+──────
+x: 1
+y: ᴺᵁᴸᴸ
+``` + Этот формат подходит только для вывода результата выполнения запроса, но не для парсинга (приёма данных для вставки в таблицу). + ## VerticalRaw diff --git a/docs/ru/interfaces/jdbc.md index 4cdf4a5769f..90051ad464f 100644 --- a/docs/ru/interfaces/jdbc.md +++ b/docs/ru/interfaces/jdbc.md @@ -1,3 +1,4 @@ # JDBC-драйвер -Для ClickHouse существует официальный JDBC драйвер. Смотрите [здесь](https://github.com/yandex/clickhouse-jdbc) . +- [Официальный драйвер](https://github.com/yandex/clickhouse-jdbc). +- Драйвер от сторонней организации [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC). diff --git a/docs/ru/interfaces/third-party_gui.md index 676b58a7ba0..a06a6f3bc2e 100644 --- a/docs/ru/interfaces/third-party_gui.md +++ b/docs/ru/interfaces/third-party_gui.md @@ -20,13 +20,18 @@ Основные возможности: -- Создание запросов. +- Построение запросов с подсветкой синтаксиса. Просмотр ответа в табличном или JSON представлении. +- Экспортирование результатов запроса в формате CSV или JSON. +- Список процессов с описанием. Режим записи. Возможность остановки (`KILL`) процесса. +- Граф базы данных. Показывает все таблицы и их столбцы с дополнительной информацией. +- Быстрый просмотр размера столбца. +- Конфигурирование сервера. Планируется разработка следующих возможностей: - Управление базами. - Управление пользователями. -- Управление кластером. - Анализ данных в режиме реального времени. - Мониторинг кластера. +- Управление кластером. - Мониторинг реплицированных и Kafka таблиц. diff --git a/docs/ru/introduction/distinctive_features.md index 031a5c7f6bb..c85d464222b 100644 --- a/docs/ru/introduction/distinctive_features.md +++ b/docs/ru/introduction/distinctive_features.md @@ -2,23 +2,23 @@ ## По-настоящему столбцовая СУБД -В по-настоящему столбцовой СУБД рядом со значениями не хранится никакого "мусора". Например, должны поддерживаться значения постоянной длины, чтобы не хранить рядом со значениями типа "число" их длины. Для примера, миллиард значений типа UInt8 должен действительно занимать в несжатом виде около 1GB, иначе это сильно ударит по эффективности использования CPU. Очень важно хранить данные компактно (без "мусора") в том числе в несжатом виде, так как скорость разжатия (использование CPU) зависит, в основном, от объёма несжатых данных. +В по-настоящему столбцовой СУБД рядом со значениями не хранится никаких лишних данных.
Например, должны поддерживаться значения постоянной длины, чтобы не хранить рядом со значениями типа "число" их длины. Для примера, миллиард значений типа UInt8 должен действительно занимать в несжатом виде около 1GB, иначе это сильно ударит по эффективности использования CPU. Очень важно хранить данные компактно (без "мусора") в том числе в несжатом виде, так как скорость разжатия (использование CPU) зависит, в основном, от объёма несжатых данных.

-Этот пункт пришлось выделить, так как существуют системы, которые могут хранить значения отдельных столбцов по отдельности, но не могут эффективно выполнять аналитические запросы в силу оптимизации под другой сценарий работы. Примеры: HBase, BigTable, Cassandra, HyperTable. В этих системах вы получите throughput в районе сотен тысяч строк в секунду, но не сотен миллионов строк в секунду.
+Этот пункт пришлось выделить, так как существуют системы, которые могут хранить значения отдельных столбцов по отдельности, но не могут эффективно выполнять аналитические запросы в силу оптимизации под другой сценарий работы. Примеры: HBase, BigTable, Cassandra, HyperTable. В этих системах вы получите пропускную способность в районе сотен тысяч строк в секунду, но не сотен миллионов строк в секунду.

-Также стоит заметить, что ClickHouse является СУБД, а не одной базой данных. То есть, ClickHouse позволяет создавать таблицы и базы данных в runtime, загружать данные и выполнять запросы без переконфигурирования и перезапуска сервера.
+Также стоит заметить, что ClickHouse является системой управления базами данных, а не одной базой данных. То есть, ClickHouse позволяет создавать таблицы и базы данных в runtime, загружать данные и выполнять запросы без переконфигурирования и перезапуска сервера.

## Сжатие данных

-Некоторые столбцовые СУБД (InfiniDB CE, MonetDB) не используют сжатие данных. Но сжатие данных действительно серьёзно увеличивает производительность.
+Некоторые столбцовые СУБД (InfiniDB CE, MonetDB) не используют сжатие данных. Однако сжатие данных действительно играет ключевую роль в достижении отличной производительности.

## Хранение данных на диске

-Многие столбцовые СУБД (SAP HANA, Google PowerDrill) могут работать только в оперативке. Но оперативки (даже на тысячах серверах) слишком мало для хранения всех хитов и визитов в Яндекс.Метрике.
+Многие столбцовые СУБД (SAP HANA, Google PowerDrill) могут работать только в оперативной памяти. Такой подход стимулирует выделять больший бюджет на оборудование, чем фактически требуется для анализа в реальном времени. ClickHouse спроектирован для работы на обычных жестких дисках, что обеспечивает низкую стоимость хранения на гигабайт данных, но SSD и дополнительная оперативная память тоже полноценно используются, если доступны.

## Параллельная обработка запроса на многих процессорных ядрах

-Большие запросы естественным образом распараллеливаются.
+Большие запросы естественным образом распараллеливаются, используя все необходимые ресурсы из доступных на сервере.
+ClickHouse поддерживает декларативный язык запросов на основе SQL, во многих случаях совпадающий со стандартом SQL.
+Поддерживаются GROUP BY, ORDER BY, подзапросы в секциях FROM, IN, JOIN, а также скалярные подзапросы.
+Зависимые подзапросы и оконные функции не поддерживаются.

## Векторный движок

@@ -41,21 +39,24 @@

ClickHouse поддерживает таблицы с первичным ключом. Для того, чтобы можно было быстро выполнять запросы по диапазону первичного ключа, данные инкрементально сортируются с помощью merge дерева. За счёт этого, поддерживается постоянное добавление данных в таблицу. Блокировки при добавлении данных отсутствуют.

-## Наличие индексов
+## Наличие индекса

-Наличие первичного ключа позволяет, например, вынимать данные для конкретных клиентов (счётчиков Метрики), для заданного диапазона времени, с низкими задержками - менее десятков миллисекунд.
+Физическая сортировка данных по первичному ключу позволяет получать данные для конкретных его значений или их диапазонов с низкими задержками - менее десятков миллисекунд.

## Подходит для онлайн запросов

-Это позволяет использовать систему в качестве бэкенда для веб-интерфейса. Низкие задержки позволяют не откладывать выполнение запроса, а выполнять его в момент загрузки страницы интерфейса Яндекс.Метрики. То есть, в режиме онлайн.
+Низкие задержки позволяют не откладывать выполнение запроса и не подготавливать ответ заранее, а выполнять его именно в момент загрузки страницы пользовательского интерфейса. То есть, в режиме онлайн.

## Поддержка приближённых вычислений

+ClickHouse предоставляет различные способы разменять точность вычислений на производительность:
+
1. Система содержит агрегатные функции для приближённого вычисления количества различных значений, медианы и квантилей.
2. Поддерживается возможность выполнить запрос на основе части (выборки) данных и получить приближённый результат. При этом, с диска будет считано пропорционально меньше данных.
3. Поддерживается возможность выполнить агрегацию не для всех ключей, а для ограниченного количества первых попавшихся ключей. При выполнении некоторых условий на распределение ключей в данных, это позволяет получить достаточно точный результат с использованием меньшего количества ресурсов.

-## Репликация данных, поддержка целостности данных на репликах
+## Репликация данных и поддержка целостности
+
+Используется асинхронная multimaster репликация. После записи на любую доступную реплику, данные распространяются на все остальные реплики в фоне. Система поддерживает полную идентичность данных на разных репликах. Восстановление после большинства сбоев осуществляется автоматически, а в сложных случаях — полуавтоматически.

-Используется асинхронная multimaster репликация. После записи на любую доступную реплику, данные распространяются на все остальные реплики. Система поддерживает полную идентичность данных на разных репликах. Восстановление после сбоя осуществляется автоматически, а в сложных случаях - "по кнопке".

Подробнее смотрите раздел [Репликация данных](../operations/table_engines/replication.md#table_engines-replication).
diff --git a/docs/ru/introduction/features_considered_disadvantages.md b/docs/ru/introduction/features_considered_disadvantages.md
index c26272f4b6c..b7ac877cc32 100644
--- a/docs/ru/introduction/features_considered_disadvantages.md
+++ b/docs/ru/introduction/features_considered_disadvantages.md
@@ -1,6 +1,6 @@
# Особенности ClickHouse, которые могут считаться недостатками

-1. Отсутствие транзакций.
-2.
Необходимо, чтобы результат выполнения запроса, в случае агрегации, помещался в оперативку на одном сервере. Объём исходных данных для запроса, при этом, может быть сколь угодно большим.
-3. Отсутствие полноценной реализации UPDATE/DELETE.
-
+1. Отсутствие полноценных транзакций.
+2. Возможность изменять или удалять ранее записанные данные с низкими задержками и высокой частотой запросов не предоставляется. Есть массовое удаление данных для очистки более не нужных данных или для соответствия [GDPR](https://gdpr-info.eu). Массовое изменение данных находится в разработке (на момент июля 2018).
+3. Разреженный индекс делает ClickHouse плохо пригодным для точечных чтений одиночных строк по ключу.
diff --git a/docs/ru/introduction/index.md b/docs/ru/introduction/index.md
deleted file mode 100644
index ee863e07c73..00000000000
--- a/docs/ru/introduction/index.md
+++ /dev/null
@@ -1 +0,0 @@
-# Введение
diff --git a/docs/ru/introduction/performance.md b/docs/ru/introduction/performance.md
index f513881dcfc..95e1d1cd008 100644
--- a/docs/ru/introduction/performance.md
+++ b/docs/ru/introduction/performance.md
@@ -1,10 +1,12 @@
# Производительность

-По результатам внутреннего тестирования, ClickHouse обладает наиболее высокой производительностью (как наиболее высоким throughput на длинных запросах, так и наиболее низкой latency на коротких запросах), при соответствующем сценарии работы, среди доступных для тестирования систем подобного класса. Результаты тестирования можно посмотреть на отдельной странице.
+По результатам внутреннего тестирования в Яндексе, ClickHouse обладает наиболее высокой производительностью (как наиболее высокой пропускной способностью на длинных запросах, так и наиболее низкой задержкой на коротких запросах), при соответствующем сценарии работы, среди доступных для тестирования систем подобного класса. Результаты тестирования можно посмотреть на [отдельной странице](https://clickhouse.yandex/benchmark.html).
+
+Также это подтверждают многочисленные независимые бенчмарки. Их несложно найти в Интернете самостоятельно, либо можно воспользоваться [небольшой коллекцией ссылок по теме](https://clickhouse.yandex/#independent-benchmarks).

## Пропускная способность при обработке одного большого запроса

-Пропускную способность можно измерять в строчках в секунду и в мегабайтах в секунду. При условии, что данные помещаются в page cache, не слишком сложный запрос обрабатывается на современном железе со скоростью около 2-10 GB/sec. несжатых данных на одном сервере (в простейшем случае скорость может достигать 30 GB/sec). Если данные не помещаются в page cache, то скорость работы зависит от скорости дисковой подсистемы и коэффициента сжатия данных. Например, если дисковая подсистема позволяет читать данные со скоростью 400 MB/sec., а коэффициент сжатия данных составляет 3, то скорость будет около 1.2GB/sec. Для получения скорости в строчках в секунду, следует поделить скорость в байтах в секунду на суммарный размер используемых в запросе столбцов. Например, если вынимаются столбцы на 10 байт, то скорость будет в районе 100-200 млн. строчек в секунду.
+Пропускную способность можно измерять в строчках в секунду и в мегабайтах в секунду. При условии, что данные помещаются в page cache, не слишком сложный запрос обрабатывается на современном железе со скоростью около 2-10 GB/sec. несжатых данных на одном сервере (в простейшем случае скорость может достигать 30 GB/sec).
Если данные не помещаются в page cache, то скорость работы зависит от скорости дисковой подсистемы и коэффициента сжатия данных. Например, если дисковая подсистема позволяет читать данные со скоростью 400 MB/sec., а коэффициент сжатия данных составляет 3, то скорость будет около 1.2GB/sec. Для получения скорости в строчках в секунду, следует поделить скорость в байтах в секунду на суммарный размер используемых в запросе столбцов. Например, если вынимаются столбцы на 10 байт, то скорость будет в районе 100-200 млн. строк в секунду. При распределённой обработке запроса, скорость обработки запроса растёт почти линейно, но только при условии, что в результате агрегации или при сортировке получается не слишком большое множество строчек. @@ -12,7 +14,7 @@ Если запрос использует первичный ключ, и выбирает для обработки не слишком большое количество строчек (сотни тысяч), и использует не слишком большое количество столбцов, то вы можете рассчитывать на latency менее 50 миллисекунд (от единиц миллисекунд в лучшем случае), при условии, что данные помещаются в page cache. Иначе latency вычисляется из количества seek-ов. Если вы используйте вращающиеся диски, то на не слишком сильно нагруженной системе, latency вычисляется по формуле: seek time (10 мс.) \* количество столбцов в запросе \* количество кусков с данными. -## Пропускная способность при обработке большого количества коротких запросов +## Пропускная способность при обработке многочисленных коротких запросов При тех же условиях, ClickHouse может обработать несколько сотен (до нескольких тысяч в лучшем случае) запросов в секунду на одном сервере. Так как такой сценарий работы не является типичным для аналитических СУБД, рекомендуется рассчитывать не более чем на 100 запросов в секунду. diff --git a/docs/ru/introduction/possible_silly_questions.md b/docs/ru/introduction/possible_silly_questions.md deleted file mode 100644 index fc2eb6f24e4..00000000000 --- a/docs/ru/introduction/possible_silly_questions.md +++ /dev/null @@ -1,14 +0,0 @@ -# Возможные глупые вопросы - -## Почему бы не использовать системы типа MapReduce? - -Системами типа map-reduce будем называть системы распределённых вычислений, в которых операция reduce сделана на основе распределённой сортировки. Таким образом, к ним относятся Hadoop и YT (YT является внутренней разработкой Яндекса). - -Такие системы не подходят для онлайн запросов в силу слишком большой latency. То есть, не могут быть использованы в качестве бэкенда для веб-интерфейса. -Такие системы не подходят для обновления данных в реальном времени. -Распределённая сортировка не является оптимальным способом выполнения операции reduce, если результат выполнения операции и все промежуточные результаты, при их наличии, помещаются в оперативку на одном сервере, как обычно бывает в запросах, выполняющихся в режиме онлайн. В таком случае, оптимальным способом выполнения операции reduce является хэш-таблица. Частым способом оптимизации map-reduce задач является предагрегация (частичный reduce) с использованием хэш-таблицы в оперативке. Эта оптимизация делается пользователем в ручном режиме. -Распределённая сортировка является основной причиной тормозов при выполнении несложных map-reduce задач. - -Системы типа map-reduce позволяют выполнять произвольный код на кластере. Но для OLAP задач лучше подходит декларативный язык запросов, который позволяет быстро проводить исследования. Для примера, для Hadoop существует Hive и Pig. Также смотрите Cloudera Impala, Shark (устаревший) для Spark а также Spark SQL, Presto, Apache Drill. 
Впрочем, производительность при выполнении таких задач является сильно неоптимальной по сравнению со специализированными системами, а сравнительно высокая latency не позволяет использовать эти системы в качестве бэкенда для веб-интерфейса.
-
-YT позволяет хранить группы столбцов по отдельности. Но YT нельзя назвать по-настоящему столбцовой системой, так как в системе отсутствуют типы данных постоянной длины (чтобы можно было эффективно хранить числа без "мусора"), а также за счёт отсутствия векторного движка. Задачи в YT выполняются с помощью произвольного кода в режиме streaming, то есть, не могут быть достаточно оптимизированы (до сотен миллионов строк в секунду на один сервер). В YT в 2014-2016 годах находится в разработке функциональность "динамических сортированных таблиц" с использованием Merge Tree, строгой типизацией значений и языком запросов типа SQL. Динамические сортированные таблицы не подходят для OLAP задач, так как данные в них хранятся по строкам. Разработка языка запросов в YT всё ещё находится в зачаточной стадии, что не позволяет ориентироваться на эту функциональность. Разработчики YT рассматривают динамические сортированные таблицы для применения в OLTP и Key-Value сценариях работы.
diff --git a/docs/ru/introduction/ya_metrika_task.md b/docs/ru/introduction/ya_metrika_task.md
index 6e23491a465..44bf683c0b4 100644
--- a/docs/ru/introduction/ya_metrika_task.md
+++ b/docs/ru/introduction/ya_metrika_task.md
@@ -1,9 +1,10 @@
# Постановка задачи в Яндекс.Метрике

-ClickHouse на данный момент обеспечивает работу [Яндекс.Метрики](https://metrika.yandex.ru/), [второй крупнейшей в мире](http://w3techs.com/technologies/overview/traffic_analysis/all) платформы для веб аналитики. При более 13 триллионах записей в базе данных и более 20 миллиардах событий в сутки, ClickHouse позволяет генерировать индивидуально настроенные отчёты на лету напрямую из неагрегированных данных.
+ClickHouse изначально разрабатывался для обеспечения работы [Яндекс.Метрики](https://metrika.yandex.ru/), [второй крупнейшей в мире](http://w3techs.com/technologies/overview/traffic_analysis/all) платформы для веб аналитики, и продолжает быть её ключевым компонентом. При более 13 триллионах записей в базе данных и более 20 миллиардах событий в сутки, ClickHouse позволяет генерировать индивидуально настроенные отчёты на лету напрямую из неагрегированных данных. Данная статья вкратце демонстрирует, какие цели исторически стояли перед ClickHouse на ранних этапах его развития.

-Нужно получать произвольные отчёты на основе хитов и визитов, с произвольными сегментами, задаваемыми пользователем. Данные для отчётов обновляются в реальном времени. Запросы должны выполняться сразу (в режиме онлайн). Отчёты должно быть возможно строить за произвольный период. Требуется вычислять сложные агрегаты типа количества уникальных посетителей.
-На данный момент (апрель 2014), каждый день в Яндекс.Метрику поступает около 12 миллиардов событий (хитов и кликов мыши). Все эти события должны быть сохранены для возможности строить произвольные отчёты. Один запрос может потребовать просканировать сотни миллионов строк за время не более нескольких секунд, или миллионы строк за время не более нескольких сотен миллисекунд.
+Яндекс.Метрика на лету строит индивидуальные отчёты на основе хитов и визитов, с произвольным периодом и сегментами, задаваемыми конечным пользователем. Часто требуется построение сложных агрегатов, например числа уникальных пользователей. Новые данные для построения отчета поступают в реальном времени.
+ +На апрель 2014, в Яндекс.Метрику поступало около 12 миллиардов событий (показов страниц и кликов мыши) ежедневно. Все эти события должны быть сохранены для возможности строить произвольные отчёты. Один запрос может потребовать просканировать миллионы строк за время не более нескольких сотен миллисекунд, или сотни миллионов строк за время не более нескольких секунд. ## Использование в Яндекс.Метрике и других отделах Яндекса diff --git a/docs/ru/operations/server_settings/settings.md b/docs/ru/operations/server_settings/settings.md index e15782a34c4..ed1ed92da9f 100644 --- a/docs/ru/operations/server_settings/settings.md +++ b/docs/ru/operations/server_settings/settings.md @@ -22,11 +22,8 @@ ClickHouse перезагружает встроенные словари с з Настройки компрессии данных. -
- -Не используйте, если вы только начали работать с ClickHouse. - -
+!!! warning "Внимание" + Лучше не использовать, если вы только начали работать с ClickHouse. Общий вид конфигурации: @@ -335,7 +332,7 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat 1
syslog.remote:10514
- myhost.local + myhost.local LOG_LOCAL6 syslog
@@ -346,12 +343,12 @@

- user_syslog - обязательная настройка, если требуется запись в syslog
- address - хост[:порт] демона syslogd. Если не указан, используется локальный
- hostname - опционально, имя хоста, с которого отсылаются логи
-- facility - [категория syslog](https://en.wikipedia.org/wiki/Syslog#Facility),
-записанная в верхнем регистре, с префиксом "LOG_": (``LOG_USER``, ``LOG_DAEMON``, ``LOG_LOCAL3`` и прочие).
+- facility - [категория syslog](https://en.wikipedia.org/wiki/Syslog#Facility),
+записанная в верхнем регистре, с префиксом "LOG_": (``LOG_USER``, ``LOG_DAEMON``, ``LOG_LOCAL3`` и прочие).
Значения по умолчанию: при указанном ``address`` - ``LOG_USER``, иначе - ``LOG_DAEMON``
- format - формат сообщений. Возможные значения - ``bsd`` и ``syslog``
-
+

## macros

@@ -564,11 +561,8 @@

Путь к каталогу с данными.

-
- -Завершающий слеш обязателен. - -
+!!! warning "Обратите внимание" + Завершающий слеш обязателен. **Пример** @@ -655,11 +649,8 @@ ClickHouse проверит условия `min_part_size` и `min_part_size_rat Путь ко временным данным для обработки больших запросов. -
- -Завершающий слеш обязателен. - -
+!!! warning "Обратите внимание" + Завершающий слеш обязателен. **Пример** diff --git a/docs/ru/operations/settings/index.md b/docs/ru/operations/settings/index.md index f6da2dcccae..3de41c00b3f 100644 --- a/docs/ru/operations/settings/index.md +++ b/docs/ru/operations/settings/index.md @@ -5,18 +5,19 @@ Все настройки, описанные ниже, могут быть заданы несколькими способами. Настройки задаются послойно, т.е. каждый следующий слой перезаписывает предыдущие настройки. -Способы задания настроек, упорядоченные по их приоритету: +Способы задания настроек, упорядоченные по приоритету: -- Настройки в конфигурационных файлах сервера. +- Настройки в конфигурационном файле сервера `users.xml`. - Задаются через профили пользователей. + Устанавливаются в элементе ``. -- Для сессии. +- Настройки для сессии. Из консольного клиента ClickHouse в интерактивном режиме отправьте запрос `SET setting=value`. Аналогично можно использовать ClickHouse-сессии в HTTP-протоколе, для этого необходимо указывать HTTP-праметр `session_id`. -- Для запроса. +- Настройки для запроса. + - При запуске консольного клиента ClickHouse в неинтерактивном режиме установите параметр запуска `--setting=value`. - При использовании HTTP API передавайте cgi-параметры (`URL?setting_1=value&setting_2=value...`). diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 727189b5e99..8d8d20a4928 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -12,7 +12,7 @@ ClickHouse применяет настройку в тех случаях, ко - Только подзапросы для IN, JOIN. - Только если в секции FROM используется распределённая таблица, содержащая более одного шарда. -- Если подзапрос касается распределенной таблицы, содержащей более одного шарда, +- Если подзапрос касается распределенной таблицы, содержащей более одного шарда, - Не используется в случае табличной функции [remote](../../query_language/table_functions/remote.md#table_functions-remote). Возможные значения: @@ -20,7 +20,7 @@ ClickHouse применяет настройку в тех случаях, ко - `deny` - (по умолчанию) запрещает использование таких подзапросов (При попытке использование вернет исключение "Double-distributed IN/JOIN subqueries is denied"); - `local` - заменит базу данных и таблицу в подзапросе на локальные для конечного сервера (шарда), оставив обычный `IN` / `JOIN`; - `global` - заменит запрос `IN` / `JOIN` на `GLOBAL IN` / `GLOBAL JOIN`; -- `allow` - разрешает использование таких подзапросов. +- `allow` - разрешает использование таких подзапросов. @@ -347,3 +347,11 @@ ClickHouse применяет настройку в тех случаях, ко ## format_csv_delimiter Символ, интерпретируемый как разделитель в данных формата CSV. По умолчанию — `,`. + + + +## join_use_nulls {: #settings-join_use_nulls} + +Влияет на поведение [JOIN](../../query_language/select.md#query_language-join). + +При `join_use_nulls=1` `JOIN` ведёт себя как в стандартном SQL, т.е. если при слиянии возникают пустые ячейки, то тип соответствующего поля преобразуется к [Nullable](../../data_types/nullable.md#data_type-nullable), а пустые ячейки заполняются значениями [NULL](../../query_language/syntax.md#null-literal). diff --git a/docs/ru/operations/system_tables.md b/docs/ru/operations/system_tables.md index 3d8f3032d10..84fcb14092b 100644 --- a/docs/ru/operations/system_tables.md +++ b/docs/ru/operations/system_tables.md @@ -7,13 +7,14 @@ Системные таблицы расположены в базе данных system. 
-# system.asynchronous_metrics +## system.asynchronous_metrics Содержат метрики, используемые для профилирования и мониторинга. Обычно отражают количество событий, происходящих в данный момент в системе, или ресурсов, суммарно потребляемых системой. Пример: количество запросов типа SELECT, исполняемых в текущий момент; количество потребляемой памяти. `system.asynchronous_metrics` и `system.metrics` отличаются набором и способом вычисления метрик. -# system.clusters + +## system.clusters Содержит информацию о доступных в конфигурационном файле кластерах и серверах, которые в них входят. Столбцы: @@ -28,7 +29,7 @@ host_address String - IP-адрес хоста, полученный из DNS port UInt16 - порт, на который обращаться для соединения с сервером user String - имя пользователя, которого использовать для соединения с сервером ``` -# system.columns +## system.columns Содержит информацию о столбцах всех таблиц. С помощью этой таблицы можно получить информацию аналогично запросу `DESCRIBE TABLE`, но для многих таблиц сразу. @@ -41,12 +42,12 @@ type String - тип столбца default_type String - тип (DEFAULT, MATERIALIZED, ALIAS) выражения для значения по умолчанию, или пустая строка, если оно не описано default_expression String - выражение для значения по умолчанию, или пустая строка, если оно не описано ``` -# system.databases +## system.databases Таблица содержит один столбец name типа String - имя базы данных. Для каждой базы данных, о которой знает сервер, будет присутствовать соответствующая запись в таблице. Эта системная таблица используется для реализации запроса `SHOW DATABASES`. -# system.dictionaries +## system.dictionaries Содержит информацию о внешних словарях. @@ -70,12 +71,12 @@ default_expression String - выражение для значения по ум Заметим, что количество оперативной памяти, которое использует словарь, не является пропорциональным количеству элементов, хранящихся в словаре. Так, для flat и cached словарей, все ячейки памяти выделяются заранее, независимо от реальной заполненности словаря. -# system.events +## system.events Содержит информацию о количестве произошедших в системе событий, для профилирования и мониторинга. Пример: количество обработанных запросов типа SELECT. Столбцы: event String - имя события, value UInt64 - количество. -# system.functions +## system.functions Содержит информацию об обычных и агрегатных функциях. @@ -83,7 +84,7 @@ default_expression String - выражение для значения по ум - `name` (`String`) – Имя функции. - `is_aggregate` (`UInt8`) – Признак, является ли функция агрегатной. -# system.merges +## system.merges Содержит информацию о производящихся прямо сейчас слияниях для таблиц семейства MergeTree. @@ -103,22 +104,23 @@ default_expression String - выражение для значения по ум - `rows_written UInt64` — Количество записанных строк. -# system.metrics -# system.numbers +## system.metrics +## system.numbers Таблица содержит один столбец с именем number типа UInt64, содержащим почти все натуральные числа, начиная с нуля. Эту таблицу можно использовать для тестов, а также если вам нужно сделать перебор. Чтения из этой таблицы не распараллеливаются. -# system.numbers_mt +## system.numbers_mt То же самое, что и system.numbers, но чтение распараллеливается. Числа могут возвращаться в произвольном порядке. Используется для тестов. -# system.one +## system.one Таблица содержит одну строку с одним столбцом dummy типа UInt8, содержащим значение 0. Эта таблица используется, если в SELECT запросе не указана секция FROM. 
То есть, это - аналог таблицы DUAL, которую можно найти в других СУБД. -# system.parts + +## system.parts Содержит информацию о кусках таблиц семейства [MergeTree](../operations/table_engines/mergetree.md#table_engines-mergetree). @@ -126,7 +128,13 @@ default_expression String - выражение для значения по ум Столбцы: -- partition (String) - Имя партиции. Формат YYYYMM. Что такое партиция можно узнать из описания запроса [ALTER](../query_language/alter.md#query_language_queries_alter). +- partition (String) - Имя партиции. Что такое партиция можно узнать из описания запроса [ALTER](../query_language/alter.md#query_language_queries_alter). + + Форматы: + + - `YYYYMM` для автоматической схемы партиционирования по месяцам. + - `any_string` при партиционировании вручную. + - name (String) - Имя куска. - active (UInt8) - Признак активности. Если кусок активен, то он используется таблице, в противном случает он будет удален. Неактивные куски остаются после слияний. - marks (UInt64) - Количество засечек. Чтобы получить примерное количество строк в куске, умножьте ``marks`` на гранулированность индекса (обычно 8192). @@ -146,7 +154,7 @@ default_expression String - выражение для значения по ум - database (String) - Имя базы данных. - table (String) - Имя таблицы. - engine (String) - Имя движка таблицы, без параметров. -# system.processes +## system.processes Эта системная таблица используется для реализации запроса `SHOW PROCESSLIST`. Столбцы: @@ -170,7 +178,7 @@ query String - текст запроса. В случае INSERT - query_id String - идентификатор запроса, если был задан. ``` -# system.replicas +## system.replicas Содержит информацию и статус для реплицируемых таблиц, расположенных на локальном сервере. Эту таблицу можно использовать для мониторинга. Таблица содержит по строчке для каждой Replicated\*-таблицы. @@ -294,7 +302,7 @@ WHERE ``` Если этот запрос ничего не возвращает - значит всё хорошо. -# system.settings +## system.settings Содержит информацию о настройках, используемых в данный момент. То есть, используемых для выполнения запроса, с помощью которого вы читаете из таблицы system.settings. @@ -323,15 +331,17 @@ WHERE changed │ max_memory_usage │ 10000000000 │ 1 │ └────────────────────────┴─────────────┴─────────┘ ``` -# system.tables +## system.tables Таблица содержит столбцы database, name, engine типа String. Также таблица содержит три виртуальных столбца: metadata_modification_time типа DateTime, create_table_query и engine_full типа String. Для каждой таблицы, о которой знает сервер, будет присутствовать соответствующая запись в таблице system.tables. Эта системная таблица используется для реализации запросов SHOW TABLES. -# system.zookeeper -Позволяет читать данные из ZooKeeper кластера, описанного в конфигурации. + +## system.zookeeper + +Таблицы не существует, если ZooKeeper не сконфигурирован. Позволяет читать данные из ZooKeeper кластера, описанного в конфигурации. В запросе обязательно в секции WHERE должно присутствовать условие на равенство path - путь в ZooKeeper, для детей которого вы хотите получить данные. Запрос `SELECT * FROM system.zookeeper WHERE path = '/clickhouse'` выведет данные по всем детям узла `/clickhouse`. 
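Например, содержимое узлов можно посмотреть так (путь `/clickhouse` взят из примера выше; здесь показаны только столбцы `name` и `value`, полный набор столбцов таблицы зависит от версии сервера):

```sql
SELECT name, value
FROM system.zookeeper
WHERE path = '/clickhouse'
```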
diff --git a/docs/ru/operations/table_engines/aggregatingmergetree.md b/docs/ru/operations/table_engines/aggregatingmergetree.md index f14bb679cd0..cff9116988d 100644 --- a/docs/ru/operations/table_engines/aggregatingmergetree.md +++ b/docs/ru/operations/table_engines/aggregatingmergetree.md @@ -28,8 +28,8 @@ CREATE TABLE t Значение типа `AggregateFunction` нельзя вывести в Pretty-форматах. В других форматах, значения такого типа выводятся в виде implementation-specific бинарных данных. То есть, значения типа `AggregateFunction` не предназначены для вывода, сохранения в дамп. -Единственную полезную вещь, которую можно сделать со значениями типа `AggregateFunction` - это объединить состояния и получить результат, по сути - доагрегировать до конца. Для этого используются агрегатные функции с суффиксом Merge. -Пример: `uniqMerge(UserIDState), где UserIDState имеет тип AggregateFunction`. +Единственную полезную вещь, которую можно сделать со значениями типа `AggregateFunction` — это объединить состояния и получить результат, по сути — доагрегировать до конца. Для этого используются агрегатные функции с суффиксом Merge. +Пример: `uniqMerge(UserIDState)`, где `UserIDState` имеет тип `AggregateFunction`. То есть, агрегатная функция с суффиксом Merge берёт множество состояний, объединяет их, и возвращает готовый результат. Для примера, эти два запроса возвращают один и тот же результат: diff --git a/docs/ru/operations/table_engines/file.md b/docs/ru/operations/table_engines/file.md index aaa334c7806..a4672929d72 100644 --- a/docs/ru/operations/table_engines/file.md +++ b/docs/ru/operations/table_engines/file.md @@ -24,9 +24,8 @@ File(Format) Можно вручную создать в хранилище каталог таблицы, поместить туда файл, затем на сервере ClickHouse добавить ([ATTACH](../../query_language/misc.md#queries-attach)) информацию о таблице, соответствующей имени каталога и прочитать из файла данные. -
-Будьте аккуратны с этой функциональностью, поскольку сервер ClickHouse не отслеживает внешние изменения данных. Если в файл будет производиться запись одновременно со стороны сервера ClickHouse и с внешней стороны, то результат непредсказуем. -
+!!! warning
+    Будьте аккуратны с этой функциональностью, поскольку сервер ClickHouse не отслеживает внешние изменения данных. Если в файл будет производиться запись одновременно со стороны сервера ClickHouse и с внешней стороны, то результат непредсказуем.

**Пример:**

diff --git a/docs/ru/operations/table_engines/kafka.md b/docs/ru/operations/table_engines/kafka.md
index f368fae3860..a0f370df795 100644
--- a/docs/ru/operations/table_engines/kafka.md
+++ b/docs/ru/operations/table_engines/kafka.md
@@ -1,6 +1,6 @@
# Kafka

-Движок работает с [Apache Kafka](http://kafka.apache.org/).
+Движок работает с [Apache Kafka](http://kafka.apache.org/).

Kafka позволяет:

- Публиковать/подписываться на потоки данных.
- Организовать отказо-устойчивое хранилище.
- Обрабатывать потоки по мере их появления.

+Старый формат:
+
```
-Kafka(broker_list, topic_list, group_name, format[, schema, num_consumers])
+Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
+      [, kafka_row_delimiter, kafka_schema, kafka_num_consumers])
```

-Параметры:
+Новый формат:

-- `broker_list` - Перечень брокеров, разделенный запятыми (`localhost:9092`).
-- `topic_list` - Перечень необходимых топиков Kafka (`my_topic`).
-- `group_name` - Группа потребителя Kafka (`group1`). Отступы для чтения отслеживаются для каждой группы отдельно. Если необходимо, чтобы сообщения не повторялись на кластере, используйте везде одно имя группы.
-- `format` - Формат сообщений. Имеет те же обозначения, что выдает SQL-выражение `FORMAT`, например, `JSONEachRow`. Подробнее смотрите в разделе "Форматы".
-- `schema` - Опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap'n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`.
-- `num_consumers` - Количество потребителей (consumer) на таблицу. По умолчанию `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя.
+
+```
+Kafka SETTINGS
+  kafka_broker_list = 'localhost:9092',
+  kafka_topic_list = 'topic1,topic2',
+  kafka_group_name = 'group1',
+  kafka_format = 'JSONEachRow',
+  kafka_row_delimiter = '\n',
+  kafka_schema = '',
+  kafka_num_consumers = 2
+```

-Пример:
+Обязательные параметры:
+
+- `kafka_broker_list` - Перечень брокеров, разделенный запятыми (`localhost:9092`).
+- `kafka_topic_list` - Перечень необходимых топиков Kafka (`my_topic`).
+- `kafka_group_name` - Группа потребителя Kafka (`group1`). Отступы для чтения отслеживаются для каждой группы отдельно. Если необходимо, чтобы сообщения не повторялись на кластере, используйте везде одно имя группы.
+- `kafka_format` - Формат сообщений. Имеет те же обозначения, что выдает SQL-выражение `FORMAT`, например, `JSONEachRow`. Подробнее смотрите в разделе "Форматы".
+
+Опциональные параметры:
+
+- `kafka_row_delimiter` - Символ-разделитель записей (строк), которым завершается сообщение.
+- `kafka_schema` - Опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap'n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`.
+- `kafka_num_consumers` - Количество потребителей (consumer) на таблицу. По умолчанию `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна.
Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя.
+
+Примеры:

```sql
  CREATE TABLE queue (
    timestamp UInt64,
    level String,
    message String
  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');

  SELECT * FROM queue LIMIT 5;
+
+  CREATE TABLE queue2 (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092',
+                            kafka_topic_list = 'topic',
+                            kafka_group_name = 'group1',
+                            kafka_format = 'JSONEachRow',
+                            kafka_num_consumers = 4;
+
+  CREATE TABLE queue3 (
+    timestamp UInt64,
+    level String,
+    message String
+  ) ENGINE = Kafka('localhost:9092', 'topic', 'group1')
+              SETTINGS kafka_format = 'JSONEachRow',
+                       kafka_num_consumers = 4;
```

Полученные сообщения отслеживаются автоматически, поэтому из одной группы каждое сообщение считывается только один раз. Если необходимо получить данные дважды, то создайте копию таблицы с другим именем группы.

@@ -59,7 +97,7 @@ Kafka(broker_list, topic_list, group_name, format[, schema, num_consumers])
      level String,
      total UInt64
    ) ENGINE = SummingMergeTree(day, (day, level), 8192);
-
+
    CREATE MATERIALIZED VIEW consumer TO daily
      AS SELECT toDate(toDateTime(timestamp)) AS day, level, count() as total
      FROM queue GROUP BY day, level;
diff --git a/docs/ru/operations/table_engines/mysql.md b/docs/ru/operations/table_engines/mysql.md
index 5db09c25b71..e08edc4e180 100644
--- a/docs/ru/operations/table_engines/mysql.md
+++ b/docs/ru/operations/table_engines/mysql.md
@@ -7,9 +7,21 @@

Формат вызова:

```
-MySQL('host:port', 'database', 'table', 'user', 'password');
+MySQL('host:port', 'database', 'table', 'user', 'password'[, replace_query, 'on_duplicate_clause']);
```

+**Параметры вызова**
+
+- `host:port` — Адрес сервера MySQL.
+- `database` — Имя базы данных на сервере MySQL.
+- `table` — Имя таблицы.
+- `user` — Пользователь MySQL.
+- `password` — Пароль пользователя.
+- `replace_query` — Флаг, устанавливающий замену запроса `INSERT INTO` на `REPLACE INTO`. Если `replace_query=1`, то запрос заменяется.
+- `'on_duplicate_clause'` — Добавляет выражение `ON DUPLICATE KEY UPDATE 'on_duplicate_clause'` в запрос `INSERT`. Например, `impression = VALUES(impression) + impression`. Чтобы указать `'on_duplicate_clause'`, необходимо передать `0` в параметр `replace_query`. Если одновременно передать `replace_query = 1` и `'on_duplicate_clause'`, то ClickHouse сгенерирует исключение.
+
На данный момент простые условия `WHERE`, такие как `=, !=, >, >=, <, <=` будут выполняться на стороне сервера MySQL.

Остальные условия и ограничение выборки `LIMIT` будут выполнены в ClickHouse только после выполнения запроса к MySQL.
+
+Движок `MySQL` не поддерживает тип данных [Nullable](../../data_types/nullable.md#data_type-nullable), поэтому при чтении данных из таблиц MySQL `NULL` преобразуются в значения по умолчанию для заданного типа столбца, обычно это 0 или пустая строка.
diff --git a/docs/ru/operations/table_engines/summingmergetree.md b/docs/ru/operations/table_engines/summingmergetree.md
index 44b95a7123d..189aac06504 100644
--- a/docs/ru/operations/table_engines/summingmergetree.md
+++ b/docs/ru/operations/table_engines/summingmergetree.md
@@ -14,7 +14,7 @@ SummingMergeTree(EventDate, (OrderID, EventDate, BannerID, ...), 8192, (Shows, C

Явно заданные столбцы для суммирования (последний параметр - Shows, Clicks, Cost, ...).
При слиянии, для всех строчек с одинаковым значением первичного ключа, производится суммирование значений в указанных столбцах. Указанные столбцы также должны быть числовыми и не входить в первичный ключ.

-Если значения во всех таких столбцах оказались нулевыми, то строчка удаляется. (За исключением случаев, когда в куске данных не осталось бы ни одной строчки.)
+Если значения во всех таких столбцах оказались нулевыми, то строчка удаляется.

Для остальных столбцов, не входящих в первичный ключ, при слиянии выбирается первое попавшееся значение. Но для столбцов типа AggregateFunction выполняется агрегация согласно заданной функции, так что этот движок фактически ведёт себя как `AggregatingMergeTree`.

diff --git a/docs/ru/operations/table_engines/url.md b/docs/ru/operations/table_engines/url.md
new file mode 100644
index 00000000000..b3daae06169
--- /dev/null
+++ b/docs/ru/operations/table_engines/url.md
@@ -0,0 +1,74 @@
+
+
+# URL(URL, Format)
+
+Управляет данными на удаленном HTTP/HTTPS сервере. Данный движок похож
+на движок [`File`](./file.md#).
+
+## Использование движка в сервере ClickHouse
+
+`Format` должен быть таким, который ClickHouse может использовать в запросах
+`SELECT` и, если есть необходимость, `INSERT`. Полный список поддерживаемых форматов смотрите в
+разделе [Форматы](../../interfaces/formats.md#formats).
+
+`URL` должен соответствовать структуре Uniform Resource Locator. По указанному URL должен находиться сервер,
+работающий по протоколу HTTP или HTTPS. При этом не должно требоваться никаких
+дополнительных заголовков для получения ответа от сервера.
+
+Запросы `INSERT` и `SELECT` транслируются в `POST` и `GET` запросы
+соответственно. Для обработки `POST`-запросов удаленный сервер должен поддерживать
+[Chunked transfer encoding](https://ru.wikipedia.org/wiki/Chunked_transfer_encoding).
+
+**Пример:**
+
+**1.** Создадим на сервере таблицу `url_engine_table`:
+
+```sql
+CREATE TABLE url_engine_table (word String, value UInt64)
+ENGINE=URL('http://127.0.0.1:12345/', CSV)
+```
+
+**2.** Создадим простейший http-сервер стандартными средствами языка python3 и
+запустим его:
+
+```python3
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class CSVHTTPServer(BaseHTTPRequestHandler):
+    def do_GET(self):
+        self.send_response(200)
+        self.send_header('Content-type', 'text/csv')
+        self.end_headers()
+
+        self.wfile.write(bytes('Hello,1\nWorld,2\n', "utf-8"))
+
+if __name__ == "__main__":
+    server_address = ('127.0.0.1', 12345)
+    HTTPServer(server_address, CSVHTTPServer).serve_forever()
+```
+
+```bash
+python3 server.py
+```
+
+**3.** Запросим данные:
+
+```sql
+SELECT * FROM url_engine_table
+```
+
+```text
+┌─word──┬─value─┐
+│ Hello │ 1 │
+│ World │ 2 │
+└───────┴───────┘
+```
+
+## Особенности использования
+
+- Поддерживается многопоточное чтение и запись.
+- Не поддерживается:
+    - использование операций `ALTER` и `SELECT...SAMPLE`;
+    - индексы;
+    - репликация.
+
diff --git a/docs/ru/operations/tips.md b/docs/ru/operations/tips.md
index 315a8fb07fa..a1ddc9246e5 100644
--- a/docs/ru/operations/tips.md
+++ b/docs/ru/operations/tips.md
@@ -107,6 +107,10 @@ XFS также подходит, но не так тщательно проте

Лучше использовать свежую версию ZooKeeper, как минимум 3.4.9. Версия в стабильных дистрибутивах Linux может быть устаревшей.

+Никогда не используйте написанные вручную скрипты для переноса данных между разными ZooKeeper кластерами, потому что результат будет некорректным для sequential нод.
Никогда не используйте утилиту "zkcopy", по той же причине: https://github.com/ksprojects/zkcopy/issues/15 + +Если вы хотите разделить существующий ZooKeeper кластер на два, правильный способ - увеличить количество его реплик, а затем переконфигурировать его как два независимых кластера. + С настройками по умолчанию, ZooKeeper является бомбой замедленного действия: > Сервер ZooKeeper не будет удалять файлы со старыми снепшоты и логами при использовании конфигурации по умолчанию (см. autopurge), это является ответственностью оператора. diff --git a/docs/ru/operations/utils/clickhouse-local.md b/docs/ru/operations/utils/clickhouse-local.md index 06053e15a2f..60ab2b0a8e8 100644 --- a/docs/ru/operations/utils/clickhouse-local.md +++ b/docs/ru/operations/utils/clickhouse-local.md @@ -8,10 +8,8 @@ `clickhouse-local` при настройке по умолчанию не имеет доступа к данным, которыми управляет сервер ClickHouse, установленный на этом же хосте, однако можно подключить конфигурацию сервера с помощью ключа `--config-file`. -
-Мы не рекомендуем подключать серверную конфигурацию к `clickhouse-local`, поскольку данные можно легко повредить неосторожными действиями. -
+!!! warning
+    Мы не рекомендуем подключать серверную конфигурацию к `clickhouse-local`, поскольку данные можно легко повредить неосторожными действиями.

## Вызов программы

diff --git a/docs/ru/query_language/agg_functions/index.md b/docs/ru/query_language/agg_functions/index.md
index 57afd3dfa81..4c540d0954a 100644
--- a/docs/ru/query_language/agg_functions/index.md
+++ b/docs/ru/query_language/agg_functions/index.md
@@ -8,3 +8,56 @@ ClickHouse поддерживает также:

- [Параметрические агрегатные функции](parametric_functions.md#aggregate_functions_parametric), которые помимо столбцов принимают и другие параметры.
- [Комбинаторы](combinators.md#aggregate_functions_combinators), которые изменяют поведение агрегатных функций.
+
+## Обработка NULL
+
+При агрегации все `NULL` пропускаются.
+
+**Примеры**
+
+Рассмотрим таблицу:
+
+```
+┌─x─┬────y─┐
+│ 1 │ 2 │
+│ 2 │ ᴺᵁᴸᴸ │
+│ 3 │ 2 │
+│ 3 │ 3 │
+│ 3 │ ᴺᵁᴸᴸ │
+└───┴──────┘
+```
+
+Выполним суммирование значений в столбце `y`:
+
+```
+:) SELECT sum(y) FROM t_null_big
+
+SELECT sum(y)
+FROM t_null_big
+
+┌─sum(y)─┐
+│ 7 │
+└────────┘
+
+1 rows in set. Elapsed: 0.002 sec.
+```
+
+Функция `sum` работает с `NULL` как с `0`. В частности, это означает, что если на вход в функцию подать выборку, где все значения `NULL`, то результат будет `0`, а не `NULL`.
+
+
+Теперь с помощью функции `groupArray` сформируем массив из столбца `y`:
+
+```
+:) SELECT groupArray(y) FROM t_null_big
+
+SELECT groupArray(y)
+FROM t_null_big
+
+┌─groupArray(y)─┐
+│ [2,2,3] │
+└───────────────┘
+
+1 rows in set. Elapsed: 0.002 sec.
+```
+
+`groupArray` не включает `NULL` в результирующий массив.
diff --git a/docs/ru/query_language/agg_functions/parametric_functions.md b/docs/ru/query_language/agg_functions/parametric_functions.md
index 4a1c1ee7d38..b86b75baf6c 100644
--- a/docs/ru/query_language/agg_functions/parametric_functions.md
+++ b/docs/ru/query_language/agg_functions/parametric_functions.md
@@ -50,6 +50,63 @@ minIf(EventTime, URL LIKE '%company%') < maxIf(EventTime, URL LIKE '%cart%').

Аналогично функции sequenceMatch, но возвращает не факт наличия цепочки событий, а UInt64 - количество найденных цепочек.
Цепочки ищутся без перекрытия. То есть, следующая цепочка может начаться только после окончания предыдущей.

+## windowFunnel(window)(timestamp, cond1, cond2, cond3, ...)
+
+Отыскивает цепочки событий в скользящем окне по времени и вычисляет максимальное количество произошедших событий из цепочки.
+
+
+```
+windowFunnel(window)(timestamp, cond1, cond2, cond3, ...)
+```
+
+**Параметры**
+
+- `window` — ширина скользящего окна по времени в секундах.
+- `timestamp` — имя столбца, содержащего отметки времени. Тип данных [DateTime](../../data_types/datetime.md#data_type-datetime) или [UInt32](../../data_types/int_uint.md#data_type-int).
+- `cond1`, `cond2`... — условия или данные, описывающие цепочку событий. Тип данных — `UInt8`. Значения могут быть 0 или 1.
+
+**Алгоритм**
+
+- Функция отыскивает данные, на которых срабатывает первое условие из цепочки, и присваивает счетчику событий значение 1. С этого же момента начинается отсчет времени скользящего окна.
+- Если в пределах окна последовательно попадаются события из цепочки, то счетчик увеличивается. Если последовательность событий нарушается, то счетчик не растёт.
+- Если в данных оказалось несколько цепочек разной степени завершенности, то функция выдаст только размер самой длинной цепочки.
+
+**Возвращаемое значение**
+
+- Целое число.
Максимальное количество последовательно сработавших условий из цепочки в пределах скользящего окна по времени. Исследуются все цепочки в выборке. + +**Пример** + +Определим, успевает ли пользователь за час выбрать телефон в интернет-магазине и купить его. + +Зададим следующую цепочку событий: + +1. Пользователь вошел в личный кабинет магазина (`eventID=1001`). +2. Пользователь ищет телефон (`eventID = 1003, product = 'phone'`). +3. Пользователь сделал заказ (`eventID = 1009`). + +Чтобы узнать, как далеко пользователь `user_id` смог пройти по цепочке за час в январе 2017-го года, составим запрос: + +``` +SELECT + level, + count() AS c +FROM +( + SELECT + user_id, + windowFunnel(3600)(timestamp, eventID = 1001, eventID = 1003 AND product = 'phone', eventID = 1009) AS level + FROM trend_event + WHERE (event_date >= '2017-01-01') AND (event_date <= '2017-01-31') + GROUP BY user_id +) +GROUP BY level +ORDER BY level +``` + +В результате мы можем получить 0, 1, 2 или 3 в зависимости от действий пользователя. + + ## uniqUpTo(N)(x) Вычисляет количество различных значений аргумента, если оно меньше или равно N. diff --git a/docs/ru/query_language/alter.md b/docs/ru/query_language/alter.md index b26a3ba9e32..2478954f26e 100644 --- a/docs/ru/query_language/alter.md +++ b/docs/ru/query_language/alter.md @@ -225,7 +225,7 @@ ALTER TABLE [db.]table FETCH PARTITION 'name' FROM 'path-in-zookeeper' Мутации - разновидность запроса ALTER, позволяющая изменять или удалять данные в таблице. В отличие от стандартных запросов `DELETE` и `UPDATE`, рассчитанных на точечное изменение данных, область применения мутаций - достаточно тяжёлые изменения, затрагивающие много строк в таблице. -Функциональность находится в состоянии beta и доступна начиная с версии 1.1.54388. Реализована поддержка *MergeTree таблиц (с репликацией и без). +Функциональность находится в состоянии beta и доступна начиная с версии 1.1.54388. Реализована поддержка \*MergeTree таблиц (с репликацией и без). Конвертировать существующие таблицы для работы с мутациями не нужно. Но после применения первой мутации формат данных таблицы становится несовместимым с предыдущими версиями и откатиться на предыдущую версию уже не получится. @@ -239,12 +239,14 @@ ALTER TABLE [db.]table DELETE WHERE expr В одном запросе можно указать несколько команд через запятую. -Для *MergeTree-таблиц мутации выполняются, перезаписывая данные по кускам (parts). При этом атомарности нет - куски заменяются на помутированные по мере выполнения и запрос `SELECT`, заданный во время выполнения мутации, увидит данные как из измененных кусков, так и из кусков, которые еще не были изменены. +Для \*MergeTree-таблиц мутации выполняются, перезаписывая данные по кускам (parts). При этом атомарности нет — куски заменяются на помутированные по мере выполнения и запрос `SELECT`, заданный во время выполнения мутации, увидит данные как из измененных кусков, так и из кусков, которые еще не были изменены. Мутации линейно упорядочены между собой и накладываются на каждый кусок в порядке добавления. Мутации также упорядочены со вставками - гарантируется, что данные, вставленные в таблицу до начала выполнения запроса мутации, будут изменены, а данные, вставленные после окончания запроса мутации, изменены не будут. При этом мутации никак не блокируют вставки. Запрос завершается немедленно после добавления информации о мутации (для реплицированных таблиц - в ZooKeeper, для нереплицированных - на файловую систему). Сама мутация выполняется асинхронно, используя настройки системного профиля. 
Следить за ходом её выполнения можно по таблице `system.mutations`. Добавленные мутации будут выполняться до конца даже в случае перезапуска серверов ClickHouse. Откатить мутацию после её добавления нельзя. +Записи о последних выполненных мутациях удаляются не сразу (количество сохраняемых мутаций определяется параметром движка таблиц `finished_mutations_to_keep`). Более старые записи удаляются. + #### Таблица system.mutations Таблица содержит информацию о ходе выполнения мутаций MergeTree-таблиц. Каждой команде мутации соответствует одна строка. В таблице есть следующие столбцы: @@ -262,4 +264,3 @@ ALTER TABLE [db.]table DELETE WHERE expr **parts_to_do** - Количество кусков таблицы, которые ещё предстоит изменить. **is_done** - Завершена ли мутация. Замечание: даже если `parts_to_do = 0`, для реплицированной таблицы возможна ситуация, когда мутация ещё не завершена из-за долго выполняющейся вставки, которая добавляет данные, которые нужно будет мутировать. - diff --git a/docs/ru/query_language/dicts/external_dicts_dict_layout.md b/docs/ru/query_language/dicts/external_dicts_dict_layout.md index efe872f616c..d2ee91d5d3d 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_layout.md +++ b/docs/ru/query_language/dicts/external_dicts_dict_layout.md @@ -217,11 +217,8 @@ 3. Оценить потребление оперативной памяти с помощью таблицы `system.dictionaries`. 4. Увеличивать/уменьшать количество ячеек до получения требуемого расхода оперативной памяти. -
- -Не используйте в качестве источника ClickHouse, поскольку он медленно обрабатывает запросы со случайным чтением. - -
+!!! warning + Не используйте в качестве источника ClickHouse, поскольку он медленно обрабатывает запросы со случайным чтением. diff --git a/docs/ru/query_language/dicts/external_dicts_dict_sources.md b/docs/ru/query_language/dicts/external_dicts_dict_sources.md index 3e30cfba845..2cb4754b934 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_sources.md +++ b/docs/ru/query_language/dicts/external_dicts_dict_sources.md @@ -156,34 +156,36 @@ Конфигурация словаря в ClickHouse: ```xml - - table_name - - - - - DSN=myconnection - postgresql_table
-
- - - 300 - 360 - - - - - - - id - - - some_column - UInt64 - 0 - - -
+ + + table_name + + + + + DSN=myconnection + postgresql_table
+
+ + + 300 + 360 + + + + + + + id + + + some_column + UInt64 + 0 + + +
+
``` Может понадобиться в `odbc.ini` указать полный путь до библиотеки с драйвером `DRIVER=/usr/local/lib/psqlodbcw.so`. diff --git a/docs/ru/query_language/dicts/external_dicts_dict_structure.md b/docs/ru/query_language/dicts/external_dicts_dict_structure.md index b032003ec8c..15952024c06 100644 --- a/docs/ru/query_language/dicts/external_dicts_dict_structure.md +++ b/docs/ru/query_language/dicts/external_dicts_dict_structure.md @@ -39,11 +39,8 @@ ClickHouse поддерживает следующие виды ключей: Структура может содержать либо `` либо ``. -
- -Ключ не надо дополнительно описывать в атрибутах. - -
+!!! attention "Обратите внимание" + Ключ не надо дополнительно описывать в атрибутах. ### Числовой ключ @@ -65,9 +62,8 @@ ClickHouse поддерживает следующие виды ключей: Ключем может быть кортеж (`tuple`) из полей произвольных типов. [layout](external_dicts_dict_layout.md#dicts-external_dicts_dict_layout) в этом случае должен быть `complex_key_hashed` или `complex_key_cache`. -
-Cоставной ключ может состоять из одного элемента. Это даёт возможность использовать в качестве ключа, например, строку. -
+!!! tip "Совет"
+    Составной ключ может состоять из одного элемента. Это даёт возможность использовать в качестве ключа, например, строку.

Структура ключа задаётся в элементе `key`. Поля ключа задаются в том же формате, что и [атрибуты](external_dicts_dict_structure.md#dicts-external_dicts_dict_structure-attributes) словаря. Пример:

diff --git a/docs/ru/query_language/dicts/index.md b/docs/ru/query_language/dicts/index.md
index f474a241db6..30d5d705b1e 100644
--- a/docs/ru/query_language/dicts/index.md
+++ b/docs/ru/query_language/dicts/index.md
@@ -4,6 +4,7 @@

ClickHouse поддерживает специальные функции для работы со словарями, которые можно использовать в запросах. Проще и эффективнее использовать словари с помощью функций, чем `JOIN` с таблицами-справочниками.
+В словаре нельзя хранить значения [NULL](../syntax.md#null-literal).

ClickHouse поддерживает:

diff --git a/docs/ru/query_language/functions/array_functions.md b/docs/ru/query_language/functions/array_functions.md
index 9c9b244f2ce..4fab96f32e7 100644
--- a/docs/ru/query_language/functions/array_functions.md
+++ b/docs/ru/query_language/functions/array_functions.md
@@ -75,11 +75,48 @@ n должен быть любым целочисленным типом.

Проверяет наличие элемента elem в массиве arr.
Возвращает 0, если элемента в массиве нет, или 1, если есть.

+`NULL` обрабатывается как значение.
+
+```
+SELECT has([1, 2, NULL], NULL)
+
+┌─has([1, 2, NULL], NULL)─┐
+│ 1 │
+└─────────────────────────┘
+```
+
## indexOf(arr, x)

-Возвращает индекс элемента x (начиная с 1), если он есть в массиве, или 0, если его нет.
+Возвращает индекс первого элемента x (начиная с 1), если он есть в массиве, или 0, если его нет.
+
+Пример:
+
+```
+:) select indexOf([1,3,NULL,NULL],NULL)
+
+SELECT indexOf([1, 3, NULL, NULL], NULL)
+
+┌─indexOf([1, 3, NULL, NULL], NULL)─┐
+│ 3 │
+└───────────────────────────────────┘
+```
+
+Элементы, равные `NULL`, обрабатываются как обычные значения.

## countEqual(arr, x)

-Возвращает количество элементов массива, равных x. Эквивалентно arrayCount(elem -> elem = x, arr).
+Возвращает количество элементов массива, равных x. Эквивалентно arrayCount(elem -> elem = x, arr).
+
+Элементы `NULL` обрабатываются как отдельные значения.
+
+Пример:
+
+```
+SELECT countEqual([1, 2, NULL, NULL], NULL)
+
+┌─countEqual([1, 2, NULL, NULL], NULL)─┐
+│ 2 │
+└──────────────────────────────────────┘
+```
+
## arrayEnumerate(arr)

Возвращает массив \[1, 2, 3, ..., length(arr)\]

@@ -232,7 +269,7 @@ arrayPushBack(array, single_value)

**Аргументы**

- `array` - Массив.
-- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе "[Типы данных](../../data_types/index.md#data_types)".
+- `single_value` - Одиночное значение. В массив с числами можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе "[Типы данных](../../data_types/index.md#data_types)". Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`.

**Пример**

@@ -256,7 +293,7 @@ arrayPushFront(array, single_value)

**Аргументы**

- `array` - Массив.
-- `single_value` - Одиночное значение. В массив с числам можно добавить только числа, в массив со строками только строки.
При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе "[Типы данных](../../data_types/index.md#data_types)". +- `single_value` - Одиночное значение. В массив с числами можно добавить только числа, в массив со строками только строки. При добавлении чисел ClickHouse автоматически приводит тип `single_value` к типу данных массива. Подробнее о типах данных в ClickHouse читайте в разделе "[Типы данных](../../data_types/index.md#data_types)". Может быть равно `NULL`. Функция добавит элемент `NULL` в массив, а тип элементов массива преобразует в `Nullable`. **Пример** @@ -269,6 +306,43 @@ SELECT arrayPushBack(['b'], 'a') AS res └───────────┘ ``` +## arrayResize + +Изменяет длину массива. + +``` +arrayResize(array, size[, extender]) +``` + +**Параметры:** + +- `array` — массив. +- `size` — необходимая длина массива. + - Если `size` меньше изначального размера массива, то массив обрезается справа. + - Если `size` больше изначального размера массива, массив дополняется справа значениями `extender` или значениями по умолчанию для типа данных элементов массива. +- `extender` — значение для дополнения массива. Может быть `NULL`. + +**Возвращаемое значение:** + +Массив длины `size`. + +**Примеры вызовов** + +``` +SELECT arrayResize([1], 3) + +┌─arrayResize([1], 3)─┐ +│ [1,0,0] │ +└─────────────────────┘ +``` +``` +SELECT arrayResize([1], 3, NULL) + +┌─arrayResize([1], 3, NULL)─┐ +│ [1,NULL,NULL] │ +└───────────────────────────┘ +``` + ## arraySlice Возвращает срез массива. @@ -286,14 +360,15 @@ arraySlice(array, offset[, length]) **Пример** ```sql -SELECT arraySlice([1, 2, 3, 4, 5], 2, 3) AS res +SELECT arraySlice([1, 2, NULL, 4, 5], 2, 3) AS res ``` ``` -┌─res─────┐ -│ [2,3,4] │ -└─────────┘ +┌─res────────┐ +│ [2,NULL,4] │ +└────────────┘ ``` +Элементы массива, равные `NULL`, обрабатываются как обычные значения. ## arrayUniq(arr, ...) Если передан один аргумент, считает количество разных элементов в массиве. @@ -303,4 +378,3 @@ SELECT arraySlice([1, 2, 3, 4, 5], 2, 3) AS res ## arrayJoin(arr) Особенная функция. Смотрите раздел ["Функция arrayJoin"](array_join.md#functions_arrayjoin). - diff --git a/docs/ru/query_language/functions/conditional_functions.md b/docs/ru/query_language/functions/conditional_functions.md index f7c58807138..a6ce4c73adb 100644 --- a/docs/ru/query_language/functions/conditional_functions.md +++ b/docs/ru/query_language/functions/conditional_functions.md @@ -1,5 +1,48 @@ # Условные функции ## if(cond, then, else), оператор cond ? then : else - +Возвращает then, если cond != 0 или else, если cond = 0. -cond должно иметь тип UInt8, а then и else должны иметь тип, для которого есть наименьший общий тип. + +Возвращает `then`, если `cond != 0` или `else`, если `cond = 0`. +`cond` должно иметь тип `UInt8`, а `then` и `else` должны иметь тип, для которого есть наименьший общий тип. + +`then` и `else` могут быть `NULL`. + +## multiIf + +Позволяет более компактно записать оператор [CASE](../operators.md#operator_case) в запросе. + +``` +multiIf(cond_1, then_1, cond_2, then_2...else) +``` + +**Параметры** + +- `cond_N` — Условие, при выполнении которого функция вернёт `then_N`. +- `then_N` — Результат функции при выполнении. +- `else` — Результат функции, если ни одно из условий не выполнено. + +Функция принимает `2N+1` параметров. + +**Возвращаемые значения** + +Функция возвращает одно из значений `then_N` или `else`, в зависимости от условий `cond_N`.
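+ +Для сравнения — набросок эквивалентной записи через оператор `CASE` (запрос гипотетический; используется таблица `t_null` из примера ниже): + +```sql +-- эквивалент multiIf(isNull(y), x, y < 3, y, NULL) +SELECT + CASE + WHEN isNull(y) THEN x + WHEN y < 3 THEN y + ELSE NULL + END +FROM t_null +```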
+ +**Пример** + +Рассмотрим таблицу + +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Выполним запрос `SELECT multiIf(isNull(y), x, y < 3, y, NULL) FROM t_null`. Результат: + +``` +┌─multiIf(isNull(y), x, less(y, 3), y, NULL)─┐ +│ 1 │ +│ ᴺᵁᴸᴸ │ +└────────────────────────────────────────────┘ +``` diff --git a/docs/ru/query_language/functions/functions_for_nulls.md b/docs/ru/query_language/functions/functions_for_nulls.md new file mode 100644 index 00000000000..5d5314efb6b --- /dev/null +++ b/docs/ru/query_language/functions/functions_for_nulls.md @@ -0,0 +1,289 @@ +# Функции для работы с Nullable-аргументами + +## isNull + +Проверяет, является ли аргумент [NULL](../syntax.md#null-literal). + +``` +isNull(x) +``` + +**Параметры** + +- `x` — значение с несоставным типом данных. + +**Возвращаемое значение** + +- `1`, если `x` — `NULL`. +- `0`, если `x` — не `NULL`. + +**Пример** + +Входная таблица + +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Запрос + +``` +:) SELECT x FROM t_null WHERE isNull(y) + +SELECT x +FROM t_null +WHERE isNull(y) + +┌─x─┐ +│ 1 │ +└───┘ + +1 rows in set. Elapsed: 0.010 sec. +``` + +## isNotNull + +Проверяет, не является ли аргумент [NULL](../syntax.md#null-literal). + +``` +isNotNull(x) +``` + +**Параметры** + +- `x` — значение с несоставным типом данных. + +**Возвращаемое значение** + +- `0`, если `x` — `NULL`. +- `1`, если `x` — не `NULL`. + +**Пример** + +Входная таблица + +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Запрос + +``` +:) SELECT x FROM t_null WHERE isNotNull(y) + +SELECT x +FROM t_null +WHERE isNotNull(y) + +┌─x─┐ +│ 2 │ +└───┘ + +1 rows in set. Elapsed: 0.010 sec. +``` + +## coalesce + +Последовательно слева направо проверяет, являются ли переданные аргументы `NULL`, и возвращает первый не `NULL`. + +``` +coalesce(x,...) +``` +**Параметры** + +- Произвольное количество параметров несоставного типа. Все параметры должны быть совместимы по типу данных. + +**Возвращаемые значения** + +- Первый не `NULL` аргумент. +- `NULL`, если все аргументы — `NULL`. + +**Пример** + +Рассмотрим адресную книгу, в которой может быть указано несколько способов связи с клиентом. + +``` +┌─name─────┬─mail─┬─phone─────┬──icq─┐ +│ client 1 │ ᴺᵁᴸᴸ │ 123-45-67 │ 123 │ +│ client 2 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +└──────────┴──────┴───────────┴──────┘ +``` + +Поля `mail` и `phone` имеют тип `String`, а поле `icq` — `UInt32`, его необходимо будет преобразовать в `String`. + +Получим из адресной книги первый доступный способ связаться с клиентом: + +``` +:) SELECT coalesce(mail, phone, CAST(icq,'Nullable(String)')) FROM aBook + +SELECT coalesce(mail, phone, CAST(icq, 'Nullable(String)')) +FROM aBook + +┌─name─────┬─coalesce(mail, phone, CAST(icq, 'Nullable(String)'))─┐ +│ client 1 │ 123-45-67 │ +│ client 2 │ ᴺᵁᴸᴸ │ +└──────────┴──────────────────────────────────────────────────────┘ + +2 rows in set. Elapsed: 0.006 sec. +``` + +## ifNull + +Возвращает альтернативное значение, если основной аргумент — `NULL`. + +``` +ifNull(x,alt) +``` + +**Параметры** + +- `x` — значение для проверки на `NULL`, +- `alt` — значение, которое функция вернёт, если `x` — `NULL`. + +**Возвращаемые значения** + +- Значение `x`, если `x` — не `NULL`. +- Значение `alt`, если `x` — `NULL`. + +**Пример** + +``` +SELECT ifNull('a', 'b') + +┌─ifNull('a', 'b')─┐ +│ a │ +└──────────────────┘ +``` +``` +SELECT ifNull(NULL, 'b') + +┌─ifNull(NULL, 'b')─┐ +│ b │ +└───────────────────┘ +``` + +## nullIf + +Возвращает `NULL`, если аргументы равны.
+ +``` +nullIf(x, y) +``` + +**Параметры** + +`x`, `y` — значения для сравнения. Они должны быть совместимых типов, иначе ClickHouse сгенерирует исключение. + +**Возвращаемые значения** + +- `NULL`, если аргументы равны. +- Значение `x`, если аргументы не равны. + +**Пример** + +``` +SELECT nullIf(1, 1) + +┌─nullIf(1, 1)─┐ +│ ᴺᵁᴸᴸ │ +└──────────────┘ +``` +``` +SELECT nullIf(1, 2) + +┌─nullIf(1, 2)─┐ +│ 1 │ +└──────────────┘ +``` + +## assumeNotNull + +Приводит значение типа [Nullable](../../data_types/nullable.md#data_type-nullable) к не `Nullable`, если значение не `NULL`. + +``` +assumeNotNull(x) +``` + +**Параметры** + +- `x` — исходное значение. + +**Возвращаемые значения** + +- Исходное значение с не `Nullable` типом, если оно — не `NULL`. +- Значение по умолчанию для не `Nullable` типа, если исходное значение — `NULL`. + +**Пример** + +Рассмотрим таблицу `t_null`. + +``` +SHOW CREATE TABLE t_null + +┌─statement─────────────────────────────────────────────────────────────────┐ +│ CREATE TABLE default.t_null ( x Int8, y Nullable(Int8)) ENGINE = TinyLog │ +└───────────────────────────────────────────────────────────────────────────┘ +``` +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +Применим функцию `assumeNotNull` к столбцу `y`. + +``` +SELECT assumeNotNull(y) FROM t_null + +┌─assumeNotNull(y)─┐ +│ 0 │ +│ 3 │ +└──────────────────┘ +``` +``` +SELECT toTypeName(assumeNotNull(y)) FROM t_null + +┌─toTypeName(assumeNotNull(y))─┐ +│ Int8 │ +│ Int8 │ +└──────────────────────────────┘ +``` + +## toNullable + +Преобразует тип аргумента к `Nullable`. + +``` +toNullable(x) +``` + +**Параметры** + +- `x` — значение произвольного несоставного типа. + +**Возвращаемое значение** + +- Входное значение с типом `Nullable`. + +**Пример** + +``` +SELECT toTypeName(10) + +┌─toTypeName(10)─┐ +│ UInt8 │ +└────────────────┘ + +SELECT toTypeName(toNullable(10)) + +┌─toTypeName(toNullable(10))─┐ +│ Nullable(UInt8) │ +└────────────────────────────┘ +``` diff --git a/docs/ru/query_language/functions/higher_order_functions.md b/docs/ru/query_language/functions/higher_order_functions.md index 857dd4e0e03..2dfe08066bc 100644 --- a/docs/ru/query_language/functions/higher_order_functions.md +++ b/docs/ru/query_language/functions/higher_order_functions.md @@ -1,3 +1,5 @@ + + # Функции высшего порядка ## Оператор `->`, функция lambda(params, expr) @@ -101,5 +103,3 @@ SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); ### arrayReverseSort(\[func,\] arr1, ...) Возвращает отсортированный в нисходящем порядке массив `arr1`. Если задана функция `func`, то порядок сортировки определяется результатом применения функции `func` на элементы массива (массивов). - - diff --git a/docs/ru/query_language/functions/index.md b/docs/ru/query_language/functions/index.md index c8e86b44327..870eccb24ab 100644 --- a/docs/ru/query_language/functions/index.md +++ b/docs/ru/query_language/functions/index.md @@ -27,6 +27,13 @@ Функции могут быть по-разному реализованы для константных и не константных аргументов (выполняется разный код). Но результат работы для константы и полноценного столбца, содержащего только одно такое же значение, должен совпадать. +## Обработка NULL + +Функции имеют следующие виды поведения: + +- Если хотя бы один из аргументов функции — `NULL`, то результат функции тоже `NULL`. +- Специальное поведение, указанное в описании каждой функции отдельно. В исходном коде ClickHouse такие функции можно определить по свойству `UseDefaultImplementationForNulls=false`.
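+ +Пример поведения по умолчанию (набросок; точное форматирование вывода может отличаться): + +``` +SELECT 1 + NULL + +┌─plus(1, NULL)─┐ +│ ᴺᵁᴸᴸ │ +└───────────────┘ +```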
+ ## Неизменяемость Функции не могут поменять значения своих аргументов - любые изменения возвращаются в качестве результата. Соответственно, от порядка записи функций в запросе, результат вычислений отдельных функций не зависит. diff --git a/docs/ru/query_language/functions/other_functions.md b/docs/ru/query_language/functions/other_functions.md index b9aecec9f7d..fb41049f23c 100644 --- a/docs/ru/query_language/functions/other_functions.md +++ b/docs/ru/query_language/functions/other_functions.md @@ -7,9 +7,21 @@ Вычисляет приблизительную ширину при выводе значения в текстовом (tab-separated) виде на консоль. Функция используется системой для реализации Pretty форматов. +`NULL` представляется как строка, соответствующая отображению `NULL` в форматах `Pretty`. + +``` +SELECT visibleWidth(NULL) + +┌─visibleWidth(NULL)─┐ +│ 4 │ +└────────────────────┘ +``` + ## toTypeName(x) Возвращает строку, содержащую имя типа переданного аргумента. +Если на вход функции передать `NULL`, то она вернёт тип `Nullable(Nothing)`, что соответствует внутреннему представлению `NULL` в ClickHouse. + ## blockSize() Получить размер блока. В ClickHouse выполнение запроса всегда идёт по блокам (наборам кусочков столбцов). Функция позволяет получить размер блока, для которого её вызвали. @@ -19,7 +31,7 @@ В ClickHouse полноценные столбцы и константы представлены в памяти по-разному. Функции по-разному работают для аргументов-констант и обычных аргументов (выполняется разный код), хотя результат почти всегда должен быть одинаковым. Эта функция предназначена для отладки такого поведения. ## ignore(...) -Принимает любые аргументы, всегда возвращает 0. +Принимает любые аргументы, в т.ч. `NULL`, всегда возвращает 0. При этом, аргумент всё равно вычисляется. Это может использоваться для бенчмарков. ## sleep(seconds) @@ -261,3 +273,270 @@ FROM ## MACStringToOUI(s) Принимает MAC адрес в формате AA:BB:CC:DD:EE:FF (числа в шестнадцатеричной форме через двоеточие). Возвращает первые три октета как число в формате UInt64. Если MAC адрес в неправильном формате, то возвращает 0. + +## getSizeOfEnumType + +Возвращает количество полей в [Enum](../../data_types/enum.md#data_type-enum). + +``` +getSizeOfEnumType(value) +``` + +**Параметры** + +- `value` — Значение типа `Enum`. + + +**Возвращаемые значения** + +- Количество полей входного значения типа `Enum`. +- Исключение, если тип не `Enum`. + +**Пример** + +``` +SELECT getSizeOfEnumType( CAST('a' AS Enum8('a' = 1, 'b' = 2) ) ) AS x + +┌─x─┐ +│ 2 │ +└───┘ +``` + +## toColumnTypeName + +Возвращает имя класса, которым представлен тип данных столбца в оперативной памяти. + +``` +toColumnTypeName(value) +``` + +**Параметры** + +- `value` — Значение произвольного типа. + +**Возвращаемые значения** + +- Строка с именем класса, который используется для представления типа данных `value` в оперативной памяти. + +**Пример разницы между `toTypeName` и `toColumnTypeName`** + +``` +:) select toTypeName(cast('2018-01-01 01:02:03' AS DateTime)) + +SELECT toTypeName(CAST('2018-01-01 01:02:03', 'DateTime')) + +┌─toTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime │ +└─────────────────────────────────────────────────────┘ + +1 rows in set. Elapsed: 0.008 sec. 
+ +:) select toColumnTypeName(cast('2018-01-01 01:02:03' AS DateTime)) + +SELECT toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime')) + +┌─toColumnTypeName(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ Const(UInt32) │ +└───────────────────────────────────────────────────────────┘ +``` + +В примере видно, что тип данных `DateTime` хранится в памяти как `Const(UInt32)`. + +## dumpColumnStructure + +Выводит развернутое описание структур данных в оперативной памяти + +``` +dumpColumnStructure(value) +``` + +**Параметры** + +- `value` — Значение произвольного типа. + +**Возвращаемые значения** + +- Строка с описанием структуры, которая используется для представления типа данных `value` в оперативной памяти. + +**Пример** + +``` +SELECT dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime')) + +┌─dumpColumnStructure(CAST('2018-01-01 01:02:03', 'DateTime'))─┐ +│ DateTime, Const(size = 1, UInt32(size = 1)) │ +└──────────────────────────────────────────────────────────────┘ +``` + +## defaultValueOfArgumentType + +Выводит значение по умолчанию для типа данных. + +Не учитывает значения по умолчанию для столбцов, заданные пользователем. + +``` +defaultValueOfArgumentType(expression) +``` + +**Параметры** + +- `expression` — Значение произвольного типа или выражение, результатом которого является значение произвольного типа. + +**Возвращаемые значения** + +- `0` для чисел; +- Пустая строка для строк; +- `ᴺᵁᴸᴸ` для [Nullable](../../data_types/nullable.md#data_type-nullable). + +**Пример** + +``` +:) SELECT defaultValueOfArgumentType( CAST(1 AS Int8) ) + +SELECT defaultValueOfArgumentType(CAST(1, 'Int8')) + +┌─defaultValueOfArgumentType(CAST(1, 'Int8'))─┐ +│ 0 │ +└─────────────────────────────────────────────┘ + +1 rows in set. Elapsed: 0.002 sec. + +:) SELECT defaultValueOfArgumentType( CAST(1 AS Nullable(Int8) ) ) + +SELECT defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)')) + +┌─defaultValueOfArgumentType(CAST(1, 'Nullable(Int8)'))─┐ +│ ᴺᵁᴸᴸ │ +└───────────────────────────────────────────────────────┘ + +1 rows in set. Elapsed: 0.002 sec. +``` + +## indexHint + +Выводит данные, попавшие в диапазон, выбранный по индексу без фильтрации по указанному в качестве аргумента выражению. + +Переданное в функцию выражение не вычисляется, но при этом ClickHouse применяет к этому выражению индекс таким же образом, как если бы выражение участвовало в запросе без `indexHint`. + + +**Возвращаемое значение** + +- 1. + + +**Пример** + +Рассмотрим таблицу с тестовыми данными [ontime](../../getting_started/example_datasets/ontime.md#example_datasets-ontime). + +``` +SELECT count() FROM ontime + +┌─count()─┐ +│ 4276457 │ +└─────────┘ +``` + +В таблице есть индексы по полям `(FlightDate, (Year, FlightDate))`. + +Выполним выборку по дате следующим образом: + +``` +:) SELECT FlightDate AS k, count() FROM ontime GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-01-01 │ 13970 │ +│ 2017-01-02 │ 15882 │ +........................ +│ 2017-09-28 │ 16411 │ +│ 2017-09-29 │ 16384 │ +│ 2017-09-30 │ 12520 │ +└────────────┴─────────┘ + +273 rows in set. Elapsed: 0.072 sec. Processed 4.28 million rows, 8.55 MB (59.00 million rows/s., 118.01 MB/s.) +``` + +В этой выборке индекс не используется и ClickHouse обработал всю таблицу (`Processed 4.28 million rows`). 
Для подключения индекса выберем конкретную дату и выполним следующий запрос: + +``` +:) SELECT FlightDate AS k, count() FROM ontime WHERE k = '2017-09-15' GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE k = '2017-09-15' +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-09-15 │ 16428 │ +└────────────┴─────────┘ + +1 rows in set. Elapsed: 0.014 sec. Processed 32.74 thousand rows, 65.49 KB (2.31 million rows/s., 4.63 MB/s.) +``` + +В последней строке выдачи видно, что благодаря использованию индекса, ClickHouse обработал значительно меньшее количество строк (`Processed 32.74 thousand rows`). + + +Теперь передадим выражение `k = '2017-09-15'` в функцию `indexHint`: + +``` +:) SELECT FlightDate AS k, count() FROM ontime WHERE indexHint(k = '2017-09-15') GROUP BY k ORDER BY k + +SELECT + FlightDate AS k, + count() +FROM ontime +WHERE indexHint(k = '2017-09-15') +GROUP BY k +ORDER BY k ASC + +┌──────────k─┬─count()─┐ +│ 2017-09-14 │ 7071 │ +│ 2017-09-15 │ 16428 │ +│ 2017-09-16 │ 1077 │ +│ 2017-09-30 │ 8167 │ +└────────────┴─────────┘ + +4 rows in set. Elapsed: 0.004 sec. Processed 32.74 thousand rows, 65.49 KB (8.97 million rows/s., 17.94 MB/s.) +``` + +В ответе на запрос видно, что ClickHouse применил индекс таким же образом, что и в предыдущий раз (`Processed 32.74 thousand rows`). Однако по результирующему набору строк видно, что выражение `k = '2017-09-15'` не использовалось при формировании результата. + +Поскольку индекс в ClickHouse разреженный, то при чтении диапазона в ответ попадают "лишние" данные, в данном случае соседние даты. Функция `indexHint` позволяет их увидеть. + +## replicate + +Создает массив, заполненный одним значением. + +Используется для внутренней реализации [arrayJoin](array_join.md#functions_arrayjoin). + +``` +replicate(x, arr) +``` + +**Параметры** + +- `arr` — Исходный массив. ClickHouse создаёт новый массив такой же длины как исходный и заполняет его значением `x`. +- `x` — Значение, которым будет заполнен результирующий массив. + +**Выходное значение** + +- Массив, заполненный значением `x`. + +**Пример** + +``` +SELECT replicate(1, ['a', 'b', 'c']) + +┌─replicate(1, ['a', 'b', 'c'])─┐ +│ [1,1,1] │ +└───────────────────────────────┘ +``` diff --git a/docs/ru/query_language/functions/type_conversion_functions.md b/docs/ru/query_language/functions/type_conversion_functions.md index 7676586db6f..21c8556f255 100644 --- a/docs/ru/query_language/functions/type_conversion_functions.md +++ b/docs/ru/query_language/functions/type_conversion_functions.md @@ -113,3 +113,21 @@ SELECT ``` Преобразование в FixedString(N) работает только для аргументов типа String или FixedString(N). + +Поддержано преобразование к типу [Nullable](../../data_types/nullable.md#data_type-nullable) и обратно. 
Пример: + +``` +SELECT toTypeName(x) FROM t_null + +┌─toTypeName(x)─┐ +│ Int8 │ +│ Int8 │ +└───────────────┘ + +SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null + +┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐ +│ Nullable(UInt16) │ +│ Nullable(UInt16) │ +└─────────────────────────────────────────┘ +``` diff --git a/docs/ru/query_language/index.md b/docs/ru/query_language/index.md index 28482aa6120..01666d3e0e7 100644 --- a/docs/ru/query_language/index.md +++ b/docs/ru/query_language/index.md @@ -1,4 +1,4 @@ -# Диалект SQL +# Справка по SQL * [SELECT](select.md#select) * [INSERT INTO](insert_into.md#queries-insert) diff --git a/docs/ru/query_language/misc.md b/docs/ru/query_language/misc.md index 4027a5fba43..5385c0f20fd 100644 --- a/docs/ru/query_language/misc.md +++ b/docs/ru/query_language/misc.md @@ -13,7 +13,7 @@ Если таблица перед этим была отсоединена (`DETACH`), т.е. её структура известна, то можно использовать сокращенную форму записи без определения структуры. ```sql -ATTACH TABLE [IF NOT EXISTS] [db.]name +ATTACH TABLE [IF NOT EXISTS] [db.]name [ON CLUSTER cluster] ``` Этот запрос используется при старте сервера. Сервер хранит метаданные таблиц в виде файлов с запросами `ATTACH`, которые он просто исполняет при запуске (за исключением системных таблиц, создание которых явно вписано в сервер). @@ -39,7 +39,7 @@ DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] Удаляет из сервера информацию о таблице name. Сервер перестаёт знать о существовании таблицы. ```sql -DETACH TABLE [IF EXISTS] [db.]name +DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] ``` Но ни данные, ни метаданные таблицы не удаляются. При следующем запуске сервера, сервер прочитает метаданные и снова узнает о таблице. @@ -166,7 +166,7 @@ SET param = value ## OPTIMIZE ```sql -OPTIMIZE TABLE [db.]name [PARTITION partition] [FINAL] +OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition] [FINAL] ``` Просит движок таблицы сделать что-нибудь, что может привести к более оптимальной работе. @@ -174,14 +174,13 @@ OPTIMIZE TABLE [db.]name [PARTITION partition] [FINAL] Если указан `PARTITION`, то оптимизация будет производиться только для указаной партиции. Если указан `FINAL`, то оптимизация будет производиться даже когда все данные уже лежат в одном куске. -
-Запрос OPTIMIZE не может устранить причину появления ошибки "Too many parts". -
+!!! warning "Внимание" + Запрос OPTIMIZE не может устранить причину появления ошибки "Too many parts". ## KILL QUERY ```sql -KILL QUERY +KILL QUERY [ON CLUSTER cluster] WHERE <where expression to SELECT FROM system.processes query> [SYNC|ASYNC|TEST] [FORMAT format] diff --git a/docs/ru/query_language/operators.md b/docs/ru/query_language/operators.md index 7f8a4d8c692..e143214137e 100644 --- a/docs/ru/query_language/operators.md +++ b/docs/ru/query_language/operators.md @@ -79,9 +79,11 @@ `a ? b : c` - функция `if(a, b, c)` -Примечание: +Примечание: -Условный оператор сначала вычисляет значения b и c, затем проверяет выполнение условия a, и только после этого возвращает соответствующее значение. Если в качестве b или с выступает функция arrayJoin(), то размножение каждой строки произойдет вне зависимости от условия а. +Условный оператор сначала вычисляет значения b и c, затем проверяет выполнение условия a, и только после этого возвращает соответствующее значение. Если в качестве b или c выступает функция [arrayJoin()](functions/array_join.md#functions_arrayjoin), то размножение каждой строки произойдет вне зависимости от условия a. + + ## Условное выражение @@ -119,3 +121,53 @@ END Иногда это работает не так, как ожидается. Например, `SELECT 4 > 3 > 2` выдаст 0. Для эффективности, реализованы функции `and` и `or`, принимающие произвольное количество аргументов. Соответствующие цепочки операторов `AND` и `OR`, преобразуются в один вызов этих функций. + +## Проверка на `NULL` + +ClickHouse поддерживает операторы `IS NULL` и `IS NOT NULL`. + + + +### IS NULL + +- Для значений типа [Nullable](../data_types/nullable.md#data_type-nullable) оператор `IS NULL` возвращает: + - `1`, если значение — `NULL`. + - `0` в обратном случае. +- Для прочих значений оператор `IS NULL` всегда возвращает `0`. + +``` +:) SELECT x+100 FROM t_null WHERE y IS NULL + +SELECT x + 100 +FROM t_null +WHERE isNull(y) + +┌─plus(x, 100)─┐ +│ 101 │ +└──────────────┘ + +1 rows in set. Elapsed: 0.002 sec. +``` + + + +### IS NOT NULL + +- Для значений типа [Nullable](../data_types/nullable.md#data_type-nullable) оператор `IS NOT NULL` возвращает: + - `0`, если значение — `NULL`. + - `1`, в обратном случае. +- Для прочих значений оператор `IS NOT NULL` всегда возвращает `1`. + +``` +:) SELECT * FROM t_null WHERE y IS NOT NULL + +SELECT * +FROM t_null +WHERE isNotNull(y) + +┌─x─┬─y─┐ +│ 2 │ 3 │ +└───┴───┘ + +1 rows in set. Elapsed: 0.002 sec. +``` diff --git a/docs/ru/query_language/select.md b/docs/ru/query_language/select.md index 8f006ef4965..232386c485f 100644 --- a/docs/ru/query_language/select.md +++ b/docs/ru/query_language/select.md @@ -26,6 +26,8 @@ SELECT [DISTINCT] expr_list Если в запросе отсутствуют секции `DISTINCT`, `GROUP BY`, `ORDER BY`, подзапросы в `IN` и `JOIN`, то запрос будет обработан полностью потоково, с использованием O(1) количества оперативки. Иначе запрос может съесть много оперативки, если не указаны подходящие ограничения `max_memory_usage`, `max_rows_to_group_by`, `max_rows_to_sort`, `max_rows_in_distinct`, `max_bytes_in_distinct`, `max_rows_in_set`, `max_bytes_in_set`, `max_rows_in_join`, `max_bytes_in_join`, `max_bytes_before_external_sort`, `max_bytes_before_external_group_by`. Подробнее смотрите в разделе "Настройки". Присутствует возможность использовать внешнюю сортировку (с сохранением временных данных на диск) и внешнюю агрегацию. `Merge join` в системе нет. + + ### Секция FROM Если секция FROM отсутствует, то данные будут читаться из таблицы `system.one`.
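Таблица `system.one` содержит ровно одну строку, поэтому следующие два запроса эквивалентны (набросок): ```sql SELECT 1; SELECT 1 FROM system.one; ```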
@@ -332,6 +334,9 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num Соответствующее преобразование может выполняться как до секции WHERE/PREWHERE (если его результат нужен в этой секции), так и после выполнения WHERE/PREWHERE (чтобы уменьшить объём вычислений). + + + ### Секция JOIN Обычный JOIN, не имеет отношения к ARRAY JOIN, который описан выше. @@ -428,29 +433,59 @@ LIMIT 10 Если JOIN необходим для соединения с таблицами измерений (dimension tables - сравнительно небольшие таблицы, которые содержат свойства измерений - например, имена для рекламных кампаний), то использование JOIN может быть не очень удобным из-за громоздкости синтаксиса, а также из-за того, что правая таблица читается заново при каждом запросе. Специально для таких случаев существует функциональность "Внешние словари", которую следует использовать вместо JOIN. Подробнее смотрите раздел "Внешние словари". +#### Обработка NULL + +На поведение JOIN влияет настройка [join_use_nulls](../operations/settings/settings.md#settings-join_use_nulls). При `join_use_nulls=1` `JOIN` работает как в стандартном SQL. + +Если ключами JOIN выступают поля типа [Nullable](../data_types/nullable.md#data_type-nullable), то строки, где хотя бы один из ключей имеет значение [NULL](syntax.md#null-literal), не соединяются. + + + + ### Секция WHERE -Секция WHERE, если есть, должна содержать выражение, имеющее тип UInt8. Обычно это какое-либо выражение с операторами сравнения и логическими операторами. -Это выражение будет использовано для фильтрации данных перед всеми остальными преобразованиями. +Позволяет задать выражение, которое ClickHouse использует для фильтрации данных перед всеми другими действиями в запросе, кроме выражений, содержащихся в секции [PREWHERE](#query_language-queries-prewhere). Обычно это выражение с логическими операторами. -Выражение анализируется на возможность использования индексов, если индексы поддерживаются движком таблицы. +Результат выражения должен иметь тип `UInt8`. + +ClickHouse использует в выражении индексы, если это позволяет [движок таблицы](../operations/table_engines/index.md#table_engines). + +Если в секции необходимо проверить [NULL](syntax.md#null-literal), то используйте операторы [IS NULL](operators.md#operator-is-null) и [IS NOT NULL](operators.md#operator-is-not-null), а также соответствующие функции `isNull` и `isNotNull`. В противном случае выражение будет считаться всегда невыполненным. + +Пример проверки на `NULL`: + +``` +:) SELECT * FROM t_null WHERE y IS NULL + +SELECT * +FROM t_null +WHERE isNull(y) + +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└───┴──────┘ + +1 rows in set. Elapsed: 0.002 sec. +``` + + ### Секция PREWHERE -Имеет такой же смысл, как и секция WHERE. Отличие состоит в том, какие данные читаются из таблицы. -При использовании PREWHERE, из таблицы сначала читаются только столбцы, необходимые для выполнения PREWHERE. Затем читаются остальные столбцы, нужные для выполнения запроса, но из них только те блоки, в которых выражение в PREWHERE истинное. +Имеет такой же смысл, как и секция [WHERE](#query_language-queries-where). Отличие состоит в том, какие данные читаются из таблицы. +При использовании `PREWHERE`, из таблицы сначала читаются только столбцы, необходимые для выполнения `PREWHERE`. Затем читаются остальные столбцы, нужные для выполнения запроса, но из них только те блоки, в которых выражение в `PREWHERE` истинное.
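+ +Набросок запроса с `PREWHERE` (таблица `hits` и имена столбцов здесь условные): + +```sql +-- сначала читается и фильтруется только столбец URL; +-- остальные столбцы читаются лишь в тех блоках, где условие истинно +SELECT URL, Title, Referer +FROM hits +PREWHERE URL LIKE '%metrika%' +```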
-PREWHERE имеет смысл использовать, если есть условия фильтрации, не подходящие под индексы, которые использует меньшинство столбцов из тех, что есть в запросе, но достаточно сильно фильтрует данные. Таким образом, сокращается количество читаемых данных. +`PREWHERE` имеет смысл использовать, если есть условия фильтрации, не подходящие под индексы, которые использует меньшинство столбцов из тех, что есть в запросе, но достаточно сильно фильтрует данные. Таким образом, сокращается количество читаемых данных. -Например, полезно писать PREWHERE для запросов, которые вынимают много столбцов, но в которых фильтрация производится лишь по нескольким столбцам. +Например, полезно писать `PREWHERE` для запросов, которые вынимают много столбцов, но в которых фильтрация производится лишь по нескольким столбцам. -PREWHERE поддерживается только таблицами семейства `*MergeTree`. +`PREWHERE` поддерживается только таблицами семейства `*MergeTree`. -В запросе могут быть одновременно указаны секции PREWHERE и WHERE. В этом случае, PREWHERE идёт перед WHERE. +В запросе могут быть одновременно указаны секции `PREWHERE` и `WHERE`. В этом случае, `PREWHERE` идёт перед `WHERE`. -Следует иметь ввиду, что указывать в PREWHERE только столбцы, по которым существует индекс, имеет мало смысла, так как при использовании индекса и так читаются лишь блоки данных, соответствующие индексу. +Следует иметь в виду, что указывать в `PREWHERE` только столбцы, по которым существует индекс, имеет мало смысла, так как при использовании индекса и так читаются лишь блоки данных, соответствующие индексу. -Если настройка optimize_move_to_prewhere выставлена в 1, то при отсутствии PREWHERE, система будет автоматически переносить части выражений из WHERE в PREWHERE согласно некоторой эвристике. +Если настройка `optimize_move_to_prewhere` выставлена в `1`, то при отсутствии `PREWHERE`, система будет автоматически переносить части выражений из `WHERE` в `PREWHERE` согласно некоторой эвристике. ### Секция GROUP BY @@ -492,6 +527,38 @@ GROUP BY вычисляет для каждого встретившегося Не поддерживается указание констант в качестве аргументов агрегатных функций. Пример: sum(1). Вместо этого, вы можете избавиться от констант. Пример: `count()`. +#### Обработка NULL + +При группировке ClickHouse рассматривает [NULL](syntax.md#null-literal) как значение, причём `NULL=NULL`. + +Рассмотрим, что это значит на примере. + +Пусть есть таблица: + +``` +┌─x─┬────y─┐ +│ 1 │ 2 │ +│ 2 │ ᴺᵁᴸᴸ │ +│ 3 │ 2 │ +│ 3 │ 3 │ +│ 3 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` + +В результате запроса `SELECT sum(x), y FROM t_null_big GROUP BY y` мы получим: + +``` +┌─sum(x)─┬────y─┐ +│ 4 │ 2 │ +│ 3 │ 3 │ +│ 5 │ ᴺᵁᴸᴸ │ +└────────┴──────┘ +``` + +Видно, что `GROUP BY` для `y = NULL` просуммировал `x`, как будто `NULL` — это значение. + +Если в `GROUP BY` передать несколько ключей, то в результате мы получим все комбинации выборки, как если бы `NULL` был конкретным значением. + #### Модификатор WITH TOTALS Если указан модификатор WITH TOTALS, то будет посчитана ещё одна строчка, в которой в столбцах-ключах будут содержаться значения по умолчанию (нули, пустые строки), а в столбцах агрегатных функций - значения, посчитанные по всем строкам ("тотальные" значения). @@ -537,7 +604,7 @@ GROUP BY вычисляет для каждого встретившегося ### Секция LIMIT N BY -LIMIT N BY COLUMNS выбирает топ N строк для каждой группы COLUMNS. LIMIT N BY не связан с LIMIT и они могут использоваться в одном запросе. Ключ для LIMIT N BY может содержать произвольное число колонок или выражений.
+`LIMIT N BY COLUMNS` выбирает топ `N` строк для каждой группы `COLUMNS`. `LIMIT N BY` не связан с `LIMIT` и они могут использоваться в одном запросе. Ключ для `LIMIT N BY` может содержать произвольное число колонок или выражений. Пример: @@ -556,6 +623,8 @@ LIMIT 100 Запрос выберет топ 5 рефереров для каждой пары `domain, device_type`, но не более 100 строк (`LIMIT n BY + LIMIT`). +`LIMIT n BY` работает с [NULL](syntax.md#null-literal) как если бы это было конкретное значение. Т.е. в результате запроса пользователь получит все комбинации полей, указанных в `BY`. + ### Секция HAVING Позволяет отфильтровать результат, полученный после GROUP BY, аналогично секции WHERE. @@ -575,7 +644,47 @@ WHERE и HAVING отличаются тем, что WHERE выполняется Строки, для которых список выражений, по которым производится сортировка, принимает одинаковые значения, выводятся в произвольном порядке, который может быть также недетерминированным (каждый раз разным). Если секция ORDER BY отсутствует, то, аналогично, порядок, в котором идут строки, не определён, и может быть недетерминированным. -При сортировке чисел с плавающей запятой, NaN-ы идут отдельно от остальных значений. Вне зависимости от порядка сортировки, NaN-ы помещаются в конец. То есть, при сортировке по возрастанию, они как будто больше всех чисел, а при сортировке по убыванию - как будто меньше всех. +Порядок сортировки `NaN` и `NULL`: + +- С модификатором `NULLS FIRST` — Сначала `NULL`, затем `NaN`, затем остальные значения. +- С модификатором `NULLS LAST` — Сначала значения, затем `NaN`, затем `NULL`. +- По умолчанию — Как с модификатором `NULLS LAST`. + +Пример: + +Для таблицы + +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 2 │ +│ 1 │ nan │ +│ 2 │ 2 │ +│ 3 │ 4 │ +│ 5 │ 6 │ +│ 6 │ nan │ +│ 7 │ ᴺᵁᴸᴸ │ +│ 6 │ 7 │ +│ 8 │ 9 │ +└───┴──────┘ +``` + +Выполним запрос `SELECT * FROM t_null_nan ORDER BY y NULLS FIRST`, получим: + +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 7 │ ᴺᵁᴸᴸ │ +│ 1 │ nan │ +│ 6 │ nan │ +│ 2 │ 2 │ +│ 2 │ 2 │ +│ 3 │ 4 │ +│ 5 │ 6 │ +│ 6 │ 7 │ +│ 8 │ 9 │ +└───┴──────┘ +``` Если кроме ORDER BY указан также не слишком большой LIMIT, то расходуется меньше оперативки. Иначе расходуется количество памяти, пропорциональное количеству данных для сортировки. При распределённой обработке запроса, если отсутствует GROUP BY, сортировка частично делается на удалённых серверах, а на сервере-инициаторе запроса производится слияние результатов. Таким образом, при распределённой сортировке, может сортироваться объём данных, превышающий размер памяти на одном сервере. @@ -594,14 +703,16 @@ WHERE и HAVING отличаются тем, что WHERE выполняется ### Секция DISTINCT -Если указано DISTINCT, то из всех множеств полностью совпадающих строк результата, будет оставляться только одна строка. -Результат выполнения будет таким же, как если указано GROUP BY по всем указанным полям в SELECT-е и не указаны агрегатные функции. Но имеется несколько отличий от GROUP BY: +Если указано `DISTINCT`, то из всех множеств полностью совпадающих строк результата, будет оставляться только одна строка. +Результат выполнения будет таким же, как если указано `GROUP BY` по всем указанным полям в `SELECT` и не указаны агрегатные функции. 
Но имеется несколько отличий от `GROUP BY`: -- DISTINCT может применяться совместно с GROUP BY; -- при отсутствии ORDER BY и наличии LIMIT, запрос прекратит выполнение сразу после того, как будет прочитано необходимое количество различных строк - в этом случае использование DISTINCT существенно более оптимально; +- `DISTINCT` может применяться совместно с `GROUP BY`; +- при отсутствии `ORDER BY` и наличии `LIMIT`, запрос прекратит выполнение сразу после того, как будет прочитано необходимое количество различных строк - в этом случае использование `DISTINCT` существенно более оптимально; - блоки данных будут выдаваться по мере их обработки, не дожидаясь выполнения всего запроса. -DISTINCT не поддерживается, если в SELECT-е присутствует хотя бы один столбец типа массив. +`DISTINCT` не поддерживается, если в `SELECT` присутствует хотя бы один столбец типа массив. + +`DISTINCT` работает с [NULL](syntax.md#null-literal), как если бы `NULL` был конкретным значением, причём `NULL=NULL`. Т.е. в результате `DISTINCT` разные комбинации с `NULL` встретятся только по одному разу. ### Секция LIMIT @@ -612,9 +723,11 @@ n и m должны быть неотрицательными целыми чи При отсутствии секции ORDER BY, однозначно сортирующей результат, результат может быть произвольным и может являться недетерминированным. + ### Секция UNION ALL -Произвольное количество запросов может быть объединено с помощью UNION ALL. Пример: +Произвольное количество запросов может быть объединено с помощью `UNION ALL`. Пример: ```sql SELECT CounterID, 1 AS table, toInt64(count()) AS c FROM test.hits GROUP BY CounterID HAVING c > 0 UNION ALL SELECT CounterID, 2 AS table, sum(Sign) AS c FROM test.visits GROUP BY CounterID HAVING c > 0 ``` -Поддерживается только UNION ALL. Обычный UNION (UNION DISTINCT) не поддерживается. Если вам нужен UNION DISTINCT, то вы можете написать SELECT DISTINCT из подзапроса, содержащего UNION ALL. +Поддерживается только `UNION ALL`. Обычный `UNION` (`UNION DISTINCT`) не поддерживается. Если вам нужен `UNION DISTINCT`, то вы можете написать `SELECT DISTINCT` из подзапроса, содержащего `UNION ALL`. -Запросы - части UNION ALL могут выполняться параллельно, и их результаты могут возвращаться вперемешку. +Запросы - части `UNION ALL` могут выполняться параллельно, и их результаты могут возвращаться вперемешку. -Структура результатов (количество и типы столбцов) у запросов должна совпадать. Но имена столбцов могут отличаться. В этом случае, имена столбцов для общего результата будут взяты из первого запроса. +Структура результатов (количество и типы столбцов) у запросов должна совпадать. Но имена столбцов могут отличаться. В этом случае, имена столбцов для общего результата будут взяты из первого запроса. При объединении выполняется приведение типов. Например, если в двух объединяемых запросах одно и тоже поле имеет типы не-`Nullable` и `Nullable` от совместимого типа, то в результате `UNION ALL` получим поле типа `Nullable`. -Запросы - части UNION ALL нельзя заключить в скобки. ORDER BY и LIMIT применяются к отдельным запросам, а не к общему результату. +Запросы - части `UNION ALL` нельзя заключить в скобки. `ORDER BY` и `LIMIT` применяются к отдельным запросам, а не к общему результату.
Если вам нужно применить какое-либо преобразование к общему результату, то вы можете разместить все запросы с `UNION ALL` в подзапросе в секции `FROM`. ### Секция INTO OUTFILE @@ -654,6 +767,8 @@ SELECT CounterID, 2 AS table, sum(Sign) AS c При использовании клиента командной строки данные на клиент передаются во внутреннем эффективном формате. При этом клиент самостоятельно интерпретирует секцию FORMAT запроса и форматирует данные на своей стороне (снимая нагрузку на сеть и сервер). + + ### Операторы IN Операторы `IN`, `NOT IN`, `GLOBAL IN`, `GLOBAL NOT IN` рассматриваются отдельно, так как их функциональность достаточно богатая. @@ -673,6 +788,7 @@ SELECT (CounterID, UserID) IN ((34, 123), (101500, 456)) FROM ... В качестве правой части оператора может быть множество константных выражений, множество кортежей с константными выражениями (показано в примерах выше), а также имя таблицы или подзапрос SELECT в скобках. + Если в качестве правой части оператора указано имя таблицы (например, `UserID IN users`), то это эквивалентно подзапросу `UserID IN (SELECT * FROM users)`. Это используется при работе с внешними данными, отправляемым вместе с запросом. Например, вместе с запросом может быть отправлено множество идентификаторов посетителей, загруженное во временную таблицу users, по которому следует выполнить фильтрацию. Если качестве правой части оператора, указано имя таблицы, имеющий движок Set (подготовленное множество, постоянно находящееся в оперативке), то множество не будет создаваться заново при каждом запросе. @@ -718,17 +834,48 @@ ORDER BY EventDate ASC за каждый день после 17 марта считаем долю хитов, сделанных посетителями, которые заходили на сайт 17 марта. Подзапрос в секции IN на одном сервере всегда выполняется только один раз. Зависимых подзапросов не существует. + +#### Обработка NULL + +При обработке запроса оператор IN будет считать, что результат операции с [NULL](syntax.md#null-literal) всегда равен `0`, независимо от того, находится `NULL` в правой или левой части оператора. Значения `NULL` не входят ни в какое множество, не соответствуют друг другу и не могут сравниваться. + +Рассмотрим для примера таблицу `t_null`: + +``` +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +│ 2 │ 3 │ +└───┴──────┘ +``` + +При выполнении запроса `SELECT x FROM t_null WHERE y IN (NULL,3)` получим следующий результат: + +``` +┌─x─┐ +│ 2 │ +└───┘ +``` + +Видно, что строка, в которой `y = NULL`, выброшена из результатов запроса. Это произошло потому, что ClickHouse не может решить входит ли `NULL` в множество `(NULL,3)`, возвращает результат операции `0`, а `SELECT` выбрасывает эту строку из финальной выдачи. + +``` +SELECT y IN (NULL, 3) +FROM t_null + +┌─in(y, tuple(NULL, 3))─┐ +│ 0 │ +│ 1 │ +└───────────────────────┘ +``` + #### Распределённые подзапросы Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса. -
- -Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../operations/settings/settings.md#settings-distributed_product_mode) `distributed_product_mode`. - -
+!!! attention + Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../operations/settings/settings.md#settings-distributed_product_mode) `distributed_product_mode`. При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`. diff --git a/docs/ru/query_language/syntax.md b/docs/ru/query_language/syntax.md index 42b1987fc70..192b314cfbf 100644 --- a/docs/ru/query_language/syntax.md +++ b/docs/ru/query_language/syntax.md @@ -40,28 +40,26 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') ## Литералы -Бывают числовые, строковые и составные литералы. - -### Числовые литералы +### Числовые Числовой литерал пытается распарситься: -- сначала как 64-битное число без знака, с помощью функции strtoull; -- если не получилось - то как 64-битное число со знаком, с помощью функции strtoll; -- если не получилось - то как число с плавающей запятой, с помощью функции strtod; +- сначала как 64-битное число без знака, с помощью функции `strtoull`; +- если не получилось - то как 64-битное число со знаком, с помощью функции `strtoll`; +- если не получилось - то как число с плавающей запятой, с помощью функции `strtod`; - иначе - ошибка. Соответствующее значение будет иметь тип минимального размера, который вмещает значение. -Например, 1 парсится как UInt8, а 256 - как UInt16. Подробнее смотрите "Типы данных". +Например, 1 парсится как `UInt8`, а 256 - как `UInt16`. Подробнее смотрите раздел [Типы данных](../data_types/index.md#data_types). Примеры: `1`, `18446744073709551615`, `0xDEADBEEF`, `01`, `0.1`, `1e100`, `-1e-100`, `inf`, `nan`. -### Строковые литералы +### Строковые -Поддерживаются только строковые литералы в одинарных кавычках. Символы внутри могут быть экранированы с помощью обратного слеша. Следующие escape-последовательности имеют соответствующее специальное значение: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Во всех остальных случаях, последовательности вида `\c`, где c - любой символ, преобразуется в c. Таким образом, могут быть использованы последовательности `\'` и `\\`. Значение будет иметь тип String. +Поддерживаются только строковые литералы в одинарных кавычках. Символы внутри могут быть экранированы с помощью обратного слеша. Следующие escape-последовательности имеют соответствующее специальное значение: `\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\a`, `\v`, `\xHH`. Во всех остальных случаях, последовательности вида `\c`, где c - любой символ, преобразуется в c. Таким образом, могут быть использованы последовательности `\'` и `\\`. Значение будет иметь тип [String](../data_types/string.md#data_types-string). Минимальный набор символов, которых вам необходимо экранировать в строковых литералах: `'` и `\`. -### Составные литералы +### Составные Поддерживаются конструкции для массивов: `[1, 2, 3]` и кортежей: `(1, 'Hello, world!', 2)`. На самом деле, это вовсе не литералы, а выражение с оператором создания массива и оператором создания кортежа, соответственно. @@ -69,6 +67,20 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def') Массив должен состоять хотя бы из одного элемента, а кортеж - хотя бы из двух. Кортежи носят служебное значение для использования в секции IN запроса SELECT. Кортежи могут быть получены в качестве результата запроса, но не могут быть сохранены в базу (за исключением таблиц типа Memory). + + +### NULL + +Обозначает, что значение отсутствует. 
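+ +Например, внутренний тип значения `NULL` можно увидеть так (набросок; точное форматирование вывода может отличаться): + +``` +SELECT toTypeName(NULL) + +┌─toTypeName(NULL)──┐ +│ Nullable(Nothing) │ +└───────────────────┘ +```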
+ +Чтобы в поле таблицы можно было хранить `NULL`, оно должно быть типа [Nullable](../data_types/nullable.md#data_type-nullable). + +В зависимости от формата данных (входных или выходных) `NULL` может иметь различное представление. Подробнее смотрите в документации для [форматов данных](../interfaces/formats.md#formats). + +При обработке `NULL` есть множество особенностей. Например, если хотя бы один из аргументов операции сравнения — `NULL`, то результатом такой операции тоже будет `NULL`. Этим же свойством обладают операции умножения, сложения и пр. Подробнее читайте в документации на каждую операцию. + +В запросах можно проверить `NULL` с помощью операторов [IS NULL](operators.md#operator-is-null) и [IS NOT NULL](operators.md#operator-is-not-null), а также соответствующих функций `isNull` и `isNotNull`. + ## Функции Функции записываются как идентификатор со списком аргументов (возможно, пустым) в скобках. В отличие от стандартного SQL, даже в случае пустого списка аргументов, скобки обязательны. Пример: `now()`. diff --git a/docs/ru/query_language/table_functions/numbers.md b/docs/ru/query_language/table_functions/numbers.md index 62da54b8d2b..8ff5ae3ac97 100644 --- a/docs/ru/query_language/table_functions/numbers.md +++ b/docs/ru/query_language/table_functions/numbers.md @@ -1,12 +1,15 @@ # numbers -`numbers(N)` - возвращает таблицу с единственным столбцом number (тип UInt64), содержащим натуральные числа от 0 до N-1. +`numbers(N)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `0` до `N-1`. +`numbers(N, M)` - возвращает таблицу с единственным столбцом `number` (UInt64), содержащим натуральные числа от `N` до `(N + M - 1)`. -Так же как и таблица `system.numbers` может использоваться для тестов и генерации последовательных значений. +Так же, как и таблица `system.numbers`, может использоваться для тестов и генерации последовательных значений. Функция `numbers(N, M)` работает более эффективно, чем выборка из `system.numbers`. + +Следующие запросы эквивалентны: -Следующие 2 запроса эквивалентны: ```sql SELECT * FROM numbers(10); +SELECT * FROM numbers(0,10); SELECT * FROM system.numbers LIMIT 10; ``` Примеры: diff --git a/docs/ru/query_language/table_functions/remote.md b/docs/ru/query_language/table_functions/remote.md index 71bf70ba8d8..a2366607fe7 100644 --- a/docs/ru/query_language/table_functions/remote.md +++ b/docs/ru/query_language/table_functions/remote.md @@ -13,11 +13,8 @@ remote('addresses_expr', db.table[, 'user'[, 'password']]) `addresses_expr` - выражение, генерирующее адреса удалённых серверов. Это может быть просто один адрес сервера. Адрес сервера - это `хост:порт`, или только `хост`. Хост может быть указан в виде имени сервера, или в виде IPv4 или IPv6 адреса. IPv6 адрес указывается в квадратных скобках. Порт - TCP-порт удалённого сервера. Если порт не указан, используется `tcp_port` из конфигурационного файла сервера (по умолчанию - 9000). -
- -С IPv6-адресом обязательно указывать порт. - -
+!!! important "Важно" + С IPv6-адресом обязательно нужно указывать порт. Примеры: diff --git a/docs/ru/query_language/table_functions/url.md b/docs/ru/query_language/table_functions/url.md new file mode 100644 index 00000000000..7c5068b3caa --- /dev/null +++ b/docs/ru/query_language/table_functions/url.md @@ -0,0 +1,20 @@ + + +# url + +`url(URL, format, structure)` - возвращает таблицу со столбцами, указанными в +`structure`, созданную из данных, находящихся по `URL`, в формате `format`. + +URL - адрес, по которому сервер принимает `GET` и/или `POST` запросы по +протоколу HTTP или HTTPS. + +format - [формат](../../interfaces/formats.md#formats) данных. + +structure - структура таблицы в форме `'UserID UInt64, Name String'`. Определяет имена и типы столбцов. + +**Пример** + +```sql +-- получение 3-х строк таблицы, состоящей из двух колонок типа String и UInt32, от сервера, отдающего данные в формате CSV +SELECT * FROM url('http://127.0.0.1:12345/', CSV, 'column1 String, column2 UInt32') LIMIT 3 +``` diff --git a/docs/ru/security_changelog.md b/docs/ru/security_changelog.md new file mode 100644 index 00000000000..de3e3b2cbd9 --- /dev/null +++ b/docs/ru/security_changelog.md @@ -0,0 +1,21 @@ +## Исправлено в релизе 1.1.54388 от 28 июня 2018 + +### CVE-2018-14668 +Табличная функция "remote" допускала произвольные символы в полях "user", "password" и "default_database", что позволяло производить атаки класса Cross Protocol Request Forgery. + +Обнаружено благодаря: Андрею Красичкову из Службы Информационной Безопасности Яндекса + +## Исправлено в релизе 1.1.54390 от 6 июля 2018 + +### CVE-2018-14669 +В MySQL-клиенте ClickHouse была включена функциональность "LOAD DATA LOCAL INFILE", что позволяло получать доступ на чтение к произвольным файлам на сервере, где запущен ClickHouse. + +Обнаружено благодаря: Андрею Красичкову и Евгению Сидорову из Службы Информационной Безопасности Яндекса + +## Исправлено в релизе 1.1.54131 от 10 января 2017 + +### CVE-2018-14670 + +Некорректная конфигурация в deb пакете могла привести к неавторизованному доступу к базе данных.
+ +Обнаружено благодаря: the UK's National Cyber Security Centre (NCSC) \ No newline at end of file diff --git a/docs/toc_en.yml b/docs/toc_en.yml index 74c3b59decb..8280b7f2bab 100644 --- a/docs/toc_en.yml +++ b/docs/toc_en.yml @@ -1,13 +1,10 @@ pages: -- 'ClickHouse': 'index.md' - - 'Introduction': - - 'hidden': 'introduction/index.md' + - 'Overview': 'index.md' - 'Distinctive features of ClickHouse': 'introduction/distinctive_features.md' - 'ClickHouse features that can be considered disadvantages': 'introduction/features_considered_disadvantages.md' - - 'The Yandex.Metrica task': 'introduction/ya_metrika_task.md' - - 'Everything you were afraid to ask': 'introduction/possible_silly_questions.md' - 'Performance': 'introduction/performance.md' + - 'The Yandex.Metrica task': 'introduction/ya_metrika_task.md' - 'Getting started': - 'Deploying and running': 'getting_started/index.md' @@ -50,7 +47,7 @@ pages: - 'Expression': 'data_types/special_data_types/expression.md' - 'Set': 'data_types/special_data_types/set.md' -- 'SQL dialect': +- 'SQL reference': - 'hidden': 'query_language/index.md' - 'SELECT': 'query_language/select.md' - 'INSERT INTO': 'query_language/insert_into.md' @@ -96,6 +93,7 @@ pages: - 'merge': 'query_language/table_functions/merge.md' - 'numbers': 'query_language/table_functions/numbers.md' - 'remote': 'query_language/table_functions/remote.md' + - 'url': 'query_language/table_functions/url.md' - 'Dictionaries': - 'Introduction': 'query_language/dicts/index.md' - 'External dictionaries': @@ -108,9 +106,9 @@ pages: - 'Internal dictionaries': 'query_language/dicts/internal_dicts.md' - 'Operators': 'query_language/operators.md' - 'General syntax': 'query_language/syntax.md' - + - 'Operations': - - 'Operations': 'operations/index.md' + - 'hidden': 'operations/index.md' - 'Table engines': - 'Introduction': 'operations/table_engines/index.md' - 'MergeTree family': @@ -136,6 +134,7 @@ pages: - 'Null': 'operations/table_engines/null.md' - 'Set': 'operations/table_engines/set.md' - 'Join': 'operations/table_engines/join.md' + - 'URL': 'operations/table_engines/url.md' - 'View': 'operations/table_engines/view.md' - 'MaterializedView': 'operations/table_engines/materializedview.md' - 'Integrations': @@ -160,7 +159,10 @@ pages: - 'clickhouse-copier': 'operations/utils/clickhouse-copier.md' - 'clickhouse-local': 'operations/utils/clickhouse-local.md' -- 'ClickHouse Development': +- 'F.A.Q.': + - 'General questions': 'faq/general.md' + +- 'Development': - 'hidden': 'development/index.md' - 'Overview of ClickHouse architecture': 'development/architecture.md' - 'How to build ClickHouse on Linux': 'development/build.md' @@ -170,3 +172,4 @@ pages: - 'Roadmap': 'roadmap.md' - 'Changelog': 'changelog.md' +- 'Security changelog': 'security_changelog.md' diff --git a/docs/toc_ru.yml b/docs/toc_ru.yml index 3d1d9da1e01..5ff7ea894fd 100644 --- a/docs/toc_ru.yml +++ b/docs/toc_ru.yml @@ -1,13 +1,11 @@ pages: -- 'ClickHouse': 'index.md' - 'Введение': - - 'hidden': 'introduction/index.md' + - 'Обзор': 'index.md' - 'Отличительные возможности ClickHouse': 'introduction/distinctive_features.md' - 'Особенности ClickHouse, которые могут считаться недостатками': 'introduction/features_considered_disadvantages.md' - - 'Постановка задачи в Яндекс.Метрике': 'introduction/ya_metrika_task.md' - - 'Возможные глупые вопросы': 'introduction/possible_silly_questions.md' - 'Производительность': 'introduction/performance.md' + - 'Постановка задачи в Яндекс.Метрике': 'introduction/ya_metrika_task.md' - 'Начало 
работы': - 'Установка и запуск': 'getting_started/index.md' @@ -42,6 +40,7 @@ pages: - 'Array(T)': 'data_types/array.md' - 'AggregateFunction(name, types_of_arguments...)': 'data_types/nested_data_structures/aggregatefunction.md' - 'Tuple(T1, T2, ...)': 'data_types/tuple.md' + - 'Nullable': 'data_types/nullable.md' - 'Вложенные структуры данных': - 'hidden': 'data_types/nested_data_structures/index.md' - 'Nested(Name1 Type1, Name2 Type2, ...)': 'data_types/nested_data_structures/nested.md' @@ -49,8 +48,9 @@ pages: - 'hidden': 'data_types/special_data_types/index.md' - 'Expression': 'data_types/special_data_types/expression.md' - 'Set': 'data_types/special_data_types/set.md' + - 'Nothing': 'data_types/special_data_types/nothing.md' -- 'SQL диалект': +- 'Справка по SQL': - 'hidden': 'query_language/index.md' - 'SELECT': 'query_language/select.md' - 'INSERT INTO': 'query_language/insert_into.md' @@ -86,7 +86,8 @@ pages: - 'Функции для реализации оператора IN.': 'query_language/functions/in_functions.md' - 'Функция arrayJoin': 'query_language/functions/array_join.md' - 'Функции для работы с географическими координатами': 'query_language/functions/geo.md' - + - 'Функции с Nullable-аргументами': 'query_language/functions/functions_for_nulls.md' + - 'Агрегатные функции': - 'Введение': 'query_language/agg_functions/index.md' - 'Справочник функций': 'query_language/agg_functions/reference.md' @@ -98,6 +99,7 @@ pages: - 'merge': 'query_language/table_functions/merge.md' - 'numbers': 'query_language/table_functions/numbers.md' - 'remote': 'query_language/table_functions/remote.md' + - 'url': 'query_language/table_functions/url.md' - 'Словари': - 'Введение': 'query_language/dicts/index.md' - 'Внешние словари': @@ -110,7 +112,7 @@ pages: - 'Встроенные словари': 'query_language/dicts/internal_dicts.md' - 'Операторы': 'query_language/operators.md' - 'Общий синтаксис': 'query_language/syntax.md' - + - 'Эксплуатация': - 'hidden': 'operations/index.md' @@ -125,13 +127,13 @@ pages: - 'AggregatingMergeTree': 'operations/table_engines/aggregatingmergetree.md' - 'CollapsingMergeTree': 'operations/table_engines/collapsingmergetree.md' - 'GraphiteMergeTree': 'operations/table_engines/graphitemergetree.md' - - 'Для небольших объемов данных': + - 'Для небольших объемов данных': - 'TinyLog': 'operations/table_engines/tinylog.md' - 'Log': 'operations/table_engines/log.md' - 'Memory': 'operations/table_engines/memory.md' - 'Buffer': 'operations/table_engines/buffer.md' - 'Внешние данные': 'operations/table_engines/external_data.md' - - 'Особые': + - 'Особые': - 'Distributed': 'operations/table_engines/distributed.md' - 'Dictionary': 'operations/table_engines/dictionary.md' - 'Merge': 'operations/table_engines/merge.md' - 'Null': 'operations/table_engines/null.md' - 'Set': 'operations/table_engines/set.md' - 'Join': 'operations/table_engines/join.md' + - 'URL': 'operations/table_engines/url.md' - 'View': 'operations/table_engines/view.md' - 'MaterializedView': 'operations/table_engines/materializedview.md' - - 'Интеграции': + - 'Интеграции': - 'Kafka': 'operations/table_engines/kafka.md' - 'MySQL': 'operations/table_engines/mysql.md' - 'Права доступа': 'operations/access_rights.md' @@ -162,7 +165,10 @@ pages: - 'clickhouse-copier': 'operations/utils/clickhouse-copier.md' - 'clickhouse-local': 'operations/utils/clickhouse-local.md' -- 'Разработка ClickHouse': +- 'F.A.Q.': + - 'Общие вопросы': 'faq/general.md' + +- 'Разработка': - 'hidden': 'development/index.md' - 'Overview of ClickHouse
architecture': 'development/architecture.md' - 'Как собрать ClickHouse на Linux': 'development/build.md' @@ -172,3 +178,4 @@ pages: - 'Roadmap': 'roadmap.md' - 'Changelog': 'changelog.md' +- 'Security changelog': 'security_changelog.md' diff --git a/docs/tools/build.py b/docs/tools/build.py index 7e4ffe2c067..ff1551ed8d7 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -60,6 +60,7 @@ def build_for_lang(lang, args): 'static_templates': ['404.html'], 'extra': { 'single_page': False, + 'opposite_lang': 'en' if lang == 'ru' else 'ru', 'search': { 'language': 'en' if lang == 'en' else 'en, %s' % lang } @@ -79,7 +80,11 @@ def build_for_lang(lang, args): repo_url='https://github.com/yandex/ClickHouse/', edit_uri='edit/master/docs/%s' % lang, extra_css=['assets/stylesheets/custom.css'], - markdown_extensions=['codehilite'] + markdown_extensions=[ + 'admonition', + 'attr_list', + 'codehilite' + ] ) mkdocs_build.build(cfg) @@ -104,6 +109,7 @@ def build_single_page_version(lang, args, cfg): 'site_dir': temp, 'extra': { 'single_page': True, + 'opposite_lang': 'en' if lang == 'ru' else 'ru', 'search': { 'language': 'en, ru' } diff --git a/docs/tools/mkdocs-material-theme/assets/stylesheets/application.ac64251e.css b/docs/tools/mkdocs-material-theme/assets/stylesheets/application.ac64251e.css index 1383191d726..2ee94374a47 100644 --- a/docs/tools/mkdocs-material-theme/assets/stylesheets/application.ac64251e.css +++ b/docs/tools/mkdocs-material-theme/assets/stylesheets/application.ac64251e.css @@ -1,2 +1,2 @@ -html{-webkit-box-sizing:border-box;box-sizing:border-box}*,:after,:before{-webkit-box-sizing:inherit;box-sizing:inherit}html{-webkit-text-size-adjust:none;-moz-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}body{margin:0}hr{overflow:visible;-webkit-box-sizing:content-box;box-sizing:content-box}a{-webkit-text-decoration-skip:objects}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}a:active,a:hover{outline-width:0}small,sub,sup{font-size:80%}sub,sup{position:relative;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}table{border-collapse:collapse;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{padding:0;background:transparent;font-size:inherit}button,input{border:0;outline:0}.md-clipboard:before,.md-icon,.md-nav__button,.md-nav__link:after,.md-nav__title:before,.md-search-result__article--document:before,.md-source-file:before,.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before,.md-typeset summary:after{font-family:Material 
Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;speak:none;word-wrap:normal;direction:ltr}.md-content__icon,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before,.md-search-result__article--document:before{display:inline-block;margin:.4rem;padding:.8rem;font-size:2.4rem;cursor:pointer}.md-icon--arrow-back:before{content:"\E5C4"}.md-icon--arrow-forward:before{content:"\E5C8"}.md-icon--menu:before{content:"\E5D2"}.md-icon--search:before{content:"\E8B6"}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","liga";font-feature-settings:"kern","liga";font-family:Helvetica Neue,Helvetica,Arial,sans-serif}code,kbd,pre{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern";font-feature-settings:"kern";font-family:Courier New,Courier,monospace}.md-typeset{font-size:1.6rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 4rem;color:rgba(0,0,0,.54);font-size:3.125rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:4rem 0 1.6rem;font-size:2.5rem;line-height:1.4}.md-typeset h3{margin:3.2rem 0 1.6rem;font-size:2rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:1.6rem}.md-typeset h4{font-size:1.6rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:1.6rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.1rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,93%,.5);color:#37474f;font-size:85%}.md-typeset code{margin:0 .29412em;padding:.07353em 0;border-radius:.2rem;-webkit-box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:none;background-color:inherit;color:inherit;-webkit-box-shadow:none;box-shadow:none}.md-typeset pre{position:relative;margin:1em 0;border-radius:.2rem;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset pre>code{display:block;margin:0;padding:1.05rem 1.2rem;background-color:transparent;font-size:inherit;-webkit-box-shadow:none;box-shadow:none;-webkit-box-decoration-break:none;box-decoration-break:none;overflow:auto}.md-typeset pre>code::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset kbd{padding:0 .29412em;border:.1rem solid #c9c9c9;border-radius:.2rem;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;-webkit-box-shadow:0 .1rem 0 #b0b0b0;box-shadow:0 .1rem 0 #b0b0b0;word-break:break-word}.md-typeset 
mark{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;background-color:rgba(255,235,59,.5);-webkit-box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.1rem dotted rgba(0,0,0,.54);text-decoration:none;cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}.md-typeset blockquote{padding-left:1.2rem;border-left:.4rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}.md-typeset dd{margin:1em 0 1em 1.875em}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset table:not([class]){-webkit-box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);display:inline-block;max-width:100%;border-radius:.2rem;font-size:1.28rem;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}.md-typeset table:not([class]) th{min-width:10rem;padding:1.2rem 1.6rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:1.2rem 1.6rem;border-top:.1rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset__scrollwrap{margin:1em -1.6rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 1.6rem}.md-typeset .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{font-size:62.5%;overflow-x:hidden}body,html{height:100%}body{position:relative}hr{display:block;height:.1rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:122rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;padding-top:4.8rem;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{height:100%;padding-top:3rem;padding-bottom:.1rem}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity 
.25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:3}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}@page{margin:25mm}.md-clipboard{position:absolute;top:.6rem;right:.6rem;width:2.8rem;height:2.8rem;border-radius:.2rem;font-size:1.6rem;cursor:pointer;z-index:1;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-clipboard:before{-webkit-transition:color .25s,opacity .25s;transition:color .25s,opacity .25s;color:rgba(0,0,0,.54);content:"\E14D";opacity:.25}.codehilite:hover .md-clipboard:before,.md-typeset .highlight:hover .md-clipboard:before,pre:hover .md-clipboard:before{opacity:1}.md-clipboard:active:before,.md-clipboard:hover:before{color:#536dfe}.md-clipboard__message{display:block;position:absolute;top:0;right:3.4rem;padding:.6rem 1rem;-webkit-transform:translateX(.8rem);transform:translateX(.8rem);-webkit-transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s;transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);border-radius:.2rem;background-color:rgba(0,0,0,.54);color:#fff;font-size:1.28rem;white-space:nowrap;opacity:0;pointer-events:none}.md-clipboard__message--active{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;pointer-events:auto}.md-clipboard__message:before{content:attr(aria-label)}.md-clipboard__message:after{display:block;position:absolute;top:50%;right:-.4rem;width:0;margin-top:-.4rem;border-width:.4rem 0 .4rem .4rem;border-style:solid;border-color:transparent rgba(0,0,0,.54);content:""}.md-content__inner{margin:0 1.6rem 2.4rem;padding-top:1.2rem}.md-content__inner:before{display:block;height:.8rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__icon{position:relative;margin:.8rem 0;padding:0;float:right}.md-typeset .md-content__icon{color:rgba(0,0,0,.26)}.md-header{position:fixed;top:0;right:0;left:0;height:4.8rem;-webkit-transition:background-color .25s,color .25s;transition:background-color .25s,color .25s;background-color:#3f51b5;color:#fff;z-index:2;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-header,.no-js .md-header{-webkit-box-shadow:none;box-shadow:none}.md-header-nav{padding:0 .4rem}.md-header-nav__button{position:relative;-webkit-transition:opacity .25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo *{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__topic{display:block;position:absolute;-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s 
cubic-bezier(.1,.7,.1,1);text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(2.5rem);transform:translateX(2.5rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}.no-js .md-header-nav__topic{position:static}.md-header-nav__title{padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-header-nav__title[data-md-state=active] .md-header-nav__topic{-webkit-transform:translateX(-2.5rem);transform:translateX(-2.5rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}.md-header-nav__title[data-md-state=active] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);opacity:1;z-index:0;pointer-events:auto}.md-header-nav__source{display:none}.md-hero{-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;font-size:2rem;overflow:hidden}.md-hero__inner{margin-top:2rem;padding:1.6rem 1.6rem .8rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);-webkit-transition-delay:.1s;transition-delay:.1s}[data-md-state=hidden] .md-hero__inner{pointer-events:none;-webkit-transform:translateY(1.25rem);transform:translateY(1.25rem);-webkit-transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:transform 0s .4s,opacity .1s 0s;transition:transform 0s .4s,opacity .1s 0s,-webkit-transform 0s .4s;opacity:0}.md-hero--expand .md-hero__inner{margin-bottom:2.4rem}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.4rem;overflow:auto}.md-footer-nav__link{padding-top:2.8rem;padding-bottom:.8rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}.md-footer-nav__link--next{width:75%;float:right;text-align:right}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-2rem;padding:0 2rem;color:hsla(0,0%,100%,.7);font-size:1.5rem}.md-footer-meta{background-color:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.4rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}html 
.md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:#fff}.md-footer-copyright{margin:0 1.2rem;padding:.8rem 0;color:hsla(0,0%,100%,.3);font-size:1.28rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .8rem;padding:.4rem 0 1.2rem}.md-footer-social__link{display:inline-block;width:3.2rem;height:3.2rem;font-size:1.6rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:1.4rem;line-height:1.3}.md-nav--secondary .md-nav__link--active{color:#3f51b5}.md-nav__title{display:block;padding:0 1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"\E5C4"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 1.2rem}.md-nav__item:last-child{padding-bottom:1.2rem}.md-nav__item .md-nav__item{padding-right:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;margin-top:.625em;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"\E313"}html .md-nav__link[for=toc],html .md-nav__link[for=toc]+.md-nav__link:after,html .md-nav__link[for=toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link:active{color:#3f51b5}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 4.8rem 0 7.2rem;text-overflow:ellipsis;z-index:2}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-webkit-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;font-size:2.4rem;cursor:pointer;z-index:2}.md-search__icon:hover{opacity:.7}.md-search__icon[for=search]{top:.6rem;left:1rem}.md-search__icon[for=search]:before{content:"\E8B6"}.md-search__icon[type=reset]{top:.6rem;right:1rem;-webkit-transform:scale(.125);transform:scale(.125);-webkit-transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);opacity:0}[data-md-toggle=search]:checked~.md-header 
.md-search__input:valid~.md-search__icon[type=reset]{-webkit-transform:scale(1);transform:scale(1);opacity:1}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;width:100%;border-radius:0 0 .2rem .2rem;overflow:hidden;z-index:1}.md-search__scrollwrap{height:100%;background-color:#fff;-webkit-box-shadow:0 .1rem 0 rgba(0,0,0,.07) inset;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result{color:rgba(0,0,0,.87);word-break:break-word}.md-search-result__meta{padding:0 1.6rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:1.28rem;line-height:3.6rem}.md-search-result__list{margin:0;padding:0;border-top:.1rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{-webkit-box-shadow:0 -.1rem 0 rgba(0,0,0,.07);box-shadow:0 -.1rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;-webkit-transition:background .25s;transition:background .25s;outline:0;overflow:hidden}.md-search-result__link:hover,.md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}.md-search-result__link:hover .md-search-result__article:before,.md-search-result__link[data-md-state=active] .md-search-result__article:before{opacity:.7}.md-search-result__link:last-child .md-search-result__teaser{margin-bottom:1.2rem}.md-search-result__article{position:relative;padding:0 1.6rem;overflow:auto}.md-search-result__article--document:before{position:absolute;left:0;margin:.2rem;-webkit-transition:opacity .25s;transition:opacity .25s;color:rgba(0,0,0,.54);content:"\E880"}.md-search-result__article--document .md-search-result__title{margin:1.1rem 0;font-size:1.6rem;font-weight:400;line-height:1.4}.md-search-result__title{margin:.5em 0;font-size:1.28rem;font-weight:700;line-height:1.4}.md-search-result__teaser{display:-webkit-box;max-height:3.3rem;margin:.5em 0;color:rgba(0,0,0,.54);font-size:1.28rem;line-height:1.4;text-overflow:ellipsis;overflow:hidden;-webkit-box-orient:vertical;-webkit-line-clamp:2}.md-search-result em{font-style:normal;font-weight:700;text-decoration:underline}.md-sidebar{position:absolute;width:24.2rem;padding:2.4rem 0;overflow:hidden}.md-sidebar[data-md-state=lock]{position:fixed;top:4.8rem}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .4rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:1.3rem}}@keyframes md-source__facts--done{0%{height:0}to{height:1.3rem}}@-webkit-keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;padding-right:1.2rem;-webkit-transition:opacity .25s;transition:opacity 
.25s;font-size:1.3rem;line-height:1.2;white-space:nowrap}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:4.8rem;content:"";vertical-align:middle}.md-source__icon{width:4.8rem}.md-source__icon svg{width:2.4rem;height:2.4rem;margin-top:1.2rem;margin-left:1.2rem}.md-source__icon+.md-source__repository{margin-left:-4.4rem;padding-left:4rem}.md-source__repository{display:inline-block;max-width:100%;margin-left:1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:1.1rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{float:left}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{margin:0 .2rem;content:"\B7"}.md-source__fact:first-child:before{display:none}.md-source-file{display:inline-block;margin:1em .5em 1em 0;padding-right:.5rem;border-radius:.2rem;background-color:rgba(0,0,0,.07);font-size:1.28rem;list-style-type:none;cursor:pointer;overflow:hidden}.md-source-file:before{display:inline-block;margin-right:.5rem;padding:.5rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:1.6rem;content:"\E86F";vertical-align:middle}html .md-source-file{-webkit-transition:background .4s,color .4s,-webkit-box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,-webkit-box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .4s cubic-bezier(.4,0,.2,1)}html .md-source-file:before{-webkit-transition:inherit;transition:inherit}html body .md-typeset .md-source-file{color:rgba(0,0,0,.54)}.md-source-file:hover{-webkit-box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36);box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36)}.md-source-file:hover:before{background-color:#536dfe}.md-tabs{width:100%;-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;overflow:auto}.md-tabs__list{margin:0;margin-left:.4rem;padding:0;list-style:none;white-space:nowrap}.md-tabs__item{display:inline-block;height:4.8rem;padding-right:1.2rem;padding-left:1.2rem}.md-tabs__link{display:block;margin-top:1.6rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);font-size:1.4rem;opacity:.7}.md-tabs__link--active,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{-webkit-transition-delay:.02s;transition-delay:.02s}.md-tabs__item:nth-child(3) .md-tabs__link{-webkit-transition-delay:.04s;transition-delay:.04s}.md-tabs__item:nth-child(4) .md-tabs__link{-webkit-transition-delay:.06s;transition-delay:.06s}.md-tabs__item:nth-child(5) .md-tabs__link{-webkit-transition-delay:.08s;transition-delay:.08s}.md-tabs__item:nth-child(6) .md-tabs__link{-webkit-transition-delay:.1s;transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{-webkit-transition-delay:.12s;transition-delay:.12s}.md-tabs__item:nth-child(8) 
.md-tabs__link{-webkit-transition-delay:.14s;transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{-webkit-transition-delay:.16s;transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{-webkit-transition-delay:.18s;transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{-webkit-transition-delay:.2s;transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{-webkit-transition-delay:.22s;transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{-webkit-transition-delay:.24s;transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{-webkit-transition-delay:.26s;transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{-webkit-transition-delay:.28s;transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{-webkit-transition-delay:.3s;transition-delay:.3s}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs[data-md-state=hidden] .md-tabs__link{-webkit-transform:translateY(50%);transform:translateY(50%);-webkit-transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,transform 0s .4s,opacity .1s;transition:color .25s,transform 0s .4s,opacity .1s,-webkit-transform 0s .4s;opacity:0}.md-typeset .admonition,.md-typeset details{-webkit-box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:relative;margin:1.5625em 0;padding:1.2rem 1.2rem 0;border-left:.4rem solid #448aff;border-radius:.2rem;font-size:1.28rem}.md-typeset .admonition :first-child,.md-typeset details :first-child{margin-top:0}html .md-typeset .admonition :last-child,html .md-typeset details :last-child{margin-bottom:0;padding-bottom:1.2rem}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin:1em 0}.md-typeset .admonition>.admonition-title,.md-typeset .admonition>summary,.md-typeset details>.admonition-title,.md-typeset details>summary{margin:-1.2rem -1.2rem 0;padding:.8rem 1.2rem .8rem 4rem;border-bottom:.1rem solid rgba(68,138,255,.1);background-color:rgba(68,138,255,.1);font-weight:700}html .md-typeset .admonition>.admonition-title,html .md-typeset .admonition>summary,html .md-typeset details>.admonition-title,html .md-typeset details>summary{padding-bottom:.8rem}.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before{position:absolute;left:1.2rem;color:#448aff;font-size:2rem;content:"\E3C9"}.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.summary,.md-typeset details.tldr{border-left:.4rem solid #00b0ff}.md-typeset .admonition.summary>.admonition-title,.md-typeset .admonition.summary>summary,.md-typeset .admonition.tldr>.admonition-title,.md-typeset .admonition.tldr>summary,.md-typeset details.summary>.admonition-title,.md-typeset details.summary>summary,.md-typeset details.tldr>.admonition-title,.md-typeset details.tldr>summary{border-bottom:.1rem solid rgba(0,176,255,.1);background-color:rgba(0,176,255,.1)}.md-typeset .admonition.summary>.admonition-title:before,.md-typeset .admonition.summary>summary:before,.md-typeset .admonition.tldr>.admonition-title:before,.md-typeset .admonition.tldr>summary:before,.md-typeset details.summary>.admonition-title:before,.md-typeset details.summary>summary:before,.md-typeset 
details.tldr>.admonition-title:before,.md-typeset details.tldr>summary:before{color:#00b0ff;content:"\E8D2"}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-left:.4rem solid #00b8d4}.md-typeset .admonition.info>.admonition-title,.md-typeset .admonition.info>summary,.md-typeset .admonition.todo>.admonition-title,.md-typeset .admonition.todo>summary,.md-typeset details.info>.admonition-title,.md-typeset details.info>summary,.md-typeset details.todo>.admonition-title,.md-typeset details.todo>summary{border-bottom:.1rem solid rgba(0,184,212,.1);background-color:rgba(0,184,212,.1)}.md-typeset .admonition.info>.admonition-title:before,.md-typeset .admonition.info>summary:before,.md-typeset .admonition.todo>.admonition-title:before,.md-typeset .admonition.todo>summary:before,.md-typeset details.info>.admonition-title:before,.md-typeset details.info>summary:before,.md-typeset details.todo>.admonition-title:before,.md-typeset details.todo>summary:before{color:#00b8d4;content:"\E88E"}.md-typeset .admonition.hint,.md-typeset .admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-left:.4rem solid #00bfa5}.md-typeset .admonition.hint>.admonition-title,.md-typeset .admonition.hint>summary,.md-typeset .admonition.important>.admonition-title,.md-typeset .admonition.important>summary,.md-typeset .admonition.tip>.admonition-title,.md-typeset .admonition.tip>summary,.md-typeset details.hint>.admonition-title,.md-typeset details.hint>summary,.md-typeset details.important>.admonition-title,.md-typeset details.important>summary,.md-typeset details.tip>.admonition-title,.md-typeset details.tip>summary{border-bottom:.1rem solid rgba(0,191,165,.1);background-color:rgba(0,191,165,.1)}.md-typeset .admonition.hint>.admonition-title:before,.md-typeset .admonition.hint>summary:before,.md-typeset .admonition.important>.admonition-title:before,.md-typeset .admonition.important>summary:before,.md-typeset .admonition.tip>.admonition-title:before,.md-typeset .admonition.tip>summary:before,.md-typeset details.hint>.admonition-title:before,.md-typeset details.hint>summary:before,.md-typeset details.important>.admonition-title:before,.md-typeset details.important>summary:before,.md-typeset details.tip>.admonition-title:before,.md-typeset details.tip>summary:before{color:#00bfa5;content:"\E80E"}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset .admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-left:.4rem solid #00c853}.md-typeset .admonition.check>.admonition-title,.md-typeset .admonition.check>summary,.md-typeset .admonition.done>.admonition-title,.md-typeset .admonition.done>summary,.md-typeset .admonition.success>.admonition-title,.md-typeset .admonition.success>summary,.md-typeset details.check>.admonition-title,.md-typeset details.check>summary,.md-typeset details.done>.admonition-title,.md-typeset details.done>summary,.md-typeset details.success>.admonition-title,.md-typeset details.success>summary{border-bottom:.1rem solid rgba(0,200,83,.1);background-color:rgba(0,200,83,.1)}.md-typeset .admonition.check>.admonition-title:before,.md-typeset .admonition.check>summary:before,.md-typeset .admonition.done>.admonition-title:before,.md-typeset .admonition.done>summary:before,.md-typeset .admonition.success>.admonition-title:before,.md-typeset .admonition.success>summary:before,.md-typeset 
details.check>.admonition-title:before,.md-typeset details.check>summary:before,.md-typeset details.done>.admonition-title:before,.md-typeset details.done>summary:before,.md-typeset details.success>.admonition-title:before,.md-typeset details.success>summary:before{color:#00c853;content:"\E876"}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-left:.4rem solid #64dd17}.md-typeset .admonition.faq>.admonition-title,.md-typeset .admonition.faq>summary,.md-typeset .admonition.help>.admonition-title,.md-typeset .admonition.help>summary,.md-typeset .admonition.question>.admonition-title,.md-typeset .admonition.question>summary,.md-typeset details.faq>.admonition-title,.md-typeset details.faq>summary,.md-typeset details.help>.admonition-title,.md-typeset details.help>summary,.md-typeset details.question>.admonition-title,.md-typeset details.question>summary{border-bottom:.1rem solid rgba(100,221,23,.1);background-color:rgba(100,221,23,.1)}.md-typeset .admonition.faq>.admonition-title:before,.md-typeset .admonition.faq>summary:before,.md-typeset .admonition.help>.admonition-title:before,.md-typeset .admonition.help>summary:before,.md-typeset .admonition.question>.admonition-title:before,.md-typeset .admonition.question>summary:before,.md-typeset details.faq>.admonition-title:before,.md-typeset details.faq>summary:before,.md-typeset details.help>.admonition-title:before,.md-typeset details.help>summary:before,.md-typeset details.question>.admonition-title:before,.md-typeset details.question>summary:before{color:#64dd17;content:"\E887"}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-left:.4rem solid #ff9100}.md-typeset .admonition.attention>.admonition-title,.md-typeset .admonition.attention>summary,.md-typeset .admonition.caution>.admonition-title,.md-typeset .admonition.caution>summary,.md-typeset .admonition.warning>.admonition-title,.md-typeset .admonition.warning>summary,.md-typeset details.attention>.admonition-title,.md-typeset details.attention>summary,.md-typeset details.caution>.admonition-title,.md-typeset details.caution>summary,.md-typeset details.warning>.admonition-title,.md-typeset details.warning>summary{border-bottom:.1rem solid rgba(255,145,0,.1);background-color:rgba(255,145,0,.1)}.md-typeset .admonition.attention>.admonition-title:before,.md-typeset .admonition.attention>summary:before,.md-typeset .admonition.caution>.admonition-title:before,.md-typeset .admonition.caution>summary:before,.md-typeset .admonition.warning>.admonition-title:before,.md-typeset .admonition.warning>summary:before,.md-typeset details.attention>.admonition-title:before,.md-typeset details.attention>summary:before,.md-typeset details.caution>.admonition-title:before,.md-typeset details.caution>summary:before,.md-typeset details.warning>.admonition-title:before,.md-typeset details.warning>summary:before{color:#ff9100;content:"\E002"}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-left:.4rem solid #ff5252}.md-typeset .admonition.fail>.admonition-title,.md-typeset .admonition.fail>summary,.md-typeset .admonition.failure>.admonition-title,.md-typeset .admonition.failure>summary,.md-typeset 
.admonition.missing>.admonition-title,.md-typeset .admonition.missing>summary,.md-typeset details.fail>.admonition-title,.md-typeset details.fail>summary,.md-typeset details.failure>.admonition-title,.md-typeset details.failure>summary,.md-typeset details.missing>.admonition-title,.md-typeset details.missing>summary{border-bottom:.1rem solid rgba(255,82,82,.1);background-color:rgba(255,82,82,.1)}.md-typeset .admonition.fail>.admonition-title:before,.md-typeset .admonition.fail>summary:before,.md-typeset .admonition.failure>.admonition-title:before,.md-typeset .admonition.failure>summary:before,.md-typeset .admonition.missing>.admonition-title:before,.md-typeset .admonition.missing>summary:before,.md-typeset details.fail>.admonition-title:before,.md-typeset details.fail>summary:before,.md-typeset details.failure>.admonition-title:before,.md-typeset details.failure>summary:before,.md-typeset details.missing>.admonition-title:before,.md-typeset details.missing>summary:before{color:#ff5252;content:"\E14C"}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset details.danger,.md-typeset details.error{border-left:.4rem solid #ff1744}.md-typeset .admonition.danger>.admonition-title,.md-typeset .admonition.danger>summary,.md-typeset .admonition.error>.admonition-title,.md-typeset .admonition.error>summary,.md-typeset details.danger>.admonition-title,.md-typeset details.danger>summary,.md-typeset details.error>.admonition-title,.md-typeset details.error>summary{border-bottom:.1rem solid rgba(255,23,68,.1);background-color:rgba(255,23,68,.1)}.md-typeset .admonition.danger>.admonition-title:before,.md-typeset .admonition.danger>summary:before,.md-typeset .admonition.error>.admonition-title:before,.md-typeset .admonition.error>summary:before,.md-typeset details.danger>.admonition-title:before,.md-typeset details.danger>summary:before,.md-typeset details.error>.admonition-title:before,.md-typeset details.error>summary:before{color:#ff1744;content:"\E3E7"}.md-typeset .admonition.bug,.md-typeset details.bug{border-left:.4rem solid #f50057}.md-typeset .admonition.bug>.admonition-title,.md-typeset .admonition.bug>summary,.md-typeset details.bug>.admonition-title,.md-typeset details.bug>summary{border-bottom:.1rem solid rgba(245,0,87,.1);background-color:rgba(245,0,87,.1)}.md-typeset .admonition.bug>.admonition-title:before,.md-typeset .admonition.bug>summary:before,.md-typeset details.bug>.admonition-title:before,.md-typeset details.bug>summary:before{color:#f50057;content:"\E868"}.md-typeset .admonition.cite,.md-typeset .admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-left:.4rem solid #9e9e9e}.md-typeset .admonition.cite>.admonition-title,.md-typeset .admonition.cite>summary,.md-typeset .admonition.quote>.admonition-title,.md-typeset .admonition.quote>summary,.md-typeset details.cite>.admonition-title,.md-typeset details.cite>summary,.md-typeset details.quote>.admonition-title,.md-typeset details.quote>summary{border-bottom:.1rem solid hsla(0,0%,62%,.1);background-color:hsla(0,0%,62%,.1)}.md-typeset .admonition.cite>.admonition-title:before,.md-typeset .admonition.cite>summary:before,.md-typeset .admonition.quote>.admonition-title:before,.md-typeset .admonition.quote>summary:before,.md-typeset details.cite>.admonition-title:before,.md-typeset details.cite>summary:before,.md-typeset details.quote>.admonition-title:before,.md-typeset details.quote>summary:before{color:#9e9e9e;content:"\E244"}.codehilite .o,.codehilite .ow,.md-typeset .highlight .o,.md-typeset 
.highlight .ow{color:inherit}.codehilite .ge,.md-typeset .highlight .ge{color:#000}.codehilite .gr,.md-typeset .highlight .gr{color:#a00}.codehilite .gh,.md-typeset .highlight .gh{color:#999}.codehilite .go,.md-typeset .highlight .go{color:#888}.codehilite .gp,.md-typeset .highlight .gp{color:#555}.codehilite .gs,.md-typeset .highlight .gs{color:inherit}.codehilite .gu,.md-typeset .highlight .gu{color:#aaa}.codehilite .gt,.md-typeset .highlight .gt{color:#a00}.codehilite .gd,.md-typeset .highlight .gd{background-color:#fdd}.codehilite .gi,.md-typeset .highlight .gi{background-color:#dfd}.codehilite .k,.md-typeset .highlight .k{color:#3b78e7}.codehilite .kc,.md-typeset .highlight .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn,.md-typeset .highlight .kd,.md-typeset .highlight .kn{color:#3b78e7}.codehilite .kp,.md-typeset .highlight .kp{color:#a71d5d}.codehilite .kr,.codehilite .kt,.md-typeset .highlight .kr,.md-typeset .highlight .kt{color:#3e61a2}.codehilite .c,.codehilite .cm,.md-typeset .highlight .c,.md-typeset .highlight .cm{color:#999}.codehilite .cp,.md-typeset .highlight .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs,.md-typeset .highlight .c1,.md-typeset .highlight .ch,.md-typeset .highlight .cs{color:#999}.codehilite .na,.codehilite .nb,.md-typeset .highlight .na,.md-typeset .highlight .nb{color:#c2185b}.codehilite .bp,.md-typeset .highlight .bp{color:#3e61a2}.codehilite .nc,.md-typeset .highlight .nc{color:#c2185b}.codehilite .no,.md-typeset .highlight .no{color:#3e61a2}.codehilite .nd,.codehilite .ni,.md-typeset .highlight .nd,.md-typeset .highlight .ni{color:#666}.codehilite .ne,.codehilite .nf,.md-typeset .highlight .ne,.md-typeset .highlight .nf{color:#c2185b}.codehilite .nl,.md-typeset .highlight .nl{color:#3b5179}.codehilite .nn,.md-typeset .highlight .nn{color:#ec407a}.codehilite .nt,.md-typeset .highlight .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi,.md-typeset .highlight .nv,.md-typeset .highlight .vc,.md-typeset .highlight .vg,.md-typeset .highlight .vi{color:#3e61a2}.codehilite .nx,.md-typeset .highlight .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo,.md-typeset .highlight .il,.md-typeset .highlight .m,.md-typeset .highlight .mf,.md-typeset .highlight .mh,.md-typeset .highlight .mi,.md-typeset .highlight .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc,.md-typeset .highlight .s,.md-typeset .highlight .sb,.md-typeset .highlight .sc{color:#0d904f}.codehilite .sd,.md-typeset .highlight .sd{color:#999}.codehilite .s2,.md-typeset .highlight .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx,.md-typeset .highlight .se,.md-typeset .highlight .sh,.md-typeset .highlight .si,.md-typeset .highlight .sx{color:#183691}.codehilite .sr,.md-typeset .highlight .sr{color:#009926}.codehilite .s1,.codehilite .ss,.md-typeset .highlight .s1,.md-typeset .highlight .ss{color:#0d904f}.codehilite .err,.md-typeset .highlight .err{color:#a61717}.codehilite .w,.md-typeset .highlight .w{color:transparent}.codehilite .hll,.md-typeset .highlight .hll{display:block;margin:0 -1.2rem;padding:0 1.2rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilite,.md-typeset .highlight{position:relative;margin:1em 0;padding:0;border-radius:.2rem;background-color:hsla(0,0%,93%,.5);color:#37474f;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset .codehilite code,.md-typeset .codehilite pre,.md-typeset .highlight code,.md-typeset 
.highlight pre{display:block;margin:0;padding:1.05rem 1.2rem;background-color:transparent;overflow:auto;vertical-align:top}.md-typeset .codehilite code::-webkit-scrollbar,.md-typeset .codehilite pre::-webkit-scrollbar,.md-typeset .highlight code::-webkit-scrollbar,.md-typeset .highlight pre::-webkit-scrollbar{width:.4rem;height:.4rem}.md-typeset .codehilite code::-webkit-scrollbar-thumb,.md-typeset .codehilite pre::-webkit-scrollbar-thumb,.md-typeset .highlight code::-webkit-scrollbar-thumb,.md-typeset .highlight pre::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset .codehilite code::-webkit-scrollbar-thumb:hover,.md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,.md-typeset .highlight code::-webkit-scrollbar-thumb:hover,.md-typeset .highlight pre::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset pre.codehilite,.md-typeset pre.highlight{overflow:visible}.md-typeset pre.codehilite code,.md-typeset pre.highlight code{display:block;padding:1.05rem 1.2rem;overflow:auto}.md-typeset .codehilitetable{display:block;margin:1em 0;border-radius:.2em;font-size:1.6rem;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td{display:block;padding:0}.md-typeset .codehilitetable tr{display:-webkit-box;display:-ms-flexbox;display:flex}.md-typeset .codehilitetable .codehilite,.md-typeset .codehilitetable .highlight,.md-typeset .codehilitetable .linenodiv{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv{padding:1.05rem 1.2rem}.md-typeset .codehilitetable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre{margin:0;padding:0;background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable{-webkit-box-shadow:none;box-shadow:none}.md-typeset [id^="fnref:"]{display:inline-block}.md-typeset [id^="fnref:"]:target{margin-top:-7.6rem;padding-top:7.6rem;pointer-events:none}.md-typeset [id^="fn:"]:before{display:none;height:0;content:""}.md-typeset [id^="fn:"]:target:before{display:block;margin-top:-7rem;padding-top:7rem;pointer-events:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-ref{display:inline-block;pointer-events:auto}.md-typeset .footnote-ref:before{display:inline;margin:0 .2em;border-left:.1rem solid rgba(0,0,0,.26);font-size:1.25em;content:"";vertical-align:-.5rem}.md-typeset .footnote-backref{display:inline-block;-webkit-transform:translateX(.5rem);transform:translateX(.5rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s 
.125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}.md-typeset .footnote-backref:before{font-size:1.6rem;content:"\E31B"}.md-typeset .headerlink{display:inline-block;margin-left:1rem;-webkit-transform:translateY(.5rem);transform:translateY(.5rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset h1[id] .headerlink{display:none}.md-typeset h2[id]:before{display:block;margin-top:-.8rem;padding-top:.8rem;content:""}.md-typeset h2[id]:target:before{margin-top:-6.8rem;padding-top:6.8rem}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink,.md-typeset h2[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink:hover,.md-typeset h2[id]:target .headerlink{color:#536dfe}.md-typeset h3[id]:before{display:block;margin-top:-.9rem;padding-top:.9rem;content:""}.md-typeset h3[id]:target:before{margin-top:-6.9rem;padding-top:6.9rem}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink,.md-typeset h3[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink:hover,.md-typeset h3[id]:target .headerlink{color:#536dfe}.md-typeset h4[id]:before{display:block;margin-top:-.9rem;padding-top:.9rem;content:""}.md-typeset h4[id]:target:before{margin-top:-6.9rem;padding-top:6.9rem}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink,.md-typeset h4[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink:hover,.md-typeset h4[id]:target .headerlink{color:#536dfe}.md-typeset h5[id]:before{display:block;margin-top:-1.1rem;padding-top:1.1rem;content:""}.md-typeset h5[id]:target:before{margin-top:-7.1rem;padding-top:7.1rem}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink,.md-typeset h5[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink:hover,.md-typeset h5[id]:target .headerlink{color:#536dfe}.md-typeset h6[id]:before{display:block;margin-top:-1.1rem;padding-top:1.1rem;content:""}.md-typeset h6[id]:target:before{margin-top:-7.1rem;padding-top:7.1rem}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink,.md-typeset h6[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink:hover,.md-typeset h6[id]:target .headerlink{color:#536dfe}.md-typeset .MJXc-display{margin:.75em 0;padding:.75em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;-webkit-box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;-webkit-box-shadow:.25em 0 0 
#dfd,-.25em 0 0 #dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,93%,.5);color:#37474f;-webkit-box-shadow:.25em 0 0 hsla(0,0%,93%,.5),-.25em 0 0 hsla(0,0%,93%,.5);box-shadow:.25em 0 0 hsla(0,0%,93%,.5),-.25em 0 0 hsla(0,0%,93%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"\E0B7";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:1.6rem;padding-left:1.6rem;-webkit-box-shadow:none;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset details{padding-top:0}.md-typeset details[open]>summary:after{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.md-typeset details:not([open]){padding-bottom:0}.md-typeset details:not([open])>summary{border-bottom:none}.md-typeset details summary{padding-right:4rem}.no-details .md-typeset details:not([open])>*{display:none}.no-details .md-typeset details:not([open]) summary{display:block}.md-typeset summary{display:block;outline:none;cursor:pointer}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset summary:after{position:absolute;top:.8rem;right:1.2rem;color:rgba(0,0,0,.26);font-size:2rem;content:"\E313"}.md-typeset .emojione{width:2rem;vertical-align:text-top}.md-typeset code.codehilite,.md-typeset code.highlight{margin:0 .29412em;padding:.07353em 0}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}.md-typeset .task-list-control .task-list-indicator:before{position:absolute;top:.15em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.25em;content:"\E835";vertical-align:-.25em}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"\E834"}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code,.md-typeset pre{white-space:pre-wrap}.md-typeset code{-webkit-box-shadow:none;box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-clipboard,.md-content__icon,.md-footer,.md-header,.md-sidebar,.md-tabs,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset pre{margin:1em -1.6rem;border-radius:0}.md-typeset pre>code{padding:1.05rem 1.6rem}.md-footer-nav__link--prev .md-footer-nav__title{display:none}.md-search-result__teaser{max-height:5rem;-webkit-line-clamp:3}.codehilite .hll,.md-typeset .highlight .hll{margin:0 -1.6rem;padding:0 1.6rem}.md-typeset>.codehilite,.md-typeset>.highlight{margin:1em -1.6rem;border-radius:0}.md-typeset>.codehilite code,.md-typeset>.codehilite pre,.md-typeset>.highlight code,.md-typeset>.highlight pre{padding:1.05rem 1.6rem}.md-typeset>.codehilitetable{margin:1em -1.6rem;border-radius:0}.md-typeset>.codehilitetable .codehilite>code,.md-typeset>.codehilitetable .codehilite>pre,.md-typeset>.codehilitetable .highlight>code,.md-typeset>.codehilitetable .highlight>pre,.md-typeset>.codehilitetable .linenodiv{padding:1rem 1.6rem}.md-typeset>p>.MJXc-display{margin:.75em -1.6rem;padding:.25em 1.6rem}}@media only screen and (min-width:100em){html{font-size:68.75%}}@media only screen and (min-width:125em){html{font-size:75%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}html 
.md-nav__link[for=toc]{display:block;padding-right:4.8rem}html .md-nav__link[for=toc]:after{color:inherit;content:"\E8DE"}html .md-nav__link[for=toc]+.md-nav__link{display:none}html .md-nav__link[for=toc]~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.md-nav__source{display:block;padding:0 .4rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{position:absolute;top:.4rem;left:.4rem;width:3.6rem;height:3.6rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:2rem;background-color:#fff;overflow:hidden;pointer-events:none}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity .1s,-webkit-transform .4s;opacity:1}.md-search__inner{position:fixed;top:0;left:100%;width:100%;height:100%;-webkit-transform:translateX(5%);transform:translateX(5%);-webkit-transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;opacity:0;z-index:2}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;opacity:1}.md-search__input{width:100%;height:4.8rem;font-size:1.8rem}.md-search__icon[for=search]{top:1.2rem;left:1.6rem}.md-search__icon[for=search][for=search]:before{content:"\E5C4"}.md-search__icon[type=reset]{top:1.2rem;right:1.6rem}.md-search__output{top:4.8rem;bottom:0}.md-search-result__article--document:before{display:none}}@media only screen and (max-width:76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-header-nav__button.md-icon--home,.md-header-nav__button.md-logo{display:none}.md-hero__inner{margin-top:4.8rem;margin-bottom:2.4rem}.md-nav{background-color:#fff}.md-nav--primary,.md-nav--primary .md-nav{display:-webkit-box;display:-ms-flexbox;display:flex;position:absolute;top:0;right:0;left:0;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;height:100%;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:1.6rem;line-height:1.5}html .md-nav--primary .md-nav__title{position:relative;height:11.2rem;padding:6rem 1.6rem .4rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-weight:400;line-height:4.8rem;white-space:nowrap;cursor:pointer}html .md-nav--primary 
.md-nav__title:before{display:block;position:absolute;top:.4rem;left:.4rem;width:4rem;height:4rem;color:rgba(0,0,0,.54)}html .md-nav--primary .md-nav__title~.md-nav__list{background-color:#fff;-webkit-box-shadow:0 .1rem 0 rgba(0,0,0,.07) inset;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07)}html .md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}html .md-nav--primary .md-nav__title--site{position:relative;background-color:#3f51b5;color:#fff}html .md-nav--primary .md-nav__title--site .md-nav__button{display:block;position:absolute;top:.4rem;left:.4rem;width:6.4rem;height:6.4rem;font-size:4.8rem}html .md-nav--primary .md-nav__title--site:before{display:none}.md-nav--primary .md-nav__list{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow-y:auto}.md-nav--primary .md-nav__item{padding:0;border-top:.1rem solid rgba(0,0,0,.07)}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:4.8rem}.md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"\E315"}.md-nav--primary .md-nav__link{position:relative;margin-top:0;padding:1.2rem 1.6rem}.md-nav--primary .md-nav__link:after{position:absolute;top:50%;right:1.2rem;margin-top:-1.2rem;color:inherit;font-size:2.4rem}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{position:static;background-color:transparent}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:2.8rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:4rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:5.2rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:6.4rem}.md-nav__toggle~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-transform:translateX(100%);transform:translateX(100%);-webkit-transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);opacity:0}.no-csstransforms3d .md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1}.no-csstransforms3d .md-nav__toggle:checked~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.md-sidebar--primary{position:fixed;top:0;left:-24.2rem;width:24.2rem;height:100%;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:-webkit-transform .25s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .25s;transition:-webkit-transform .25s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .25s;background-color:#fff;z-index:3}.no-csstransforms3d .md-sidebar--primary{display:none}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{-webkit-box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 
3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);-webkit-transform:translateX(24.2rem);transform:translateX(24.2rem)}.no-csstransforms3d [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{display:block}.md-sidebar--primary .md-sidebar__scrollwrap{overflow:hidden;position:absolute;top:0;right:0;bottom:0;left:0;margin:0}.md-tabs{display:none}}@media only screen and (min-width:60em){.md-content{margin-right:24.2rem}.md-header-nav__button.md-icon--search{display:none}.md-header-nav__source{display:block;width:23rem;max-width:23rem;margin-left:2.8rem;padding-right:1.2rem}.md-search{padding:.4rem}.md-search__overlay{position:fixed;top:0;left:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);cursor:pointer}[data-md-toggle=search]:checked~.md-header .md-search__overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-search__inner{position:relative;width:23rem;padding:.2rem 0;float:right;-webkit-transition:width .25s cubic-bezier(.1,.7,.1,1);transition:width .25s cubic-bezier(.1,.7,.1,1)}.md-search__form,.md-search__input{border-radius:.2rem}.md-search__input{width:100%;height:3.6rem;padding-left:4.4rem;-webkit-transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);background-color:rgba(0,0,0,.26);color:inherit;font-size:1.6rem}.md-search__input+.md-search__icon{color:inherit}.md-search__input::-webkit-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:hover{background-color:hsla(0,0%,100%,.12)}[data-md-toggle=search]:checked~.md-header .md-search__input{border-radius:.2rem .2rem 0 0;background-color:#fff;color:rgba(0,0,0,.87);text-overflow:none}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input:-ms-input-placeholder,[data-md-toggle=search]:checked~.md-header .md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__output{top:3.8rem;-webkit-transition:opacity .4s;transition:opacity .4s;opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__output{-webkit-box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}.md-search__scrollwrap{max-height:0}[data-md-toggle=search]:checked~.md-header 
.md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap::-webkit-scrollbar{width:.4rem;height:.4rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-search-result__article,.md-search-result__meta{padding-left:4.4rem}.md-sidebar--secondary{display:block;margin-left:100%;-webkit-transform:translate(-100%);transform:translate(-100%)}}@media only screen and (min-width:76.25em){.md-content{margin-left:24.2rem}.md-content__inner{margin-right:2.4rem;margin-left:2.4rem}.md-header-nav__button.md-icon--menu{display:none}.md-nav[data-md-state=animate]{-webkit-transition:max-height .25s cubic-bezier(.86,0,.07,1);transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav__toggle~.md-nav{max-height:0;overflow:hidden}.md-nav[data-md-state=expand],.md-nav__toggle:checked~.md-nav{max-height:100%}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--nested>.md-nav__link:after{display:inline-block;-webkit-transform-origin:.45em .45em;transform-origin:.45em .45em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;vertical-align:-.125em}.js .md-nav__item--nested>.md-nav__link:after{-webkit-transition:-webkit-transform .4s;transition:-webkit-transform .4s;transition:transform .4s;transition:transform .4s,-webkit-transform .4s}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link:after{-webkit-transform:rotateX(180deg);transform:rotateX(180deg)}.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:68.8rem}.md-sidebar--secondary{margin-left:122rem}.md-tabs~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{font-size:0}.md-tabs--active~.md-main .md-nav--primary .md-nav__title--site{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item{font-size:0}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{display:none;font-size:1.4rem;overflow:auto}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested>.md-nav__link{margin-top:0;font-weight:700;pointer-events:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested>.md-nav__link:after{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--active{display:block}.md-tabs--active~.md-main .md-nav[data-md-level="1"]{max-height:none}.md-tabs--active~.md-main .md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-left:0}}@media only screen and (min-width:45em){.md-footer-nav__link{width:50%}.md-footer-copyright{max-width:75%;float:left}.md-footer-social{padding:1.2rem 0;float:right}}@media only screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(45);transform:scale(45)}}@media only screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(60);transform:scale(60)}}@media only screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(75);transform:scale(75)}}@media only screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:46.8rem}.md-search-result__teaser{max-height:5rem;-webkit-line-clamp:3}} 
+html{-webkit-box-sizing:border-box;box-sizing:border-box}*,:after,:before{-webkit-box-sizing:inherit;box-sizing:inherit}html{-webkit-text-size-adjust:none;-moz-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}body{margin:0}hr{overflow:visible;-webkit-box-sizing:content-box;box-sizing:content-box}a{-webkit-text-decoration-skip:objects}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}a:active,a:hover{outline-width:0}small,sub,sup{font-size:80%}sub,sup{position:relative;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}table{border-collapse:collapse;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{padding:0;background:transparent;font-size:inherit}button,input{border:0;outline:0}.md-clipboard:before,.md-icon,.md-nav__button,.md-nav__link:after,.md-nav__title:before,.md-search-result__article--document:before,.md-source-file:before,.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before,.md-typeset summary:after{font-family:Material Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;speak:none;word-wrap:normal;direction:ltr}.md-content__icon,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before,.md-search-result__article--document:before{display:inline-block;margin:.4rem;padding:.8rem;font-size:2.4rem;cursor:pointer}.md-icon--arrow-back:before{content:"\E5C4"}.md-icon--arrow-forward:before{content:"\E5C8"}.md-icon--menu:before{content:"\E5D2"}.md-icon--search:before{content:"\E8B6"}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","liga";font-feature-settings:"kern","liga";font-family:Helvetica Neue,Helvetica,Arial,sans-serif}code,kbd,pre{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern";font-feature-settings:"kern";font-family:Courier New,Courier,monospace}.md-typeset{font-size:1.6rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 4rem;color:rgba(0,0,0,.54);font-size:3.125rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:4rem 0 1.6rem;font-size:2.5rem;line-height:1.4}.md-typeset h3{margin:3.2rem 0 1.6rem;font-size:2rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:1.6rem}.md-typeset h4{font-size:1.6rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:1.6rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.1rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,93%,.5);color:#37474f;font-size:85%}.md-typeset code{margin:0 .29412em;padding:.07353em 0;border-radius:.2rem;-webkit-box-shadow:.29412em 0 0 hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);box-shadow:.29412em 0 0 
hsla(0,0%,93%,.5),-.29412em 0 0 hsla(0,0%,93%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:none;background-color:inherit;color:inherit;-webkit-box-shadow:none;box-shadow:none}.md-typeset pre{position:relative;margin:1em 0;border-radius:.2rem;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset pre>code{display:block;margin:0;padding:1.05rem 1.2rem;background-color:transparent;font-size:inherit;-webkit-box-shadow:none;box-shadow:none;-webkit-box-decoration-break:none;box-decoration-break:none;overflow:auto}.md-typeset pre>code::-webkit-scrollbar{width:.8rem;height:.4rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset kbd{padding:0 .29412em;border:.1rem solid #c9c9c9;border-radius:.2rem;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;-webkit-box-shadow:0 .1rem 0 #b0b0b0;box-shadow:0 .1rem 0 #b0b0b0;word-break:break-word}.md-typeset mark{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;background-color:rgba(255,235,59,.5);-webkit-box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.1rem dotted rgba(0,0,0,.54);text-decoration:none;cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}.md-typeset blockquote{padding-left:1.2rem;border-left:.4rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}.md-typeset dd{margin:1em 0 1em 1.875em}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset table:not([class]){-webkit-box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);display:inline-block;max-width:100%;border-radius:.2rem;font-size:1.28rem;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}.md-typeset table:not([class]) th{min-width:10rem;padding:1.2rem 1.6rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:1.2rem 1.6rem;border-top:.1rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset__scrollwrap{margin:1em 
-1.6rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 1.6rem}.md-typeset .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{font-size:62.5%;overflow-x:hidden}body,html{height:100%}body{position:relative}hr{display:block;height:.1rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:122rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;padding-top:4.8rem;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{height:100%;padding-top:3rem;padding-bottom:.1rem}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:3}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}@page{margin:25mm}.md-clipboard{position:absolute;top:.6rem;right:.6rem;width:2.8rem;height:2.8rem;border-radius:.2rem;font-size:1.6rem;cursor:pointer;z-index:1;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-clipboard:before{-webkit-transition:color .25s,opacity .25s;transition:color .25s,opacity .25s;color:rgba(0,0,0,.54);content:"\E14D";opacity:.25}.codehilite:hover .md-clipboard:before,.md-typeset .highlight:hover .md-clipboard:before,pre:hover .md-clipboard:before{opacity:1}.md-clipboard:active:before,.md-clipboard:hover:before{color:#536dfe}.md-clipboard__message{display:block;position:absolute;top:0;right:3.4rem;padding:.6rem 1rem;-webkit-transform:translateX(.8rem);transform:translateX(.8rem);-webkit-transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s;transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);border-radius:.2rem;background-color:rgba(0,0,0,.54);color:#fff;font-size:1.28rem;white-space:nowrap;opacity:0;pointer-events:none}.md-clipboard__message--active{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;pointer-events:auto}.md-clipboard__message:before{content:attr(aria-label)}.md-clipboard__message:after{display:block;position:absolute;top:50%;right:-.4rem;width:0;margin-top:-.4rem;border-width:.4rem 0 .4rem .4rem;border-style:solid;border-color:transparent rgba(0,0,0,.54);content:""}.md-content__inner{margin:0 1.6rem 2.4rem;padding-top:1.2rem}.md-content__inner:before{display:block;height:.8rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__icon{position:relative;margin:.8rem 0;padding:0;float:right}.md-typeset .md-content__icon{color:rgba(0,0,0,.26)}.md-header{position:fixed;top:0;right:0;left:0;height:4.8rem;-webkit-transition:background-color .25s,color 
.25s;transition:background-color .25s,color .25s;background-color:#3f51b5;color:#fff;z-index:2;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-header,.no-js .md-header{-webkit-box-shadow:none;box-shadow:none}.md-header-nav{padding:0 .4rem}.md-header-nav__button{position:relative;-webkit-transition:opacity .25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo *{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__topic{display:block;position:absolute;-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(2.5rem);transform:translateX(2.5rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}.no-js .md-header-nav__topic{position:static}.md-header-nav__title{padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-header-nav__title[data-md-state=active] .md-header-nav__topic{-webkit-transform:translateX(-2.5rem);transform:translateX(-2.5rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}.md-header-nav__title[data-md-state=active] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);opacity:1;z-index:0;pointer-events:auto}.md-header-nav__source{display:none}.md-hero{-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;font-size:2rem;overflow:hidden}.md-hero__inner{margin-top:2rem;padding:1.6rem 1.6rem .8rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);-webkit-transition-delay:.1s;transition-delay:.1s}[data-md-state=hidden] .md-hero__inner{pointer-events:none;-webkit-transform:translateY(1.25rem);transform:translateY(1.25rem);-webkit-transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:transform 0s .4s,opacity .1s 0s;transition:transform 0s .4s,opacity .1s 0s,-webkit-transform 0s .4s;opacity:0}.md-hero--expand 
.md-hero__inner{margin-bottom:2.4rem}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.4rem;overflow:auto}.md-footer-nav__link{padding-top:2.8rem;padding-bottom:.8rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}.md-footer-nav__link--next{width:75%;float:right;text-align:right}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 2rem;font-size:1.8rem;line-height:4.8rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-2rem;padding:0 2rem;color:hsla(0,0%,100%,.7);font-size:1.5rem}.md-footer-meta{background-color:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.4rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:#fff}.md-footer-copyright{margin:0 1.2rem;padding:.8rem 0;color:hsla(0,0%,100%,.3);font-size:1.28rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .8rem;padding:.4rem 0 1.2rem}.md-footer-social__link{display:inline-block;width:3.2rem;height:3.2rem;font-size:1.6rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:1.4rem;line-height:1.3}.md-nav--secondary .md-nav__link--active{color:#3f51b5}.md-nav__title{display:block;padding:0 1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"\E5C4"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 1.2rem}.md-nav__item:last-child{padding-bottom:1.2rem}.md-nav__item .md-nav__item{padding-right:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;margin-top:.625em;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"\E313"}html .md-nav__link[for=toc],html .md-nav__link[for=toc]+.md-nav__link:after,html .md-nav__link[for=toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link:active{color:#3f51b5}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 4.8rem 0 7.2rem;text-overflow:ellipsis;z-index:2}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-webkit-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;-webkit-transition:color .25s 
cubic-bezier(.1,.7,.1,1),opacity .25s;transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;font-size:2.4rem;cursor:pointer;z-index:2}.md-search__icon:hover{opacity:.7}.md-search__icon[for=search]{top:.6rem;left:1rem}.md-search__icon[for=search]:before{content:"\E8B6"}.md-search__icon[type=reset]{top:.6rem;right:1rem;-webkit-transform:scale(.125);transform:scale(.125);-webkit-transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]{-webkit-transform:scale(1);transform:scale(1);opacity:1}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;width:100%;border-radius:0 0 .2rem .2rem;overflow:hidden;z-index:1}.md-search__scrollwrap{height:100%;background-color:#fff;-webkit-box-shadow:0 .1rem 0 rgba(0,0,0,.07) inset;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result{color:rgba(0,0,0,.87);word-break:break-word}.md-search-result__meta{padding:0 1.6rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:1.28rem;line-height:3.6rem}.md-search-result__list{margin:0;padding:0;border-top:.1rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{-webkit-box-shadow:0 -.1rem 0 rgba(0,0,0,.07);box-shadow:0 -.1rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;-webkit-transition:background .25s;transition:background .25s;outline:0;overflow:hidden}.md-search-result__link:hover,.md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}.md-search-result__link:hover .md-search-result__article:before,.md-search-result__link[data-md-state=active] .md-search-result__article:before{opacity:.7}.md-search-result__link:last-child .md-search-result__teaser{margin-bottom:1.2rem}.md-search-result__article{position:relative;padding:0 1.6rem;overflow:auto}.md-search-result__article--document:before{position:absolute;left:0;margin:.2rem;-webkit-transition:opacity .25s;transition:opacity .25s;color:rgba(0,0,0,.54);content:"\E880"}.md-search-result__article--document .md-search-result__title{margin:1.1rem 0;font-size:1.6rem;font-weight:400;line-height:1.4}.md-search-result__title{margin:.5em 0;font-size:1.28rem;font-weight:700;line-height:1.4}.md-search-result__teaser{display:-webkit-box;max-height:3.3rem;margin:.5em 0;color:rgba(0,0,0,.54);font-size:1.28rem;line-height:1.4;text-overflow:ellipsis;overflow:hidden;-webkit-box-orient:vertical;-webkit-line-clamp:2}.md-search-result em{font-style:normal;font-weight:700;text-decoration:underline}.md-sidebar{position:absolute;width:24.2rem;padding:2.4rem 0;overflow:hidden}.md-sidebar[data-md-state=lock]{position:fixed;top:4.8rem}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .4rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.6rem;height:.6rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:1.3rem}}@keyframes 
md-source__facts--done{0%{height:0}to{height:1.3rem}}@-webkit-keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;padding-right:1.2rem;-webkit-transition:opacity .25s;transition:opacity .25s;font-size:1.3rem;line-height:1.2;white-space:nowrap}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:4.8rem;content:"";vertical-align:middle}.md-source__icon{width:4.8rem}.md-source__icon svg{width:2.4rem;height:2.4rem;margin-top:1.2rem;margin-left:1.2rem}.md-source__icon+.md-source__repository{margin-left:-4.4rem;padding-left:4rem}.md-source__repository{display:inline-block;max-width:100%;margin-left:1.2rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:1.1rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{float:left}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{margin:0 .2rem;content:"\B7"}.md-source__fact:first-child:before{display:none}.md-source-file{display:inline-block;margin:1em .5em 1em 0;padding-right:.5rem;border-radius:.2rem;background-color:rgba(0,0,0,.07);font-size:1.28rem;list-style-type:none;cursor:pointer;overflow:hidden}.md-source-file:before{display:inline-block;margin-right:.5rem;padding:.5rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:1.6rem;content:"\E86F";vertical-align:middle}html .md-source-file{-webkit-transition:background .4s,color .4s,-webkit-box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,-webkit-box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .4s cubic-bezier(.4,0,.2,1)}html .md-source-file:before{-webkit-transition:inherit;transition:inherit}html body .md-typeset .md-source-file{color:rgba(0,0,0,.54)}.md-source-file:hover{-webkit-box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36);box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36)}.md-source-file:hover:before{background-color:#536dfe}.md-tabs{width:100%;-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;overflow:auto}.md-tabs__list{margin:0;margin-left:.4rem;padding:0;list-style:none;white-space:nowrap}.md-tabs__item{display:inline-block;height:4.8rem;padding-right:1.2rem;padding-left:1.2rem}.md-tabs__link{display:block;margin-top:1.6rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);font-size:1.4rem;opacity:.7}.md-tabs__link--active,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) 
.md-tabs__link{-webkit-transition-delay:.02s;transition-delay:.02s}.md-tabs__item:nth-child(3) .md-tabs__link{-webkit-transition-delay:.04s;transition-delay:.04s}.md-tabs__item:nth-child(4) .md-tabs__link{-webkit-transition-delay:.06s;transition-delay:.06s}.md-tabs__item:nth-child(5) .md-tabs__link{-webkit-transition-delay:.08s;transition-delay:.08s}.md-tabs__item:nth-child(6) .md-tabs__link{-webkit-transition-delay:.1s;transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{-webkit-transition-delay:.12s;transition-delay:.12s}.md-tabs__item:nth-child(8) .md-tabs__link{-webkit-transition-delay:.14s;transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{-webkit-transition-delay:.16s;transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{-webkit-transition-delay:.18s;transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{-webkit-transition-delay:.2s;transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{-webkit-transition-delay:.22s;transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{-webkit-transition-delay:.24s;transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{-webkit-transition-delay:.26s;transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{-webkit-transition-delay:.28s;transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{-webkit-transition-delay:.3s;transition-delay:.3s}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs[data-md-state=hidden] .md-tabs__link{-webkit-transform:translateY(50%);transform:translateY(50%);-webkit-transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,transform 0s .4s,opacity .1s;transition:color .25s,transform 0s .4s,opacity .1s,-webkit-transform 0s .4s;opacity:0}.md-typeset .admonition,.md-typeset details{-webkit-box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:relative;margin:1.5625em 0;padding:1.2rem 1.2rem 0;border-left:.4rem solid #448aff;border-radius:.2rem;font-size:1.28rem}.md-typeset .admonition :first-child,.md-typeset details :first-child{margin-top:0}html .md-typeset .admonition :last-child,html .md-typeset details :last-child{margin-bottom:0;padding-bottom:1.2rem}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin:1em 0}.md-typeset .admonition>.admonition-title,.md-typeset .admonition>summary,.md-typeset details>.admonition-title,.md-typeset details>summary{margin:-1.2rem -1.2rem 0;padding:.8rem 1.2rem .8rem 4rem;border-bottom:.1rem solid rgba(68,138,255,.1);background-color:rgba(68,138,255,.1);font-weight:700}html .md-typeset .admonition>.admonition-title,html .md-typeset .admonition>summary,html .md-typeset details>.admonition-title,html .md-typeset details>summary{padding-bottom:.8rem}.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before{position:absolute;left:1.2rem;color:#448aff;font-size:2rem;content:"\E3C9"}.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.summary,.md-typeset details.tldr{border-left:.4rem solid #00b0ff}.md-typeset .admonition.summary>.admonition-title,.md-typeset .admonition.summary>summary,.md-typeset .admonition.tldr>.admonition-title,.md-typeset 
.admonition.tldr>summary,.md-typeset details.summary>.admonition-title,.md-typeset details.summary>summary,.md-typeset details.tldr>.admonition-title,.md-typeset details.tldr>summary{border-bottom:.1rem solid rgba(0,176,255,.1);background-color:rgba(0,176,255,.1)}.md-typeset .admonition.summary>.admonition-title:before,.md-typeset .admonition.summary>summary:before,.md-typeset .admonition.tldr>.admonition-title:before,.md-typeset .admonition.tldr>summary:before,.md-typeset details.summary>.admonition-title:before,.md-typeset details.summary>summary:before,.md-typeset details.tldr>.admonition-title:before,.md-typeset details.tldr>summary:before{color:#00b0ff;content:"\E8D2"}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-left:.4rem solid #00b8d4}.md-typeset .admonition.info>.admonition-title,.md-typeset .admonition.info>summary,.md-typeset .admonition.todo>.admonition-title,.md-typeset .admonition.todo>summary,.md-typeset details.info>.admonition-title,.md-typeset details.info>summary,.md-typeset details.todo>.admonition-title,.md-typeset details.todo>summary{border-bottom:.1rem solid rgba(0,184,212,.1);background-color:rgba(0,184,212,.1)}.md-typeset .admonition.info>.admonition-title:before,.md-typeset .admonition.info>summary:before,.md-typeset .admonition.todo>.admonition-title:before,.md-typeset .admonition.todo>summary:before,.md-typeset details.info>.admonition-title:before,.md-typeset details.info>summary:before,.md-typeset details.todo>.admonition-title:before,.md-typeset details.todo>summary:before{color:#00b8d4;content:"\E88E"}.md-typeset .admonition.hint,.md-typeset .admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-left:.4rem solid #00bfa5}.md-typeset .admonition.hint>.admonition-title,.md-typeset .admonition.hint>summary,.md-typeset .admonition.important>.admonition-title,.md-typeset .admonition.important>summary,.md-typeset .admonition.tip>.admonition-title,.md-typeset .admonition.tip>summary,.md-typeset details.hint>.admonition-title,.md-typeset details.hint>summary,.md-typeset details.important>.admonition-title,.md-typeset details.important>summary,.md-typeset details.tip>.admonition-title,.md-typeset details.tip>summary{border-bottom:.1rem solid rgba(0,191,165,.1);background-color:rgba(0,191,165,.1)}.md-typeset .admonition.hint>.admonition-title:before,.md-typeset .admonition.hint>summary:before,.md-typeset .admonition.important>.admonition-title:before,.md-typeset .admonition.important>summary:before,.md-typeset .admonition.tip>.admonition-title:before,.md-typeset .admonition.tip>summary:before,.md-typeset details.hint>.admonition-title:before,.md-typeset details.hint>summary:before,.md-typeset details.important>.admonition-title:before,.md-typeset details.important>summary:before,.md-typeset details.tip>.admonition-title:before,.md-typeset details.tip>summary:before{color:#00bfa5;content:"\E80E"}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset .admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-left:.4rem solid #00c853}.md-typeset .admonition.check>.admonition-title,.md-typeset .admonition.check>summary,.md-typeset .admonition.done>.admonition-title,.md-typeset .admonition.done>summary,.md-typeset .admonition.success>.admonition-title,.md-typeset .admonition.success>summary,.md-typeset details.check>.admonition-title,.md-typeset 
details.check>summary,.md-typeset details.done>.admonition-title,.md-typeset details.done>summary,.md-typeset details.success>.admonition-title,.md-typeset details.success>summary{border-bottom:.1rem solid rgba(0,200,83,.1);background-color:rgba(0,200,83,.1)}.md-typeset .admonition.check>.admonition-title:before,.md-typeset .admonition.check>summary:before,.md-typeset .admonition.done>.admonition-title:before,.md-typeset .admonition.done>summary:before,.md-typeset .admonition.success>.admonition-title:before,.md-typeset .admonition.success>summary:before,.md-typeset details.check>.admonition-title:before,.md-typeset details.check>summary:before,.md-typeset details.done>.admonition-title:before,.md-typeset details.done>summary:before,.md-typeset details.success>.admonition-title:before,.md-typeset details.success>summary:before{color:#00c853;content:"\E876"}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-left:.4rem solid #64dd17}.md-typeset .admonition.faq>.admonition-title,.md-typeset .admonition.faq>summary,.md-typeset .admonition.help>.admonition-title,.md-typeset .admonition.help>summary,.md-typeset .admonition.question>.admonition-title,.md-typeset .admonition.question>summary,.md-typeset details.faq>.admonition-title,.md-typeset details.faq>summary,.md-typeset details.help>.admonition-title,.md-typeset details.help>summary,.md-typeset details.question>.admonition-title,.md-typeset details.question>summary{border-bottom:.1rem solid rgba(100,221,23,.1);background-color:rgba(100,221,23,.1)}.md-typeset .admonition.faq>.admonition-title:before,.md-typeset .admonition.faq>summary:before,.md-typeset .admonition.help>.admonition-title:before,.md-typeset .admonition.help>summary:before,.md-typeset .admonition.question>.admonition-title:before,.md-typeset .admonition.question>summary:before,.md-typeset details.faq>.admonition-title:before,.md-typeset details.faq>summary:before,.md-typeset details.help>.admonition-title:before,.md-typeset details.help>summary:before,.md-typeset details.question>.admonition-title:before,.md-typeset details.question>summary:before{color:#64dd17;content:"\E887"}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-left:.4rem solid #ff9100}.md-typeset .admonition.attention>.admonition-title,.md-typeset .admonition.attention>summary,.md-typeset .admonition.caution>.admonition-title,.md-typeset .admonition.caution>summary,.md-typeset .admonition.warning>.admonition-title,.md-typeset .admonition.warning>summary,.md-typeset details.attention>.admonition-title,.md-typeset details.attention>summary,.md-typeset details.caution>.admonition-title,.md-typeset details.caution>summary,.md-typeset details.warning>.admonition-title,.md-typeset details.warning>summary{border-bottom:.1rem solid rgba(255,145,0,.1);background-color:rgba(255,145,0,.1)}.md-typeset .admonition.attention>.admonition-title:before,.md-typeset .admonition.attention>summary:before,.md-typeset .admonition.caution>.admonition-title:before,.md-typeset .admonition.caution>summary:before,.md-typeset .admonition.warning>.admonition-title:before,.md-typeset .admonition.warning>summary:before,.md-typeset details.attention>.admonition-title:before,.md-typeset details.attention>summary:before,.md-typeset details.caution>.admonition-title:before,.md-typeset 
details.caution>summary:before,.md-typeset details.warning>.admonition-title:before,.md-typeset details.warning>summary:before{color:#ff9100;content:"\E002"}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-left:.4rem solid #ff5252}.md-typeset .admonition.fail>.admonition-title,.md-typeset .admonition.fail>summary,.md-typeset .admonition.failure>.admonition-title,.md-typeset .admonition.failure>summary,.md-typeset .admonition.missing>.admonition-title,.md-typeset .admonition.missing>summary,.md-typeset details.fail>.admonition-title,.md-typeset details.fail>summary,.md-typeset details.failure>.admonition-title,.md-typeset details.failure>summary,.md-typeset details.missing>.admonition-title,.md-typeset details.missing>summary{border-bottom:.1rem solid rgba(255,82,82,.1);background-color:rgba(255,82,82,.1)}.md-typeset .admonition.fail>.admonition-title:before,.md-typeset .admonition.fail>summary:before,.md-typeset .admonition.failure>.admonition-title:before,.md-typeset .admonition.failure>summary:before,.md-typeset .admonition.missing>.admonition-title:before,.md-typeset .admonition.missing>summary:before,.md-typeset details.fail>.admonition-title:before,.md-typeset details.fail>summary:before,.md-typeset details.failure>.admonition-title:before,.md-typeset details.failure>summary:before,.md-typeset details.missing>.admonition-title:before,.md-typeset details.missing>summary:before{color:#ff5252;content:"\E14C"}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset details.danger,.md-typeset details.error{border-left:.4rem solid #ff1744}.md-typeset .admonition.danger>.admonition-title,.md-typeset .admonition.danger>summary,.md-typeset .admonition.error>.admonition-title,.md-typeset .admonition.error>summary,.md-typeset details.danger>.admonition-title,.md-typeset details.danger>summary,.md-typeset details.error>.admonition-title,.md-typeset details.error>summary{border-bottom:.1rem solid rgba(255,23,68,.1);background-color:rgba(255,23,68,.1)}.md-typeset .admonition.danger>.admonition-title:before,.md-typeset .admonition.danger>summary:before,.md-typeset .admonition.error>.admonition-title:before,.md-typeset .admonition.error>summary:before,.md-typeset details.danger>.admonition-title:before,.md-typeset details.danger>summary:before,.md-typeset details.error>.admonition-title:before,.md-typeset details.error>summary:before{color:#ff1744;content:"\E3E7"}.md-typeset .admonition.bug,.md-typeset details.bug{border-left:.4rem solid #f50057}.md-typeset .admonition.bug>.admonition-title,.md-typeset .admonition.bug>summary,.md-typeset details.bug>.admonition-title,.md-typeset details.bug>summary{border-bottom:.1rem solid rgba(245,0,87,.1);background-color:rgba(245,0,87,.1)}.md-typeset .admonition.bug>.admonition-title:before,.md-typeset .admonition.bug>summary:before,.md-typeset details.bug>.admonition-title:before,.md-typeset details.bug>summary:before{color:#f50057;content:"\E868"}.md-typeset .admonition.cite,.md-typeset .admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-left:.4rem solid #9e9e9e}.md-typeset .admonition.cite>.admonition-title,.md-typeset .admonition.cite>summary,.md-typeset .admonition.quote>.admonition-title,.md-typeset .admonition.quote>summary,.md-typeset details.cite>.admonition-title,.md-typeset details.cite>summary,.md-typeset details.quote>.admonition-title,.md-typeset 
details.quote>summary{border-bottom:.1rem solid hsla(0,0%,62%,.1);background-color:hsla(0,0%,62%,.1)}.md-typeset .admonition.cite>.admonition-title:before,.md-typeset .admonition.cite>summary:before,.md-typeset .admonition.quote>.admonition-title:before,.md-typeset .admonition.quote>summary:before,.md-typeset details.cite>.admonition-title:before,.md-typeset details.cite>summary:before,.md-typeset details.quote>.admonition-title:before,.md-typeset details.quote>summary:before{color:#9e9e9e;content:"\E244"}.codehilite .o,.codehilite .ow,.md-typeset .highlight .o,.md-typeset .highlight .ow{color:inherit}.codehilite .ge,.md-typeset .highlight .ge{color:#000}.codehilite .gr,.md-typeset .highlight .gr{color:#a00}.codehilite .gh,.md-typeset .highlight .gh{color:#999}.codehilite .go,.md-typeset .highlight .go{color:#888}.codehilite .gp,.md-typeset .highlight .gp{color:#555}.codehilite .gs,.md-typeset .highlight .gs{color:inherit}.codehilite .gu,.md-typeset .highlight .gu{color:#aaa}.codehilite .gt,.md-typeset .highlight .gt{color:#a00}.codehilite .gd,.md-typeset .highlight .gd{background-color:#fdd}.codehilite .gi,.md-typeset .highlight .gi{background-color:#dfd}.codehilite .k,.md-typeset .highlight .k{color:#3b78e7}.codehilite .kc,.md-typeset .highlight .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn,.md-typeset .highlight .kd,.md-typeset .highlight .kn{color:#3b78e7}.codehilite .kp,.md-typeset .highlight .kp{color:#a71d5d}.codehilite .kr,.codehilite .kt,.md-typeset .highlight .kr,.md-typeset .highlight .kt{color:#3e61a2}.codehilite .c,.codehilite .cm,.md-typeset .highlight .c,.md-typeset .highlight .cm{color:#999}.codehilite .cp,.md-typeset .highlight .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs,.md-typeset .highlight .c1,.md-typeset .highlight .ch,.md-typeset .highlight .cs{color:#999}.codehilite .na,.codehilite .nb,.md-typeset .highlight .na,.md-typeset .highlight .nb{color:#c2185b}.codehilite .bp,.md-typeset .highlight .bp{color:#3e61a2}.codehilite .nc,.md-typeset .highlight .nc{color:#c2185b}.codehilite .no,.md-typeset .highlight .no{color:#3e61a2}.codehilite .nd,.codehilite .ni,.md-typeset .highlight .nd,.md-typeset .highlight .ni{color:#666}.codehilite .ne,.codehilite .nf,.md-typeset .highlight .ne,.md-typeset .highlight .nf{color:#c2185b}.codehilite .nl,.md-typeset .highlight .nl{color:#3b5179}.codehilite .nn,.md-typeset .highlight .nn{color:#ec407a}.codehilite .nt,.md-typeset .highlight .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi,.md-typeset .highlight .nv,.md-typeset .highlight .vc,.md-typeset .highlight .vg,.md-typeset .highlight .vi{color:#3e61a2}.codehilite .nx,.md-typeset .highlight .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo,.md-typeset .highlight .il,.md-typeset .highlight .m,.md-typeset .highlight .mf,.md-typeset .highlight .mh,.md-typeset .highlight .mi,.md-typeset .highlight .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc,.md-typeset .highlight .s,.md-typeset .highlight .sb,.md-typeset .highlight .sc{color:#0d904f}.codehilite .sd,.md-typeset .highlight .sd{color:#999}.codehilite .s2,.md-typeset .highlight .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx,.md-typeset .highlight .se,.md-typeset .highlight .sh,.md-typeset .highlight .si,.md-typeset .highlight .sx{color:#183691}.codehilite .sr,.md-typeset .highlight .sr{color:#009926}.codehilite .s1,.codehilite .ss,.md-typeset .highlight .s1,.md-typeset 
.highlight .ss{color:#0d904f}.codehilite .err,.md-typeset .highlight .err{color:#a61717}.codehilite .w,.md-typeset .highlight .w{color:transparent}.codehilite .hll,.md-typeset .highlight .hll{display:block;margin:0 -1.2rem;padding:0 1.2rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilite,.md-typeset .highlight{position:relative;margin:1em 0;padding:0;border-radius:.2rem;background-color:hsla(0,0%,93%,.5);color:#37474f;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset .codehilite code,.md-typeset .codehilite pre,.md-typeset .highlight code,.md-typeset .highlight pre{display:block;margin:0;padding:1.05rem 1.2rem;background-color:transparent;overflow:auto;vertical-align:top}.md-typeset .codehilite code::-webkit-scrollbar,.md-typeset .codehilite pre::-webkit-scrollbar,.md-typeset .highlight code::-webkit-scrollbar,.md-typeset .highlight pre::-webkit-scrollbar{width:.8rem;height:.4rem}.md-typeset .codehilite code::-webkit-scrollbar-thumb,.md-typeset .codehilite pre::-webkit-scrollbar-thumb,.md-typeset .highlight code::-webkit-scrollbar-thumb,.md-typeset .highlight pre::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset .codehilite code::-webkit-scrollbar-thumb:hover,.md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,.md-typeset .highlight code::-webkit-scrollbar-thumb:hover,.md-typeset .highlight pre::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset pre.codehilite,.md-typeset pre.highlight{overflow:visible}.md-typeset pre.codehilite code,.md-typeset pre.highlight code{display:block;padding:1.05rem 1.2rem;overflow:auto}.md-typeset .codehilitetable{display:block;margin:1em 0;border-radius:.2em;font-size:1.6rem;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td{display:block;padding:0}.md-typeset .codehilitetable tr{display:-webkit-box;display:-ms-flexbox;display:flex}.md-typeset .codehilitetable .codehilite,.md-typeset .codehilitetable .highlight,.md-typeset .codehilitetable .linenodiv{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv{padding:1.05rem 1.2rem}.md-typeset .codehilitetable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre{margin:0;padding:0;background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable{-webkit-box-shadow:none;box-shadow:none}.md-typeset [id^="fnref:"]{display:inline-block}.md-typeset [id^="fnref:"]:target{margin-top:-7.6rem;padding-top:7.6rem;pointer-events:none}.md-typeset [id^="fn:"]:before{display:none;height:0;content:""}.md-typeset [id^="fn:"]:target:before{display:block;margin-top:-7rem;padding-top:7rem;pointer-events:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:1.28rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-ref{display:inline-block;pointer-events:auto}.md-typeset 
.footnote-ref:before{display:inline;margin:0 .2em;border-left:.1rem solid rgba(0,0,0,.26);font-size:1.25em;content:"";vertical-align:-.5rem}.md-typeset .footnote-backref{display:inline-block;-webkit-transform:translateX(.5rem);transform:translateX(.5rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s .125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}.md-typeset .footnote-backref:before{font-size:1.6rem;content:"\E31B"}.md-typeset .headerlink{display:inline-block;margin-left:1rem;-webkit-transform:translateY(.5rem);transform:translateY(.5rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset h1[id] .headerlink{display:none}.md-typeset h2[id]:before{display:block;margin-top:-.8rem;padding-top:.8rem;content:""}.md-typeset h2[id]:target:before{margin-top:-6.8rem;padding-top:6.8rem}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink,.md-typeset h2[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink:hover,.md-typeset h2[id]:target .headerlink{color:#536dfe}.md-typeset h3[id]:before{display:block;margin-top:-.9rem;padding-top:.9rem;content:""}.md-typeset h3[id]:target:before{margin-top:-6.9rem;padding-top:6.9rem}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink,.md-typeset h3[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink:hover,.md-typeset h3[id]:target .headerlink{color:#536dfe}.md-typeset h4[id]:before{display:block;margin-top:-.9rem;padding-top:.9rem;content:""}.md-typeset h4[id]:target:before{margin-top:-6.9rem;padding-top:6.9rem}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink,.md-typeset h4[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink:hover,.md-typeset h4[id]:target .headerlink{color:#536dfe}.md-typeset h5[id]:before{display:block;margin-top:-1.1rem;padding-top:1.1rem;content:""}.md-typeset h5[id]:target:before{margin-top:-7.1rem;padding-top:7.1rem}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink,.md-typeset h5[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink:hover,.md-typeset h5[id]:target .headerlink{color:#536dfe}.md-typeset h6[id]:before{display:block;margin-top:-1.1rem;padding-top:1.1rem;content:""}.md-typeset h6[id]:target:before{margin-top:-7.1rem;padding-top:7.1rem}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink,.md-typeset h6[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink:hover,.md-typeset h6[id]:target 
.headerlink{color:#536dfe}.md-typeset .MJXc-display{margin:.75em 0;padding:.75em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{margin:0 .25em;padding:.0625em 0;border-radius:.2rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;-webkit-box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;-webkit-box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,93%,.5);color:#37474f;-webkit-box-shadow:.25em 0 0 hsla(0,0%,93%,.5),-.25em 0 0 hsla(0,0%,93%,.5);box-shadow:.25em 0 0 hsla(0,0%,93%,.5),-.25em 0 0 hsla(0,0%,93%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"\E0B7";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:1.6rem;padding-left:1.6rem;-webkit-box-shadow:none;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset details{padding-top:0}.md-typeset details[open]>summary:after{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.md-typeset details:not([open]){padding-bottom:0}.md-typeset details:not([open])>summary{border-bottom:none}.md-typeset details summary{padding-right:4rem}.no-details .md-typeset details:not([open])>*{display:none}.no-details .md-typeset details:not([open]) summary{display:block}.md-typeset summary{display:block;outline:none;cursor:pointer}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset summary:after{position:absolute;top:.8rem;right:1.2rem;color:rgba(0,0,0,.26);font-size:2rem;content:"\E313"}.md-typeset .emojione{width:2rem;vertical-align:text-top}.md-typeset code.codehilite,.md-typeset code.highlight{margin:0 .29412em;padding:.07353em 0}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}.md-typeset .task-list-control .task-list-indicator:before{position:absolute;top:.15em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.25em;content:"\E835";vertical-align:-.25em}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"\E834"}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code,.md-typeset pre{white-space:pre-wrap}.md-typeset code{-webkit-box-shadow:none;box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-clipboard,.md-content__icon,.md-footer,.md-header,.md-sidebar,.md-tabs,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset pre{margin:1em -1.6rem;border-radius:0}.md-typeset pre>code{padding:1.05rem 1.6rem}.md-footer-nav__link--prev .md-footer-nav__title{display:none}.md-search-result__teaser{max-height:5rem;-webkit-line-clamp:3}.codehilite .hll,.md-typeset .highlight .hll{margin:0 -1.6rem;padding:0 1.6rem}.md-typeset>.codehilite,.md-typeset>.highlight{margin:1em -1.6rem;border-radius:0}.md-typeset>.codehilite code,.md-typeset>.codehilite pre,.md-typeset>.highlight code,.md-typeset>.highlight pre{padding:1.05rem 1.6rem}.md-typeset>.codehilitetable{margin:1em -1.6rem;border-radius:0}.md-typeset>.codehilitetable 
.codehilite>code,.md-typeset>.codehilitetable .codehilite>pre,.md-typeset>.codehilitetable .highlight>code,.md-typeset>.codehilitetable .highlight>pre,.md-typeset>.codehilitetable .linenodiv{padding:1rem 1.6rem}.md-typeset>p>.MJXc-display{margin:.75em -1.6rem;padding:.25em 1.6rem}}@media only screen and (min-width:100em){html{font-size:68.75%}}@media only screen and (min-width:125em){html{font-size:75%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}html .md-nav__link[for=toc]{display:block;padding-right:4.8rem}html .md-nav__link[for=toc]:after{color:inherit;content:"\E8DE"}html .md-nav__link[for=toc]+.md-nav__link{display:none}html .md-nav__link[for=toc]~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.md-nav__source{display:block;padding:0 .4rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{position:absolute;top:.4rem;left:.4rem;width:3.6rem;height:3.6rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:2rem;background-color:#fff;overflow:hidden;pointer-events:none}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity .1s,-webkit-transform .4s;opacity:1}.md-search__inner{position:fixed;top:0;left:100%;width:100%;height:100%;-webkit-transform:translateX(5%);transform:translateX(5%);-webkit-transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;transition:left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;opacity:0;z-index:2}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;transition:left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;opacity:1}.md-search__input{width:100%;height:4.8rem;font-size:1.8rem}.md-search__icon[for=search]{top:1.2rem;left:1.6rem}.md-search__icon[for=search][for=search]:before{content:"\E5C4"}.md-search__icon[type=reset]{top:1.2rem;right:1.6rem}.md-search__output{top:4.8rem;bottom:0}.md-search-result__article--document:before{display:none}}@media only screen and (max-width:76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-header-nav__button.md-icon--home,.md-header-nav__button.md-logo{display:none}.md-hero__inner{margin-top:4.8rem;margin-bottom:2.4rem}.md-nav{background-color:#fff}.md-nav--primary,.md-nav--primary 
.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex;position:absolute;top:0;right:0;left:0;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;height:100%;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:1.6rem;line-height:1.5}html .md-nav--primary .md-nav__title{position:relative;height:11.2rem;padding:6rem 1.6rem .4rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-weight:400;line-height:4.8rem;white-space:nowrap;cursor:pointer}html .md-nav--primary .md-nav__title:before{display:block;position:absolute;top:.4rem;left:.4rem;width:4rem;height:4rem;color:rgba(0,0,0,.54)}html .md-nav--primary .md-nav__title~.md-nav__list{background-color:#fff;-webkit-box-shadow:0 .1rem 0 rgba(0,0,0,.07) inset;box-shadow:inset 0 .1rem 0 rgba(0,0,0,.07)}html .md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}html .md-nav--primary .md-nav__title--site{position:relative;background-color:#3f51b5;color:#fff}html .md-nav--primary .md-nav__title--site .md-nav__button{display:block;position:absolute;top:.4rem;left:.4rem;width:6.4rem;height:6.4rem;font-size:4.8rem}html .md-nav--primary .md-nav__title--site:before{display:none}.md-nav--primary .md-nav__list{-webkit-box-flex:1;-ms-flex:1;flex:1;overflow-y:auto}.md-nav--primary .md-nav__item{padding:0;border-top:.1rem solid rgba(0,0,0,.07)}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:4.8rem}.md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"\E315"}.md-nav--primary .md-nav__link{position:relative;margin-top:0;padding:1.2rem 1.6rem}.md-nav--primary .md-nav__link:after{position:absolute;top:50%;right:1.2rem;margin-top:-1.2rem;color:inherit;font-size:2.4rem}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{position:static;background-color:transparent}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:2.8rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:4rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:5.2rem}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:6.4rem}.md-nav__toggle~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-transform:translateX(100%);transform:translateX(100%);-webkit-transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);opacity:0}.no-csstransforms3d .md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1}.no-csstransforms3d 
.md-nav__toggle:checked~.md-nav{display:-webkit-box;display:-ms-flexbox;display:flex}.md-sidebar--primary{position:fixed;top:0;left:-24.2rem;width:24.2rem;height:100%;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:-webkit-transform .25s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .25s;transition:-webkit-transform .25s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1),-webkit-box-shadow .25s;background-color:#fff;z-index:3}.no-csstransforms3d .md-sidebar--primary{display:none}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{-webkit-box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);-webkit-transform:translateX(24.2rem);transform:translateX(24.2rem)}.no-csstransforms3d [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{display:block}.md-sidebar--primary .md-sidebar__scrollwrap{overflow:hidden;position:absolute;top:0;right:0;bottom:0;left:0;margin:0}.md-tabs{display:none}}@media only screen and (min-width:60em){.md-content{margin-right:24.2rem}.md-header-nav__button.md-icon--search{display:none}.md-header-nav__source{display:block;width:23rem;max-width:23rem;margin-left:2.8rem;padding-right:1.2rem}.md-search{padding:.4rem}.md-search__overlay{position:fixed;top:0;left:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);cursor:pointer}[data-md-toggle=search]:checked~.md-header .md-search__overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-search__inner{position:relative;width:23rem;padding:.2rem 0;float:right;-webkit-transition:width .25s cubic-bezier(.1,.7,.1,1);transition:width .25s cubic-bezier(.1,.7,.1,1)}.md-search__form,.md-search__input{border-radius:.2rem}.md-search__input{width:100%;height:3.6rem;padding-left:4.4rem;-webkit-transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);background-color:rgba(0,0,0,.26);color:inherit;font-size:1.6rem}.md-search__input+.md-search__icon{color:inherit}.md-search__input::-webkit-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:-ms-input-placeholder,.md-search__input::-ms-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:hover{background-color:hsla(0,0%,100%,.12)}[data-md-toggle=search]:checked~.md-header .md-search__input{border-radius:.2rem .2rem 0 0;background-color:#fff;color:rgba(0,0,0,.87);text-overflow:none}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input:-ms-input-placeholder,[data-md-toggle=search]:checked~.md-header .md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header 
.md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__output{top:3.8rem;-webkit-transition:opacity .4s;transition:opacity .4s;opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__output{-webkit-box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}.md-search__scrollwrap{max-height:0}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap::-webkit-scrollbar{width:.8rem;height:.4rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-search-result__article,.md-search-result__meta{padding-left:4.4rem}.md-sidebar--secondary{display:block;margin-left:100%;-webkit-transform:translate(-100%);transform:translate(-100%)}}@media only screen and (min-width:76.25em){.md-content{margin-left:24.2rem}.md-content__inner{margin-right:2.4rem;margin-left:2.4rem}.md-header-nav__button.md-icon--menu{display:none}.md-nav[data-md-state=animate]{-webkit-transition:max-height .25s cubic-bezier(.86,0,.07,1);transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav__toggle~.md-nav{max-height:0;overflow:hidden}.md-nav[data-md-state=expand],.md-nav__toggle:checked~.md-nav{max-height:100%}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--nested>.md-nav__link:after{display:inline-block;-webkit-transform-origin:.45em .45em;transform-origin:.45em .45em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;vertical-align:-.125em}.js .md-nav__item--nested>.md-nav__link:after{-webkit-transition:-webkit-transform .4s;transition:-webkit-transform .4s;transition:transform .4s;transition:transform .4s,-webkit-transform .4s}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link:after{-webkit-transform:rotateX(180deg);transform:rotateX(180deg)}.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:68.8rem}.md-sidebar--secondary{margin-left:122rem}.md-tabs~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{font-size:0}.md-tabs--active~.md-main .md-nav--primary .md-nav__title--site{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item{font-size:0}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{display:none;font-size:1.4rem;overflow:auto}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested>.md-nav__link{margin-top:0;font-weight:700;pointer-events:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested>.md-nav__link:after{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--active{display:block}.md-tabs--active~.md-main .md-nav[data-md-level="1"]{max-height:none}.md-tabs--active~.md-main .md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-left:0}}@media only screen and (min-width:45em){.md-footer-nav__link{width:50%}.md-footer-copyright{max-width:75%;float:left}.md-footer-social{padding:1.2rem 0;float:right}}@media only screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(45);transform:scale(45)}}@media only screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header 
.md-search__overlay{-webkit-transform:scale(60);transform:scale(60)}}@media only screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(75);transform:scale(75)}}@media only screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:46.8rem}.md-search-result__teaser{max-height:5rem;-webkit-line-clamp:3}}
/*# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IiIsImZpbGUiOiJhc3NldHMvc3R5bGVzaGVldHMvYXBwbGljYXRpb24uYWM2NDI1MWUuY3NzIiwic291cmNlUm9vdCI6IiJ9*/
diff --git a/docs/tools/mkdocs-material-theme/assets/stylesheets/custom.css b/docs/tools/mkdocs-material-theme/assets/stylesheets/custom.css
index a51d373c6d5..2fcf11ce447 100644
--- a/docs/tools/mkdocs-material-theme/assets/stylesheets/custom.css
+++ b/docs/tools/mkdocs-material-theme/assets/stylesheets/custom.css
@@ -128,3 +128,12 @@ h1, h2, h3, .md-logo {
 .md-hide {
     display: none;
 }
+
+#md-extra-nav {
+    background: #efefef;
+    padding-top: 0.5rem;
+}
+
+.grey {
+    color: #666;
+}
diff --git a/docs/tools/mkdocs-material-theme/base.html b/docs/tools/mkdocs-material-theme/base.html
index 548f57c853c..97283c55078 100644
--- a/docs/tools/mkdocs-material-theme/base.html
+++ b/docs/tools/mkdocs-material-theme/base.html
@@ -224,34 +224,11 @@
         }
       });
     }
-    function drawLanguageSwitch() {
-      var url, text, title;
-      if (window.location.pathname.indexOf('/ru/') >= 0) {
-        url = window.location.pathname.replace('/ru/', '/en/');
-        text = "\n" +
-          "\n" +
-          "\n" +
-          "\n" +
-          "\n" +
-          "";
-        title = "Switch to English"
-      } else {
-        url = window.location.pathname.replace('/en/', '/ru/');
-        text = "\n" +
-          "\n" +
-          "\n" +
-          "\n" +
-          "";
-        title = "Переключить на русский язык"
-      }
-      document.getElementById("md-language-switch").innerHTML = '' + text + '';
-    }
     ready(function () {
       {% if config.extra.single_page and page.content %}
       document.getElementById("content").innerHTML = {{ page.content|tojson|safe }};
       document.getElementsByClassName('md-footer')[0].style.display = 'block';
       {% endif %}
-      drawLanguageSwitch();
       app.initialize({
         version: "{{ mkdocs_version }}",
         url: {
diff --git a/docs/tools/mkdocs-material-theme/partials/footer.html b/docs/tools/mkdocs-material-theme/partials/footer.html
index 5f79e8116c6..f8edf2ef005 100644
--- a/docs/tools/mkdocs-material-theme/partials/footer.html
+++ b/docs/tools/mkdocs-material-theme/partials/footer.html
@@ -3,6 +3,11 @@
   {% if page.previous_page or page.next_page %}