Merge branch 'master' into alter-update

Conflicts:
	dbms/src/Parsers/ASTIdentifier.cpp
Alexey Zatelepin 2018-09-04 17:39:08 +03:00
commit 4f91833e67
395 changed files with 6492 additions and 3797 deletions

.travis.yml

@@ -4,27 +4,27 @@ matrix:
fast_finish: true
include:
# We need to have gcc7 headers to compile c++17 code on clang
- os: linux
cache:
ccache: true
timeout: 1000
directories:
- /home/travis/.ccache
addons:
apt:
update: true
sources:
- ubuntu-toolchain-r-test
- llvm-toolchain-trusty-5.0
packages: [ ninja-build, g++-7, clang-5.0, lld-5.0, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl]
env:
- MATRIX_EVAL="export CC=clang-5.0 CXX=clang++-5.0"
script:
- utils/travis/normal.sh
# - os: linux
#
# cache:
# ccache: true
# timeout: 1000
# directories:
# - /home/travis/.ccache
#
# addons:
# apt:
# update: true
# sources:
# - ubuntu-toolchain-r-test
# - llvm-toolchain-trusty-5.0
# packages: [ ninja-build, g++-7, clang-5.0, lld-5.0, libicu-dev, libreadline-dev, libmysqlclient-dev, unixodbc-dev, libltdl-dev, libssl-dev, libboost-dev, zlib1g-dev, libdouble-conversion-dev, libsparsehash-dev, librdkafka-dev, libcapnp-dev, libsparsehash-dev, libgoogle-perftools-dev, bash, expect, python, python-lxml, python-termcolor, curl, perl, sudo, openssl]
#
# env:
# - MATRIX_EVAL="export CC=clang-5.0 CXX=clang++-5.0"
#
# script:
# - utils/travis/normal.sh
- os: linux

CHANGELOG.md

@@ -1,3 +1,16 @@
## ClickHouse release 18.6.0, 2018-08-02
### New features:
* Added support for ON expressions in the JOIN ON syntax:
`JOIN ON Expr([table.]column ...) = Expr([table.]column, ...) [AND Expr([table.]column, ...) = Expr([table.]column, ...) ...]`
The expression must be a chain of equalities joined by the AND operator. Each side of an equality can be an arbitrary expression over the columns of one of the tables. Fully qualified column names (`table.name`, `database.table.name`, `table_alias.name`, `subquery_alias.name`) are supported for the right table; see the example after this list. [#2742](https://github.com/yandex/ClickHouse/pull/2742)
* HTTPS can be enabled for replication. [#2760](https://github.com/yandex/ClickHouse/pull/2760)
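A minimal illustration of the chained-equality `ON` syntax described above. The tables `t1`, `t2` and their columns are hypothetical:
```sql
-- Each equality compares expressions over the columns of one of the two tables.
SELECT t1.id, t2.value
FROM t1
ALL INNER JOIN t2 ON t1.id = t2.id AND toDate(t1.created_at) = t2.event_date
```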
### Improvements:
* The server passes the patch component of its version to the client. Data about the patch version component is in `system.processes` and `query_log`. [#2646](https://github.com/yandex/ClickHouse/pull/2646)
## ClickHouse release 18.5.1, 2018-07-31
### New features:
@@ -6,7 +19,7 @@
### Improvements:
* Now you can use the `from_env` attribute to set values in config files from environment variables [#2741](https://github.com/yandex/ClickHouse/pull/2741).
* Added case-insensitive versions of the `coalesce`, `ifNull`, and `nullIf` functions [#2752](https://github.com/yandex/ClickHouse/pull/2752).
### Bug fixes:
@@ -18,21 +31,21 @@
### New features:
* Added system tables: `formats`, `data_type_families`, `aggregate_function_combinators`, `table_functions`, `table_engines`, `collations` [#2721](https://github.com/yandex/ClickHouse/pull/2721).
* Added the ability to use a table function instead of a table as an argument of a `remote` or `cluster` table function [#2708](https://github.com/yandex/ClickHouse/pull/2708).
* Support for `HTTP Basic` authentication in the replication protocol [#2727](https://github.com/yandex/ClickHouse/pull/2727).
* The `has` function now allows searching for a numeric value in an array of `Enum` values [Maxim Khrisanfov](https://github.com/yandex/ClickHouse/pull/2699).
* Support for adding arbitrary message separators when reading from `Kafka` [Amos Bird](https://github.com/yandex/ClickHouse/pull/2701).
### Improvements:
* The `ALTER TABLE t DELETE WHERE` query does not rewrite data parts that were not affected by the WHERE condition [#2694](https://github.com/yandex/ClickHouse/pull/2694).
* The `use_minimalistic_checksums_in_zookeeper` option for `ReplicatedMergeTree` tables is enabled by default. This setting was added in version 1.1.54378, 2018-04-16. Versions that are older than 1.1.54378 can no longer be installed.
* Support for running `KILL` and `OPTIMIZE` queries that specify `ON CLUSTER` [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2689).
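A short sketch of the `ON CLUSTER` forms mentioned above; the cluster name `my_cluster`, the table `hits`, and the query id are hypothetical:
```sql
-- Run OPTIMIZE on every node of the cluster.
OPTIMIZE TABLE hits ON CLUSTER my_cluster FINAL;
-- Kill a query cluster-wide by query_id.
KILL QUERY ON CLUSTER my_cluster WHERE query_id = 'abc-123';
```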
### Bug fixes:
* Fixed the error `Column ... is not under an aggregate function and not in GROUP BY` for aggregation with an IN expression. This bug appeared in version 18.1.0. ([bbdd780b](https://github.com/yandex/ClickHouse/commit/bbdd780be0be06a0f336775941cdd536878dd2c2))
* Fixed a bug in the `windowFunnel` aggregate function [Winter Zhang](https://github.com/yandex/ClickHouse/pull/2735).
* Fixed a bug in the `anyHeavy` aggregate function ([a2101df2](https://github.com/yandex/ClickHouse/commit/a2101df25a6a0fba99aa71f8793d762af2b801ee))
* Fixed server crash when using the `countArray()` aggregate function.
@@ -72,7 +85,6 @@
* Converting a string containing the number zero to DateTime does not work. Example: `SELECT toDateTime('0')`. This is also the reason that `DateTime DEFAULT '0'` does not work in tables, as well as `<null_value>0</null_value>` in dictionaries. Solution: replace `0` with `0000-00-00 00:00:00`.
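A brief sketch of the workaround described above (the table name is hypothetical):
```sql
-- '0' does not parse as a DateTime default; use the zero date-time literal instead.
CREATE TABLE t_default_example
(
    d DateTime DEFAULT '0000-00-00 00:00:00'
) ENGINE = Memory;
```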
## ClickHouse release 1.1.54394, 2018-07-12
### New features:
@@ -99,7 +111,7 @@
### Improvements:
* Improved performance, reduced memory consumption, and correct memory consumption tracking with use of the IN operator when a table index could be used ([#2584](https://github.com/yandex/ClickHouse/pull/2584)).
* Removed redundant checking of checksums when adding a data part. This is important when there are a large number of replicas, because in these cases the total number of checks was equal to N^2.
* Added support for `Array(Tuple(...))` arguments for the `arrayEnumerateUniq` function ([#2573](https://github.com/yandex/ClickHouse/pull/2573)).
* Added `Nullable` support for the `runningDifference` function ([#2594](https://github.com/yandex/ClickHouse/pull/2594)).
@@ -126,8 +138,8 @@
### New features:
* Support for the `ALTER TABLE t DELETE WHERE` query for replicated tables. Added the `system.mutations` table to track the progress of queries of this type (see the sketch after this list).
* Support for the `ALTER TABLE t [REPLACE|ATTACH] PARTITION` query for \*MergeTree tables.
* Support for the `TRUNCATE TABLE` query ([Winter Zhang](https://github.com/yandex/ClickHouse/pull/2260)).
* Several new `SYSTEM` queries for replicated tables (`RESTART REPLICAS`, `SYNC REPLICA`, `[STOP|START] [MERGES|FETCHES|SENDS REPLICATED|REPLICATION QUEUES]`).
* Added the ability to write to a table with the MySQL engine and the corresponding table function ([sundy-li](https://github.com/yandex/ClickHouse/pull/2294)).
* Added the `url()` table function and the `URL` table engine ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2501)).
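A minimal sketch of the mutation syntax and the progress check mentioned in the `ALTER TABLE t DELETE WHERE` item above (the table and condition are hypothetical):
```sql
-- Delete rows asynchronously; only parts matching the condition are rewritten.
ALTER TABLE hits DELETE WHERE event_date < '2017-01-01';
-- Track the mutation's progress.
SELECT command, is_done FROM system.mutations WHERE table = 'hits';
```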
@@ -137,13 +149,13 @@
* The password to `clickhouse-client` can be entered interactively.
* Server logs can now be sent to syslog ([Alexander Krasheninnikov](https://github.com/yandex/ClickHouse/pull/2459)).
* Support for logging in dictionaries with a shared library source ([Alexander Sapin](https://github.com/yandex/ClickHouse/pull/2472)).
* Support for custom CSV delimiters ([Ivan Zhukov](https://github.com/yandex/ClickHouse/pull/2263)).
* Added the `date_time_input_format` setting. If you switch this setting to `'best_effort'`, DateTime values will be read in a wide range of formats.
* Added the `clickhouse-obfuscator` utility for data obfuscation. Usage example: publishing data used in performance tests.
### Experimental features:
* Added the ability to calculate `and` arguments only where they are needed ([Anastasia Tsarkova](https://github.com/yandex/ClickHouse/pull/2272)).
* JIT compilation to native code is now available for some expressions ([pyos](https://github.com/yandex/ClickHouse/pull/2277)).
### Bug fixes:
@@ -151,11 +163,11 @@
* Duplicates no longer appear for a query with `DISTINCT` and `ORDER BY`.
* Queries with `ARRAY JOIN` and `arrayFilter` no longer return an incorrect result.
* Fixed an error when reading an array column from a Nested structure ([#2066](https://github.com/yandex/ClickHouse/issues/2066)).
* Fixed an error when analyzing queries with a HAVING clause like `HAVING tuple IN (...)`.
* Fixed an error when analyzing queries with recursive aliases.
* Fixed an error when reading from ReplacingMergeTree with a condition in PREWHERE that filters all rows ([#2525](https://github.com/yandex/ClickHouse/issues/2525)).
* User profile settings were not applied when using sessions in the HTTP interface.
* Fixed how settings are applied from the command line parameters in `clickhouse-local`.
* The ZooKeeper client library now uses the session timeout received from the server.
* Fixed a bug in the ZooKeeper client library when the client waited for the server response longer than the timeout.
* Fixed pruning of parts for queries with conditions on partition key columns ([#2342](https://github.com/yandex/ClickHouse/issues/2342)).
@@ -165,7 +177,7 @@
* Fixed syntactic parsing and formatting of the `CAST` operator.
* Fixed insertion into a materialized view for the Distributed table engine ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2411)).
* Fixed a race condition when writing data from the `Kafka` engine to materialized views ([Yangkuan Liu](https://github.com/yandex/ClickHouse/pull/2448)).
* Fixed SSRF in the `remote()` table function.
* Fixed exit behavior of `clickhouse-client` in multiline mode ([#2510](https://github.com/yandex/ClickHouse/issues/2510)).
### Improvements:
@@ -184,7 +196,7 @@
### Build changes:
* The gcc8 compiler can be used for builds.
* Added the ability to build llvm from a submodule.
* The version of the librdkafka library has been updated to v0.11.4.
* Added the ability to use the system libcpuid library. The library version has been updated to 0.4.0.
* Fixed the build using the vectorclass library ([Babacar Diassé](https://github.com/yandex/ClickHouse/pull/2274)).
@@ -195,44 +207,52 @@
### Backward incompatible changes:
* Removed escaping in `Vertical` and `Pretty*` formats and deleted the `VerticalRaw` format.
* If servers with version 1.1.54388 (or newer) and servers with an older version are used simultaneously in a distributed query and the query has the `cast(x, 'Type')` expression without the `AS` keyword and doesn't have the word `cast` in uppercase, an exception will be thrown with a message like `Not found column cast(0, 'UInt8') in block`. Solution: Update the server on the entire cluster.
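For reference, a hedged sketch of the two spellings referred to above; the second form avoids the mixed-version issue:
```sql
-- Functional form without AS and with lowercase cast: affected during a mixed-version rolling update.
SELECT cast(0, 'UInt8');
-- Operator form with AS: safe spelling.
SELECT CAST(0 AS UInt8);
```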
## ClickHouse release 1.1.54385, 2018-06-01
### Bug fixes:
* Fixed an error that in some cases caused ZooKeeper operations to block.
## ClickHouse release 1.1.54383, 2018-05-22
### Bug fixes:
* Fixed a slowdown of the replication queue if a table has many replicas.
## ClickHouse release 1.1.54381, 2018-05-14
### Bug fixes:
* Fixed a leak of nodes in ZooKeeper when ClickHouse loses its connection to the ZooKeeper server.
## ClickHouse release 1.1.54380, 2018-04-21
### New features:
* Added the table function `file(path, format, structure)`. An example reading bytes from `/dev/urandom`: `ln -s /dev/urandom /var/lib/clickhouse/user_files/random` `clickhouse-client -q "SELECT * FROM file('random', 'RowBinary', 'd UInt8') LIMIT 10"`.
### Improvements:
* Subqueries can be wrapped in `()` brackets to enhance query readability. For example: `(SELECT 1) UNION ALL (SELECT 1)`.
* Simple `SELECT` queries from the `system.processes` table are not included in the `max_concurrent_queries` limit.
### Bug fixes:
* Fixed incorrect behavior of the `IN` operator when selecting from a `MATERIALIZED VIEW`.
* Fixed incorrect filtering by partition index in expressions like `partition_key_column IN (...)`.
* Fixed the inability to execute an `OPTIMIZE` query on a non-leader replica if `RENAME` was performed on the table.
* Fixed the authorization error when executing `OPTIMIZE` or `ALTER` queries on a non-leader replica.
* Fixed freezing of `KILL QUERY`.
* Fixed an error in the ZooKeeper client library which led to loss of watches, freezing of the distributed DDL queue, and slowdowns in the replication queue if a non-empty `chroot` prefix is used in the ZooKeeper configuration.
### Backward incompatible changes:
* Removed support for expressions like `(a, b) IN (SELECT (a, b))` (you can use the equivalent expression `(a, b) IN (SELECT a, b)`). In previous releases, these expressions led to undetermined `WHERE` filtering or caused errors.
## ClickHouse release 1.1.54378, 2018-04-16
### New features:
* Logging level can be changed without restarting the server.
@@ -242,10 +262,10 @@
* Added support for `ALTER TABLE ... PARTITION ... ` for `MATERIALIZED VIEW`.
* Added information about the size of data parts in uncompressed form in the system table.
* Server-to-server encryption support for distributed tables (`<secure>1</secure>` in the replica config in `<remote_servers>`).
* Table-level configuration for the `ReplicatedMergeTree` family to minimize the amount of data stored in ZooKeeper: `use_minimalistic_checksums_in_zookeeper = 1`.
* Configuration of the `clickhouse-client` prompt. By default, server names are now output to the prompt. The server's display name can be changed; it's also sent in the `X-ClickHouse-Display-Name` HTTP header (Kirill Shvakov).
* Multiple comma-separated `topics` can be specified for the `Kafka` engine (Tobias Adamson).
* When a query is stopped by `KILL QUERY` or `replace_running_query`, the client receives the `Query was cancelled` exception instead of an incomplete result.
### Improvements:
@@ -264,11 +284,11 @@
* Correct results are now returned when using tuples with `IN` when some of the tuple components are in the table index.
* The `max_execution_time` limit now works correctly with distributed queries.
* Fixed errors when calculating the size of composite columns in the `system.columns` table.
* Fixed an error when creating a temporary table `CREATE TEMPORARY TABLE IF NOT EXISTS`.
* Fixed errors in `StorageKafka` (#2075).
* Fixed server crashes from invalid arguments of certain aggregate functions.
* Fixed the error that prevented the `DETACH DATABASE` query from stopping background tasks for `ReplicatedMergeTree` tables.
* `Too many parts` state is less likely to happen when inserting into aggregated materialized views (#2084).
* Corrected recursive handling of substitutions in the config if a substitution must be followed by another substitution on the same level.
* Corrected the syntax in the metadata file when creating a `VIEW` that uses a query with `UNION ALL`.
* `SummingMergeTree` now works correctly for summation of nested data structures with a composite key.
@@ -276,15 +296,14 @@
### Build changes:
* The build supports `ninja` instead of `make` and uses it by default for building releases.
* Renamed packages: `clickhouse-server-base` is now `clickhouse-common-static`; `clickhouse-server-common` is now `clickhouse-server`; `clickhouse-common-dbg` is now `clickhouse-common-static-dbg`. To install, use `clickhouse-server clickhouse-client`. Packages with the old names will still load in the repositories for backward compatibility.
### Backward incompatible changes:
* Removed the special interpretation of an IN expression if an array is specified on the left side. Previously, the expression `arr IN (set)` was interpreted as "at least one `arr` element belongs to the `set`". To get the same behavior in the new version, write `arrayExists(x -> x IN (set), arr)`; see the example after this list.
* Disabled the incorrect use of the socket option `SO_REUSEPORT`, which was incorrectly enabled by default in the Poco library. Note that on Linux there is no longer any reason to simultaneously specify the addresses `::` and `0.0.0.0` for listen. Use just `::`, which allows listening to connections over both IPv4 and IPv6 (with the default kernel config settings). You can also revert to the behavior from previous versions by specifying `<listen_reuse_port>1</listen_reuse_port>` in the config.
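A small example of the rewrite suggested above, with a hypothetical table `events` and array column `tags`:
```sql
-- The old semantics of `tags IN (1, 2)` ("at least one element belongs to the set"), written explicitly.
SELECT count() FROM events WHERE arrayExists(x -> x IN (1, 2), tags)
```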
## ClickHouse release 1.1.54370, 2018-03-16
### New features:
@@ -296,43 +315,44 @@
### Improvements:
* When inserting data in a `Replicated` table, fewer requests are made to `ZooKeeper` (and most of the user-level errors have disappeared from the `ZooKeeper` log).
* Added the ability to create aliases for data sets. Example: `WITH (1, 2, 3) AS set SELECT number IN set FROM system.numbers LIMIT 10`.
### Bug fixes:
* Fixed the `Illegal PREWHERE` error when reading from `Merge` tables over `Distributed` tables.
* Added fixes that allow you to run `clickhouse-server` in IPv4-only Docker containers.
* Fixed a race condition when reading from the `system.parts_columns` system table.
* Removed double buffering during a synchronous insert to a `Distributed` table, which could have caused the connection to timeout.
* Fixed a bug that caused excessively long waits for an unavailable replica before beginning a `SELECT` query.
* Fixed incorrect dates in the `system.parts` table.
* Fixed a bug that made it impossible to insert data in a `Replicated` table if `chroot` was non-empty in the configuration of the `ZooKeeper` cluster.
* Fixed the vertical merging algorithm for an empty `ORDER BY` table.
* Restored the ability to use dictionaries in queries to remote tables, even if these dictionaries are not present on the requestor server. This functionality was lost in release 1.1.54362.
* Restored the behavior for queries like `SELECT * FROM remote('server2', default.table) WHERE col IN (SELECT col2 FROM default.table)` when the right side argument of the `IN` should use a remote `default.table` instead of a local one. This behavior was broken in version 1.1.54358.
* Removed extraneous error-level logging of `Not found column ... in block`.
## ClickHouse release 1.1.54362, 2018-03-11
### New features:
* Aggregation without `GROUP BY` for an empty set (such as `SELECT count(*) FROM table WHERE 0`) now returns a result with one row with null values for aggregate functions, in compliance with the SQL standard. To restore the old behavior (return an empty result), set `empty_result_for_aggregation_by_empty_set` to 1.
* Added type conversion for `UNION ALL`. Different alias names are allowed in `SELECT` positions in `UNION ALL`, in compliance with the SQL standard.
* Arbitrary expressions are supported in `LIMIT BY` clauses. Previously, it was only possible to use columns resulting from `SELECT`.
* An index of `MergeTree` tables is used when `IN` is applied to a tuple of expressions from the columns of the primary key. Example: `WHERE (UserID, EventDate) IN ((123, '2000-01-01'), ...)` (Anastasiya Tsarkova).
* Added the `clickhouse-copier` tool for copying between clusters and resharding data (beta).
* Added consistent hashing functions: `yandexConsistentHash`, `jumpConsistentHash`, `sumburConsistentHash`. They can be used as a sharding key in order to reduce the amount of network traffic during subsequent reshardings.
* Added functions: `arrayAny`, `arrayAll`, `hasAny`, `hasAll`, `arrayIntersect`, `arrayResize`.
* Added the `arrayCumSum` function (Javi Santana).
* Added the `parseDateTimeBestEffort`, `parseDateTimeBestEffortOrZero`, and `parseDateTimeBestEffortOrNull` functions to read the DateTime from a string containing text in a wide variety of possible formats (see the example after this list).
* Data can be partially reloaded from external dictionaries during updating (load only the records in which the value of the specified field is greater than in the previous download) (Arsen Hakobyan).
* Added the `cluster` table function. Example: `cluster(cluster_name, db, table)`. The `remote` table function can accept the cluster name as the first argument, if it is specified as an identifier.
* The `remote` and `cluster` table functions can be used in `INSERT` requests.
* Added the `create_table_query` and `engine_full` virtual columns to the `system.tables` table. The `metadata_modification_time` column is virtual.
* Added the `data_path` and `metadata_path` columns to the `system.tables` and `system.databases` tables, and added the `path` column to the `system.parts` and `system.parts_columns` tables.
* Added additional information about merges in the `system.part_log` table.
* An arbitrary partitioning key can be used for the `system.query_log` table (Kirill Shvakov).
* The `SHOW TABLES` query now also shows temporary tables. Added temporary tables and the `is_temporary` column to `system.tables` (zhang2014).
* Added `DROP TEMPORARY TABLE` and `EXISTS TEMPORARY TABLE` queries (zhang2014).
* Support for `SHOW CREATE TABLE` for temporary tables (zhang2014).
* Added the `system_profile` configuration parameter for the settings used by internal processes.
* Support for loading `object_id` as an attribute in `MongoDB` dictionaries (Pavel Litvinenko).
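A couple of hedged examples for the items above; the cluster name, database, and table are hypothetical:
```sql
-- Best-effort parsing of a DateTime from a non-canonical string.
SELECT parseDateTimeBestEffort('12/03/2018 10:00:00');
-- The cluster() table function; remote() accepts a cluster name given as an identifier in the same way.
SELECT count() FROM cluster(my_cluster, default, hits);
```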
@@ -347,7 +367,9 @@
* `MergeTree` tables can be used without a primary key (you need to specify `ORDER BY tuple()`), as sketched after this list.
* A `Nullable` type can be `CAST` to a non-`Nullable` type if the argument is not `NULL`.
* `RENAME TABLE` can be performed for `VIEW`.
* Added the `throwIf` function.
* Added the `odbc_default_field_size` option, which allows you to extend the maximum size of the value loaded from an ODBC source (by default, it is 1024).
* The `system.processes` table and `SHOW PROCESSLIST` now have the `is_cancelled` and `peak_memory_usage` columns.
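A minimal sketch of a `MergeTree` table without a primary key, as noted in the list above (the table is hypothetical):
```sql
-- ORDER BY tuple() declares an empty sorting key, so no primary key is required.
CREATE TABLE unsorted_events
(
    event_date Date,
    message String
) ENGINE = MergeTree
ORDER BY tuple()
```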
### Improvements:
@@ -371,6 +393,7 @@
* Fixed a bug in merges for `ReplacingMergeTree` tables.
* Fixed synchronous insertions in `Distributed` tables (`insert_distributed_sync = 1`).
* Fixed segfault for certain uses of `FULL` and `RIGHT JOIN` with duplicate columns in subqueries.
* Fixed segfault for certain uses of `replace_running_query` and `KILL QUERY`.
* Fixed the order of the `source` and `last_exception` columns in the `system.dictionaries` table.
* Fixed a bug when the `DROP DATABASE` query did not delete the file with metadata.
* Fixed the `DROP DATABASE` query for `Dictionary` databases.
@@ -390,147 +413,153 @@
* Fixed a race condition in the query execution pipeline that occurred in very rare cases when using `Merge` tables with a large number of tables, and when using `GLOBAL` subqueries.
* Fixed a crash when passing arrays of different sizes to an `arrayReduce` function when using aggregate functions from multiple arguments.
* Prohibited the use of queries with `UNION ALL` in a `MATERIALIZED VIEW`.
* Fixed an error during initialization of the `part_log` system table when the server starts (by default, `part_log` is disabled).
### Backward incompatible changes:
* Removed the `distributed_ddl_allow_replicated_alter` option. This behavior is enabled by default.
* Removed the `strict_insert_defaults` setting. If you were using this functionality, write to `clickhouse-feedback@yandex-team.com`.
* Removed the `UnsortedMergeTree` engine.
## ClickHouse release 1.1.54343, 2018-02-05
* Added macros support for defining cluster names in distributed DDL queries and constructors of Distributed tables: `CREATE TABLE distr ON CLUSTER '{cluster}' (...) ENGINE = Distributed('{cluster}', 'db', 'table')`.
* Now the table index is used for conditions like `expr IN (subquery)` (see the example after this list).
* Improved processing of duplicates when inserting to Replicated tables, so they no longer slow down execution of the replication queue.
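A hedged illustration of the index usage noted above; `hits`, `UserID`, and `banned_users` are hypothetical, and `UserID` is assumed to be part of the table's primary key:
```sql
-- The primary-key index can now be used to evaluate the IN (subquery) condition.
SELECT count() FROM hits WHERE UserID IN (SELECT UserID FROM banned_users)
```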
## ClickHouse release 1.1.54342, 2018-01-22
This release contains bug fixes for the previous release 1.1.54337:
* Fixed a regression in 1.1.54337: if the default user has readonly access, then the server refuses to start up with the message `Cannot create database in readonly mode`.
* Fixed a regression in 1.1.54337: on systems with `systemd`, logs are always written to syslog regardless of the configuration; the watchdog script still uses `init.d`.
* Fixed a regression in 1.1.54337: wrong default configuration in the Docker image.
* Fixed nondeterministic behavior of GraphiteMergeTree (you can see it in log messages `Data after merge is not byte-identical to data on another replicas`).
* Fixed a bug that may lead to inconsistent merges after an OPTIMIZE query on Replicated tables (you may see it in log messages `Part ... intersects previous part`).
* Buffer tables now work correctly when MATERIALIZED columns are present in the destination table (by zhang2014).
* Fixed a bug in the implementation of NULL.
## ClickHouse release 1.1.54337, 2018-01-18
### New features:
* Added support for storage of multidimensional arrays and tuples (`Tuple` data type) in tables.
* Added support for table functions in `DESCRIBE` and `INSERT` queries. Added support for subqueries in `DESCRIBE`. Examples: `DESC TABLE remote('host', default.hits)`; `DESC TABLE (SELECT 1)`; `INSERT INTO TABLE FUNCTION remote('host', default.hits)`. Support for `INSERT INTO TABLE` syntax in addition to `INSERT INTO`.
* Improved support for time zones. The `DateTime` data type can be annotated with the timezone that is used for parsing and formatting in text formats. Example: `DateTime('Europe/Moscow')`. When timezones are specified in functions for `DateTime` arguments, the return type will track the timezone, and the value will be displayed as expected (see the example after this list).
* Added the functions `toTimeZone`, `timeDiff`, `toQuarter`, `toRelativeQuarterNum`. The `toRelativeHour`/`Minute`/`Second` functions can take a value of type `Date` as an argument. The name of the `now` function has been made case-insensitive.
* Added the `toStartOfFifteenMinutes` function (Kirill Shvakov).
* Added the `clickhouse format` tool for formatting queries.
* Added the `format_schema_path` configuration parameter (Marek Vavruša). It is used for specifying a schema in `Cap'n Proto` format. Schema files can be located only in the specified directory.
* Added support for config substitutions (`incl` and `conf.d`) for configuration of external dictionaries and models (Pavel Yakunin).
* Added a column with documentation for the `system.settings` table (Kirill Shvakov).
* Added the `system.parts_columns` table with information about column sizes in each data part of `MergeTree` tables.
* Added the `system.models` table with information about loaded `CatBoost` machine learning models.
* Added the `mysql` and `odbc` table functions along with the corresponding `MySQL` and `ODBC` table engines for working with foreign databases. This feature is in the beta stage.
* Added the possibility to pass an argument of type `AggregateFunction` for the `groupArray` aggregate function (so you can create an array of states of some aggregate function).
* Removed restrictions on various combinations of aggregate function combinators. For example, you can use `avgForEachIf` as well as `avgIfForEach` aggregate functions, which have different behaviors.
* The `-ForEach` aggregate function combinator is extended for the case of aggregate functions of multiple arguments.
* Added support for aggregate functions of `Nullable` arguments even for cases when the function returns a non-`Nullable` result (added with the contribution of Silviu Caragea). Examples: `groupArray`, `groupUniqArray`, `topK`.
* Added the `max_client_network_bandwidth` command line parameter for `clickhouse-client` (Kirill Shvakov).
* Users with the `readonly = 2` setting are allowed to work with TEMPORARY tables (CREATE, DROP, INSERT...) (Kirill Shvakov).
* Added support for using multiple consumers with the `Kafka` engine. Extended configuration options for `Kafka` (Marek Vavruša).
* Added the `intExp2` and `intExp10` functions.
* Added the `sumKahan` aggregate function (computationally stable summation of floating point numbers).
* Added the to*Number*OrNull functions, where *Number* is a numeric type.
* Added support for the `WITH` clause for an `INSERT SELECT` query (by zhang2014).
* Added the settings `http_connection_timeout`, `http_send_timeout`, and `http_receive_timeout`. In particular, these settings are used for downloading data parts for replication. Changing these settings allows for faster failover if the network is overloaded.
* Added support for the `ALTER` query for tables of type `Null` (Anastasiya Tsarkova). Tables of type `Null` are often used with materialized views.
* The `reinterpretAsString` function is extended for all data types that are stored contiguously in memory.
* Added the `--silent` option for the `clickhouse-local` tool. It suppresses printing query execution info in stderr.
* Added support for reading values of type `Date` from text in a format where the month and/or day of the month is specified using a single digit instead of two digits (Amos Bird).
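A short example of the timezone-annotated `DateTime` behavior described in the list above (the literal values are assumed):
```sql
-- Parse a value as UTC, then view it in another timezone with toTimeZone.
SELECT toTimeZone(toDateTime('2018-01-01 12:00:00', 'UTC'), 'Europe/Moscow') AS moscow_time;
```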
### Performance optimizations:
* Improved performance of `min`, `max`, `any`, `anyLast`, `anyHeavy`, `argMin`, `argMax` aggregate functions for String arguments.
* Improved performance of `isInfinite`, `isFinite`, `isNaN`, `roundToExp2` functions.
* Improved performance of parsing and formatting values of type `Date` and `DateTime` in text formats.
* Improved performance and precision of parsing floating point numbers.
* Lowered memory usage for `JOIN` in the case when the left and right parts have columns with identical names that are not contained in `USING`.
* Improved performance of `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, and `corr` aggregate functions by reducing computational stability. The old functions are available under the names: `varSampStable`, `varPopStable`, `stddevSampStable`, `stddevPopStable`, `covarSampStable`, `covarPopStable`, `corrStable`.
### Bug fixes:
* Fixed data deduplication after running a `DROP` or `DETACH PARTITION` query. In the previous version, dropping a partition and inserting the same data again was not working because inserted blocks were considered duplicates.
* Fixed a bug that could lead to incorrect interpretation of the `WHERE` clause for `CREATE MATERIALIZED VIEW` queries with `POPULATE`.
* Fixed a bug in using the `root_path` parameter in the `zookeeper_servers` configuration.
* Fixed unexpected results of passing the `Date` argument to `toStartOfDay`.
* Fixed the `addMonths` and `subtractMonths` functions and the arithmetic for `INTERVAL n MONTH` in cases when the result has the previous year.
* Added missing support for the `UUID` data type for `DISTINCT`, `JOIN`, and `uniq` aggregate functions and external dictionaries (Evgeniy Ivanov). Support for `UUID` is still incomplete.
* Fixed `SummingMergeTree` behavior in cases when the rows summed to zero.
* Various fixes for the `Kafka` engine (Marek Vavruša).
* Fixed incorrect behavior of the `Join` table engine (Amos Bird).
* Fixed incorrect allocator behavior under FreeBSD and OS X.
* The `extractAll` function now supports empty matches.
* Fixed an error that blocked usage of `libressl` instead of `openssl`.
* Fixed the `CREATE TABLE AS SELECT` query from temporary tables.
* Fixed non-atomicity of updating the replication queue. This could lead to replicas being out of sync until the server restarts.
* Fixed possible overflow in `gcd`, `lcm` and `modulo` (`%` operator) (Maks Skorokhod).
* `-preprocessed` files are now created after changing `umask` (`umask` can be changed in the config).
* Fixed a bug in the background check of parts (`MergeTreePartChecker`) when using a custom partition key.
* Fixed parsing of tuples (values of the `Tuple` data type) in text formats.
* Improved error messages about incompatible types passed to `multiIf`, `array` and some other functions.
* Support for `Nullable` types is completely reworked. Fixed bugs that may lead to a server crash. Fixed almost all other bugs related to NULL support: incorrect type conversions in INSERT SELECT, insufficient support for Nullable in HAVING and PREWHERE, `join_use_nulls` mode, Nullable types as arguments of OR operator, etc.
* Fixed various bugs related to internal semantics of data types. Examples: unnecessary summing of `Enum` type fields in `SummingMergeTree`; alignment of `Enum` types in Pretty formats, etc.
* Stricter checks for allowed combinations of composite columns. Fixed several bugs that could lead to a server crash.
* Fixed the overflow when specifying a very large parameter for the `FixedString` data type.
* Fixed a bug in the `topK` aggregate function in a generic case.
* Added the missing check for equality of array sizes in arguments of n-ary variants of aggregate functions with an `-Array` combinator.
* Fixed the `--pager` option for `clickhouse-client` (by ks1322).
* Fixed the precision of the `exp10` function.
* Fixed the behavior of the `visitParamExtract` function for better compliance with documentation.
* Fixed the crash when incorrect data types are specified.
* Fixed the behavior of `DISTINCT` in the case when all columns are constants.
* Fixed query formatting in the case of using the `tupleElement` function with a complex constant expression as the tuple element index.
* Fixed the `Dictionary` table engine for dictionaries of type `range_hashed`.
* Fixed a bug that leads to excessive rows in the result of `FULL` and `RIGHT JOIN` (Amos Bird).
* Fixed a server crash when creating and removing temporary files in `config.d` directories during config reload.
* Fixed the `SYSTEM DROP DNS CACHE` query: the cache was flushed but addresses of cluster nodes were not updated.
* Fixed the behavior of `MATERIALIZED VIEW` after executing `DETACH TABLE` for the table under the view (Marek Vavruša).
### Build improvements:
* The `pbuilder` tool is used for builds. The build process is almost completely independent of the build host environment.
* A single build is used for different OS versions. Packages and binaries have been made compatible with a wide range of Linux systems.
* Added the `clickhouse-test` package. It can be used to run functional tests.
* The source tarball can now be published to the repository. It can be used to reproduce the build without using GitHub.
* Added limited integration with Travis CI. Due to limits on build time in Travis, only the debug build is tested and a limited subset of tests are run.
* Added support for `Cap'n'Proto` in the default build.
* Changed the format of documentation sources from `reStructuredText` to `Markdown`.
* Added support for `systemd` (Vladimir Smirnov). It is disabled by default due to incompatibility with some OS images and can be enabled manually.
* For dynamic code generation, `clang` and `lld` are embedded into the `clickhouse` binary. They can also be invoked as `clickhouse clang` and `clickhouse lld`.
* Removed usage of GNU extensions from the code. Enabled the `-Wextra` option. When building with `clang`, `libc++` is used instead of `libstdc++`.
* Extracted `clickhouse_parsers` and `clickhouse_common_io` libraries to speed up builds of various tools.
### Backward incompatible changes:
* The format for marks in `Log` type tables that contain `Nullable` columns was changed in a backward incompatible way. If you have these tables, you should convert them to the `TinyLog` type before starting up the new server version. To do this, replace `ENGINE = Log` with `ENGINE = TinyLog` in the corresponding `.sql` file in the `metadata` directory. If your table doesn't have `Nullable` columns or if the type of your table is not `Log`, then you don't need to do anything.
* Removed the `experimental_allow_extended_storage_definition_syntax` setting. Now this feature is enabled by default.
* To avoid confusion, the `runningIncome` function has been renamed to `runningDifferenceStartingWithFirstValue`.
* Removed the `FROM ARRAY JOIN arr` syntax when ARRAY JOIN is specified directly after FROM with no table (Amos Bird).
* Removed the `BlockTabSeparated` format that was used solely for demonstration purposes.
* Changed the serialization format of intermediate states of the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, and `corr`. If you have stored states of these aggregate functions in tables (using the `AggregateFunction` data type or materialized views with corresponding states), please write to clickhouse-feedback@yandex-team.com.
* In previous server versions there was an undocumented feature: if an aggregate function depends on parameters, you can still specify it without parameters in the AggregateFunction data type. Example: `AggregateFunction(quantiles, UInt64)` instead of `AggregateFunction(quantiles(0.5, 0.9), UInt64)`. This feature was lost. Although it was undocumented, we plan to support it again in future releases.
* Enum data types cannot be used in min/max aggregate functions. This ability will be returned in a future release.
### Please note when upgrading:
* When doing a rolling update on a cluster, at the point when some of the replicas are running the old version of ClickHouse and some are running the new version, replication is temporarily stopped and the message `unknown parameter 'shard'` appears in the log. Replication will continue after all replicas of the cluster are updated.
* If you have different ClickHouse versions on the cluster, you can get incorrect results for distributed queries with the aggregate functions `varSamp`, `varPop`, `stddevSamp`, `stddevPop`, `covarSamp`, `covarPop`, and `corr`. You should update all cluster nodes.
## ClickHouse release 1.1.54327, 2017-12-21
This release contains bug fixes for the previous release 1.1.54318:
* Fixed bug with possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use one of these versions with Replicated tables, the update is strongly recommended. This issue shows in logs in Warning messages like `Part ... from own log doesn't exist.` The issue is relevant even if you don't see these messages in logs.
## ClickHouse release 1.1.54318, 2017-11-30
This release contains bug fixes for the previous release 1.1.54310:
* Fixed incorrect row deletions during merges in the SummingMergeTree engine
* Fixed a memory leak in unreplicated MergeTree engines
* Fixed performance degradation with frequent inserts in MergeTree engines
@@ -540,27 +569,30 @@ This release contains bug fixes for the previous release 1.1.54310:
## ClickHouse release 1.1.54310, 2017-11-01
### New features:
* Custom partitioning key for the MergeTree family of table engines.
* [Kafka](https://clickhouse.yandex/docs/en/single/index.html#document-table_engines/kafka) table engine.
* Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
* Added support for time zones with non-integer offsets from UTC.
* Added support for arithmetic operations with time intervals.
* The range of values for the Date and DateTime types is extended to the year 2105.
* Added the `CREATE MATERIALIZED VIEW x TO y` query (it specifies an existing table for storing the data of a materialized view); see the example after this list.
* Added the `ATTACH TABLE` query without arguments.
* The processing logic for Nested columns with names ending in -Map in a SummingMergeTree table was extracted to the sumMap aggregate function. You can now specify such columns explicitly.
* Max size of the IP trie dictionary is increased to 128M entries.
* Added the `getSizeOfEnumType` function.
* Added the `sumWithOverflow` aggregate function.
* Added support for the Cap'n Proto input format.
* You can now customize compression level when using the zstd algorithm.
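A hedged sketch of the `CREATE MATERIALIZED VIEW ... TO ...` form mentioned above; the source table, target table, and columns are hypothetical:
```sql
-- The view writes its results into the existing table daily_totals instead of an implicit inner table.
CREATE MATERIALIZED VIEW daily_mv TO daily_totals AS
SELECT toDate(event_time) AS day, count() AS hits
FROM events
GROUP BY day
```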
### Backward incompatible changes:
* Creation of temporary tables with an engine other than Memory is forbidden.
* Explicit creation of tables with the View or MaterializedView engine is forbidden.
* During table creation, a new check verifies that the sampling key expression is included in the primary key.
### Bug fixes:
* Fixed hangups when synchronously inserting into a Distributed table.
* Fixed nonatomic adding and removing of parts in Replicated tables.
* Data inserted into a materialized view is not subjected to unnecessary deduplication.
@@ -568,40 +600,45 @@ This release contains bug fixes for the previous release 1.1.54310:
* Users don't need access permissions to the `default` database to create temporary tables anymore.
* Fixed crashing when specifying the Array type without arguments.
* Fixed hangups when the disk volume containing server logs is full.
* Fixed an overflow in the `toRelativeWeekNum` function for the first week of the Unix epoch.
### Build improvements:
* Several third-party libraries (notably Poco) were updated and converted to git submodules.
## ClickHouse release 1.1.54304, 2017-10-19
### New features:
* TLS support in the native protocol (to enable, set `tcp_ssl_port` in `config.xml`).
### Bug fixes:
* `ALTER` for replicated tables now tries to start running as soon as possible.
* Fixed crashing when reading data with the setting `preferred_block_size_bytes=0`.
* Fixed crashes of `clickhouse-client` when `Page Down` is pressed.
* Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL`
* `FREEZE PARTITION` always works atomically now.
* Empty POST requests now return a response with code 411.
* Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8))`.
* Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables.
* Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b`
* Users are updated correctly when `users.xml` is invalid
* Correct handling when an executable dictionary returns a non-zero response code
* Users are updated correctly with invalid `users.xml`
* Correct handling when an executable dictionary returns a non-zero response code.
## ClickHouse release 1.1.54292, 2017-09-20
### New features:
* Added the `pointInPolygon` function for working with coordinates on a coordinate plane.
* Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree` (see the example after this list).
* Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers.
* The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. Note: There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
* The ClickHouse executable file is now less dependent on the libc version. The same ClickHouse executable file can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
* Reduced the time needed for dynamic compilation of queries.
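A hedged illustration of `sumMap` from the list above; the literal arrays stand in for real key/value columns:

```sql
SELECT sumMap(ids, counts) AS totals
FROM
(
    SELECT [1, 2, 3] AS ids, [10, 10, 10] AS counts
    UNION ALL
    SELECT [2, 3, 4] AS ids, [1, 1, 1] AS counts
);

-- totals = ([1, 2, 3, 4], [10, 11, 11, 1]):
-- values are summed per key, mirroring what SummingMergeTree does for *Map columns.
```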
### Bug fixes:
* Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
* Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
* Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
* Removed excessive logging when restoring replicas.
* Fixed an error in the UNION ALL implementation.
@ -611,57 +648,62 @@ This release contains bug fixes for the previous release 1.1.54310:
## ClickHouse release 1.1.54289, 2017-09-13
### New features:
* `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
* Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
* Added the `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
* Added the aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they can also be accessed with the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
* Added `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
* Added aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`); see the example after this list.
* External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
* External dictionaries can be loaded from MySQL over SSL (the `ssl_cert`, `ssl_key`, and `ssl_ca` parameters).
* External dictionaries can be loaded from MySQL over SSL (`ssl_cert`, `ssl_key`, `ssl_ca` parameters).
* Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
* Support for `DROP TABLE` for temporary tables.
* Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
* Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
* FIFO locking is used during ALTER: an ALTER query isn't blocked indefinitely for continuously running queries.
* Option to set `umask` in the config file.
* Improved performance for queries with `DISTINCT`.
* Improved performance for queries with `DISTINCT`.
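A small hedged example of the new bitwise aggregate functions listed above (the input values are arbitrary):

```sql
SELECT
    groupBitAnd(x) AS bit_and,  -- 5 AND 6 = 4
    groupBitOr(x)  AS bit_or,   -- 5 OR  6 = 7
    groupBitXor(x) AS bit_xor   -- 5 XOR 6 = 3
FROM
(
    SELECT arrayJoin([5, 6]) AS x
);
```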
### Bug fixes:
* Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes didn't get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
* Fixed randomization when choosing hosts for the connection to ZooKeeper.
* Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
* Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
* Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
* Fixed an error that could cause SELECT queries to "hang".
* Improvements to distributed DDL queries.
* Fixed the query `CREATE TABLE ... AS <materialized view>`.
* Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
* Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
* Resolved the deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for `Buffer` tables.
* Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
* Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
* Fixed segfault for the HEAD query.
### Improvements to development workflow and ClickHouse build:
### Improved workflow for developing and assembling ClickHouse:
* You can use `pbuilder` to build ClickHouse.
* You can use `libc++` instead of `libstdc++` for builds on Linux.
* Added instructions for using static code analysis tools: `Coverity`, `clang-tidy`, and `cppcheck`.
* Added instructions for using static code analysis tools: `Coverity`, `clang-tidy`, `cppcheck`.
### Please note when upgrading:
* There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT requests will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` request to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting (to do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>` and restart the server).
* There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, this will cause all other merges to stop running, including merges of small data parts. As a result, INSERT requests will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` request to monitor the situation (see the example queries below). You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You don't need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set `<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool>`, and restart the server.
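A couple of hedged monitoring queries of the kind mentioned in the note above (the exact set of columns in `system.merges` may vary between versions):

```sql
-- Watch currently running merges.
SELECT database, table, elapsed, progress
FROM system.merges;

-- Check how much disk space is currently reserved for merges.
SELECT value
FROM system.metrics
WHERE metric = 'DiskSpaceReservedForMerge';
```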
## ClickHouse release 1.1.54284, 2017-08-29
* This is bugfix release for previous 1.1.54282 release. It fixes ZooKeeper nodes leak in `parts/` directory.
* This is a bugfix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
## ClickHouse release 1.1.54282, 2017-08-23
This is a bugfix release. The following bugs were fixed:
* `DB::Exception: Assertion violation: !_path.empty()` error when inserting into a Distributed table.
* Error when parsing inserted data in RowBinary format if the data begins with ';' character.
This release contains bug fixes for the previous release 1.1.54276:
* Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
* Fixed parsing when inserting in RowBinary format if input data starts with ';'.
* Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
## ClickHouse release 1.1.54276, 2017-08-16
## ClickHouse release 1.1.54276, 2017-08-16
### New features:
* You can use an optional WITH clause in a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`
* Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`
* INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting `insert_distributed_sync=1` (see the example after this list).
* Added the UUID data type for working with 16-byte identifiers.
* Added aliases of CHAR, FLOAT and other types for compatibility with Tableau.
@ -670,13 +712,13 @@ This is a bugfix release. The following bugs were fixed:
* Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)`.
* Added the max_size parameter for the `groupArray(max_size)(column)` aggregate function, and optimized its performance.
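A hedged sketch of the synchronous Distributed INSERT mentioned above; `hits_distributed` and its columns are hypothetical:

```sql
SET insert_distributed_sync = 1;

-- With the setting enabled, this statement returns only after the data
-- has been written on every shard behind the Distributed table.
INSERT INTO hits_distributed (event_date, user_id) VALUES ('2017-08-16', 123);
```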
### Major changes:
### Main changes:
* Improved security: all server files are created with 0640 permissions (can be changed via <umask> config parameter).
* Security improvements: all server files are created with 0640 permissions (can be changed via <umask> config parameter).
* Improved error messages for queries with invalid syntax.
* Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
* Significantly increased the performance of data merges for the ReplacingMergeTree engine.
* Improved performance for asynchronous inserts from a Distributed table by batching multiple source inserts. To enable this functionality, use the setting distributed_directory_monitor_batch_inserts=1.
* Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting distributed_directory_monitor_batch_inserts=1.
### Backward incompatible changes:
@ -685,12 +727,12 @@ This is a bugfix release. The following bugs were fixed:
### Complete list of changes:
* Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
* Optimized thread allocation when reading from a Distributed table.
* Settings can be modified in readonly mode if the value doesn't change.
* Added the ability to read fractional granules of the MergeTree engine in order to meet restrictions on the block size specified in the preferred_block_size_bytes setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
* Optimized stream allocation when reading from a Distributed table.
* Settings can be configured in readonly mode if the value doesn't change.
* Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the `preferred_block_size_bytes` setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns (see the example after this list).
* Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op constexpr`.
* Added new settings for MergeTree engines (the merge_tree section in config.xml):
- replicated_deduplication_window_seconds sets the size of deduplication window in seconds for Replicated tables.
- replicated_deduplication_window_seconds sets the number of seconds allowed for deduplicating inserts in Replicated tables.
- cleanup_delay_period sets how often to start cleanup to remove outdated data.
- replicated_can_become_leader can prevent a replica from becoming the leader (and assigning merges).
* Accelerated cleanup to remove outdated data from ZooKeeper.
@ -699,11 +741,11 @@ This is a bugfix release. The following bugs were fixed:
* Added the "none" value for the compression method.
* You can use multiple dictionaries_config sections in config.xml.
* It is possible to connect to MySQL through a socket in the file system.
* The `system.parts` table has a new column with information about the size of marks, in bytes.
* The system.parts table has a new column with information about the size of marks, in bytes.
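A hedged example of the `preferred_block_size_bytes` item above; the table and column names are hypothetical:

```sql
-- Ask MergeTree reads to aim for roughly 1 MiB blocks; granules may be read
-- partially to honor the limit, reducing peak RAM usage on wide columns.
SET preferred_block_size_bytes = 1048576;

SELECT count()
FROM hits
WHERE url LIKE '%example%';
```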
### Bug fixes:
* Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the _table field.
* Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
* Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
* Fixed possible freezing on "leader election" when starting a server.
* The max_replica_delay_for_distributed_queries setting was ignored when using a local replica of the data source. This has been fixed.
@ -717,11 +759,11 @@ This is a bugfix release. The following bugs were fixed:
* Too many threads were used for parallel aggregation. This has been fixed.
* Fixed how the "if" function works with FixedString arguments.
* SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
* Crashes no longer occur when running `CREATE VIEW IF EXISTS.`
* Running `CREATE VIEW IF EXISTS` no longer causes crashes.
* Fixed incorrect behavior when input_format_skip_unknown_fields=1 is set and there are negative numbers.
* Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
* Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
* Fixed the incorrect interpretation of a SELECT query from Dictionary tables.
* Fixed an incorrect interpretation of a SELECT query from Dictionary tables.
* Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements.
* Fixed the failover for dictionaries with MySQL as the source.
@ -735,7 +777,7 @@ This is a bugfix release. The following bugs were fixed:
### New features:
* Distributed DDL (for example, `CREATE TABLE ON CLUSTER`).
* Distributed DDL (for example, `CREATE TABLE ON CLUSTER`)
* The replicated request `ALTER TABLE CLEAR COLUMN IN PARTITION`.
* The engine for Dictionary tables (access to dictionary data in the form of a table).
* Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
@ -751,8 +793,8 @@ This is a bugfix release. The following bugs were fixed:
### Minor changes:
* If an alert is triggered, the full stack trace is printed into the log.
* Relaxed the verification of the number of damaged or extra data parts at startup (there were too many false positives).
* Now after an alert is triggered, the log prints the full stack trace.
* Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
### Bug fixes:
@ -762,7 +804,7 @@ This is a bugfix release. The following bugs were fixed:
* Changes in how an executable source of cached external dictionaries works.
* Fixed the comparison of strings containing null characters.
* Fixed the comparison of Float32 primary key fields with constants.
* Previously, an incorrect estimate of the size of a field could lead to overly large allocations. This has been fixed.
* Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
* Fixed a crash when querying a Nullable column added to a table using ALTER.
* Fixed a crash when sorting by a Nullable column, if the number of rows is less than LIMIT.
* Fixed an ORDER BY subquery consisting of only constant values.

View File

@ -254,6 +254,7 @@ include (cmake/find_rdkafka.cmake)
include (cmake/find_capnp.cmake)
include (cmake/find_llvm.cmake)
include (cmake/find_cpuid.cmake)
include (cmake/find_consistent-hashing.cmake)
if (ENABLE_TESTS)
include (cmake/find_gtest.cmake)
endif ()

View File

@ -0,0 +1,14 @@
option (USE_INTERNAL_CONSISTENT_HASHING_LIBRARY "Set to FALSE to use consistent-hashing library from Arcadia (Yandex internal repository) instead of bundled" ${NOT_UNBUNDLED})
if (NOT USE_INTERNAL_CONSISTENT_HASHING_LIBRARY)
find_library (CONSISTENT_HASHING_LIBRARY consistent-hashing)
find_path (CONSISTENT_HASHING_INCLUDE_DIR NAMES consistent_hashing.h PATHS ${CONSISTENT_HASHING_INCLUDE_PATHS})
endif ()
if (CONSISTENT_HASHING_LIBRARY AND CONSISTENT_HASHING_INCLUDE_DIR)
else ()
set (USE_INTERNAL_CONSISTENT_HASHING_LIBRARY 1)
set (CONSISTENT_HASHING_LIBRARY consistent-hashing)
endif ()
message (STATUS "Using consistent-hashing: ${CONSISTENT_HASHING_INCLUDE_DIR} : ${CONSISTENT_HASHING_LIBRARY}")

View File

@ -4,9 +4,9 @@ set (SAN_FLAGS "${SAN_FLAGS} -g -fno-omit-frame-pointer -DSANITIZER")
if (SANITIZE)
if (SANITIZE STREQUAL "address")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=address")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=address")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=address -fsanitize-address-use-after-scope")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=address -fsanitize-address-use-after-scope")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address -fsanitize-address-use-after-scope")
if (MAKE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan")
endif ()

View File

@ -2,7 +2,7 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation -Wno-stringop-overflow")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation -Wno-implicit-fallthrough -std=c++1z")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-but-set-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-maybe-uninitialized -Wno-format -Wno-misleading-indentation -Wno-implicit-fallthrough -Wno-class-memaccess -std=c++1z")
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-function -Wno-unused-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-format -Wno-parentheses-equality")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast -Wno-unused-function -Wno-unused-variable -Wno-unused-result -Wno-deprecated-declarations -Wno-non-virtual-dtor -Wno-format -std=c++1z")
@ -137,7 +137,6 @@ if (USE_INTERNAL_CAPNP_LIBRARY)
target_include_directories(${CAPNP_LIBRARY} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/capnproto/c++/src>)
endif ()
if (USE_INTERNAL_POCO_LIBRARY)
set (save_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set (save_CMAKE_C_FLAGS ${CMAKE_C_FLAGS})
@ -166,3 +165,11 @@ if (USE_INTERNAL_LLVM_LIBRARY)
endif ()
add_subdirectory (llvm/llvm)
endif ()
if (USE_INTERNAL_GTEST_LIBRARY)
# Google Test from sources
add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
# avoid problems with <regexp.h>
target_compile_definitions (gtest INTERFACE GTEST_HAS_POSIX_RE=0)
target_include_directories (gtest SYSTEM INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/googletest/include)
endif ()

View File

@ -27,10 +27,32 @@ if (NOT NO_WERROR)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror")
endif ()
# Add some warnings that are not available even with -Wall -Wextra.
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra-semi -Wcomma -Winconsistent-missing-destructor-override -Wunused-exception-parameter -Wshadow-uncaptured-local")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wredundant-parens -Wzero-as-null-pointer-constant")
endif ()
endif ()
if (USE_DEBUG_HELPERS)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include ${ClickHouse_SOURCE_DIR}/libs/libcommon/include/common/iostream_debug_helpers.h")
endif ()
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
# If we leave this optimization enabled, gcc-7 replaces a pair of SSE intrinsics (16 byte load, store) with a call to memcpy.
# It leads to slow code. This is compiler bug. It looks like this:
#
# (gdb) bt
#0 memcpy (destination=0x7faa6e9f1638, source=0x7faa81d9e9a8, size=16) at ../libs/libmemcpy/memcpy.h:11
#1 0x0000000005341c5f in _mm_storeu_si128 (__B=..., __P=<optimized out>) at /usr/lib/gcc/x86_64-linux-gnu/7/include/emmintrin.h:720
#2 memcpySmallAllowReadWriteOverflow15Impl (n=<optimized out>, src=<optimized out>, dst=<optimized out>) at ../dbms/src/Common/memcpySmall.h:37
add_definitions ("-fno-tree-loop-distribute-patterns")
endif ()
find_package (Threads)
add_subdirectory (src)
@ -252,15 +274,7 @@ add_subdirectory (programs)
add_subdirectory (tests)
if (ENABLE_TESTS)
if (USE_INTERNAL_GTEST_LIBRARY)
# Google Test from sources
add_subdirectory(${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest ${CMAKE_CURRENT_BINARY_DIR}/googletest)
# avoid problems with <regexp.h>
target_compile_definitions (gtest INTERFACE GTEST_HAS_POSIX_RE=0)
target_include_directories (gtest SYSTEM INTERFACE ${ClickHouse_SOURCE_DIR}/contrib/googletest/include)
endif ()
macro(grep_gtest_sources BASE_DIR DST_VAR)
macro (grep_gtest_sources BASE_DIR DST_VAR)
# Cold match files that are not in tests/ directories
file(GLOB_RECURSE "${DST_VAR}" RELATIVE "${BASE_DIR}" "gtest*.cpp")
endmacro()

View File

@ -2,10 +2,10 @@
set(VERSION_REVISION 54407 CACHE STRING "")
set(VERSION_MAJOR 18 CACHE STRING "")
set(VERSION_MINOR 12 CACHE STRING "")
set(VERSION_PATCH 0 CACHE STRING "")
set(VERSION_GITHASH b262715bd8f384eca613ec7337883c3d38083f52 CACHE STRING "")
set(VERSION_DESCRIBE v18.12.0-testing CACHE STRING "")
set(VERSION_STRING 18.12.0 CACHE STRING "")
set(VERSION_PATCH 1 CACHE STRING "")
set(VERSION_GITHASH 76eaacf1be15102a732a90949739b6605d8596a1 CACHE STRING "")
set(VERSION_DESCRIBE v18.12.1-testing CACHE STRING "")
set(VERSION_STRING 18.12.1 CACHE STRING "")
# end of autochange
set(VERSION_EXTRA "" CACHE STRING "")

View File

@ -403,6 +403,10 @@ public:
}
#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif
int mainEntryClickHouseBenchmark(int argc, char ** argv)
{
using namespace DB;

View File

@ -63,6 +63,10 @@
#include "Suggest.h"
#endif
#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif
/// http://en.wikipedia.org/wiki/ANSI_escape_code
@ -553,7 +557,7 @@ private:
fd_set fds;
FD_ZERO(&fds);
FD_SET(STDIN_FILENO, &fds);
return select(1, &fds, 0, 0, &timeout) == 1;
return select(1, &fds, nullptr, nullptr, &timeout) == 1;
}
inline const String prompt() const

View File

@ -1990,7 +1990,7 @@ protected:
if (increment_and_check_exit())
return;
}
catch (const Exception & e)
catch (const Exception &)
{
LOG_INFO(log, getCurrentExceptionMessage(false, true));
}

View File

@ -16,7 +16,6 @@ class Context;
class LocalServer : public Poco::Util::Application
{
public:
LocalServer();
void initialize(Poco::Util::Application & self) override;
@ -25,10 +24,9 @@ public:
void init(int argc, char ** argv);
~LocalServer();
~LocalServer() override;
private:
/** Composes CREATE subquery based on passed arguments (--structure --file --table and --input-format)
* This query will be executed first, before queries passed through --query argument
* Returns empty string if it cannot compose that query.
@ -46,7 +44,6 @@ private:
std::string getHelpFooter() const;
protected:
std::unique_ptr<Context> context;
/// Settings specified via command line args

View File

@ -123,7 +123,7 @@ int printHelp(int, char **)
for (auto & application : clickhouse_applications)
std::cerr << "clickhouse " << application.first << " [args] " << std::endl;
return -1;
};
}
bool isClickhouseApp(const std::string & app_suffix, std::vector<char *> & argv)

View File

@ -108,7 +108,7 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
select->format(settings);
std::string query = ss.str();
if (Poco::Data::ODBC::Utility::isError(Poco::Data::ODBC::SQLPrepare(hstmt, reinterpret_cast<SQLCHAR *>(&query[0]), query.size())))
if (Poco::Data::ODBC::Utility::isError(Poco::Data::ODBC::SQLPrepare(hstmt, reinterpret_cast<SQLCHAR *>(query.data()), query.size())))
throw Poco::Data::ODBC::DescriptorException(session.dbc());
if (Poco::Data::ODBC::Utility::isError(SQLExecute(hstmt)))
@ -127,7 +127,7 @@ void ODBCColumnsInfoHandler::handleRequest(Poco::Net::HTTPServerRequest & reques
/// TODO Why 301?
SQLCHAR column_name[301];
/// TODO Result is not checked.
Poco::Data::ODBC::SQLDescribeCol(hstmt, ncol, column_name, sizeof(column_name), NULL, &type, NULL, NULL, NULL);
Poco::Data::ODBC::SQLDescribeCol(hstmt, ncol, column_name, sizeof(column_name), nullptr, &type, nullptr, nullptr, nullptr);
columns.emplace_back(reinterpret_cast<char *>(column_name), getDataType(type));
}

View File

@ -66,7 +66,7 @@ namespace
socket.listen(/* backlog = */ 64);
return address;
};
}
}
void ODBCBridge::handleHelp(const std::string &, const std::string &)

View File

@ -31,6 +31,11 @@
#include <Poco/XML/XMLStream.h>
#include <Common/InterruptListener.h>
#ifndef __clang__
#pragma GCC optimize("-fno-var-tracking-assignments")
#endif
/** Tests launcher for ClickHouse.
* The tool walks through given or default folder in order to find files with
* tests' descriptions and launches it.
@ -1387,6 +1392,7 @@ static void getFilesFromDir(const fs::path & dir, std::vector<String> & input_fi
}
}
int mainEntryClickHousePerformanceTest(int argc, char ** argv)
try
{

View File

@ -14,6 +14,7 @@
#include <Common/escapeForFileName.h>
#include <Common/getFQDNOrHostName.h>
#include <Common/CurrentThread.h>
#include <Common/setThreadName.h>
#include <IO/ReadBufferFromIStream.h>
#include <IO/ZlibInflatingReadBuffer.h>
#include <IO/ReadBufferFromString.h>
@ -644,6 +645,8 @@ void HTTPHandler::trySendExceptionToClient(const std::string & s, int exception_
void HTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response)
{
setThreadName("HTTPHandler");
Output used_output;
/// In case of exception, send stack trace to client.

View File

@ -5,6 +5,7 @@
#include <common/logger_useful.h>
#include <Common/HTMLForm.h>
#include <Common/setThreadName.h>
#include <IO/CompressedWriteBuffer.h>
#include <IO/ReadBufferFromIStream.h>
#include <IO/WriteBufferFromHTTPServerResponse.h>
@ -86,6 +87,8 @@ void InterserverIOHTTPHandler::processQuery(Poco::Net::HTTPServerRequest & reque
void InterserverIOHTTPHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response)
{
setThreadName("IntersrvHandler");
/// In order to work keep-alive.
if (request.getVersion() == Poco::Net::HTTPServerRequest::HTTP_1_1)
response.setChunkedTransferEncoding(true);

View File

@ -85,7 +85,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
const auto counter_increment = counter - prev_counters[i];
prev_counters[i] = counter;
std::string key{ProfileEvents::getDescription(static_cast<ProfileEvents::Event>(i))};
std::string key{ProfileEvents::getName(static_cast<ProfileEvents::Event>(i))};
key_vals.emplace_back(profile_events_path_prefix + key, counter_increment);
}
}
@ -96,7 +96,7 @@ void MetricsTransmitter::transmit(std::vector<ProfileEvents::Count> & prev_count
{
const auto value = CurrentMetrics::values[i].load(std::memory_order_relaxed);
std::string key{CurrentMetrics::getDescription(static_cast<CurrentMetrics::Metric>(i))};
std::string key{CurrentMetrics::getName(static_cast<CurrentMetrics::Metric>(i))};
key_vals.emplace_back(current_metrics_path_prefix + key, value);
}
}

View File

@ -369,9 +369,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
#if defined(__linux__)
if (!TaskStatsInfoGetter::checkPermissions())
{
LOG_INFO(log, "It looks like the process has no CAP_NET_ADMIN capability, some performance statistics will be disabled."
LOG_INFO(log, "It looks like the process has no CAP_NET_ADMIN capability, 'taskstats' performance statistics will be disabled."
" It could happen due to incorrect ClickHouse package installation."
" You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep /usr/bin/clickhouse'");
" You could resolve the problem manually with 'sudo setcap cap_net_admin=+ep /usr/bin/clickhouse'."
" Note that it will not work on 'nosuid' mounted filesystems."
" It also doesn't work if you run clickhouse-server inside network namespace as it happens in some containers.");
}
#else
LOG_INFO(log, "TaskStats is not implemented for this OS. IO accounting will be disabled.");

View File

@ -9,6 +9,7 @@
#include <Common/ClickHouseRevision.h>
#include <Common/Stopwatch.h>
#include <Common/NetException.h>
#include <Common/setThreadName.h>
#include <Common/config_version.h>
#include <IO/Progress.h>
#include <IO/CompressedReadBuffer.h>
@ -49,6 +50,8 @@ namespace ErrorCodes
void TCPHandler::runImpl()
{
setThreadName("TCPHandler");
connection_context = server.context();
connection_context.setSessionContext(connection_context);

View File

@ -59,10 +59,11 @@ private:
size_t old_size = state.dynamic_array_size;
if (old_size < new_size)
{
state.array_of_aggregate_datas = arena.realloc(
state.array_of_aggregate_datas = arena.alignedRealloc(
state.array_of_aggregate_datas,
old_size * nested_size_of_data,
new_size * nested_size_of_data);
new_size * nested_size_of_data,
nested_func->alignOfData());
size_t i = old_size;
char * nested_state = state.array_of_aggregate_datas + i * nested_size_of_data;

View File

@ -36,7 +36,7 @@ inline AggregateFunctionPtr createAggregateFunctionGroupArrayImpl(const DataType
return std::make_shared<GroupArrayGeneralListImpl<GroupArrayListNodeString, has_limit::value>>(argument_type, std::forward<TArgs>(args)...);
return std::make_shared<GroupArrayGeneralListImpl<GroupArrayListNodeGeneral, has_limit::value>>(argument_type, std::forward<TArgs>(args)...);
};
}
static AggregateFunctionPtr createAggregateFunctionGroupArray(const std::string & name, const DataTypes & argument_types, const Array & parameters)

View File

@ -91,7 +91,7 @@ public:
const auto & value = this->data(place).value;
size_t size = value.size();
writeVarUInt(size, buf);
buf.write(reinterpret_cast<const char *>(&value[0]), size * sizeof(value[0]));
buf.write(reinterpret_cast<const char *>(value.data()), size * sizeof(value[0]));
}
void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const override
@ -108,7 +108,7 @@ public:
auto & value = this->data(place).value;
value.resize(size, arena);
buf.read(reinterpret_cast<char *>(&value[0]), size * sizeof(value[0]));
buf.read(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
}
void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const override
@ -155,7 +155,7 @@ struct GroupArrayListNodeBase
/// Clones existing node (does not modify next field)
Node * clone(Arena * arena)
{
return reinterpret_cast<Node *>(const_cast<char *>(arena->insert(reinterpret_cast<char *>(this), sizeof(Node) + size)));
return reinterpret_cast<Node *>(const_cast<char *>(arena->alignedInsert(reinterpret_cast<char *>(this), sizeof(Node) + size, alignof(Node))));
}
/// Write node to buffer
@ -171,7 +171,7 @@ struct GroupArrayListNodeBase
UInt64 size;
readVarUInt(size, buf);
Node * node = reinterpret_cast<Node *>(arena->alloc(sizeof(Node) + size));
Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + size, alignof(Node)));
node->size = size;
buf.read(node->data(), size);
return node;
@ -187,7 +187,7 @@ struct GroupArrayListNodeString : public GroupArrayListNodeBase<GroupArrayListNo
{
StringRef string = static_cast<const ColumnString &>(column).getDataAt(row_num);
Node * node = reinterpret_cast<Node *>(arena->alloc(sizeof(Node) + string.size));
Node * node = reinterpret_cast<Node *>(arena->alignedAlloc(sizeof(Node) + string.size, alignof(Node)));
node->next = nullptr;
node->size = string.size;
memcpy(node->data(), string.data, string.size);
@ -207,7 +207,7 @@ struct GroupArrayListNodeGeneral : public GroupArrayListNodeBase<GroupArrayListN
static Node * allocate(const IColumn & column, size_t row_num, Arena * arena)
{
const char * begin = arena->alloc(sizeof(Node));
const char * begin = arena->alignedAlloc(sizeof(Node), alignof(Node));
StringRef value = column.serializeValueIntoArena(row_num, *arena, begin);
Node * node = reinterpret_cast<Node *>(const_cast<char *>(begin));

View File

@ -111,7 +111,7 @@ public:
const auto & value = this->data(place).value;
size_t size = value.size();
writeVarUInt(size, buf);
buf.write(reinterpret_cast<const char *>(&value[0]), size * sizeof(value[0]));
buf.write(reinterpret_cast<const char *>(value.data()), size * sizeof(value[0]));
}
void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const override
@ -125,7 +125,7 @@ public:
auto & value = this->data(place).value;
value.resize(size, arena);
buf.read(reinterpret_cast<char *>(&value[0]), size * sizeof(value[0]));
buf.read(reinterpret_cast<char *>(value.data()), size * sizeof(value[0]));
}
void insertResultInto(ConstAggregateDataPtr place, IColumn & to) const override

View File

@ -31,20 +31,23 @@ class AggregateFunctionNullBase : public IAggregateFunctionHelper<Derived>
{
protected:
AggregateFunctionPtr nested_function;
size_t prefix_size;
/** In addition to data for nested aggregate function, we keep a flag
* indicating - was there at least one non-NULL value accumulated.
* In case of no not-NULL values, the function will return NULL.
*
* We use prefix_size bytes for flag to satisfy the alignment requirement of nested state.
*/
static AggregateDataPtr nestedPlace(AggregateDataPtr place) noexcept
AggregateDataPtr nestedPlace(AggregateDataPtr place) const noexcept
{
return place + (result_is_nullable ? 1 : 0);
return place + prefix_size;
}
static ConstAggregateDataPtr nestedPlace(ConstAggregateDataPtr place) noexcept
ConstAggregateDataPtr nestedPlace(ConstAggregateDataPtr place) const noexcept
{
return place + (result_is_nullable ? 1 : 0);
return place + prefix_size;
}
static void initFlag(AggregateDataPtr place) noexcept
@ -68,6 +71,10 @@ public:
AggregateFunctionNullBase(AggregateFunctionPtr nested_function_)
: nested_function{nested_function_}
{
if (result_is_nullable)
prefix_size = nested_function->alignOfData();
else
prefix_size = 0;
}
String getName() const override
@ -101,12 +108,12 @@ public:
size_t sizeOfData() const override
{
return 1 + nested_function->sizeOfData();
return prefix_size + nested_function->sizeOfData();
}
size_t alignOfData() const override
{
return 1; /// NOTE This works fine on x86_64 and ok on AArch64. Doesn't work under UBSan.
return nested_function->alignOfData();
}
void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena * arena) const override

View File

@ -134,7 +134,7 @@ public:
size_t old_size = data_to.size();
data_to.resize(data_to.size() + size);
data.getManyFloat(&levels.levels[0], &levels.permutation[0], size, &data_to[old_size]);
data.getManyFloat(levels.levels.data(), levels.permutation.data(), size, &data_to[old_size]);
}
else
{
@ -142,7 +142,7 @@ public:
size_t old_size = data_to.size();
data_to.resize(data_to.size() + size);
data.getMany(&levels.levels[0], &levels.permutation[0], size, &data_to[old_size]);
data.getMany(levels.levels.data(), levels.permutation.data(), size, &data_to[old_size]);
}
}
else

View File

@ -82,7 +82,7 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData
/// Write values only if the state is not overflowed. Otherwise, they are not needed, and only the fact that the state is overflowed is important.
if (count <= threshold)
wb.write(reinterpret_cast<const char *>(&data[0]), count * sizeof(data[0]));
wb.write(reinterpret_cast<const char *>(data), count * sizeof(data[0]));
}
void read(ReadBuffer & rb, UInt8 threshold)
@ -90,7 +90,7 @@ struct __attribute__((__packed__)) AggregateFunctionUniqUpToData
readBinary(count, rb);
if (count <= threshold)
rb.read(reinterpret_cast<char *>(&data[0]), count * sizeof(data[0]));
rb.read(reinterpret_cast<char *>(data), count * sizeof(data[0]));
}
void add(const IColumn & column, size_t row_num, UInt8 threshold)

View File

@ -66,7 +66,7 @@ struct AggregateFunctionWindowFunnelData
/// either sort whole container or do so partially merging ranges afterwards
if (!sorted && !other.sorted)
std::sort(std::begin(events_list), std::end(events_list), Comparator{});
std::stable_sort(std::begin(events_list), std::end(events_list), Comparator{});
else
{
const auto begin = std::begin(events_list);
@ -74,10 +74,10 @@ struct AggregateFunctionWindowFunnelData
const auto end = std::end(events_list);
if (!sorted)
std::sort(begin, middle, Comparator{});
std::stable_sort(begin, middle, Comparator{});
if (!other.sorted)
std::sort(middle, end, Comparator{});
std::stable_sort(middle, end, Comparator{});
std::inplace_merge(begin, middle, end, Comparator{});
}
@ -89,7 +89,7 @@ struct AggregateFunctionWindowFunnelData
{
if (!sorted)
{
std::sort(std::begin(events_list), std::end(events_list), Comparator{});
std::stable_sort(std::begin(events_list), std::end(events_list), Comparator{});
sorted = true;
}
}
@ -215,19 +215,13 @@ public:
void add(AggregateDataPtr place, const IColumn ** columns, const size_t row_num, Arena *) const override
{
UInt8 event_level = 0;
for (const auto i : ext::range(1, events_size + 1))
const auto timestamp = static_cast<const ColumnVector<UInt32> *>(columns[0])->getData()[row_num];
// reverse iteration and stable sorting are needed for events that are qualified by more than one condition.
for (auto i = events_size; i > 0; --i)
{
auto event = static_cast<const ColumnVector<UInt8> *>(columns[i])->getData()[row_num];
if (event)
{
event_level = i;
break;
}
}
if (event_level)
{
this->data(place).add(static_cast<const ColumnVector<UInt32> *>(columns[0])->getData()[row_num], event_level);
this->data(place).add(timestamp, i);
}
}

View File

@ -53,7 +53,7 @@ struct QuantileExact
{
size_t size = array.size();
writeVarUInt(size, buf);
buf.write(reinterpret_cast<const char *>(&array[0]), size * sizeof(array[0]));
buf.write(reinterpret_cast<const char *>(array.data()), size * sizeof(array[0]));
}
void deserialize(ReadBuffer & buf)
@ -61,7 +61,7 @@ struct QuantileExact
size_t size = 0;
readVarUInt(size, buf);
array.resize(size);
buf.read(reinterpret_cast<char *>(&array[0]), size * sizeof(array[0]));
buf.read(reinterpret_cast<char *>(array.data()), size * sizeof(array[0]));
}
/// Get the value of the `level` quantile. The level must be between 0 and 1.

View File

@ -136,7 +136,7 @@ class QuantileTDigest
{
if (unmerged > 0)
{
RadixSort<RadixSortTraits>::execute(&summary[0], summary.size());
RadixSort<RadixSortTraits>::execute(summary.data(), summary.size());
if (summary.size() > 3)
{
@ -212,7 +212,7 @@ public:
{
compress();
writeVarUInt(summary.size(), buf);
buf.write(reinterpret_cast<const char *>(&summary[0]), summary.size() * sizeof(summary[0]));
buf.write(reinterpret_cast<const char *>(summary.data()), summary.size() * sizeof(summary[0]));
}
void deserialize(ReadBuffer & buf)
@ -224,7 +224,7 @@ public:
throw Exception("Too large t-digest summary size", ErrorCodes::TOO_LARGE_ARRAY_SIZE);
summary.resize(size);
buf.read(reinterpret_cast<char *>(&summary[0]), size * sizeof(summary[0]));
buf.read(reinterpret_cast<char *>(summary.data()), size * sizeof(summary[0]));
}
/** Calculates the quantile q [0, 1] based on the digest.

View File

@ -158,7 +158,7 @@ namespace detail
void serialize(WriteBuffer & buf) const
{
writeBinary(elems.size(), buf);
buf.write(reinterpret_cast<const char *>(&elems[0]), elems.size() * sizeof(elems[0]));
buf.write(reinterpret_cast<const char *>(elems.data()), elems.size() * sizeof(elems[0]));
}
void deserialize(ReadBuffer & buf)
@ -166,7 +166,7 @@ namespace detail
size_t size = 0;
readBinary(size, buf);
elems.resize(size);
buf.readStrict(reinterpret_cast<char *>(&elems[0]), size * sizeof(elems[0]));
buf.readStrict(reinterpret_cast<char *>(elems.data()), size * sizeof(elems[0]));
}
UInt16 get(double level) const

View File

@ -3,11 +3,15 @@
#include <DataStreams/ColumnGathererStream.h>
#include <IO/WriteBufferFromArena.h>
#include <Common/SipHash.h>
#include <Common/AlignedBuffer.h>
#include <Common/typeid_cast.h>
#include <Common/Arena.h>
#include <Columns/ColumnsCommon.h>
namespace DB
{
namespace ErrorCodes
{
extern const int PARAMETER_OUT_OF_BOUND;
@ -106,7 +110,6 @@ void ColumnAggregateFunction::insertRangeFrom(const IColumn & from, size_t start
/// Keep shared ownership of aggregation states.
src = from_concrete.getPtr();
auto & data = getData();
size_t old_size = data.size();
data.resize(old_size + length);
memcpy(&data[old_size], &from_concrete.getData()[start], length * sizeof(data[0]));
@ -179,7 +182,7 @@ ColumnPtr ColumnAggregateFunction::indexImpl(const PaddedPODArray<Type> & indexe
return res;
}
INSTANTIATE_INDEX_IMPL(ColumnAggregateFunction);
INSTANTIATE_INDEX_IMPL(ColumnAggregateFunction)
/// Is required to support operations with Set
void ColumnAggregateFunction::updateHashWithValue(size_t n, SipHash & hash) const
@ -246,13 +249,13 @@ void ColumnAggregateFunction::insertData(const char * pos, size_t /*length*/)
getData().push_back(*reinterpret_cast<const AggregateDataPtr *>(pos));
}
void ColumnAggregateFunction::insertFrom(const IColumn & src, size_t n)
void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n)
{
/// Must create new state of aggregate function and take ownership of it,
/// because ownership of states of aggregate function cannot be shared for individual rows,
/// (only as a whole, see comment above).
insertDefault();
insertMergeFrom(src, n);
insertMergeFrom(from, n);
}
void ColumnAggregateFunction::insertFrom(ConstAggregateDataPtr place)
@ -266,9 +269,9 @@ void ColumnAggregateFunction::insertMergeFrom(ConstAggregateDataPtr place)
func->merge(getData().back(), place, &createOrGetArena());
}
void ColumnAggregateFunction::insertMergeFrom(const IColumn & src, size_t n)
void ColumnAggregateFunction::insertMergeFrom(const IColumn & from, size_t n)
{
insertMergeFrom(static_cast<const ColumnAggregateFunction &>(src).getData()[n]);
insertMergeFrom(static_cast<const ColumnAggregateFunction &>(from).getData()[n]);
}
Arena & ColumnAggregateFunction::createOrGetArena()
@ -284,7 +287,7 @@ void ColumnAggregateFunction::insert(const Field & x)
Arena & arena = createOrGetArena();
getData().push_back(arena.alloc(function->sizeOfData()));
getData().push_back(arena.alignedAlloc(function->sizeOfData(), function->alignOfData()));
function->create(getData().back());
ReadBufferFromString read_buffer(x.get<const String &>());
function->deserialize(getData().back(), read_buffer, &arena);
@ -296,7 +299,7 @@ void ColumnAggregateFunction::insertDefault()
Arena & arena = createOrGetArena();
getData().push_back(arena.alloc(function->sizeOfData()));
getData().push_back(arena.alignedAlloc(function->sizeOfData(), function->alignOfData()));
function->create(getData().back());
}
@ -317,7 +320,7 @@ const char * ColumnAggregateFunction::deserializeAndInsertFromArena(const char *
*/
Arena & dst_arena = createOrGetArena();
getData().push_back(dst_arena.alloc(function->sizeOfData()));
getData().push_back(dst_arena.alignedAlloc(function->sizeOfData(), function->alignOfData()));
function->create(getData().back());
/** We will read from src_arena.
@ -411,7 +414,7 @@ void ColumnAggregateFunction::getExtremes(Field & min, Field & max) const
{
/// Place serialized default values into min/max.
PODArrayWithStackMemory<char, 16> place_buffer(func->sizeOfData());
AlignedBuffer place_buffer(func->sizeOfData(), func->alignOfData());
AggregateDataPtr place = place_buffer.data();
String serialized;

View File

@ -1,7 +1,5 @@
#pragma once
#include <Common/Arena.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/IColumn.h>
@ -16,6 +14,10 @@
namespace DB
{
class Arena;
using ArenaPtr = std::shared_ptr<Arena>;
using Arenas = std::vector<ArenaPtr>;
/** Column of states of aggregate functions.
* Presented as an array of pointers to the states of aggregate functions (data).
@ -123,14 +125,14 @@ public:
void insertData(const char * pos, size_t length) override;
void insertFrom(const IColumn & src, size_t n) override;
void insertFrom(const IColumn & from, size_t n) override;
void insertFrom(ConstAggregateDataPtr place);
/// Merge state at last row with specified state in another column.
void insertMergeFrom(ConstAggregateDataPtr place);
void insertMergeFrom(const IColumn & src, size_t n);
void insertMergeFrom(const IColumn & from, size_t n);
Arena & createOrGetArena();

View File

@ -237,11 +237,11 @@ void ColumnArray::insertDefault()
void ColumnArray::popBack(size_t n)
{
auto & offsets = getOffsets();
size_t nested_n = offsets.back() - offsetAt(offsets.size() - n);
auto & offsets_data = getOffsets();
size_t nested_n = offsets_data.back() - offsetAt(offsets_data.size() - n);
if (nested_n)
getData().popBack(nested_n);
offsets.resize_assume_reserved(offsets.size() - n);
offsets_data.resize_assume_reserved(offsets_data.size() - n);
}
@ -313,7 +313,8 @@ bool ColumnArray::hasEqualOffsets(const ColumnArray & other) const
const Offsets & offsets1 = getOffsets();
const Offsets & offsets2 = other.getOffsets();
return offsets1.size() == offsets2.size() && 0 == memcmp(&offsets1[0], &offsets2[0], sizeof(offsets1[0]) * offsets1.size());
return offsets1.size() == offsets2.size()
&& (offsets1.size() == 0 || 0 == memcmp(offsets1.data(), offsets2.data(), sizeof(offsets1[0]) * offsets1.size()));
}
@ -662,7 +663,7 @@ ColumnPtr ColumnArray::indexImpl(const PaddedPODArray<T> & indexes, size_t limit
return res;
}
INSTANTIATE_INDEX_IMPL(ColumnArray);
INSTANTIATE_INDEX_IMPL(ColumnArray)
void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_hint, Permutation & res) const
{
@ -693,6 +694,9 @@ void ColumnArray::getPermutation(bool reverse, size_t limit, int nan_direction_h
ColumnPtr ColumnArray::replicate(const Offsets & replicate_offsets) const
{
if (replicate_offsets.empty())
return cloneEmpty();
if (typeid_cast<const ColumnUInt8 *>(data.get())) return replicateNumber<UInt8>(replicate_offsets);
if (typeid_cast<const ColumnUInt16 *>(data.get())) return replicateNumber<UInt16>(replicate_offsets);
if (typeid_cast<const ColumnUInt32 *>(data.get())) return replicateNumber<UInt32>(replicate_offsets);
@ -748,9 +752,12 @@ ColumnPtr ColumnArray::replicateNumber(const Offsets & replicate_offsets) const
current_new_offset += value_size;
res_offsets.push_back(current_new_offset);
if (value_size)
{
res_data.resize(res_data.size() + value_size);
memcpy(&res_data[res_data.size() - value_size], &src_data[prev_data_offset], value_size * sizeof(T));
}
}
prev_replicate_offset = replicate_offsets[i];
prev_data_offset = src_offsets[i];
@ -820,11 +827,14 @@ ColumnPtr ColumnArray::replicateString(const Offsets & replicate_offsets) const
prev_src_string_offset_local += chars_size;
}
if (sum_chars_size)
{
/// Copies the characters of the array of rows.
res_chars.resize(res_chars.size() + sum_chars_size);
memcpySmallAllowReadWriteOverflow15(
&res_chars[res_chars.size() - sum_chars_size], &src_chars[prev_src_string_offset], sum_chars_size);
}
}
prev_replicate_offset = replicate_offsets[i];
prev_src_offset = src_offsets[i];

View File

@ -27,7 +27,7 @@ int ColumnDecimal<T>::compareAt(size_t n, size_t m, const IColumn & rhs_, int )
{
auto other = static_cast<const Self &>(rhs_);
const T & a = data[n];
const T & b = static_cast<const Self &>(rhs_).data[m];
const T & b = other.data[m];
return decimalLess<T>(b, a, other.scale, scale) ? 1 : (decimalLess<T>(a, b, scale, other.scale) ? -1 : 0);
}
@ -56,28 +56,36 @@ void ColumnDecimal<T>::updateHashWithValue(size_t n, SipHash & hash) const
template <typename T>
void ColumnDecimal<T>::getPermutation(bool reverse, size_t limit, int , IColumn::Permutation & res) const
{
size_t s = data.size();
res.resize(s);
for (size_t i = 0; i < s; ++i)
res[i] = i;
if (limit >= s)
limit = 0;
if (limit)
#if 1 /// TODO: perf test
if (data.size() <= std::numeric_limits<UInt32>::max())
{
if (reverse)
std::partial_sort(res.begin(), res.begin() + limit, res.end(), [](T a, T b) { return a > b; });
else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), [](T a, T b) { return a < b; });
}
else
{
if (reverse)
std::sort(res.begin(), res.end(), [](T a, T b) { return a > b; });
else
std::sort(res.begin(), res.end(), [](T a, T b) { return a < b; });
PaddedPODArray<UInt32> tmp_res;
permutation(reverse, limit, tmp_res);
res.resize(tmp_res.size());
for (size_t i = 0; i < tmp_res.size(); ++i)
res[i] = tmp_res[i];
return;
}
#endif
permutation(reverse, limit, res);
}
template <typename T>
ColumnPtr ColumnDecimal<T>::permute(const IColumn::Permutation & perm, size_t limit) const
{
size_t size = limit ? std::min(data.size(), limit) : data.size();
if (perm.size() < size)
throw Exception("Size of permutation is less than required.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
auto res = this->create(size, scale);
typename Self::Container & res_data = res->getData();
for (size_t i = 0; i < size; ++i)
res_data[i] = data[perm[i]];
return std::move(res);
}
template <typename T>
@ -91,10 +99,13 @@ MutableColumnPtr ColumnDecimal<T>::cloneResized(size_t size) const
new_col.data.resize(size);
size_t count = std::min(this->size(), size);
memcpy(&new_col.data[0], &data[0], count * sizeof(data[0]));
memcpy(new_col.data.data(), data.data(), count * sizeof(data[0]));
if (size > count)
memset(static_cast<void *>(&new_col.data[count]), static_cast<int>(value_type()), (size - count) * sizeof(value_type));
{
void * tail = &new_col.data[count];
memset(tail, 0, (size - count) * sizeof(T));
}
}
return std::move(res);
@ -106,10 +117,8 @@ void ColumnDecimal<T>::insertRangeFrom(const IColumn & src, size_t start, size_t
const ColumnDecimal & src_vec = static_cast<const ColumnDecimal &>(src);
if (start + length > src_vec.data.size())
throw Exception("Parameters start = "
+ toString(start) + ", length = "
+ toString(length) + " are out of bound in ColumnVector<T>::insertRangeFrom method"
" (data.size() = " + toString(src_vec.data.size()) + ").",
throw Exception("Parameters start = " + toString(start) + ", length = " + toString(length) +
" are out of bound in ColumnDecimal<T>::insertRangeFrom method (data.size() = " + toString(src_vec.data.size()) + ").",
ErrorCodes::PARAMETER_OUT_OF_BOUND);
size_t old_size = data.size();
@ -130,9 +139,9 @@ ColumnPtr ColumnDecimal<T>::filter(const IColumn::Filter & filt, ssize_t result_
if (result_size_hint)
res_data.reserve(result_size_hint > 0 ? result_size_hint : size);
const UInt8 * filt_pos = &filt[0];
const UInt8 * filt_pos = filt.data();
const UInt8 * filt_end = filt_pos + size;
const T * data_pos = &data[0];
const T * data_pos = data.data();
while (filt_pos < filt_end)
{
@ -146,27 +155,6 @@ ColumnPtr ColumnDecimal<T>::filter(const IColumn::Filter & filt, ssize_t result_
return std::move(res);
}
template <typename T>
ColumnPtr ColumnDecimal<T>::permute(const IColumn::Permutation & perm, size_t limit) const
{
size_t size = data.size();
if (limit == 0)
limit = size;
else
limit = std::min(size, limit);
if (perm.size() < limit)
throw Exception("Size of permutation is less than required.", ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
auto res = this->create(limit, scale);
typename Self::Container & res_data = res->getData();
for (size_t i = 0; i < limit; ++i)
res_data[i] = data[perm[i]];
return std::move(res);
}
template <typename T>
ColumnPtr ColumnDecimal<T>::index(const IColumn & indexes, size_t limit) const
{

View File

@ -60,8 +60,7 @@ private:
friend class COWPtrHelper<IColumn, Self>;
public:
using value_type = T;
using Container = DecimalPaddedPODArray<value_type>;
using Container = DecimalPaddedPODArray<T>;
private:
ColumnDecimal(const size_t n, UInt32 scale_)
@ -138,6 +137,24 @@ public:
protected:
Container data;
UInt32 scale;
template <typename U>
void permutation(bool reverse, size_t limit, PaddedPODArray<U> & res) const
{
size_t s = data.size();
res.resize(s);
for (U i = 0; i < s; ++i)
res[i] = i;
auto sort_end = res.end();
if (limit && limit < s)
sort_end = res.begin() + limit;
if (reverse)
std::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
else
std::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
}
};
template <typename T>

View File

@ -36,7 +36,7 @@ MutableColumnPtr ColumnFixedString::cloneResized(size_t size) const
new_col.chars.resize(size * n);
size_t count = std::min(this->size(), size);
memcpy(&(new_col.chars[0]), &chars[0], count * n * sizeof(chars[0]));
memcpy(new_col.chars.data(), chars.data(), count * n * sizeof(chars[0]));
if (size > count)
memset(&(new_col.chars[count * n]), '\0', (size - count) * n);
@ -165,9 +165,9 @@ ColumnPtr ColumnFixedString::filter(const IColumn::Filter & filt, ssize_t result
if (result_size_hint)
res->chars.reserve(result_size_hint > 0 ? result_size_hint * n : chars.size());
const UInt8 * filt_pos = &filt[0];
const UInt8 * filt_pos = filt.data();
const UInt8 * filt_end = filt_pos + col_size;
const UInt8 * data_pos = &chars[0];
const UInt8 * data_pos = chars.data();
#if __SSE2__
/** A slightly more optimized version.

View File

@ -366,28 +366,28 @@ void ColumnNullable::getExtremes(Field & min, Field & max) const
min = Null();
max = Null();
const auto & null_map = getNullMapData();
const auto & null_map_data = getNullMapData();
if (const auto col = typeid_cast<const ColumnInt8 *>(nested_column.get()))
getExtremesFromNullableContent<Int8>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnInt16 *>(nested_column.get()))
getExtremesFromNullableContent<Int16>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnInt32 *>(nested_column.get()))
getExtremesFromNullableContent<Int32>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnInt64 *>(nested_column.get()))
getExtremesFromNullableContent<Int64>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnUInt8 *>(nested_column.get()))
getExtremesFromNullableContent<UInt8>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnUInt16 *>(nested_column.get()))
getExtremesFromNullableContent<UInt16>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnUInt32 *>(nested_column.get()))
getExtremesFromNullableContent<UInt32>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnUInt64 *>(nested_column.get()))
getExtremesFromNullableContent<UInt64>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnFloat32 *>(nested_column.get()))
getExtremesFromNullableContent<Float32>(*col, null_map, min, max);
else if (const auto col = typeid_cast<const ColumnFloat64 *>(nested_column.get()))
getExtremesFromNullableContent<Float64>(*col, null_map, min, max);
if (const auto col_i8 = typeid_cast<const ColumnInt8 *>(nested_column.get()))
getExtremesFromNullableContent<Int8>(*col_i8, null_map_data, min, max);
else if (const auto col_i16 = typeid_cast<const ColumnInt16 *>(nested_column.get()))
getExtremesFromNullableContent<Int16>(*col_i16, null_map_data, min, max);
else if (const auto col_i32 = typeid_cast<const ColumnInt32 *>(nested_column.get()))
getExtremesFromNullableContent<Int32>(*col_i32, null_map_data, min, max);
else if (const auto col_i64 = typeid_cast<const ColumnInt64 *>(nested_column.get()))
getExtremesFromNullableContent<Int64>(*col_i64, null_map_data, min, max);
else if (const auto col_u8 = typeid_cast<const ColumnUInt8 *>(nested_column.get()))
getExtremesFromNullableContent<UInt8>(*col_u8, null_map_data, min, max);
else if (const auto col_u16 = typeid_cast<const ColumnUInt16 *>(nested_column.get()))
getExtremesFromNullableContent<UInt16>(*col_u16, null_map_data, min, max);
else if (const auto col_u32 = typeid_cast<const ColumnUInt32 *>(nested_column.get()))
getExtremesFromNullableContent<UInt32>(*col_u32, null_map_data, min, max);
else if (const auto col_u64 = typeid_cast<const ColumnUInt64 *>(nested_column.get()))
getExtremesFromNullableContent<UInt64>(*col_u64, null_map_data, min, max);
else if (const auto col_f32 = typeid_cast<const ColumnFloat32 *>(nested_column.get()))
getExtremesFromNullableContent<Float32>(*col_f32, null_map_data, min, max);
else if (const auto col_f64 = typeid_cast<const ColumnFloat64 *>(nested_column.get()))
getExtremesFromNullableContent<Float64>(*col_f64, null_map_data, min, max);
}

View File

@ -1,5 +1,5 @@
#include <Core/Defines.h>
#include <Common/Arena.h>
#include <Columns/Collator.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsCommon.h>
@ -159,6 +159,36 @@ ColumnPtr ColumnString::permute(const Permutation & perm, size_t limit) const
}
StringRef ColumnString::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
{
size_t string_size = sizeAt(n);
size_t offset = offsetAt(n);
StringRef res;
res.size = sizeof(string_size) + string_size;
char * pos = arena.allocContinue(res.size, begin);
memcpy(pos, &string_size, sizeof(string_size));
memcpy(pos + sizeof(string_size), &chars[offset], string_size);
res.data = pos;
return res;
}
const char * ColumnString::deserializeAndInsertFromArena(const char * pos)
{
const size_t string_size = *reinterpret_cast<const size_t *>(pos);
pos += sizeof(string_size);
const size_t old_size = chars.size();
const size_t new_size = old_size + string_size;
chars.resize(new_size);
memcpy(&chars[old_size], pos, string_size);
offsets.push_back(new_size);
return pos + string_size;
}
ColumnPtr ColumnString::index(const IColumn & indexes, size_t limit) const
{
return selectIndexImpl(*this, indexes, limit);
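The two methods moved out of the header above define the Arena serialization format for a string value: a size_t length followed by the raw bytes. Here is a standalone round-trip sketch of that framing with a std::vector<char> standing in for the Arena; the function names are illustrative only, and the real column additionally keeps a terminating zero byte that this sketch omits.

#include <cstring>
#include <iostream>
#include <string>
#include <vector>

/// Standalone sketch of the framing used above: [size_t length][raw bytes].
/// A std::vector<char> stands in for the Arena.
void serializeString(const std::string & value, std::vector<char> & buf)
{
    size_t string_size = value.size();
    size_t old_size = buf.size();
    buf.resize(old_size + sizeof(string_size) + string_size);
    memcpy(buf.data() + old_size, &string_size, sizeof(string_size));
    memcpy(buf.data() + old_size + sizeof(string_size), value.data(), string_size);
}

const char * deserializeString(const char * pos, std::string & value)
{
    size_t string_size;
    memcpy(&string_size, pos, sizeof(string_size));    /// unaligned-safe read of the length
    pos += sizeof(string_size);
    value.assign(pos, string_size);
    return pos + string_size;
}

int main()
{
    std::vector<char> buf;
    serializeString("hello", buf);
    serializeString("world", buf);

    std::string a;
    std::string b;
    const char * pos = deserializeString(buf.data(), a);
    deserializeString(pos, b);
    std::cout << a << ' ' << b << '\n';    /// prints "hello world"
}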

View File

@ -4,7 +4,6 @@
#include <Columns/IColumn.h>
#include <Common/PODArray.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
#include <Common/memcpySmall.h>
@ -176,34 +175,9 @@ public:
offsets.resize_assume_reserved(offsets.size() - n);
}
StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override
{
size_t string_size = sizeAt(n);
size_t offset = offsetAt(n);
StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
StringRef res;
res.size = sizeof(string_size) + string_size;
char * pos = arena.allocContinue(res.size, begin);
memcpy(pos, &string_size, sizeof(string_size));
memcpy(pos + sizeof(string_size), &chars[offset], string_size);
res.data = pos;
return res;
}
const char * deserializeAndInsertFromArena(const char * pos) override
{
const size_t string_size = *reinterpret_cast<const size_t *>(pos);
pos += sizeof(string_size);
const size_t old_size = chars.size();
const size_t new_size = old_size + string_size;
chars.resize(new_size);
memcpy(&chars[old_size], pos, string_size);
offsets.push_back(new_size);
return pos + string_size;
}
const char * deserializeAndInsertFromArena(const char * pos) override;
void updateHashWithValue(size_t n, SipHash & hash) const override
{

View File

@ -513,4 +513,4 @@ IColumnUnique::IndexesWithOverflow ColumnUnique<ColumnType>::uniqueInsertRangeWi
return indexes_with_overflow;
}
};
}

View File

@ -1,6 +1,7 @@
#include <cstring>
#include <cmath>
#include <common/unaligned.h>
#include <Common/Exception.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
@ -36,14 +37,14 @@ template <typename T>
StringRef ColumnVector<T>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
{
auto pos = arena.allocContinue(sizeof(T), begin);
memcpy(pos, &data[n], sizeof(T));
unalignedStore(pos, data[n]);
return StringRef(pos, sizeof(T));
}
template <typename T>
const char * ColumnVector<T>::deserializeAndInsertFromArena(const char * pos)
{
data.push_back(*reinterpret_cast<const T *>(pos));
data.push_back(unalignedLoad<T>(pos));
return pos + sizeof(T);
}
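The change replaces direct pointer casts with unalignedStore/unalignedLoad because a position inside an Arena is not necessarily aligned for T, and a cast-based access would be undefined behaviour on some platforms. A minimal standalone sketch of such helpers follows; these are memcpy-based stand-ins, not the actual implementations from common/unaligned.h.

#include <cstdint>
#include <cstring>
#include <iostream>

/// Minimal stand-ins for unalignedLoad/unalignedStore: a fixed-size memcpy usually compiles
/// to a single move, but stays well-defined even when `address` is not aligned for T.
template <typename T>
T unalignedLoadExample(const void * address)
{
    T res;
    memcpy(&res, address, sizeof(res));
    return res;
}

template <typename T>
void unalignedStoreExample(void * address, const T & src)
{
    memcpy(address, &src, sizeof(src));
}

int main()
{
    char buf[sizeof(uint64_t) + 1];
    unalignedStoreExample<uint64_t>(buf + 1, 42);            /// deliberately misaligned by one byte
    std::cout << unalignedLoadExample<uint64_t>(buf + 1);    /// prints 42
}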
@ -115,7 +116,7 @@ MutableColumnPtr ColumnVector<T>::cloneResized(size_t size) const
new_col.data.resize(size);
size_t count = std::min(this->size(), size);
memcpy(&new_col.data[0], &data[0], count * sizeof(data[0]));
memcpy(new_col.data.data(), data.data(), count * sizeof(data[0]));
if (size > count)
memset(static_cast<void *>(&new_col.data[count]), static_cast<int>(value_type()), (size - count) * sizeof(value_type));
@ -160,9 +161,9 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
if (result_size_hint)
res_data.reserve(result_size_hint > 0 ? result_size_hint : size);
const UInt8 * filt_pos = &filt[0];
const UInt8 * filt_pos = filt.data();
const UInt8 * filt_end = filt_pos + size;
const T * data_pos = &data[0];
const T * data_pos = data.data();
#if __SSE2__
/** A slightly more optimized version.

View File

@ -21,7 +21,7 @@ size_t countBytesInFilter(const IColumn::Filter & filt)
* It would be better to use != 0, but then SSE2 could not be used.
*/
const Int8 * pos = reinterpret_cast<const Int8 *>(&filt[0]);
const Int8 * pos = reinterpret_cast<const Int8 *>(filt.data());
const Int8 * end = pos + filt.size();
#if __SSE2__ && __POPCNT__
@ -196,10 +196,10 @@ namespace
res_elems.reserve((result_size_hint * src_elems.size() + size - 1) / size);
}
const UInt8 * filt_pos = &filt[0];
const UInt8 * filt_pos = filt.data();
const auto filt_end = filt_pos + size;
auto offsets_pos = &src_offsets[0];
auto offsets_pos = src_offsets.data();
const auto offsets_begin = offsets_pos;
/// copy array ending at *end_offset_ptr

View File

@ -0,0 +1,49 @@
#include <Common/AlignedBuffer.h>
#include <Common/Exception.h>
#include <Common/formatReadable.h>
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_ALLOCATE_MEMORY;
}
void AlignedBuffer::alloc(size_t size, size_t alignment)
{
void * new_buf;
int res = ::posix_memalign(&new_buf, std::max(alignment, sizeof(void*)), size);
if (0 != res)
throwFromErrno("Cannot allocate memory (posix_memalign), size: "
+ formatReadableSizeWithBinarySuffix(size) + ", alignment: " + formatReadableSizeWithBinarySuffix(alignment) + ".",
ErrorCodes::CANNOT_ALLOCATE_MEMORY, res);
buf = new_buf;
}
void AlignedBuffer::dealloc()
{
if (buf)
::free(buf);
}
void AlignedBuffer::reset(size_t size, size_t alignment)
{
dealloc();
alloc(size, alignment);
}
AlignedBuffer::AlignedBuffer(size_t size, size_t alignment)
{
alloc(size, alignment);
}
AlignedBuffer::~AlignedBuffer()
{
dealloc();
}
}

View File

@ -0,0 +1,36 @@
#pragma once
#include <cstdlib>
#include <utility>
#include <boost/noncopyable.hpp>
namespace DB
{
/** Aligned piece of memory.
* It can only be allocated and destroyed.
* MemoryTracker is not used. It is intended for small pieces of memory.
*/
class AlignedBuffer : private boost::noncopyable
{
private:
void * buf = nullptr;
void alloc(size_t size, size_t alignment);
void dealloc();
public:
AlignedBuffer() {}
AlignedBuffer(size_t size, size_t alignment);
AlignedBuffer(AlignedBuffer && old) { std::swap(buf, old.buf); }
~AlignedBuffer();
void reset(size_t size, size_t alignment);
char * data() { return static_cast<char *>(buf); }
const char * data() const { return static_cast<const char *>(buf); }
};
}
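A possible usage sketch for the new AlignedBuffer: allocate aligned scratch memory and construct an object in it with placement new. It assumes the ClickHouse headers are on the include path and uses a made-up ExampleState type; AlignedBuffer only manages the bytes, so the object must be destroyed manually.

#include <Common/AlignedBuffer.h>
#include <iostream>
#include <new>

/// A made-up 32-byte-aligned state, as SIMD-friendly aggregate function states may require.
struct alignas(32) ExampleState
{
    double sums[4] = {};
};

int main()
{
    DB::AlignedBuffer buffer(sizeof(ExampleState), alignof(ExampleState));

    ExampleState * state = new (buffer.data()) ExampleState();    /// placement-new into the aligned memory
    state->sums[0] += 1.0;
    std::cout << state->sums[0] << '\n';

    state->~ExampleState();    /// AlignedBuffer only frees the bytes; the object is destroyed manually
}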

View File

@ -6,6 +6,7 @@
#include <boost/noncopyable.hpp>
#include <common/likely.h>
#include <Core/Defines.h>
#include <Common/memcpySmall.h>
#include <Common/ProfileEvents.h>
#include <Common/Allocator.h>
@ -31,12 +32,15 @@ namespace DB
class Arena : private boost::noncopyable
{
private:
/// Padding allows using 'memcpySmallAllowReadWriteOverflow15' instead of 'memcpy'.
static constexpr size_t pad_right = 15;
/// Contiguous chunk of memory and pointer to free space inside it. Member of single-linked list.
struct Chunk : private Allocator<false> /// empty base optimization
{
char * begin;
char * pos;
char * end;
char * end; /// does not include padding.
Chunk * prev;
@ -47,7 +51,7 @@ private:
begin = reinterpret_cast<char *>(Allocator::alloc(size_));
pos = begin;
end = begin + size_;
end = begin + size_ - pad_right;
prev = prev_;
}
@ -59,7 +63,7 @@ private:
delete prev;
}
size_t size() const { return end - begin; }
size_t size() const { return end + pad_right - begin; }
size_t remaining() const { return end - pos; }
};
@ -95,11 +99,12 @@ private:
/// Add next contiguous chunk of memory with size not less than specified.
void NO_INLINE addChunk(size_t min_size)
{
head = new Chunk(nextSize(min_size), head);
head = new Chunk(nextSize(min_size + pad_right), head);
size_in_bytes += head->size();
}
friend class ArenaAllocator;
template <size_t> friend class AlignedArenaAllocator;
public:
Arena(size_t initial_size_ = 4096, size_t growth_factor_ = 2, size_t linear_growth_threshold_ = 128 * 1024 * 1024)
@ -124,6 +129,26 @@ public:
return res;
}
/// Get a piece of memory with the specified alignment
char * alignedAlloc(size_t size, size_t alignment)
{
do
{
void * head_pos = head->pos;
size_t space = head->end - head->pos;
auto res = static_cast<char *>(std::align(alignment, size, head_pos, space));
if (res)
{
head->pos = static_cast<char *>(head_pos);
head->pos += size;
return res;
}
addChunk(size + alignment);
} while (true);
}
/** Rollback just performed allocation.
* Must pass size not more that was just allocated.
*/
@ -132,7 +157,7 @@ public:
head->pos -= size;
}
/** Begin or expand allocation of contiguous piece of memory.
/** Begin or expand allocation of contiguous piece of memory without alignment.
* 'begin' - the current beginning of the piece of memory if it needs to be expanded, or nullptr if a new piece needs to be started.
* If there is no space in the chunk to expand the current piece of memory, the whole piece is copied to a new chunk and the value of 'begin' changes.
* NOTE This method is usable only for latest allocation. For earlier allocations, see 'realloc' method.
@ -159,12 +184,51 @@ public:
return res;
}
char * alignedAllocContinue(size_t size, char const *& begin, size_t alignment)
{
char * res;
do
{
void * head_pos = head->pos;
size_t space = head->end - head->pos;
res = static_cast<char *>(std::align(alignment, size, head_pos, space));
if (res)
{
head->pos = static_cast<char *>(head_pos);
head->pos += size;
break;
}
char * prev_end = head->pos;
addChunk(size + alignment);
if (begin)
begin = alignedInsert(begin, prev_end - begin, alignment);
else
break;
} while (true);
if (!begin)
begin = res;
return res;
}
/// NOTE Old memory region is wasted.
char * realloc(const char * old_data, size_t old_size, size_t new_size)
{
char * res = alloc(new_size);
if (old_data)
memcpy(res, old_data, old_size);
memcpySmallAllowReadWriteOverflow15(res, old_data, old_size);
return res;
}
char * alignedRealloc(const char * old_data, size_t old_size, size_t new_size, size_t alignment)
{
char * res = alignedAlloc(new_size, alignment);
if (old_data)
memcpySmallAllowReadWriteOverflow15(res, old_data, old_size);
return res;
}
@ -172,7 +236,14 @@ public:
const char * insert(const char * data, size_t size)
{
char * res = alloc(size);
memcpy(res, data, size);
memcpySmallAllowReadWriteOverflow15(res, data, size);
return res;
}
const char * alignedInsert(const char * data, size_t size, size_t alignment)
{
char * res = alignedAlloc(size, alignment);
memcpySmallAllowReadWriteOverflow15(res, data, size);
return res;
}
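A short usage sketch of the new aligned allocation path next to the existing unaligned one, assuming <Common/Arena.h> from this tree. The std::align loop above either carves the piece out of the current chunk or falls back to allocating a new chunk of at least size + alignment bytes.

#include <Common/Arena.h>
#include <cstdint>
#include <cstring>
#include <iostream>

int main()
{
    DB::Arena arena;

    /// Unaligned allocation, as before.
    char * s = arena.alloc(5);
    memcpy(s, "abcde", 5);

    /// Aligned allocation: the result is usable for data that needs 16-byte alignment.
    char * p = arena.alignedAlloc(64, 16);
    std::cout << (reinterpret_cast<uintptr_t>(p) % 16 == 0) << '\n';    /// prints 1
}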

View File

@ -38,6 +38,36 @@ public:
};
template <size_t alignment>
class AlignedArenaAllocator
{
public:
static void * alloc(size_t size, Arena * arena)
{
return arena->alignedAlloc(size, alignment);
}
static void * realloc(void * buf, size_t old_size, size_t new_size, Arena * arena)
{
char const * data = reinterpret_cast<char *>(buf);
if (data + old_size == arena->head->pos)
{
arena->alignedAllocContinue(new_size - old_size, data, alignment);
return reinterpret_cast<void *>(const_cast<char *>(data));
}
else
{
return arena->alignedRealloc(data, old_size, new_size, alignment);
}
}
static void free(void * /*buf*/, size_t /*size*/)
{
}
};
/// Switches to ordinary Allocator after REAL_ALLOCATION_TRESHOLD bytes to avoid fragmentation and trash in Arena.
template <size_t REAL_ALLOCATION_TRESHOLD = 4096, typename TRealAllocator = Allocator<false>, typename TArenaAllocator = ArenaAllocator>
class MixedArenaAllocator : private TRealAllocator
@ -72,6 +102,10 @@ public:
};
template <size_t alignment, size_t REAL_ALLOCATION_TRESHOLD = 4096>
using MixedAlignedArenaAllocator = MixedArenaAllocator<REAL_ALLOCATION_TRESHOLD, Allocator<false>, AlignedArenaAllocator<alignment>>;
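A hedged sketch of calling AlignedArenaAllocator directly (it is normally plugged into PODArray-like containers as an allocator policy). This assumes the class lives in namespace DB like the surrounding allocators, and that the friend declaration in Arena makes the in-place realloc path legal, as the header suggests.

#include <Common/Arena.h>
#include <Common/ArenaAllocator.h>
#include <cstdint>
#include <cstring>
#include <iostream>

int main()
{
    DB::Arena arena;
    using Allocator = DB::AlignedArenaAllocator<16>;

    /// Take a 16-byte-aligned piece from the arena and grow it later.
    void * buf = Allocator::alloc(32, &arena);
    memset(buf, 0, 32);

    /// Grows in place when this is still the last allocation in the current chunk,
    /// otherwise falls back to alignedRealloc (the old region is wasted).
    buf = Allocator::realloc(buf, 32, 128, &arena);

    std::cout << (reinterpret_cast<uintptr_t>(buf) % 16 == 0) << '\n';    /// prints 1
    Allocator::free(buf, 128);    /// no-op: the memory lives until the arena is destroyed
}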
template <size_t N = 64, typename Base = ArenaAllocator>
class ArenaAllocatorWithStackMemoty : public Base
{

View File

@ -1,13 +1,20 @@
#pragma once
#include <cstddef>
#include <cstdlib>
#include <common/likely.h>
#include <Common/Exception.h>
#include <Common/formatReadable.h>
namespace DB
{
namespace ErrorCodes
{
extern const int CANNOT_ALLOCATE_MEMORY;
}
/** An array of (almost) unchangeable size:
* the size is specified in the constructor;
* the `resize` method removes old data, and is needed only for
@ -20,6 +27,7 @@ namespace DB
* `sizeof` is equal to the size of one pointer.
*
* Not exception-safe.
*
* Copying is supported via assign() method. Moving empties the original object.
* That is, it is inconvenient to use this array in many cases.
*
@ -78,7 +86,7 @@ public:
if (this == &src)
return;
setEmpty();
data = src.data;
data_ptr = src.data_ptr;
src.setEmpty();
}
@ -87,7 +95,7 @@ public:
if (this == &src)
return *this;
uninit();
data = src.data;
data_ptr = src.data_ptr;
src.setEmpty();
return *this;
@ -147,6 +155,16 @@ public:
return elem(i);
}
T * data()
{
return elemPtr(0);
}
const T * data() const
{
return elemPtr(0);
}
/** Get the piece of memory in which the element should be located.
* The function is intended to initialize an element,
* which has not yet been initialized
@ -154,17 +172,17 @@ public:
*/
char * place(size_t i)
{
return data + sizeof(T) * i;
return data_ptr + sizeof(T) * i;
}
using iterator = T *;
using const_iterator = const T *;
iterator begin() { return &elem(0); }
iterator end() { return &elem(size()); }
iterator begin() { return elemPtr(0); }
iterator end() { return elemPtr(size()); }
const_iterator begin() const { return &elem(0); }
const_iterator end() const { return &elem(size()); }
const_iterator begin() const { return elemPtr(0); }
const_iterator end() const { return elemPtr(size()); }
bool operator== (const AutoArray<T> & rhs) const
{
@ -207,47 +225,69 @@ public:
}
private:
char * data;
static constexpr size_t alignment = alignof(T);
/// Bytes allocated to store the size of the array before the data. Padded up so that its minimum size equals the alignment.
/// The padding is on the left and the size is stored on the right (just before the first data element).
static constexpr size_t prefix_size = std::max(sizeof(size_t), alignment);
char * data_ptr;
size_t & m_size()
{
return reinterpret_cast<size_t *>(data)[-1];
return reinterpret_cast<size_t *>(data_ptr)[-1];
}
size_t m_size() const
{
return reinterpret_cast<const size_t *>(data)[-1];
return reinterpret_cast<const size_t *>(data_ptr)[-1];
}
T * elemPtr(size_t i)
{
return reinterpret_cast<T *>(data_ptr) + i;
}
const T * elemPtr(size_t i) const
{
return reinterpret_cast<const T *>(data_ptr) + i;
}
T & elem(size_t i)
{
return reinterpret_cast<T *>(data)[i];
return *elemPtr(i);
}
const T & elem(size_t i) const
{
return reinterpret_cast<const T *>(data)[i];
return *elemPtr(i);
}
void setEmpty()
{
data = const_cast<char *>(reinterpret_cast<const char *>(&empty_auto_array_helper)) + sizeof(size_t);
data_ptr = const_cast<char *>(reinterpret_cast<const char *>(&empty_auto_array_helper)) + sizeof(size_t);
}
void init(size_t size_, bool dont_init_elems)
void init(size_t new_size, bool dont_init_elems)
{
if (!size_)
if (!new_size)
{
setEmpty();
return;
}
data = new char[size_ * sizeof(T) + sizeof(size_t)];
data += sizeof(size_t);
m_size() = size_;
void * new_data = nullptr;
int res = posix_memalign(&new_data, alignment, prefix_size + new_size * sizeof(T));
if (0 != res)
throwFromErrno("Cannot allocate memory (posix_memalign) " + formatReadableSizeWithBinarySuffix(new_size) + ".",
ErrorCodes::CANNOT_ALLOCATE_MEMORY, res);
data_ptr = static_cast<char *>(new_data);
data_ptr += prefix_size;
m_size() = new_size;
if (!dont_init_elems)
for (size_t i = 0; i < size_; ++i)
for (size_t i = 0; i < new_size; ++i)
new (place(i)) T();
}
@ -255,13 +295,13 @@ private:
{
size_t s = size();
if (likely(s))
if (s)
{
for (size_t i = 0; i < s; ++i)
elem(i).~T();
data -= sizeof(size_t);
delete[] data;
data_ptr -= prefix_size;
free(data_ptr);
}
}
};
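The new allocation scheme stores the element count in a prefix that is rounded up to the element alignment, so the first element itself stays aligned. Below is a standalone sketch of that size-prefix layout on POSIX (posix_memalign); the helper names are illustrative and not part of the commit.

#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <iostream>

/// Size of the prefix that holds the element count, rounded up so the first element stays aligned.
template <typename T>
constexpr size_t prefixSize()
{
    return std::max(sizeof(size_t), std::max(alignof(T), sizeof(void *)));
}

/// Layout: [padding][size_t count][elements...]; the pointer handed out points at the first element.
template <typename T>
T * allocWithSizePrefix(size_t count)
{
    void * mem = nullptr;
    if (0 != posix_memalign(&mem, std::max(alignof(T), sizeof(void *)), prefixSize<T>() + count * sizeof(T)))
        return nullptr;

    char * data_ptr = static_cast<char *>(mem) + prefixSize<T>();
    reinterpret_cast<size_t *>(data_ptr)[-1] = count;    /// the count lives just before the first element
    return reinterpret_cast<T *>(data_ptr);
}

template <typename T>
void freeWithSizePrefix(T * data_ptr)
{
    free(reinterpret_cast<char *>(data_ptr) - prefixSize<T>());
}

int main()
{
    double * arr = allocWithSizePrefix<double>(3);
    arr[0] = 1.5;
    std::cout << reinterpret_cast<size_t *>(arr)[-1] << '\n';    /// prints 3
    freeWithSizePrefix(arr);
}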

View File

@ -141,12 +141,10 @@ struct ConfigReloader::FileWithTimestamp
};
void ConfigReloader::FilesChangesTracker::addIfExists(const std::string & path)
void ConfigReloader::FilesChangesTracker::addIfExists(const std::string & path_to_add)
{
if (!path.empty() && Poco::File(path).exists())
{
files.emplace(path, Poco::File(path).getLastModified().epochTime());
}
if (!path_to_add.empty() && Poco::File(path_to_add).exists())
files.emplace(path_to_add, Poco::File(path_to_add).getLastModified().epochTime());
}
bool ConfigReloader::FilesChangesTracker::isDifferOrNewerThan(const FilesChangesTracker & rhs)

View File

@ -56,7 +56,7 @@ private:
{
std::set<FileWithTimestamp> files;
void addIfExists(const std::string & path);
void addIfExists(const std::string & path_to_add);
bool isDifferOrNewerThan(const FilesChangesTracker & rhs);
};

View File

@ -3,68 +3,80 @@
/// Available metrics. Add something here as you wish.
#define APPLY_FOR_METRICS(M) \
M(Query) \
M(Merge) \
M(PartMutation) \
M(ReplicatedFetch) \
M(ReplicatedSend) \
M(ReplicatedChecks) \
M(BackgroundPoolTask) \
M(BackgroundSchedulePoolTask) \
M(DiskSpaceReservedForMerge) \
M(DistributedSend) \
M(QueryPreempted) \
M(TCPConnection) \
M(HTTPConnection) \
M(InterserverConnection) \
M(OpenFileForRead) \
M(OpenFileForWrite) \
M(Read) \
M(Write) \
M(SendExternalTables) \
M(QueryThread) \
M(ReadonlyReplica) \
M(LeaderReplica) \
M(MemoryTracking) \
M(MemoryTrackingInBackgroundProcessingPool) \
M(MemoryTrackingInBackgroundSchedulePool) \
M(MemoryTrackingForMerges) \
M(LeaderElection) \
M(EphemeralNode) \
M(ZooKeeperSession) \
M(ZooKeeperWatch) \
M(ZooKeeperRequest) \
M(DelayedInserts) \
M(ContextLockWait) \
M(StorageBufferRows) \
M(StorageBufferBytes) \
M(DictCacheRequests) \
M(Revision) \
M(RWLockWaitingReaders) \
M(RWLockWaitingWriters) \
M(RWLockActiveReaders) \
M(RWLockActiveWriters)
M(Query, "Number of executing queries") \
M(Merge, "Number of executing background merges") \
M(PartMutation, "Number of mutations (ALTER DELETE/UPDATE)") \
M(ReplicatedFetch, "Number of data parts fetching from replica") \
M(ReplicatedSend, "Number of data parts sending to replicas") \
M(ReplicatedChecks, "Number of data parts checking for consistency") \
M(BackgroundPoolTask, "Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches or replication queue bookkeeping)") \
M(BackgroundSchedulePoolTask, "Number of active tasks in BackgroundSchedulePool. This pool is used for periodic tasks of ReplicatedMergeTree like cleaning old data parts, altering data parts, replica re-initialization, etc.") \
M(DiskSpaceReservedForMerge, "Disk space reserved for currently running background merges. It is slightly more than total size of currently merging parts.") \
M(DistributedSend, "Number of connections sending data, that was INSERTed to Distributed tables, to remote servers. Both synchronous and asynchronous mode.") \
M(QueryPreempted, "Number of queries that are stopped and waiting due to 'priority' setting.") \
M(TCPConnection, "Number of connections to TCP server (clients with native interface)") \
M(HTTPConnection, "Number of connections to HTTP server") \
M(InterserverConnection, "Number of connections from other replicas to fetch parts") \
M(OpenFileForRead, "Number of files open for reading") \
M(OpenFileForWrite, "Number of files open for writing") \
M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in flight") \
M(Write, "Number of write (write, pwrite, io_getevents, etc.) syscalls in flight") \
M(SendExternalTables, "Number of connections that are sending data for external tables to remote servers. External tables are used to implement GLOBAL IN and GLOBAL JOIN operators with distributed subqueries.") \
M(QueryThread, "Number of query processing threads") \
M(ReadonlyReplica, "Number of Replicated tables that are currently in readonly state due to re-initialization after ZooKeeper session loss or due to startup without ZooKeeper configured.") \
M(LeaderReplica, "Number of Replicated tables that are leaders. Leader replica is responsible for assigning merges, cleaning old blocks for deduplications and a few more bookkeeping tasks. There may be no more than one leader across all replicas at one moment of time. If there is no leader it will be elected soon, or its absence indicates an issue.") \
M(MemoryTracking, "Total amount of memory (bytes) allocated in currently executing queries. Note that some memory allocations may not be accounted.") \
M(MemoryTrackingInBackgroundProcessingPool, "Total amount of memory (bytes) allocated in background processing pool (that is dedicated for background merges, mutations and fetches). Note that this value may include a drift when the memory was allocated in a context of background processing pool and freed in other context or vice-versa. This happens naturally due to caches for table indexes and doesn't indicate memory leaks.") \
M(MemoryTrackingInBackgroundSchedulePool, "Total amount of memory (bytes) allocated in background schedule pool (that is dedicated for bookkeeping tasks of Replicated tables).") \
M(MemoryTrackingForMerges, "Total amount of memory (bytes) allocated for background merges. Included in MemoryTrackingInBackgroundProcessingPool. Note that this value may include a drift when the memory was allocated in a context of background processing pool and freed in other context or vice-versa. This happens naturally due to caches for tables indexes and doesn't indicate memory leaks.") \
M(LeaderElection, "Number of Replicas participating in leader election. Equals to total number of replicas in usual cases.") \
M(EphemeralNode, "Number of ephemeral nodes held in ZooKeeper.") \
M(ZooKeeperSession, "Number of sessions (connections) to ZooKeeper. Should be no more than one, because using more than one connection to ZooKeeper may lead to bugs due to lack of linearizability (stale reads) that ZooKeeper consistency model allows.") \
M(ZooKeeperWatch, "Number of watches (event subscriptions) in ZooKeeper.") \
M(ZooKeeperRequest, "Number of requests to ZooKeeper in flight.") \
M(DelayedInserts, "Number of INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree table.") \
M(ContextLockWait, "Number of threads waiting for the lock in Context. This is a global lock.") \
M(StorageBufferRows, "Number of rows in buffers of Buffer tables") \
M(StorageBufferBytes, "Number of bytes in buffers of Buffer tables") \
M(DictCacheRequests, "Number of in-flight requests to data sources of dictionaries of cache type.") \
M(Revision, "Revision of the server. It is a number incremented for every release or release candidate.") \
M(RWLockWaitingReaders, "Number of threads waiting for read on a table RWLock.") \
M(RWLockWaitingWriters, "Number of threads waiting for write on a table RWLock.") \
M(RWLockActiveReaders, "Number of threads holding read lock in a table RWLock.") \
M(RWLockActiveWriters, "Number of threads holding write lock in a table RWLock.") \
namespace CurrentMetrics
{
#define M(NAME) extern const Metric NAME = __COUNTER__;
#define M(NAME, DOCUMENTATION) extern const Metric NAME = __COUNTER__;
APPLY_FOR_METRICS(M)
#undef M
constexpr Metric END = __COUNTER__;
std::atomic<Value> values[END] {}; /// Global variable, initialized by zeros.
const char * getDescription(Metric event)
const char * getName(Metric event)
{
static const char * descriptions[] =
static const char * strings[] =
{
#define M(NAME) #NAME,
#define M(NAME, DOCUMENTATION) #NAME,
APPLY_FOR_METRICS(M)
#undef M
};
return descriptions[event];
return strings[event];
}
const char * getDocumentation(Metric event)
{
static const char * strings[] =
{
#define M(NAME, DOCUMENTATION) DOCUMENTATION,
APPLY_FOR_METRICS(M)
#undef M
};
return strings[event];
}
Metric end() { return END; }
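With the macro now carrying a (NAME, DOCUMENTATION) pair, the name, the current value and the description of every metric can be enumerated separately, which is what a system.metrics-style listing needs. A small sketch, assuming <Common/CurrentMetrics.h> from this tree.

#include <Common/CurrentMetrics.h>
#include <atomic>
#include <iostream>

int main()
{
    /// Dump every metric: name, current value and human-readable documentation.
    for (CurrentMetrics::Metric i = 0, end = CurrentMetrics::end(); i < end; ++i)
        std::cout << CurrentMetrics::getName(i) << '\t'
                  << CurrentMetrics::values[i].load(std::memory_order_relaxed) << '\t'
                  << CurrentMetrics::getDocumentation(i) << '\n';
}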

View File

@ -24,8 +24,10 @@ namespace CurrentMetrics
using Metric = size_t;
using Value = DB::Int64;
/// Get name of metric by identifier. Returns statically allocated string.
const char * getName(Metric event);
/// Get text description of metric by identifier. Returns statically allocated string.
const char * getDescription(Metric event);
const char * getDocumentation(Metric event);
/// Metric identifier -> current value of metric.
extern std::atomic<Value> values[];

View File

@ -48,7 +48,7 @@ bool FileChecker::check() const
* `check` method is rarely called.
*/
Map local_map;
load(local_map);
load(local_map, files_info_path);
if (local_map.empty())
return true;
@ -78,7 +78,7 @@ void FileChecker::initialize()
if (initialized)
return;
load(map);
load(map, files_info_path);
initialized = true;
}
@ -125,14 +125,14 @@ void FileChecker::save() const
Poco::File(tmp_files_info_path).renameTo(files_info_path);
}
void FileChecker::load(Map & map) const
void FileChecker::load(Map & local_map, const std::string & path)
{
map.clear();
local_map.clear();
if (!Poco::File(files_info_path).exists())
if (!Poco::File(path).exists())
return;
ReadBufferFromFile in(files_info_path);
ReadBufferFromFile in(path);
WriteBufferFromOwnString out;
/// The JSON library does not support whitespace. We delete it. Inefficient.
@ -147,7 +147,7 @@ void FileChecker::load(Map & map) const
JSON files = json["yandex"];
for (const auto & name_value : files)
map[unescapeForFileName(name_value.getName())] = name_value.getValue()["size"].toUInt();
local_map[unescapeForFileName(name_value.getName())] = name_value.getValue()["size"].toUInt();
}
}

View File

@ -30,7 +30,7 @@ private:
void initialize();
void updateImpl(const Poco::File & file);
void save() const;
void load(Map & map) const;
static void load(Map & local_map, const std::string & path);
std::string files_info_path;
std::string tmp_files_info_path;

View File

@ -227,10 +227,10 @@ private:
struct Cell
{
bool expired(const Timestamp & last_timestamp, const Delay & expiration_delay) const
bool expired(const Timestamp & last_timestamp, const Delay & delay) const
{
return (expiration_delay == Delay::zero()) ||
((last_timestamp > timestamp) && ((last_timestamp - timestamp) > expiration_delay));
return (delay == Delay::zero()) ||
((last_timestamp > timestamp) && ((last_timestamp - timestamp) > delay));
}
MappedPtr value;

View File

@ -8,6 +8,7 @@
#include <Poco/Path.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/ShellCommand.h>
#include <Common/config.h>
#include <common/logger_useful.h>
#include <ext/range.h>
@ -36,13 +37,28 @@ ODBCBridgeHelper::ODBCBridgeHelper(
void ODBCBridgeHelper::startODBCBridge() const
{
Poco::Path path{config.getString("application.dir", "")};
path.setFileName("clickhouse-odbc-bridge");
path.setFileName(
#if CLICKHOUSE_SPLIT_BINARY
"clickhouse-odbc-bridge"
#else
"clickhouse"
#endif
);
if (!Poco::File(path).exists())
throw Exception("clickhouse binary is not found", ErrorCodes::EXTERNAL_EXECUTABLE_NOT_FOUND);
throw Exception("clickhouse binary (" + path.toString() + ") is not found", ErrorCodes::EXTERNAL_EXECUTABLE_NOT_FOUND);
std::stringstream command;
command << path.toString() << " ";
command << path.toString() <<
#if CLICKHOUSE_SPLIT_BINARY
" "
#else
" odbc-bridge "
#endif
;
command << "--http-port " << config.getUInt("odbc_bridge.port", DEFAULT_PORT) << ' ';
command << "--listen-host " << config.getString("odbc_bridge.listen_host", DEFAULT_HOST) << ' ';
command << "--http-timeout " << http_timeout.totalMicroseconds() << ' ';

View File

@ -1,4 +1,5 @@
#pragma once
#include <map>
#include <memory>
#include <stack>

View File

@ -14,6 +14,7 @@
#include <Common/Allocator.h>
#include <Common/Exception.h>
#include <Common/BitHelpers.h>
#include <Common/memcpySmall.h>
namespace DB
@ -288,21 +289,36 @@ public:
/// Do not insert a piece of the array into itself: the resize can invalidate iterators pointing into it.
template <typename It1, typename It2, typename ... TAllocatorParams>
void insert(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
void insertPrepare(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
{
size_t required_capacity = size() + (from_end - from_begin);
if (required_capacity > capacity())
reserve(roundUpToPowerOfTwoOrZero(required_capacity), std::forward<TAllocatorParams>(allocator_params)...);
}
/// Do not insert a piece of the array into itself: the resize can invalidate iterators pointing into it.
template <typename It1, typename It2, typename ... TAllocatorParams>
void insert(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
{
insertPrepare(from_begin, from_end, std::forward<TAllocatorParams>(allocator_params)...);
insert_assume_reserved(from_begin, from_end);
}
/// Works under the assumption that it is possible to read up to 15 excess bytes after `from_end`, and that this PODArray is padded.
template <typename It1, typename It2, typename ... TAllocatorParams>
void insertSmallAllowReadWriteOverflow15(It1 from_begin, It2 from_end, TAllocatorParams &&... allocator_params)
{
static_assert(pad_right_ >= 15);
insertPrepare(from_begin, from_end, std::forward<TAllocatorParams>(allocator_params)...);
size_t bytes_to_copy = byte_size(from_end - from_begin);
memcpySmallAllowReadWriteOverflow15(c_end, reinterpret_cast<const void *>(&*from_begin), bytes_to_copy);
c_end += bytes_to_copy;
}
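insertSmallAllowReadWriteOverflow15 trades bounds safety for speed: it requires the destination to have at least 15 bytes of right padding (enforced by the static_assert) and the source to tolerate reading up to 15 bytes past from_end. PaddedPODArray satisfies both, so a usage sketch could look like this, assuming <Common/PODArray.h> from this tree.

#include <Common/PODArray.h>
#include <cstdint>
#include <iostream>

int main()
{
    /// PaddedPODArray reserves at least 15 bytes of padding on the right, so both the
    /// overflowing read from the source and the overflowing write into the destination
    /// stay inside memory the arrays own.
    DB::PaddedPODArray<uint8_t> src;
    for (uint8_t i = 0; i < 5; ++i)
        src.push_back(i);

    DB::PaddedPODArray<uint8_t> dst;
    dst.insertSmallAllowReadWriteOverflow15(src.begin(), src.end());

    std::cout << dst.size() << '\n';    /// prints 5
}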
template <typename It1, typename It2>
void insert(iterator it, It1 from_begin, It2 from_end)
{
size_t required_capacity = size() + (from_end - from_begin);
if (required_capacity > capacity())
reserve(roundUpToPowerOfTwoOrZero(required_capacity));
insertPrepare(from_begin, from_end);
size_t bytes_to_copy = byte_size(from_end - from_begin);
size_t bytes_to_move = (end() - it) * sizeof(T);

View File

@ -5,176 +5,174 @@
/// Available events. Add something here as you wish.
#define APPLY_FOR_EVENTS(M) \
M(Query) \
M(SelectQuery) \
M(InsertQuery) \
M(FileOpen) \
M(FileOpenFailed) \
M(Seek) \
M(ReadBufferFromFileDescriptorRead) \
M(ReadBufferFromFileDescriptorReadFailed) \
M(ReadBufferFromFileDescriptorReadBytes) \
M(WriteBufferFromFileDescriptorWrite) \
M(WriteBufferFromFileDescriptorWriteFailed) \
M(WriteBufferFromFileDescriptorWriteBytes) \
M(ReadBufferAIORead) \
M(ReadBufferAIOReadBytes) \
M(WriteBufferAIOWrite) \
M(WriteBufferAIOWriteBytes) \
M(ReadCompressedBytes) \
M(CompressedReadBufferBlocks) \
M(CompressedReadBufferBytes) \
M(UncompressedCacheHits) \
M(UncompressedCacheMisses) \
M(UncompressedCacheWeightLost) \
M(IOBufferAllocs) \
M(IOBufferAllocBytes) \
M(ArenaAllocChunks) \
M(ArenaAllocBytes) \
M(FunctionExecute) \
M(TableFunctionExecute) \
M(MarkCacheHits) \
M(MarkCacheMisses) \
M(CreatedReadBufferOrdinary) \
M(CreatedReadBufferAIO) \
M(CreatedWriteBufferOrdinary) \
M(CreatedWriteBufferAIO) \
M(DiskReadElapsedMicroseconds) \
M(DiskWriteElapsedMicroseconds) \
M(NetworkReceiveElapsedMicroseconds) \
M(NetworkSendElapsedMicroseconds) \
M(ThrottlerSleepMicroseconds) \
M(Query, "Number of queries started to be interpreted and maybe executed. Does not include queries that failed to parse or that were rejected due to AST size limits, quota limits, or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
M(SelectQuery, "Same as Query, but only for SELECT queries.") \
M(InsertQuery, "Same as Query, but only for INSERT queries.") \
M(FileOpen, "Number of files opened.") \
M(Seek, "Number of times the 'lseek' function was called.") \
M(ReadBufferFromFileDescriptorRead, "Number of reads (read/pread) from a file descriptor. Does not include sockets.") \
M(ReadBufferFromFileDescriptorReadFailed, "Number of times the read (read/pread) from a file descriptor has failed.") \
M(ReadBufferFromFileDescriptorReadBytes, "Number of bytes read from file descriptors. If the file is compressed, this will show compressed data size.") \
M(WriteBufferFromFileDescriptorWrite, "Number of writes (write/pwrite) to a file descriptor. Does not include sockets.") \
M(WriteBufferFromFileDescriptorWriteFailed, "Number of times the write (write/pwrite) to a file descriptor has failed.") \
M(WriteBufferFromFileDescriptorWriteBytes, "Number of bytes written to file descriptors. If the file is compressed, this will show compressed data size.") \
M(ReadBufferAIORead, "") \
M(ReadBufferAIOReadBytes, "") \
M(WriteBufferAIOWrite, "") \
M(WriteBufferAIOWriteBytes, "") \
M(ReadCompressedBytes, "") \
M(CompressedReadBufferBlocks, "") \
M(CompressedReadBufferBytes, "") \
M(UncompressedCacheHits, "") \
M(UncompressedCacheMisses, "") \
M(UncompressedCacheWeightLost, "") \
M(IOBufferAllocs, "") \
M(IOBufferAllocBytes, "") \
M(ArenaAllocChunks, "") \
M(ArenaAllocBytes, "") \
M(FunctionExecute, "") \
M(TableFunctionExecute, "") \
M(MarkCacheHits, "") \
M(MarkCacheMisses, "") \
M(CreatedReadBufferOrdinary, "") \
M(CreatedReadBufferAIO, "") \
M(CreatedWriteBufferOrdinary, "") \
M(CreatedWriteBufferAIO, "") \
M(DiskReadElapsedMicroseconds, "Total time spent waiting for the read syscall. This includes reads from the page cache.") \
M(DiskWriteElapsedMicroseconds, "Total time spent waiting for the write syscall. This includes writes to the page cache.") \
M(NetworkReceiveElapsedMicroseconds, "") \
M(NetworkSendElapsedMicroseconds, "") \
M(ThrottlerSleepMicroseconds, "Total time a query was sleeping to conform to the 'max_network_bandwidth' setting.") \
\
M(ReplicatedPartFetches) \
M(ReplicatedPartFailedFetches) \
M(ObsoleteReplicatedParts) \
M(ReplicatedPartMerges) \
M(ReplicatedPartFetchesOfMerged) \
M(ReplicatedPartMutations) \
M(ReplicatedPartChecks) \
M(ReplicatedPartChecksFailed) \
M(ReplicatedDataLoss) \
M(ReplicatedPartFetches, "Number of times a data part was downloaded from replica of a ReplicatedMergeTree table.") \
M(ReplicatedPartFailedFetches, "") \
M(ObsoleteReplicatedParts, "") \
M(ReplicatedPartMerges, "") \
M(ReplicatedPartFetchesOfMerged, "Number of times we prefer to download an already merged part from a replica of a ReplicatedMergeTree table instead of performing a merge ourselves (usually we prefer doing a merge ourselves to save network traffic). This happens when we do not have all source parts to perform a merge or when the data part is old enough.") \
M(ReplicatedPartMutations, "") \
M(ReplicatedPartChecks, "") \
M(ReplicatedPartChecksFailed, "") \
M(ReplicatedDataLoss, "Number of times a data part that we wanted doesn't exist on any replica (even on replicas that are offline right now). Those data parts are definitely lost. This is normal with asynchronous replication (if quorum inserts were not enabled): the replica on which the data part was written failed, and when it came back online after the failure it no longer contained that data part.") \
\
M(InsertedRows) \
M(InsertedBytes) \
M(DelayedInserts) \
M(RejectedInserts) \
M(DelayedInsertsMilliseconds) \
M(DuplicatedInsertedBlocks) \
M(InsertedRows, "Number of rows INSERTed to all tables.") \
M(InsertedBytes, "Number of bytes (uncompressed; for columns as they stored in memory) INSERTed to all tables.") \
M(DelayedInserts, "Number of times the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition.") \
M(RejectedInserts, "Number of times the INSERT of a block to a MergeTree table was rejected with 'Too many parts' exception due to high number of active data parts for partition.") \
M(DelayedInsertsMilliseconds, "Total number of milliseconds spent while the INSERT of a block to a MergeTree table was throttled due to high number of active data parts for partition.") \
M(DuplicatedInsertedBlocks, "Number of times the INSERTed block to a ReplicatedMergeTree table was deduplicated.") \
\
M(ZooKeeperInit) \
M(ZooKeeperTransactions) \
M(ZooKeeperList) \
M(ZooKeeperCreate) \
M(ZooKeeperRemove) \
M(ZooKeeperExists) \
M(ZooKeeperGet) \
M(ZooKeeperSet) \
M(ZooKeeperMulti) \
M(ZooKeeperCheck) \
M(ZooKeeperClose) \
M(ZooKeeperWatchResponse) \
M(ZooKeeperUserExceptions) \
M(ZooKeeperHardwareExceptions) \
M(ZooKeeperOtherExceptions) \
M(ZooKeeperWaitMicroseconds) \
M(ZooKeeperBytesSent) \
M(ZooKeeperBytesReceived) \
M(ZooKeeperInit, "") \
M(ZooKeeperTransactions, "") \
M(ZooKeeperList, "") \
M(ZooKeeperCreate, "") \
M(ZooKeeperRemove, "") \
M(ZooKeeperExists, "") \
M(ZooKeeperGet, "") \
M(ZooKeeperSet, "") \
M(ZooKeeperMulti, "") \
M(ZooKeeperCheck, "") \
M(ZooKeeperClose, "") \
M(ZooKeeperWatchResponse, "") \
M(ZooKeeperUserExceptions, "") \
M(ZooKeeperHardwareExceptions, "") \
M(ZooKeeperOtherExceptions, "") \
M(ZooKeeperWaitMicroseconds, "") \
M(ZooKeeperBytesSent, "") \
M(ZooKeeperBytesReceived, "") \
\
M(DistributedConnectionFailTry) \
M(DistributedConnectionMissingTable) \
M(DistributedConnectionStaleReplica) \
M(DistributedConnectionFailAtAll) \
M(DistributedConnectionFailTry, "") \
M(DistributedConnectionMissingTable, "") \
M(DistributedConnectionStaleReplica, "") \
M(DistributedConnectionFailAtAll, "") \
\
M(CompileAttempt) \
M(CompileSuccess) \
M(CompileAttempt, "Number of times a compilation of generated C++ code was initiated.") \
M(CompileSuccess, "Number of times a compilation of generated C++ code was successful.") \
\
M(CompileFunction) \
M(CompileFunction, "Number of times a compilation of generated LLVM code (to create fused function for complex expressions) was initiated.") \
\
M(ExternalSortWritePart) \
M(ExternalSortMerge) \
M(ExternalAggregationWritePart) \
M(ExternalAggregationMerge) \
M(ExternalAggregationCompressedBytes) \
M(ExternalAggregationUncompressedBytes) \
M(ExternalSortWritePart, "") \
M(ExternalSortMerge, "") \
M(ExternalAggregationWritePart, "") \
M(ExternalAggregationMerge, "") \
M(ExternalAggregationCompressedBytes, "") \
M(ExternalAggregationUncompressedBytes, "") \
\
M(SlowRead) \
M(ReadBackoff) \
M(SlowRead, "Number of reads from a file that were slow. This indicates system overload. Thresholds are controlled by read_backoff_* settings.") \
M(ReadBackoff, "Number of times the number of query processing threads was lowered due to slow reads.") \
\
M(ReplicaYieldLeadership) \
M(ReplicaPartialShutdown) \
M(ReplicaYieldLeadership, "Number of times a Replicated table yielded its leadership due to large replication lag relative to other replicas.") \
M(ReplicaPartialShutdown, "") \
\
M(SelectedParts) \
M(SelectedRanges) \
M(SelectedMarks) \
M(SelectedParts, "Number of data parts selected to read from a MergeTree table.") \
M(SelectedRanges, "Number of (non-adjacent) ranges in all data parts selected to read from a MergeTree table.") \
M(SelectedMarks, "Number of marks (index granules) selected to read from a MergeTree table.") \
\
M(MergedRows) \
M(MergedUncompressedBytes) \
M(MergesTimeMilliseconds)\
M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \
M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for background merges. This is the number before merge.") \
M(MergesTimeMilliseconds, "Total time spent for background merges.")\
\
M(MergeTreeDataWriterRows) \
M(MergeTreeDataWriterUncompressedBytes) \
M(MergeTreeDataWriterCompressedBytes) \
M(MergeTreeDataWriterBlocks) \
M(MergeTreeDataWriterBlocksAlreadySorted) \
M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterCompressedBytes, "Bytes written to filesystem for data INSERTed to MergeTree tables.") \
M(MergeTreeDataWriterBlocks, "Number of blocks INSERTed to MergeTree tables. Each block forms a data part of level zero.") \
M(MergeTreeDataWriterBlocksAlreadySorted, "Number of blocks INSERTed to MergeTree tables that appeared to be already sorted.") \
\
M(ObsoleteEphemeralNode) \
M(CannotRemoveEphemeralNode) \
M(LeaderElectionAcquiredLeadership) \
M(CannotRemoveEphemeralNode, "Number of times an error happened while trying to remove an ephemeral node. This is not an issue, because our implementation of the ZooKeeper library guarantees that the session will expire and the node will be removed.") \
M(LeaderElectionAcquiredLeadership, "Number of times a ReplicatedMergeTree table became a leader. Leader replica is responsible for assigning merges, cleaning old blocks for deduplications and a few more bookkeeping tasks.") \
\
M(RegexpCreated) \
M(ContextLock) \
M(RegexpCreated, "Compiled regular expressions. Identical regular expressions compiled just once and cached forever.") \
M(ContextLock, "Number of times the lock of Context was acquired or an acquisition was attempted. This is a global lock.") \
\
M(StorageBufferFlush) \
M(StorageBufferErrorOnFlush) \
M(StorageBufferPassedAllMinThresholds) \
M(StorageBufferPassedTimeMaxThreshold) \
M(StorageBufferPassedRowsMaxThreshold) \
M(StorageBufferPassedBytesMaxThreshold) \
M(StorageBufferFlush, "") \
M(StorageBufferErrorOnFlush, "") \
M(StorageBufferPassedAllMinThresholds, "") \
M(StorageBufferPassedTimeMaxThreshold, "") \
M(StorageBufferPassedRowsMaxThreshold, "") \
M(StorageBufferPassedBytesMaxThreshold, "") \
\
M(DictCacheKeysRequested) \
M(DictCacheKeysRequestedMiss) \
M(DictCacheKeysRequestedFound) \
M(DictCacheKeysExpired) \
M(DictCacheKeysNotFound) \
M(DictCacheKeysHit) \
M(DictCacheRequestTimeNs) \
M(DictCacheRequests) \
M(DictCacheLockWriteNs) \
M(DictCacheLockReadNs) \
M(DictCacheKeysRequested, "") \
M(DictCacheKeysRequestedMiss, "") \
M(DictCacheKeysRequestedFound, "") \
M(DictCacheKeysExpired, "") \
M(DictCacheKeysNotFound, "") \
M(DictCacheKeysHit, "") \
M(DictCacheRequestTimeNs, "") \
M(DictCacheRequests, "") \
M(DictCacheLockWriteNs, "") \
M(DictCacheLockReadNs, "") \
\
M(DistributedSyncInsertionTimeoutExceeded) \
M(DataAfterMergeDiffersFromReplica) \
M(DataAfterMutationDiffersFromReplica) \
M(PolygonsAddedToPool) \
M(PolygonsInPoolAllocatedBytes) \
M(RWLockAcquiredReadLocks) \
M(RWLockAcquiredWriteLocks) \
M(RWLockReadersWaitMilliseconds) \
M(RWLockWritersWaitMilliseconds) \
M(NetworkErrors) \
M(DistributedSyncInsertionTimeoutExceeded, "") \
M(DataAfterMergeDiffersFromReplica, "") \
M(DataAfterMutationDiffersFromReplica, "") \
M(PolygonsAddedToPool, "") \
M(PolygonsInPoolAllocatedBytes, "") \
M(RWLockAcquiredReadLocks, "") \
M(RWLockAcquiredWriteLocks, "") \
M(RWLockReadersWaitMilliseconds, "") \
M(RWLockWritersWaitMilliseconds, "") \
M(NetworkErrors, "") \
\
M(RealTimeMicroseconds) \
M(UserTimeMicroseconds) \
M(SystemTimeMicroseconds) \
M(SoftPageFaults) \
M(HardPageFaults) \
M(VoluntaryContextSwitches) \
M(InvoluntaryContextSwitches) \
M(RealTimeMicroseconds, "Total (wall clock) time spent in processing (queries and other tasks) threads (note that this is a sum).") \
M(UserTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in user space. This includes time the CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
M(SystemTimeMicroseconds, "Total time spent in processing (queries and other tasks) threads executing CPU instructions in OS kernel space. This includes time the CPU pipeline was stalled due to cache misses, branch mispredictions, hyper-threading, etc.") \
M(SoftPageFaults, "") \
M(HardPageFaults, "") \
M(VoluntaryContextSwitches, "") \
M(InvoluntaryContextSwitches, "") \
\
M(OSIOWaitMicroseconds) \
M(OSCPUWaitMicroseconds) \
M(OSCPUVirtualTimeMicroseconds) \
M(OSReadBytes) \
M(OSWriteBytes) \
M(OSReadChars) \
M(OSWriteChars) \
M(OSIOWaitMicroseconds, "Total time a thread spent waiting for a result of IO operation, from the OS point of view. This is real IO that doesn't include page cache.") \
M(OSCPUWaitMicroseconds, "Total time a thread was ready for execution but waiting to be scheduled by OS, from the OS point of view.") \
M(OSCPUVirtualTimeMicroseconds, "CPU time spent seen by OS. Does not include involuntary waits due to virtualization.") \
M(OSReadBytes, "Number of bytes read from disks or block devices. Doesn't include bytes read from page cache. May include excessive data due to block size, readahead, etc.") \
M(OSWriteBytes, "Number of bytes written to disks or block devices. Doesn't include bytes that are in page cache dirty pages. May not include data that was written by OS asynchronously.") \
M(OSReadChars, "Number of bytes read from filesystem, including page cache.") \
M(OSWriteChars, "Number of bytes written to filesystem, including page cache.") \
namespace ProfileEvents
{
#define M(NAME) extern const Event NAME = __COUNTER__;
#define M(NAME, DOCUMENTATION) extern const Event NAME = __COUNTER__;
APPLY_FOR_EVENTS(M)
#undef M
constexpr Event END = __COUNTER__;
@ -218,16 +216,28 @@ Counters Counters::getPartiallyAtomicSnapshot() const
return res;
}
const char * getDescription(Event event)
const char * getName(Event event)
{
static const char * descriptions[] =
static const char * strings[] =
{
#define M(NAME) #NAME,
#define M(NAME, DOCUMENTATION) #NAME,
APPLY_FOR_EVENTS(M)
#undef M
};
return descriptions[event];
return strings[event];
}
const char * getDocumentation(Event event)
{
static const char * strings[] =
{
#define M(NAME, DOCUMENTATION) DOCUMENTATION,
APPLY_FOR_EVENTS(M)
#undef M
};
return strings[event];
}
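ProfileEvents now follows the same (NAME, DOCUMENTATION) pattern as CurrentMetrics. Below is a sketch of bumping a counter and dumping every event name with its description, assuming <Common/ProfileEvents.h> from this tree and the usual extern-declaration pattern for referencing an event.

#include <Common/ProfileEvents.h>
#include <iostream>

namespace ProfileEvents
{
    /// Events are referenced through extern declarations, as elsewhere in the code base.
    extern const Event Query;
}

int main()
{
    ProfileEvents::increment(ProfileEvents::Query);    /// increment a counter; thread-safe

    /// Dump every event with its name and documentation (many descriptions are still empty).
    for (ProfileEvents::Event i = 0, end = ProfileEvents::end(); i < end; ++i)
        std::cout << ProfileEvents::getName(i) << '\t' << ProfileEvents::getDocumentation(i) << '\n';
}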

View File

@ -86,8 +86,11 @@ namespace ProfileEvents
/// Increment a counter for event. Thread-safe.
void increment(Event event, Count amount = 1);
/// Get text description of event by identifier. Returns statically allocated string.
const char * getDescription(Event event);
/// Get name of event by identifier. Returns statically allocated string.
const char * getName(Event event);
/// Get description of event by identifier. Returns statically allocated string.
const char * getDocumentation(Event event);
/// Get index just after last event identifier.
Event end();

View File

@ -172,11 +172,11 @@ RWLockFIFO::LockHandlerImpl::~LockHandlerImpl()
/// Remove the group if we were the last client and notify the next group
if (it_group->clients.empty())
{
auto & queue = parent->queue;
queue.erase(it_group);
auto & parent_queue = parent->queue;
parent_queue.erase(it_group);
if (!queue.empty())
queue.front().cv.notify_all();
if (!parent_queue.empty())
parent_queue.front().cv.notify_all();
}
parent.reset();

View File

@ -40,7 +40,7 @@ private:
int thread_number = 0;
std::time_t enqueue_time = 0;
std::time_t start_time = 0;
Type type;
Type type = Read;
};
public:

View File

@ -14,6 +14,7 @@
*/
#include <common/Types.h>
#include <common/unaligned.h>
#include <type_traits>
#define ROTL(x, b) static_cast<UInt64>(((x) << (b)) | ((x) >> (64 - (b))))
@ -106,7 +107,7 @@ public:
while (data + 8 <= end)
{
current_word = *reinterpret_cast<const UInt64 *>(data);
current_word = unalignedLoad<UInt64>(data);
v3 ^= current_word;
SIPROUND;

View File

@ -182,7 +182,8 @@ public:
{
/// @note assuming sequences for lowercase and uppercase have exact same length
const auto len = UTF8::seqLength(*pos);
pos += len, needle_pos += len;
pos += len;
needle_pos += len;
}
if (needle_pos == needle_end)
@ -206,7 +207,8 @@ public:
Poco::Unicode::toLower(utf8.convert(needle_pos)))
{
const auto len = UTF8::seqLength(*pos);
pos += len, needle_pos += len;
pos += len;
needle_pos += len;
}
if (needle_pos == needle_end)
@ -266,7 +268,8 @@ public:
{
/// @note assuming sequences for lowercase and uppercase have exact same length
const auto len = UTF8::seqLength(*haystack_pos);
haystack_pos += len, needle_pos += len;
haystack_pos += len;
needle_pos += len;
}
if (needle_pos == needle_end)
@ -296,7 +299,8 @@ public:
Poco::Unicode::toLower(utf8.convert(needle_pos)))
{
const auto len = UTF8::seqLength(*haystack_pos);
haystack_pos += len, needle_pos += len;
haystack_pos += len;
needle_pos += len;
}
if (needle_pos == needle_end)
@ -389,7 +393,10 @@ public:
auto needle_pos = needle + n;
while (needle_pos < needle_end && std::tolower(*pos) == std::tolower(*needle_pos))
++pos, ++needle_pos;
{
++pos;
++needle_pos;
}
if (needle_pos == needle_end)
return true;
@ -408,7 +415,10 @@ public:
auto needle_pos = needle + 1;
while (needle_pos < needle_end && std::tolower(*pos) == std::tolower(*needle_pos))
++pos, ++needle_pos;
{
++pos;
++needle_pos;
}
if (needle_pos == needle_end)
return true;
@ -460,7 +470,10 @@ public:
while (haystack_pos < haystack_end && needle_pos < needle_end &&
std::tolower(*haystack_pos) == std::tolower(*needle_pos))
++haystack_pos, ++needle_pos;
{
++haystack_pos;
++needle_pos;
}
if (needle_pos == needle_end)
return haystack;
@ -485,7 +498,10 @@ public:
while (haystack_pos < haystack_end && needle_pos < needle_end &&
std::tolower(*haystack_pos) == std::tolower(*needle_pos))
++haystack_pos, ++needle_pos;
{
++haystack_pos;
++needle_pos;
}
if (needle_pos == needle_end)
return haystack;

View File

@ -78,7 +78,10 @@ void ThreadStatus::initPerformanceCounters()
if (TaskStatsInfoGetter::checkPermissions())
{
if (!taskstats_getter)
taskstats_getter = std::make_unique<TaskStatsInfoGetter>();
{
static SimpleObjectPool<TaskStatsInfoGetter> pool;
taskstats_getter = pool.getDefault();
}
*last_taskstats = TasksStatsCounters::current();
}
}

View File

@ -1,7 +1,11 @@
#pragma once
#include <Common/ProfileEvents.h>
#include <Common/MemoryTracker.h>
#include <Common/ObjectPool.h>
#include <IO/Progress.h>
#include <memory>
#include <map>
#include <mutex>
@ -159,8 +163,9 @@ protected:
std::unique_ptr<RUsageCounters> last_rusage;
std::unique_ptr<TasksStatsCounters> last_taskstats;
/// Set only if we have enough capabilities.
std::unique_ptr<TaskStatsInfoGetter> taskstats_getter;
/// Set to non-nullptr only if we have enough capabilities.
/// We use a pool because creation and destruction of TaskStatsInfoGetter objects is expensive.
SimpleObjectPool<TaskStatsInfoGetter>::Pointer taskstats_getter;
public:
/// Implicitly finalizes current thread in the destructor
@ -171,16 +176,10 @@ public:
CurrentThreadScope() = default;
~CurrentThreadScope()
{
try
{
if (deleter)
deleter();
}
catch (...)
{
std::terminate();
}
/// std::terminate on exception: this is Ok.
}
};
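The getter is now borrowed from a SimpleObjectPool so that the expensive TaskStatsInfoGetter instances are reused instead of being created and destroyed per thread. A hedged sketch of that pool pattern follows; it assumes SimpleObjectPool from <Common/ObjectPool.h> lives in namespace DB and that getDefault() hands out a smart pointer that returns the object to the pool on destruction, as the usage above suggests.

#include <Common/ObjectPool.h>
#include <iostream>

/// Stand-in for an object that is expensive to create and destroy (like TaskStatsInfoGetter).
struct ExpensiveThing
{
    ExpensiveThing() { std::cout << "constructed\n"; }
    int use() const { return 42; }
};

int main()
{
    static DB::SimpleObjectPool<ExpensiveThing> pool;

    {
        auto holder = pool.getDefault();    /// borrow an existing object or default-construct one
        std::cout << holder->use() << '\n';
    }                                       /// the holder's destructor gives the object back to the pool

    auto holder2 = pool.getDefault();       /// expected to reuse the same object: "constructed" prints once
    std::cout << holder2->use() << '\n';
}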

View File

@ -29,15 +29,7 @@ struct UInt128
UInt128() = default;
explicit UInt128(const UInt64 low, const UInt64 high) : low(low), high(high) {}
#if 1
explicit UInt128(const unsigned __int128 rhs)
: low(rhs & 0xffffffffffffffffll),
high(rhs >> 64)
{}
#else
explicit UInt128(const UInt64 rhs) : low(rhs), high() {}
#endif
auto tuple() const { return std::tie(high, low); }

View File

@ -57,7 +57,7 @@ namespace UnicodeBar
inline std::string render(double width)
{
std::string res(getWidthInBytes(width), '\0');
render(width, &res[0]);
render(width, res.data());
return res;
}
}

View File

@ -11,7 +11,6 @@
namespace ProfileEvents
{
extern const Event ObsoleteEphemeralNode;
extern const Event LeaderElectionAcquiredLeadership;
}

View File

@ -340,7 +340,7 @@ void read(String & s, ReadBuffer & in)
throw Exception("Too large string size while reading from ZooKeeper", ZMARSHALLINGERROR);
s.resize(size);
in.read(&s[0], size);
in.read(s.data(), size);
}
template <size_t N> void read(std::array<char, N> & s, ReadBuffer & in)
@ -349,7 +349,7 @@ template <size_t N> void read(std::array<char, N> & s, ReadBuffer & in)
read(size, in);
if (size != N)
throw Exception("Unexpected array size while reading from ZooKeeper", ZMARSHALLINGERROR);
in.read(&s[0], N);
in.read(s.data(), N);
}
void read(Stat & stat, ReadBuffer & in)
@ -674,24 +674,24 @@ struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
for (const auto & generic_request : generic_requests)
{
if (auto * concrete_request = dynamic_cast<const CreateRequest *>(generic_request.get()))
if (auto * concrete_request_create = dynamic_cast<const CreateRequest *>(generic_request.get()))
{
auto create = std::make_shared<ZooKeeperCreateRequest>(*concrete_request);
auto create = std::make_shared<ZooKeeperCreateRequest>(*concrete_request_create);
if (create->acls.empty())
create->acls = default_acls;
requests.push_back(create);
}
else if (auto * concrete_request = dynamic_cast<const RemoveRequest *>(generic_request.get()))
else if (auto * concrete_request_remove = dynamic_cast<const RemoveRequest *>(generic_request.get()))
{
requests.push_back(std::make_shared<ZooKeeperRemoveRequest>(*concrete_request));
requests.push_back(std::make_shared<ZooKeeperRemoveRequest>(*concrete_request_remove));
}
else if (auto * concrete_request = dynamic_cast<const SetRequest *>(generic_request.get()))
else if (auto * concrete_request_set = dynamic_cast<const SetRequest *>(generic_request.get()))
{
requests.push_back(std::make_shared<ZooKeeperSetRequest>(*concrete_request));
requests.push_back(std::make_shared<ZooKeeperSetRequest>(*concrete_request_set));
}
else if (auto * concrete_request = dynamic_cast<const CheckRequest *>(generic_request.get()))
else if (auto * concrete_request_check = dynamic_cast<const CheckRequest *>(generic_request.get()))
{
requests.push_back(std::make_shared<ZooKeeperCheckRequest>(*concrete_request));
requests.push_back(std::make_shared<ZooKeeperCheckRequest>(*concrete_request_check));
}
else
throw Exception("Illegal command as part of multi ZooKeeper request", ZBADARGUMENTS);
@ -914,11 +914,11 @@ void ZooKeeper::connect(
connected = true;
break;
}
catch (const Poco::Net::NetException & e)
catch (const Poco::Net::NetException &)
{
fail_reasons << "\n" << getCurrentExceptionMessage(false) << ", " << address.toString();
}
catch (const Poco::TimeoutException & e)
catch (const Poco::TimeoutException &)
{
fail_reasons << "\n" << getCurrentExceptionMessage(false);
}
@ -930,20 +930,20 @@ void ZooKeeper::connect(
if (!connected)
{
WriteBufferFromOwnString out;
out << "All connection tries failed while connecting to ZooKeeper. Addresses: ";
WriteBufferFromOwnString message;
message << "All connection tries failed while connecting to ZooKeeper. Addresses: ";
bool first = true;
for (const auto & address : addresses)
{
if (first)
first = false;
else
out << ", ";
out << address.toString();
message << ", ";
message << address.toString();
}
out << fail_reasons.str() << "\n";
throw Exception(out.str(), ZCONNECTIONLOSS);
message << fail_reasons.str() << "\n";
throw Exception(message.str(), ZCONNECTIONLOSS);
}
}
@ -953,7 +953,7 @@ void ZooKeeper::sendHandshake()
int32_t handshake_length = 44;
int64_t last_zxid_seen = 0;
int32_t timeout = session_timeout.totalMilliseconds();
int64_t session_id = 0;
int64_t previous_session_id = 0; /// We don't support session restore. So previous session_id is always zero.
constexpr int32_t passwd_len = 16;
std::array<char, passwd_len> passwd {};
@ -961,7 +961,7 @@ void ZooKeeper::sendHandshake()
write(protocol_version);
write(last_zxid_seen);
write(timeout);
write(session_id);
write(previous_session_id);
write(passwd);
out->next();
@ -1003,18 +1003,18 @@ void ZooKeeper::sendAuth(const String & scheme, const String & data)
request.write(*out);
int32_t length;
XID xid;
XID read_xid;
int64_t zxid;
int32_t err;
read(length);
size_t count_before_event = in->count();
read(xid);
read(read_xid);
read(zxid);
read(err);
if (xid != auth_xid)
throw Exception("Unexpected event recieved in reply to auth request: " + toString(xid),
if (read_xid != auth_xid)
throw Exception("Unexpected event recieved in reply to auth request: " + toString(read_xid),
ZMARSHALLINGERROR);
int32_t actual_length = in->count() - count_before_event;
@ -1434,7 +1434,7 @@ void ZooKeeper::pushRequest(RequestInfo && info)
if (!info.request->xid)
{
info.request->xid = xid.fetch_add(1);
info.request->xid = next_xid.fetch_add(1);
if (info.request->xid < 0)
throw Exception("XID overflow", ZSESSIONEXPIRED);
}

View File

@ -111,7 +111,7 @@ public:
Poco::Timespan connection_timeout,
Poco::Timespan operation_timeout);
~ZooKeeper();
~ZooKeeper() override;
/// If expired, you can only destroy the object. All other methods will throw exception.
@ -179,7 +179,7 @@ private:
int64_t session_id = 0;
std::atomic<XID> xid {1};
std::atomic<XID> next_xid {1};
std::atomic<bool> expired {false};
std::mutex push_request_mutex;

View File

@ -32,10 +32,10 @@ std::optional<std::string> ZooKeeperNodeCache::get(const std::string & path)
if (!zookeeper)
throw DB::Exception("Could not get znode: `" + path + "'. ZooKeeper not configured.", DB::ErrorCodes::NO_ZOOKEEPER);
for (const auto & path : invalidated_paths)
for (const auto & invalidated_path : invalidated_paths)
{
nonexistent_nodes.erase(path);
node_cache.erase(path);
nonexistent_nodes.erase(invalidated_path);
node_cache.erase(invalidated_path);
}
if (nonexistent_nodes.count(path))


@ -5,14 +5,15 @@
#include <iostream>
#include <chrono>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <gtest/gtest.h>
#include <Common/ShellCommand.h>
#pragma GCC diagnostic pop
using namespace DB;
TEST(zkutil, zookeeper_connected)


@ -57,7 +57,7 @@ int main(int argc, char ** argv)
sleep(1);
}
}
catch (Coordination::Exception & e)
catch (Coordination::Exception &)
{
std::cerr << "KeeperException: " << DB::getCurrentExceptionMessage(true) << std::endl;
return 1;


@ -14,3 +14,4 @@
#cmakedefine01 USE_POCO_DATAODBC
#cmakedefine01 USE_POCO_MONGODB
#cmakedefine01 USE_POCO_NETSSL
#cmakedefine01 CLICKHOUSE_SPLIT_BINARY


@ -85,7 +85,7 @@ void calcBias(pfHash hash, std::vector<int> & counts, int reps, Rand & r)
hash(&K, keybytes, 0, &A);
int * cursor = &counts[0];
int * cursor = counts.data();
for (int iBit = 0; iBit < keybits; iBit++)
{
@ -210,13 +210,13 @@ void BicTest3(pfHash hash, const int reps, bool verbose = true)
for (int keybit = 0; keybit < keybits; keybit++)
{
int * page = &bins[keybit * pagesize];
int * bins = &page[(out1 * hashbits + out2) * 4];
int * bins_in_page = &page[(out1 * hashbits + out2) * 4];
double bias = 0;
for (int b = 0; b < 4; b++)
{
double b2 = static_cast<double>(bins[b]) / static_cast<double>(reps / 2);
double b2 = static_cast<double>(bins_in_page[b]) / static_cast<double>(reps / 2);
b2 = fabs(b2 * 2 - 1);
if (b2 > bias)
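A recurring change in this diff is replacing &v[0] with v.data(). The difference matters for empty containers: data() is well defined there (it may return a null pointer that must not be dereferenced), while indexing element 0 of an empty vector is undefined behaviour. A minimal self-contained sketch:

#include <vector>
#include <cstdio>

int main()
{
    std::vector<int> v;                          // empty vector
    const int * p = v.data();                    // well defined: may be nullptr, must not be dereferenced
    std::printf("%p\n", static_cast<const void *>(p));
    // &v[0] here would be undefined behaviour: operator[] requires a valid index.

    v.push_back(42);
    std::printf("%d\n", *v.data());              // same address as &v[0] once the vector is non-empty
    return 0;
}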


@ -133,13 +133,14 @@ int main(int argc, char ** argv)
}
}
if (argc == 2 && !strcmp(argv[1], "1"))
{
size_t n = 5;
size_t map_size = 1000000;
using T = DB::Field;
T field = std::string("Hello, world");
if (argc == 2 && !strcmp(argv[1], "1"))
{
using Arr = std::vector<T>;
using Map = HashMap<UInt64, Arr>;
@ -185,7 +186,7 @@ int main(int argc, char ** argv)
std::cerr
<< "arr1.size(): " << arr1.size() << ", arr2.size(): " << arr2.size() << std::endl
<< "&arr1[0]: " << &arr1[0] << ", &arr2[0]: " << &arr2[0] << std::endl
<< "arr1.data(): " << arr1.data() << ", arr2.data(): " << arr2.data() << std::endl
<< "arr1[0]: " << arr1[0] << ", arr2[0]: " << arr2[0] << std::endl;
}


@ -1,7 +1,8 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <gtest/gtest.h>
#pragma GCC diagnostic pop
#include <Common/RWLockFIFO.h>
#include <Common/Stopwatch.h>


@ -1,9 +1,11 @@
#include <Common/escapeForFileName.h>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
#ifdef __clang__
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#include <gtest/gtest.h>
#pragma GCC diagnostic pop
using namespace DB;


@ -53,7 +53,7 @@ int main(int, char **)
watch.stop();
UInt64 check = CityHash_v1_0_2::CityHash64(&hashes[0], hashes.size());
UInt64 check = CityHash_v1_0_2::CityHash64(hashes.data(), hashes.size());
std::cerr << std::fixed << std::setprecision(2)
<< "CityHash64 (check = " << check << ")"
@ -73,12 +73,12 @@ int main(int, char **)
reinterpret_cast<unsigned char *>(&hashes[i * 16]),
reinterpret_cast<const unsigned char *>(strings[i].data()),
strings[i].size(),
reinterpret_cast<const unsigned char *>(&seed[0]));
reinterpret_cast<const unsigned char *>(seed.data()));
}
watch.stop();
UInt64 check = CityHash_v1_0_2::CityHash64(&hashes[0], hashes.size());
UInt64 check = CityHash_v1_0_2::CityHash64(hashes.data(), hashes.size());
std::cerr << std::fixed << std::setprecision(2)
<< "SipHash (check = " << check << ")"
@ -99,7 +99,7 @@ int main(int, char **)
watch.stop();
UInt64 check = CityHash_v1_0_2::CityHash64(&hashes[0], hashes.size());
UInt64 check = CityHash_v1_0_2::CityHash64(hashes.data(), hashes.size());
std::cerr << std::fixed << std::setprecision(2)
<< "SipHash, stream (check = " << check << ")"
@ -121,7 +121,7 @@ int main(int, char **)
watch.stop();
UInt64 check = CityHash_v1_0_2::CityHash64(&hashes[0], hashes.size());
UInt64 check = CityHash_v1_0_2::CityHash64(hashes.data(), hashes.size());
std::cerr << std::fixed << std::setprecision(2)
<< "MD5 (check = " << check << ")"


@ -274,14 +274,11 @@ static inline void test(size_t n, const UInt64 * data, const char * name)
int main(int argc, char ** argv)
{
const size_t BUF_SIZE = 1024;
size_t n = (atoi(argv[1]) + (BUF_SIZE - 1)) / BUF_SIZE * BUF_SIZE;
size_t method = argc <= 2 ? 0 : atoi(argv[2]);
std::cerr << std::fixed << std::setprecision(2);
using Source = std::vector<UInt64>;
Source data(BUF_SIZE);
{
@ -302,18 +299,18 @@ int main(int argc, char ** argv)
setAffinity();
if (!method || method == 1) test<identity> (n, &data[0], "0: identity");
if (!method || method == 2) test<intHash32> (n, &data[0], "1: intHash32");
if (!method || method == 3) test<_intHash64>(n, &data[0], "2: intHash64");
if (!method || method == 4) test<hash3> (n, &data[0], "3: two rounds");
if (!method || method == 5) test<hash4> (n, &data[0], "4: two rounds and two variables");
if (!method || method == 6) test<hash5> (n, &data[0], "5: two rounds with less ops");
if (!method || method == 7) test<murmurMix> (n, &data[0], "6: murmur64 mixer");
if (!method || method == 8) test<mulShift> (n, &data[0], "7: mulShift");
if (!method || method == 9) test<tabulation>(n, &data[0], "8: tabulation");
if (!method || method == 1) test<identity> (n, data.data(), "0: identity");
if (!method || method == 2) test<intHash32> (n, data.data(), "1: intHash32");
if (!method || method == 3) test<_intHash64>(n, data.data(), "2: intHash64");
if (!method || method == 4) test<hash3> (n, data.data(), "3: two rounds");
if (!method || method == 5) test<hash4> (n, data.data(), "4: two rounds and two variables");
if (!method || method == 6) test<hash5> (n, data.data(), "5: two rounds with less ops");
if (!method || method == 7) test<murmurMix> (n, data.data(), "6: murmur64 mixer");
if (!method || method == 8) test<mulShift> (n, data.data(), "7: mulShift");
if (!method || method == 9) test<tabulation>(n, data.data(), "8: tabulation");
#if __x86_64__
if (!method || method == 10) test<crc32Hash> (n, &data[0], "9: crc32");
if (!method || method == 10) test<crc32Hash> (n, data.data(), "9: crc32");
#endif
return 0;


@ -359,7 +359,7 @@ int main(int argc, char ** argv)
DB::ReadBufferFromFileDescriptor in1(STDIN_FILENO);
DB::CompressedReadBuffer in2(in1);
in2.readStrict(reinterpret_cast<char*>(&data[0]), sizeof(data[0]) * n);
in2.readStrict(reinterpret_cast<char*>(data.data()), sizeof(data[0]) * n);
watch.stop();
std::cerr


@ -262,7 +262,7 @@ int main(int argc, char ** argv)
DB::ReadBufferFromFileDescriptor in1(STDIN_FILENO);
DB::CompressedReadBuffer in2(in1);
in2.readStrict(reinterpret_cast<char*>(&data[0]), sizeof(data[0]) * n);
in2.readStrict(reinterpret_cast<char*>(data.data()), sizeof(data[0]) * n);
watch.stop();
std::cerr << std::fixed << std::setprecision(2)
@ -500,7 +500,7 @@ int main(int argc, char ** argv)
for (size_t i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i)
pool.schedule(std::bind(merge2,
&maps[0], num_threads, i));
maps.data(), num_threads, i));
pool.wait();
@ -553,8 +553,7 @@ int main(int argc, char ** argv)
watch.restart();
for (size_t i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i)
pool.schedule(std::bind(merge2,
&maps[0], num_threads, i));
pool.schedule(std::bind(merge2, maps.data(), num_threads, i));
pool.wait();
@ -731,7 +730,7 @@ int main(int argc, char ** argv)
pool.schedule(std::bind(aggregate4,
std::ref(local_maps[i]),
std::ref(global_map),
&mutexes[0],
mutexes.data(),
data.begin() + (data.size() * i) / num_threads,
data.begin() + (data.size() * (i + 1)) / num_threads));


@ -301,7 +301,7 @@ int main(int argc, char ** argv)
DB::ReadBufferFromFileDescriptor in1(STDIN_FILENO);
DB::CompressedReadBuffer in2(in1);
in2.readStrict(reinterpret_cast<char*>(&data[0]), sizeof(data[0]) * n);
in2.readStrict(reinterpret_cast<char*>(data.data()), sizeof(data[0]) * n);
watch.stop();
std::cerr << std::fixed << std::setprecision(2)


@ -71,9 +71,9 @@ int main(int argc, char ** argv)
{
Stopwatch watch;
if (method == 1) sort1(&data[0], n);
if (method == 2) sort2(&data[0], n);
if (method == 3) sort3(&data[0], n);
if (method == 1) sort1(data.data(), n);
if (method == 2) sort2(data.data(), n);
if (method == 3) sort3(data.data(), n);
watch.stop();
double elapsed = watch.elapsedSeconds();


@ -1,6 +1,9 @@
#pragma once
#include <cmath>
#include <limits>
#include <Common/NaNUtils.h>
#include <Core/Types.h>
#include <Common/UInt128.h>
@ -32,7 +35,10 @@ using DB::UInt64;
// Case 1. Is pair of floats or pair of ints or pair of uints
template <typename A, typename B>
constexpr bool is_safe_conversion = (std::is_floating_point_v<A> && std::is_floating_point_v<B>)
|| (std::is_integral_v<A> && std::is_integral_v<B> && !(std::is_signed_v<A> ^ std::is_signed_v<B>));
|| (std::is_integral_v<A> && std::is_integral_v<B> && !(std::is_signed_v<A> ^ std::is_signed_v<B>))
|| (std::is_same_v<A, DB::Int128> && std::is_same_v<B, DB::Int128>)
|| (std::is_integral_v<A> && std::is_same_v<B, DB::Int128>)
|| (std::is_same_v<A, DB::Int128> && std::is_integral_v<B>);
template <typename A, typename B>
using bool_if_safe_conversion = std::enable_if_t<is_safe_conversion<A, B>, bool>;
template <typename A, typename B>
@ -393,6 +399,8 @@ inline bool_if_safe_conversion<A, B> lessOp(A a, B b)
template <typename A, typename B>
inline bool_if_not_safe_conversion<A, B> lessOrEqualsOp(A a, B b)
{
if (isNaN(a) || isNaN(b))
return false;
return !greaterOp(a, b);
}
@ -406,6 +414,8 @@ inline bool_if_safe_conversion<A, B> lessOrEqualsOp(A a, B b)
template <typename A, typename B>
inline bool_if_not_safe_conversion<A, B> greaterOrEqualsOp(A a, B b)
{
if (isNaN(a) || isNaN(b))
return false;
return !greaterOp(b, a);
}
@ -415,5 +425,52 @@ inline bool_if_safe_conversion<A, B> greaterOrEqualsOp(A a, B b)
return a >= b;
}
}
namespace DB
{
template <typename A, typename B> struct EqualsOp
{
/// An operation that gives the same result, if arguments are passed in reverse order.
using SymmetricOp = EqualsOp<B, A>;
static UInt8 apply(A a, B b) { return accurate::equalsOp(a, b); }
};
template <typename A, typename B> struct NotEqualsOp
{
using SymmetricOp = NotEqualsOp<B, A>;
static UInt8 apply(A a, B b) { return accurate::notEqualsOp(a, b); }
};
template <typename A, typename B> struct GreaterOp;
template <typename A, typename B> struct LessOp
{
using SymmetricOp = GreaterOp<B, A>;
static UInt8 apply(A a, B b) { return accurate::lessOp(a, b); }
};
template <typename A, typename B> struct GreaterOp
{
using SymmetricOp = LessOp<B, A>;
static UInt8 apply(A a, B b) { return accurate::greaterOp(a, b); }
};
template <typename A, typename B> struct GreaterOrEqualsOp;
template <typename A, typename B> struct LessOrEqualsOp
{
using SymmetricOp = GreaterOrEqualsOp<B, A>;
static UInt8 apply(A a, B b) { return accurate::lessOrEqualsOp(a, b); }
};
template <typename A, typename B> struct GreaterOrEqualsOp
{
using SymmetricOp = LessOrEqualsOp<B, A>;
static UInt8 apply(A a, B b) { return accurate::greaterOrEqualsOp(a, b); }
};
}
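Each comparison struct above names its SymmetricOp, the operation that yields the same result when the two arguments are swapped; this lets a constant-versus-vector code path reuse a vector-versus-constant kernel. The isNaN checks added earlier in this file make lessOrEqualsOp and greaterOrEqualsOp return false whenever either argument is NaN, in line with IEEE-754 ordered comparisons. A stand-alone sketch of both points (local names, not the structs above):

#include <cstdint>
#include <cstdio>
#include <cmath>

// Stand-alone illustration; these names are local to the sketch.
template <typename A, typename B> struct Greater;
template <typename A, typename B> struct Less
{
    using SymmetricOp = Greater<B, A>;
    static uint8_t apply(A a, B b) { return a < b; }
};
template <typename A, typename B> struct Greater
{
    using SymmetricOp = Less<B, A>;
    static uint8_t apply(A a, B b) { return a > b; }
};

int main()
{
    // The operation and its SymmetricOp agree once the arguments are swapped.
    std::printf("%d %d\n", Less<int, double>::apply(1, 2.5),
                           Less<int, double>::SymmetricOp::apply(2.5, 1));   // 1 1
    // NaN is unordered: with the added isNaN checks, <= and >= both report false.
    double nan = std::nan("");
    std::printf("%d %d\n", nan <= 1.0, nan >= 1.0);                          // 0 0
    return 0;
}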


@ -0,0 +1,312 @@
#pragma once
#include <common/arithmeticOverflow.h>
#include <Core/Block.h>
#include <Core/AccurateComparison.h>
#include <Core/callOnTypeIndex.h>
#include <DataTypes/DataTypesDecimal.h>
#include <Columns/ColumnVector.h>
#include <Columns/ColumnsNumber.h>
#include <Columns/ColumnConst.h>
#include <Functions/FunctionHelpers.h> /// TODO Core should not depend on Functions
namespace DB
{
namespace ErrorCodes
{
extern const int DECIMAL_OVERFLOW;
}
///
inline bool allowDecimalComparison(const IDataType & left_type, const IDataType & right_type)
{
if (isDecimal(left_type))
{
if (isDecimal(right_type) || notDecimalButComparableToDecimal(right_type))
return true;
}
else if (notDecimalButComparableToDecimal(left_type) && isDecimal(right_type))
return true;
return false;
}
template <size_t > struct ConstructDecInt { using Type = Int32; };
template <> struct ConstructDecInt<8> { using Type = Int64; };
template <> struct ConstructDecInt<16> { using Type = Int128; };
template <typename T, typename U>
struct DecCompareInt
{
using Type = typename ConstructDecInt<(!IsDecimalNumber<U> || sizeof(T) > sizeof(U)) ? sizeof(T) : sizeof(U)>::Type;
using TypeA = Type;
using TypeB = Type;
};
///
template <typename A, typename B, template <typename, typename> typename Operation, bool _check_overflow = true,
bool _actual = IsDecimalNumber<A> || IsDecimalNumber<B>>
class DecimalComparison
{
public:
using CompareInt = typename DecCompareInt<A, B>::Type;
using Op = Operation<CompareInt, CompareInt>;
using ColVecA = std::conditional_t<IsDecimalNumber<A>, ColumnDecimal<A>, ColumnVector<A>>;
using ColVecB = std::conditional_t<IsDecimalNumber<B>, ColumnDecimal<B>, ColumnVector<B>>;
using ArrayA = typename ColVecA::Container;
using ArrayB = typename ColVecB::Container;
DecimalComparison(Block & block, size_t result, const ColumnWithTypeAndName & col_left, const ColumnWithTypeAndName & col_right)
{
if (!apply(block, result, col_left, col_right))
throw Exception("Wrong decimal comparison with " + col_left.type->getName() + " and " + col_right.type->getName(),
ErrorCodes::LOGICAL_ERROR);
}
static bool apply(Block & block, size_t result [[maybe_unused]],
const ColumnWithTypeAndName & col_left, const ColumnWithTypeAndName & col_right)
{
if constexpr (_actual)
{
ColumnPtr c_res;
Shift shift = getScales<A, B>(col_left.type, col_right.type);
c_res = applyWithScale(col_left.column, col_right.column, shift);
if (c_res)
block.getByPosition(result).column = std::move(c_res);
return true;
}
return false;
}
static bool compare(A a, B b, UInt32 scale_a, UInt32 scale_b)
{
static const UInt32 max_scale = maxDecimalPrecision<Decimal128>();
if (scale_a > max_scale || scale_b > max_scale)
throw Exception("Bad scale of decimal field", ErrorCodes::DECIMAL_OVERFLOW);
Shift shift;
if (scale_a < scale_b)
shift.a = DataTypeDecimal<B>(maxDecimalPrecision<B>(), scale_b).getScaleMultiplier(scale_b - scale_a);
if (scale_a > scale_b)
shift.b = DataTypeDecimal<A>(maxDecimalPrecision<A>(), scale_a).getScaleMultiplier(scale_a - scale_b);
return applyWithScale(a, b, shift);
}
private:
struct Shift
{
CompareInt a = 1;
CompareInt b = 1;
bool none() const { return a == 1 && b == 1; }
bool left() const { return a != 1; }
bool right() const { return b != 1; }
};
template <typename T, typename U>
static auto applyWithScale(T a, U b, const Shift & shift)
{
if (shift.left())
return apply<true, false>(a, b, shift.a);
else if (shift.right())
return apply<false, true>(a, b, shift.b);
return apply<false, false>(a, b, 1);
}
template <typename T, typename U>
static std::enable_if_t<IsDecimalNumber<T> && IsDecimalNumber<U>, Shift>
getScales(const DataTypePtr & left_type, const DataTypePtr & right_type)
{
const DataTypeDecimal<T> * decimal0 = checkDecimal<T>(*left_type);
const DataTypeDecimal<U> * decimal1 = checkDecimal<U>(*right_type);
Shift shift;
if (decimal0 && decimal1)
{
auto result_type = decimalResultType(*decimal0, *decimal1, false, false);
shift.a = result_type.scaleFactorFor(*decimal0, false);
shift.b = result_type.scaleFactorFor(*decimal1, false);
}
else if (decimal0)
shift.b = decimal0->getScaleMultiplier();
else if (decimal1)
shift.a = decimal1->getScaleMultiplier();
return shift;
}
template <typename T, typename U>
static std::enable_if_t<IsDecimalNumber<T> && !IsDecimalNumber<U>, Shift>
getScales(const DataTypePtr & left_type, const DataTypePtr &)
{
Shift shift;
const DataTypeDecimal<T> * decimal0 = checkDecimal<T>(*left_type);
if (decimal0)
shift.b = decimal0->getScaleMultiplier();
return shift;
}
template <typename T, typename U>
static std::enable_if_t<!IsDecimalNumber<T> && IsDecimalNumber<U>, Shift>
getScales(const DataTypePtr &, const DataTypePtr & right_type)
{
Shift shift;
const DataTypeDecimal<U> * decimal1 = checkDecimal<U>(*right_type);
if (decimal1)
shift.a = decimal1->getScaleMultiplier();
return shift;
}
template <bool scale_left, bool scale_right>
static ColumnPtr apply(const ColumnPtr & c0, const ColumnPtr & c1, CompareInt scale)
{
auto c_res = ColumnUInt8::create();
if constexpr (_actual)
{
bool c0_is_const = c0->isColumnConst();
bool c1_is_const = c1->isColumnConst();
if (c0_is_const && c1_is_const)
{
const ColumnConst * c0_const = checkAndGetColumnConst<ColVecA>(c0.get());
const ColumnConst * c1_const = checkAndGetColumnConst<ColVecB>(c1.get());
A a = c0_const->template getValue<A>();
B b = c1_const->template getValue<B>();
UInt8 res = apply<scale_left, scale_right>(a, b, scale);
return DataTypeUInt8().createColumnConst(c0->size(), toField(res));
}
ColumnUInt8::Container & vec_res = c_res->getData();
vec_res.resize(c0->size());
if (c0_is_const)
{
const ColumnConst * c0_const = checkAndGetColumnConst<ColVecA>(c0.get());
A a = c0_const->template getValue<A>();
if (const ColVecB * c1_vec = checkAndGetColumn<ColVecB>(c1.get()))
constant_vector<scale_left, scale_right>(a, c1_vec->getData(), vec_res, scale);
else
throw Exception("Wrong column in Decimal comparison", ErrorCodes::LOGICAL_ERROR);
}
else if (c1_is_const)
{
const ColumnConst * c1_const = checkAndGetColumnConst<ColVecB>(c1.get());
B b = c1_const->template getValue<B>();
if (const ColVecA * c0_vec = checkAndGetColumn<ColVecA>(c0.get()))
vector_constant<scale_left, scale_right>(c0_vec->getData(), b, vec_res, scale);
else
throw Exception("Wrong column in Decimal comparison", ErrorCodes::LOGICAL_ERROR);
}
else
{
if (const ColVecA * c0_vec = checkAndGetColumn<ColVecA>(c0.get()))
{
if (const ColVecB * c1_vec = checkAndGetColumn<ColVecB>(c1.get()))
vector_vector<scale_left, scale_right>(c0_vec->getData(), c1_vec->getData(), vec_res, scale);
else
throw Exception("Wrong column in Decimal comparison", ErrorCodes::LOGICAL_ERROR);
}
else
throw Exception("Wrong column in Decimal comparison", ErrorCodes::LOGICAL_ERROR);
}
}
return c_res;
}
template <bool scale_left, bool scale_right>
static NO_INLINE UInt8 apply(A a, B b, CompareInt scale [[maybe_unused]])
{
CompareInt x = a;
CompareInt y = b;
if constexpr (_check_overflow)
{
bool overflow = false;
if constexpr (sizeof(A) > sizeof(CompareInt))
overflow |= (A(x) != a);
if constexpr (sizeof(B) > sizeof(CompareInt))
overflow |= (B(y) != b);
if constexpr (std::is_unsigned_v<A>)
overflow |= (x < 0);
if constexpr (std::is_unsigned_v<B>)
overflow |= (y < 0);
if constexpr (scale_left)
overflow |= common::mulOverflow(x, scale, x);
if constexpr (scale_right)
overflow |= common::mulOverflow(y, scale, y);
if (overflow)
throw Exception("Can't compare", ErrorCodes::DECIMAL_OVERFLOW);
}
else
{
if constexpr (scale_left)
x *= scale;
if constexpr (scale_right)
y *= scale;
}
return Op::apply(x, y);
}
template <bool scale_left, bool scale_right>
static void NO_INLINE vector_vector(const ArrayA & a, const ArrayB & b, PaddedPODArray<UInt8> & c,
CompareInt scale)
{
size_t size = a.size();
const A * a_pos = a.data();
const B * b_pos = b.data();
UInt8 * c_pos = c.data();
const A * a_end = a_pos + size;
while (a_pos < a_end)
{
*c_pos = apply<scale_left, scale_right>(*a_pos, *b_pos, scale);
++a_pos;
++b_pos;
++c_pos;
}
}
template <bool scale_left, bool scale_right>
static void NO_INLINE vector_constant(const ArrayA & a, B b, PaddedPODArray<UInt8> & c, CompareInt scale)
{
size_t size = a.size();
const A * a_pos = a.data();
UInt8 * c_pos = c.data();
const A * a_end = a_pos + size;
while (a_pos < a_end)
{
*c_pos = apply<scale_left, scale_right>(*a_pos, b, scale);
++a_pos;
++c_pos;
}
}
template <bool scale_left, bool scale_right>
static void NO_INLINE constant_vector(A a, const ArrayB & b, PaddedPODArray<UInt8> & c, CompareInt scale)
{
size_t size = b.size();
const B * b_pos = b.data();
UInt8 * c_pos = c.data();
const B * b_end = b_pos + size;
while (b_pos < b_end)
{
*c_pos = apply<scale_left, scale_right>(a, *b_pos, scale);
++b_pos;
++c_pos;
}
}
};
}
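DecimalComparison aligns operands of different scales by multiplying the side with the smaller scale by a power of ten obtained from getScaleMultiplier(), guarding the multiplication with mulOverflow when _check_overflow is set. A worked example of that alignment, independent of the class itself:

#include <cstdint>
#include <cstdio>

int main()
{
    // Raw (unscaled) representations, as stored in Decimal columns.
    int64_t a = 123;     // 1.23   at scale 2
    int64_t b = 12301;   // 1.2301 at scale 4

    // Align to the larger scale: multiply 'a' by 10^(4 - 2) = 100.
    // The class above obtains this factor from getScaleMultiplier() and,
    // when _check_overflow is set, guards the multiplication with mulOverflow.
    int64_t a_shifted = a * 100;          // 12300

    std::printf("%d\n", a_shifted < b);   // 1: 1.23 < 1.2301
    return 0;
}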


@ -4,8 +4,8 @@
#include <IO/WriteHelpers.h>
#include <Core/Field.h>
#include <Core/DecimalComparison.h>
#include <Common/FieldVisitors.h>
#include <Functions/FunctionsComparison.h>
namespace DB


@ -82,6 +82,8 @@ enum class TypeIndex
Decimal64,
Decimal128,
UUID,
Array,
Tuple,
};
template <typename T> struct TypeId;
@ -99,53 +101,17 @@ template <> struct TypeId<Float64> { static constexpr const TypeIndex value = T
/// Not a data type in database, defined just for convenience.
using Strings = std::vector<String>;
}
#if 1 /// __int128
namespace DB
{
using Int128 = __int128;
template <> constexpr bool IsNumber<Int128> = true;
template <> struct TypeName<Int128> { static const char * get() { return "Int128"; } };
template <> struct TypeId<Int128> { static constexpr const TypeIndex value = TypeIndex::Int128; };
}
namespace std
/// Own FieldType for Decimal.
/// It is only a "storage" for decimal. To perform operations, you also have to provide a scale (number of digits after point).
template <typename T>
struct Decimal
{
template <> struct is_signed<__int128>
{
static constexpr bool value = true;
};
template <> struct is_unsigned<__int128>
{
static constexpr bool value = false;
};
template <> struct is_integral<__int128>
{
static constexpr bool value = true;
};
template <> struct is_arithmetic<__int128>
{
static constexpr bool value = true;
};
}
#endif
namespace DB
{
/// Own FieldType for Decimal.
/// It is only a "storage" for decimal. To perform operations, you also have to provide a scale (number of digits after point).
template <typename T>
struct Decimal
{
using NativeType = T;
Decimal() = default;
@ -173,23 +139,24 @@ namespace DB
const Decimal<T> & operator %= (const T & x) { value %= x; return *this; }
T value;
};
};
using Decimal32 = Decimal<Int32>;
using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
using Decimal32 = Decimal<Int32>;
using Decimal64 = Decimal<Int64>;
using Decimal128 = Decimal<Int128>;
template <> struct TypeName<Decimal32> { static const char * get() { return "Decimal32"; } };
template <> struct TypeName<Decimal64> { static const char * get() { return "Decimal64"; } };
template <> struct TypeName<Decimal128> { static const char * get() { return "Decimal128"; } };
template <> struct TypeName<Decimal32> { static const char * get() { return "Decimal32"; } };
template <> struct TypeName<Decimal64> { static const char * get() { return "Decimal64"; } };
template <> struct TypeName<Decimal128> { static const char * get() { return "Decimal128"; } };
template <> struct TypeId<Decimal32> { static constexpr const TypeIndex value = TypeIndex::Decimal32; };
template <> struct TypeId<Decimal64> { static constexpr const TypeIndex value = TypeIndex::Decimal64; };
template <> struct TypeId<Decimal128> { static constexpr const TypeIndex value = TypeIndex::Decimal128; };
template <> struct TypeId<Decimal32> { static constexpr const TypeIndex value = TypeIndex::Decimal32; };
template <> struct TypeId<Decimal64> { static constexpr const TypeIndex value = TypeIndex::Decimal64; };
template <> struct TypeId<Decimal128> { static constexpr const TypeIndex value = TypeIndex::Decimal128; };
template <typename T>
constexpr bool IsDecimalNumber = false;
template <> constexpr bool IsDecimalNumber<Decimal32> = true;
template <> constexpr bool IsDecimalNumber<Decimal64> = true;
template <> constexpr bool IsDecimalNumber<Decimal128> = true;
template <typename T>
constexpr bool IsDecimalNumber = false;
template <> constexpr bool IsDecimalNumber<Decimal32> = true;
template <> constexpr bool IsDecimalNumber<Decimal64> = true;
template <> constexpr bool IsDecimalNumber<Decimal128> = true;
}
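As the comment above says, Decimal<T> is storage only: it keeps the raw integer value, and the scale is supplied separately (for example by DataTypeDecimal). A small illustrative sketch of what that separation means, using a stand-in type rather than the real Decimal:

#include <cstdint>
#include <cstdio>

// Minimal stand-in for the storage-only wrapper defined above (not the real Decimal).
template <typename T> struct DecimalSketch { T value; };

int main()
{
    DecimalSketch<int64_t> d{12345};   // only the raw value is stored
    uint32_t scale = 2;                // the scale lives in the data type, e.g. DataTypeDecimal
    double as_number = static_cast<double>(d.value) / 100.0;   // 10^scale == 100 here
    std::printf("raw=%lld scale=%u value=%.2f\n",
                static_cast<long long>(d.value), scale, as_number);
    return 0;
}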


@ -0,0 +1,168 @@
#pragma once
#include <utility>
#include <Core/Types.h>
namespace DB
{
template <typename T, typename U>
struct TypePair
{
using LeftType = T;
using RightType = U;
};
template <typename T, bool _int, bool _dec, bool _float, typename F>
bool callOnBasicType(TypeIndex number, F && f)
{
if constexpr (_int)
{
switch (number)
{
case TypeIndex::UInt8: return f(TypePair<T, UInt8>());
case TypeIndex::UInt16: return f(TypePair<T, UInt16>());
case TypeIndex::UInt32: return f(TypePair<T, UInt32>());
case TypeIndex::UInt64: return f(TypePair<T, UInt64>());
//case TypeIndex::UInt128: return f(TypePair<T, UInt128>());
case TypeIndex::Int8: return f(TypePair<T, Int8>());
case TypeIndex::Int16: return f(TypePair<T, Int16>());
case TypeIndex::Int32: return f(TypePair<T, Int32>());
case TypeIndex::Int64: return f(TypePair<T, Int64>());
case TypeIndex::Int128: return f(TypePair<T, Int128>());
default:
break;
}
}
if constexpr (_dec)
{
switch (number)
{
case TypeIndex::Decimal32: return f(TypePair<T, Decimal32>());
case TypeIndex::Decimal64: return f(TypePair<T, Decimal64>());
case TypeIndex::Decimal128: return f(TypePair<T, Decimal128>());
default:
break;
}
}
if constexpr (_float)
{
switch (number)
{
case TypeIndex::Float32: return f(TypePair<T, Float32>());
case TypeIndex::Float64: return f(TypePair<T, Float64>());
default:
break;
}
}
return false;
}
/// Unroll template using TypeIndex
template <typename F, bool _int = true, bool _dec = true, bool _float = false>
inline bool callOnBasicTypes(TypeIndex type_num1, TypeIndex type_num2, F && f)
{
if constexpr (_int)
{
switch (type_num1)
{
case TypeIndex::UInt8: return callOnBasicType<UInt8, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::UInt16: return callOnBasicType<UInt16, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::UInt32: return callOnBasicType<UInt32, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::UInt64: return callOnBasicType<UInt64, _int, _dec, _float>(type_num2, std::forward<F>(f));
//case TypeIndex::UInt128: return callOnBasicType<UInt128, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Int8: return callOnBasicType<Int8, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Int16: return callOnBasicType<Int16, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Int32: return callOnBasicType<Int32, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Int64: return callOnBasicType<Int64, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Int128: return callOnBasicType<Int128, _int, _dec, _float>(type_num2, std::forward<F>(f));
default:
break;
}
}
if constexpr (_dec)
{
switch (type_num1)
{
case TypeIndex::Decimal32: return callOnBasicType<Decimal32, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Decimal64: return callOnBasicType<Decimal64, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Decimal128: return callOnBasicType<Decimal128, _int, _dec, _float>(type_num2, std::forward<F>(f));
default:
break;
}
}
if constexpr (_float)
{
switch (type_num1)
{
case TypeIndex::Float32: return callOnBasicType<Float32, _int, _dec, _float>(type_num2, std::forward<F>(f));
case TypeIndex::Float64: return callOnBasicType<Float64, _int, _dec, _float>(type_num2, std::forward<F>(f));
default:
break;
}
}
return false;
}
class DataTypeDate;
class DataTypeDateTime;
class DataTypeString;
class DataTypeFixedString;
class DataTypeUUID;
template <typename T> class DataTypeEnum;
template <typename T> class DataTypeNumber;
template <typename T> class DataTypeDecimal;
template <typename T, typename F>
bool callOnIndexAndDataType(TypeIndex number, F && f)
{
switch (number)
{
case TypeIndex::UInt8: return f(TypePair<DataTypeNumber<UInt8>, T>());
case TypeIndex::UInt16: return f(TypePair<DataTypeNumber<UInt16>, T>());
case TypeIndex::UInt32: return f(TypePair<DataTypeNumber<UInt32>, T>());
case TypeIndex::UInt64: return f(TypePair<DataTypeNumber<UInt64>, T>());
case TypeIndex::Int8: return f(TypePair<DataTypeNumber<Int8>, T>());
case TypeIndex::Int16: return f(TypePair<DataTypeNumber<Int16>, T>());
case TypeIndex::Int32: return f(TypePair<DataTypeNumber<Int32>, T>());
case TypeIndex::Int64: return f(TypePair<DataTypeNumber<Int64>, T>());
case TypeIndex::Float32: return f(TypePair<DataTypeNumber<Float32>, T>());
case TypeIndex::Float64: return f(TypePair<DataTypeNumber<Float64>, T>());
case TypeIndex::Decimal32: return f(TypePair<DataTypeDecimal<Decimal32>, T>());
case TypeIndex::Decimal64: return f(TypePair<DataTypeDecimal<Decimal64>, T>());
case TypeIndex::Decimal128: return f(TypePair<DataTypeDecimal<Decimal128>, T>());
case TypeIndex::Date: return f(TypePair<DataTypeDate, T>());
case TypeIndex::DateTime: return f(TypePair<DataTypeDateTime, T>());
case TypeIndex::String: return f(TypePair<DataTypeString, T>());
case TypeIndex::FixedString: return f(TypePair<DataTypeFixedString, T>());
case TypeIndex::Enum8: return f(TypePair<DataTypeEnum<Int8>, T>());
case TypeIndex::Enum16: return f(TypePair<DataTypeEnum<Int16>, T>());
case TypeIndex::UUID: return f(TypePair<DataTypeUUID, T>());
default:
break;
}
return false;
}
}
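callOnBasicTypes turns a pair of runtime TypeIndex values into a compile-time TypePair and passes it to a callable, returning whether the pair was dispatched. A usage sketch, assuming the ClickHouse headers from this diff are available and with a purely illustrative lambda body:

#include <Core/callOnTypeIndex.h>   // the header added in this diff
#include <Core/Types.h>
#include <type_traits>
#include <iostream>

int main()
{
    using namespace DB;
    TypeIndex left = TypeIndex::Int32;
    TypeIndex right = TypeIndex::Decimal64;

    // The callable receives a TypePair<L, R>; its return value reports whether the pair was handled.
    bool handled = callOnBasicTypes(left, right, [](const auto & pair)
    {
        using Pair = std::decay_t<decltype(pair)>;
        using L = typename Pair::LeftType;    // Int32 here
        using R = typename Pair::RightType;   // Decimal64 here
        std::cout << sizeof(L) << ' ' << sizeof(R) << '\n';
        return true;
    });

    std::cout << (handled ? "dispatched" : "unsupported pair") << '\n';
    return 0;
}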


@ -19,11 +19,15 @@ GraphiteRollupSortedBlockInputStream::GraphiteRollupSortedBlockInputStream(
params(params), time_of_merge(time_of_merge)
{
size_t max_size_of_aggregate_state = 0;
for (const auto & pattern : params.patterns)
if (pattern.function->sizeOfData() > max_size_of_aggregate_state)
max_size_of_aggregate_state = pattern.function->sizeOfData();
size_t max_alignment_of_aggregate_state = 1;
place_for_aggregate_state.resize(max_size_of_aggregate_state);
for (const auto & pattern : params.patterns)
{
max_size_of_aggregate_state = std::max(max_size_of_aggregate_state, pattern.function->sizeOfData());
max_alignment_of_aggregate_state = std::max(max_alignment_of_aggregate_state, pattern.function->alignOfData());
}
place_for_aggregate_state.reset(max_size_of_aggregate_state, max_alignment_of_aggregate_state);
/// Memoize column numbers in block.
path_column_num = header.getPositionByName(params.path_column_name);
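This hunk, and several below it, follow the same pattern: aggregate-state buffers now track alignOfData() alongside sizeOfData(), and allocation goes through AlignedBuffer or Arena::alignedAlloc instead of a plain char buffer. The reason, sketched with a hypothetical state type, is that constructing an object in under-aligned memory is undefined behaviour:

#include <cstddef>
#include <new>

// Hypothetical aggregate state with 16-byte alignment (e.g. it holds a 128-bit integer).
struct alignas(16) State { unsigned long long hi, lo; };

int main()
{
    // A plain std::vector<char> only guarantees alignment 1, so constructing State inside it
    // may be undefined behaviour. Requesting aligned storage -- which is what AlignedBuffer
    // and Arena::alignedAlloc do -- avoids that.
    void * raw = ::operator new(sizeof(State), std::align_val_t(alignof(State)));
    State * state = new (raw) State{};    // placement-new into correctly aligned memory
    state->hi = 1;
    state->~State();
    ::operator delete(raw, std::align_val_t(alignof(State)));
    return 0;
}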


@ -8,6 +8,7 @@
#include <AggregateFunctions/IAggregateFunction.h>
#include <Columns/ColumnAggregateFunction.h>
#include <Common/OptimizedRegularExpression.h>
#include <Common/AlignedBuffer.h>
namespace DB
@ -186,7 +187,7 @@ private:
time_t current_time_rounded = 0;
const Graphite::Pattern * current_pattern = nullptr;
std::vector<char> place_for_aggregate_state;
AlignedBuffer place_for_aggregate_state;
bool aggregate_state_created = false; /// Invariant: if true then current_pattern is not NULL.
const Graphite::Pattern * selectPatternForPath(StringRef path) const;


@ -65,7 +65,7 @@ size_t IBlockInputStream::checkDepthImpl(size_t max_depth, size_t level) const
}
void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t multiplier)
void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t multiplier) const
{
ostr << String(indent, ' ') << getName();
if (multiplier > 1)
@ -78,16 +78,16 @@ void IBlockInputStream::dumpTree(std::ostream & ostr, size_t indent, size_t mult
using Multipliers = std::map<String, size_t>;
Multipliers multipliers;
for (BlockInputStreams::const_iterator it = children.begin(); it != children.end(); ++it)
++multipliers[(*it)->getTreeID()];
for (const auto & child : children)
++multipliers[child->getTreeID()];
for (BlockInputStreams::iterator it = children.begin(); it != children.end(); ++it)
for (const auto & child : children)
{
String id = (*it)->getTreeID();
String id = child->getTreeID();
size_t & subtree_multiplier = multipliers[id];
if (subtree_multiplier != 0) /// Already printed subtrees are marked with zero in the array of multipliers.
{
(*it)->dumpTree(ostr, indent, subtree_multiplier);
child->dumpTree(ostr, indent, subtree_multiplier);
subtree_multiplier = 0;
}
}
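The rewritten loops above first count identical subtrees by their tree ID, then print each distinct subtree once and zero its counter so duplicates are skipped. A small self-contained sketch of that counting scheme (the IDs are made up for illustration):

#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include <iostream>

int main()
{
    // Pretend these are the tree IDs of a stream's children.
    std::vector<std::string> children = {"Filter/Scan", "Filter/Scan", "Sort/Scan"};

    std::map<std::string, std::size_t> multipliers;
    for (const auto & child : children)
        ++multipliers[child];

    for (const auto & child : children)
    {
        std::size_t & m = multipliers[child];
        if (m != 0)                    // already-printed subtrees are zeroed out
        {
            std::cout << child << " x " << m << '\n';
            m = 0;
        }
    }
    return 0;
}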


@ -91,7 +91,7 @@ public:
/** Must be called before read, readPrefix.
*/
void dumpTree(std::ostream & ostr, size_t indent = 0, size_t multiplier = 1);
void dumpTree(std::ostream & ostr, size_t indent = 0, size_t multiplier = 1) const;
/** Check the depth of the pipeline.
* If max_depth is specified and the `depth` is greater - throw an exception.


@ -2,6 +2,7 @@
#include <Core/Row.h>
#include <Core/ColumnNumbers.h>
#include <Common/AlignedBuffer.h>
#include <DataStreams/MergingSortedBlockInputStream.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
@ -74,7 +75,7 @@ private:
IAggregateFunction::AddFunc add_function = nullptr;
std::vector<size_t> column_numbers;
MutableColumnPtr merged_column;
std::vector<char> state;
AlignedBuffer state;
bool created = false;
/// In case when column has type AggregateFunction: use the aggregate function from itself instead of 'function' above.
@ -84,7 +85,7 @@ private:
{
function = AggregateFunctionFactory::instance().get(function_name, argument_types);
add_function = function->getAddressOfAddFunction();
state.resize(function->sizeOfData());
state.reset(function->sizeOfData(), function->alignOfData());
}
void createState()


@ -4,6 +4,7 @@
#include <Columns/ColumnAggregateFunction.h>
#include <Columns/FilterDescription.h>
#include <Common/typeid_cast.h>
#include <Common/Arena.h>
namespace DB
@ -34,7 +35,7 @@ TotalsHavingBlockInputStream::TotalsHavingBlockInputStream(
IAggregateFunction * function = column->getAggregateFunction().get();
auto target = ColumnAggregateFunction::create(column->getAggregateFunction(), Arenas(1, arena));
AggregateDataPtr data = arena->alloc(function->sizeOfData());
AggregateDataPtr data = arena->alignedAlloc(function->sizeOfData(), function->alignOfData());
function->create(data);
target->getData().push_back(data);
current_totals.emplace_back(std::move(target));


@ -1,12 +1,14 @@
#pragma once
#include <DataStreams/IProfilingBlockInputStream.h>
#include <Common/Arena.h>
namespace DB
{
class Arena;
using ArenaPtr = std::shared_ptr<Arena>;
class ExpressionActions;


@ -6,6 +6,7 @@
#include <Columns/ColumnAggregateFunction.h>
#include <Common/typeid_cast.h>
#include <Common/AlignedBuffer.h>
#include <Formats/FormatSettings.h>
#include <DataTypes/DataTypeAggregateFunction.h>
@ -68,7 +69,7 @@ void DataTypeAggregateFunction::deserializeBinary(Field & field, ReadBuffer & is
field = String();
String & s = get<String &>(field);
s.resize(size);
istr.readStrict(&s[0], size);
istr.readStrict(s.data(), size);
}
void DataTypeAggregateFunction::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr) const
@ -82,7 +83,7 @@ void DataTypeAggregateFunction::deserializeBinary(IColumn & column, ReadBuffer &
Arena & arena = column_concrete.createOrGetArena();
size_t size_of_state = function->sizeOfData();
AggregateDataPtr place = arena.alloc(size_of_state);
AggregateDataPtr place = arena.alignedAlloc(size_of_state, function->alignOfData());
function->create(place);
try
@ -123,13 +124,14 @@ void DataTypeAggregateFunction::deserializeBinaryBulk(IColumn & column, ReadBuff
vec.reserve(vec.size() + limit);
size_t size_of_state = function->sizeOfData();
size_t align_of_state = function->alignOfData();
for (size_t i = 0; i < limit; ++i)
{
if (istr.eof())
break;
AggregateDataPtr place = arena.alloc(size_of_state);
AggregateDataPtr place = arena.alignedAlloc(size_of_state, align_of_state);
function->create(place);
@ -160,7 +162,7 @@ static void deserializeFromString(const AggregateFunctionPtr & function, IColumn
Arena & arena = column_concrete.createOrGetArena();
size_t size_of_state = function->sizeOfData();
AggregateDataPtr place = arena.alloc(size_of_state);
AggregateDataPtr place = arena.alignedAlloc(size_of_state, function->alignOfData());
function->create(place);
@ -257,7 +259,7 @@ Field DataTypeAggregateFunction::getDefault() const
{
Field field = String();
PODArrayWithStackMemory<char, 16> place_buffer(function->sizeOfData());
AlignedBuffer place_buffer(function->sizeOfData(), function->alignOfData());
AggregateDataPtr place = place_buffer.data();
function->create(place);


@ -18,6 +18,8 @@ public:
DataTypeArray(const DataTypePtr & nested_);
TypeIndex getTypeId() const override { return TypeIndex::Array; }
std::string getName() const override
{
return "Array(" + nested->getName() + ")";

Some files were not shown because too many files have changed in this diff.