Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-27 01:51:59 +00:00

commit fff0c8da92: Merge remote-tracking branch 'origin/master' into ADQM-870

README.md
@@ -22,12 +22,13 @@ curl https://clickhouse.com/ | sh

 ## Upcoming Events

-* [**v23.5 Release Webinar**](https://clickhouse.com/company/events/v23-5-release-webinar?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-05) - Jun 8 - 23.5 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
-* [**ClickHouse Meetup in Bangalore**](https://www.meetup.com/clickhouse-bangalore-user-group/events/293740066/) - Jun 7
-* [**ClickHouse Meetup in San Francisco**](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/293426725/) - Jun 7
+* [**v23.6 Release Webinar**](https://clickhouse.com/company/events/v23-6-release-call?utm_source=github&utm_medium=social&utm_campaign=release-webinar-2023-06) - Jun 29 - 23.6 is rapidly approaching. Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release.
+* [**ClickHouse Meetup in Paris**](https://www.meetup.com/clickhouse-france-user-group/events/294283460) - Jul 4
+* [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
+* [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
+* [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20

-Also, keep an eye out for upcoming meetups in Amsterdam, Boston, NYC, Beijing, and Toronto. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.
+Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

 ## Recent Recordings

 * **Recent Meetup Videos**: [Meetup Playlist](https://www.youtube.com/playlist?list=PL0Z2YDlm0b3iNDUzpY1S3L_iV4nARda_U) Whenever possible, recordings of the ClickHouse Community Meetups are edited and presented as individual talks. Currently featuring "Modern SQL in 2023", "Fast, Concurrent, and Consistent Asynchronous INSERTS in ClickHouse", and "Full-Text Indices: Design and Experiments".
@@ -2,6 +2,7 @@

+#include <cstdint>
 #include <string>
 #include <array>

 #if defined(__SSE2__)
 #include <emmintrin.h>
@@ -11,3 +11,8 @@ constexpr double interpolateExponential(double min, double max, double ratio)
     assert(min > 0 && ratio >= 0 && ratio <= 1);
     return min * std::pow(max / min, ratio);
 }
+
+constexpr double interpolateLinear(double min, double max, double ratio)
+{
+    return std::lerp(min, max, ratio);
+}
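For reference, the two helpers trace standard curves between `min` and `max`; both return `min` at `ratio = 0` and `max` at `ratio = 1`:

$$
\mathrm{interpolateExponential}(m, M, r) = m \left(\frac{M}{m}\right)^{r},
\qquad
\mathrm{interpolateLinear}(m, M, r) = m + r\,(M - m)
$$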
@@ -16,7 +16,6 @@ def process_result(result_folder):
         "TLPGroupBy",
         "TLPHaving",
         "TLPWhere",
         "TLPWhereGroupBy",
         "NoREC",
     ]
     failed_tests = []
@@ -33,7 +33,7 @@ cd /workspace

 for _ in $(seq 1 60); do if [[ $(wget -q 'localhost:8123' -O-) == 'Ok.' ]]; then break ; else sleep 1; fi ; done

-cd /sqlancer/sqlancer-master
+cd /sqlancer/sqlancer-main

 TIMEOUT=300
 NUM_QUERIES=1000
@@ -59,6 +59,8 @@ install_packages previous_release_package_folder
 # available for dump via clickhouse-local
 configure

+# it contains some new settings, but we can safely remove it
+rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml

 start
@@ -85,6 +87,8 @@ export USE_S3_STORAGE_FOR_MERGE_TREE=1
 export ZOOKEEPER_FAULT_INJECTION=0
 configure

+# it contains some new settings, but we can safely remove it
+rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/users.d/nonconst_timezone.xml

 start
@@ -115,6 +119,13 @@ mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/c
 install_packages package_folder
 export ZOOKEEPER_FAULT_INJECTION=1
 configure

+# Just in case previous version left some garbage in zk
+sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
+    | sed "s|>1<|>0<|g" \
+    > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml
+
 start 500
 clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
     || (rg --text "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt \
@@ -35,6 +35,10 @@ The table structure can differ from the original MySQL table structure:

 - Column types may differ from those in the original MySQL table. ClickHouse tries to [cast](../../../engines/database-engines/mysql.md#data_types-support) values to the ClickHouse data types.
 - The [external_table_functions_use_nulls](../../../operations/settings/settings.md#external-table-functions-use-nulls) setting defines how to handle Nullable columns. Default value: 1. If 0, the table function does not make Nullable columns and inserts default values instead of nulls. This is also applicable for NULL values inside arrays.

+:::note
+The MySQL Table Engine is currently not available on the ClickHouse builds for MacOS ([issue](https://github.com/ClickHouse/ClickHouse/issues/21191))
+:::
+
 **Engine Parameters**

 - `host:port` — MySQL server address.
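For orientation, a hedged creation sketch for this engine; the host, credentials, database, and table names below are hypothetical:

```sql
CREATE TABLE mysql_mirror
(
    id UInt64,
    name String
)
-- Parameter order: MySQL('host:port', 'database', 'table', 'user', 'password')
ENGINE = MySQL('mysql-host:3306', 'shop_db', 'customers', 'reader', 'secret');
```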
@@ -2941,7 +2941,7 @@ Default value: `0`.

 ## mutations_sync {#mutations_sync}

-Allows to execute `ALTER TABLE ... UPDATE|DELETE` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.
+Allows to execute `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries ([mutations](../../sql-reference/statements/alter/index.md#mutations)) synchronously.

 Possible values:
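As a hedged illustration (table and index names are hypothetical), value `2` makes the newly listed mutation kinds block until all replicas have executed them:

```sql
SET mutations_sync = 2;
-- Returns only after the mutation has finished on all replicas.
ALTER TABLE test_table MATERIALIZE INDEX idx_value;
```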
@@ -11,7 +11,8 @@ Columns:

 - `host` ([String](../../sql-reference/data-types/string.md)) — The hostname/IP of the ZooKeeper node that ClickHouse connected to.
 - `port` ([String](../../sql-reference/data-types/string.md)) — The port of the ZooKeeper node that ClickHouse connected to.
 - `index` ([UInt8](../../sql-reference/data-types/int-uint.md)) — The index of the ZooKeeper node that ClickHouse connected to. The index is from ZooKeeper config.
-- `connected_time` ([String](../../sql-reference/data-types/string.md)) — When the connection was established
+- `connected_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — When the connection was established
+- `session_uptime_elapsed_seconds` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Seconds elapsed since the connection was established
 - `is_expired` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Is the current connection expired.
 - `keeper_api_version` ([String](../../sql-reference/data-types/string.md)) — Keeper API version.
 - `client_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Session id of the connection.
@@ -23,7 +24,7 @@ SELECT * FROM system.zookeeper_connection;
 ```

 ``` text
-┌─name──────────────┬─host─────────┬─port─┬─index─┬──────connected_time─┬─is_expired─┬─keeper_api_version─┬──────────client_id─┐
-│ default_zookeeper │ 127.0.0.1    │ 2181 │     0 │ 2023-05-19 14:30:16 │          0 │                  0 │ 216349144108826660 │
-└───────────────────┴──────────────┴──────┴───────┴─────────────────────┴────────────┴────────────────────┴────────────────────┘
+┌─name────┬─host──────┬─port─┬─index─┬──────connected_time─┬─session_uptime_elapsed_seconds─┬─is_expired─┬─keeper_api_version─┬─client_id─┐
+│ default │ 127.0.0.1 │ 9181 │     0 │ 2023-06-15 14:36:01 │                           3058 │          0 │                  3 │         5 │
+└─────────┴───────────┴──────┴───────┴─────────────────────┴────────────────────────────────┴────────────┴────────────────────┴───────────┘
 ```
@@ -32,7 +32,7 @@ For example, Decimal32(4) can contain numbers from -99999.9999 to 99999.9999 wit

 Internally data is represented as normal signed integers with respective bit width. Real value ranges that can be stored in memory are a bit larger than specified above, which are checked only on conversion from a string.

-Because modern CPUs do not support 128-bit integers natively, operations on Decimal128 are emulated. Because of this Decimal128 works significantly slower than Decimal32/Decimal64.
+Because modern CPUs do not support 128-bit and 256-bit integers natively, operations on Decimal128 and Decimal256 are emulated. Thus, Decimal128 and Decimal256 work significantly slower than Decimal32/Decimal64.

 ## Operations and Result Type
@@ -59,6 +59,10 @@ Some functions on Decimal return result as Float64 (for example, var or stddev).

 During calculations on Decimal, integer overflows might happen. Excessive digits in a fraction are discarded (not rounded). Excessive digits in integer part will lead to an exception.

+:::warning
+Overflow check is not implemented for Decimal128 and Decimal256. In case of overflow incorrect result is returned, no exception is thrown.
+:::
+
 ``` sql
 SELECT toDecimal32(2, 4) AS x, x / 3
 ```
@@ -232,6 +232,7 @@ ALTER TABLE table_with_ttl MODIFY COLUMN column_ttl REMOVE TTL;

 Materializes or updates a column with an expression for a default value (`DEFAULT` or `MATERIALIZED`).
 It is used if it is necessary to add or update a column with a complicated expression, because evaluating such an expression directly on `SELECT` turns out to be expensive.
+Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
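For illustration, a hedged usage sketch (hypothetical table and column names; see the formal syntax below):

```sql
ALTER TABLE tmp MATERIALIZE COLUMN s;
```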
Syntax:
@@ -60,7 +60,7 @@ You can specify how long (in seconds) to wait for inactive replicas to execute a
 For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active for more than the time, specified in the `replication_wait_for_inactive_replica_timeout` setting, then an exception `UNFINISHED` is thrown.
 :::

-For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
+For `ALTER TABLE ... UPDATE|DELETE|MATERIALIZE INDEX|MATERIALIZE PROJECTION|MATERIALIZE COLUMN` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.

 ## Related content
@@ -142,19 +142,19 @@ The following operations with [projections](/docs/en/engines/table-engines/merge

 ## ADD PROJECTION

-`ALTER TABLE [db].name ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.
+`ALTER TABLE [db.]name [ON CLUSTER cluster] ADD PROJECTION [IF NOT EXISTS] name ( SELECT <COLUMN LIST EXPR> [GROUP BY] [ORDER BY] )` - Adds projection description to tables metadata.

 ## DROP PROJECTION

-`ALTER TABLE [db].name DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]name [ON CLUSTER cluster] DROP PROJECTION [IF EXISTS] name` - Removes projection description from tables metadata and deletes projection files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 ## MATERIALIZE PROJECTION

-`ALTER TABLE [db.]table MATERIALIZE PROJECTION name IN PARTITION partition_name` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] MATERIALIZE PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - The query rebuilds the projection `name` in the partition `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 ## CLEAR PROJECTION

-`ALTER TABLE [db.]table CLEAR PROJECTION [IF EXISTS] name IN PARTITION partition_name` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table [ON CLUSTER cluster] CLEAR PROJECTION [IF EXISTS] name [IN PARTITION partition_name]` - Deletes projection files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

 The commands `ADD`, `DROP` and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
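Putting the updated commands together, a hedged end-to-end sketch (the table, projection, and partition names are hypothetical):

```sql
ALTER TABLE visits ADD PROJECTION IF NOT EXISTS user_agg
(
    SELECT user_id, count() GROUP BY user_id
);
-- Build the projection for parts written before ADD PROJECTION:
ALTER TABLE visits MATERIALIZE PROJECTION user_agg IN PARTITION 202306;
```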
@@ -10,15 +10,25 @@ sidebar_label: INDEX

 The following operations are available:

-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] ADD INDEX name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.
+## ADD INDEX

-- `ALTER TABLE [db].table_name [ON CLUSTER cluster] DROP INDEX name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] ADD INDEX [IF NOT EXISTS] name expression TYPE type [GRANULARITY value] [FIRST|AFTER name]` - Adds index description to tables metadata.

-- `ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.
+## DROP INDEX

-The first two commands are lightweight in a sense that they only change metadata or remove files.
+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] DROP INDEX [IF EXISTS] name` - Removes index description from tables metadata and deletes index files from disk. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

-Also, they are replicated, syncing indices metadata via ZooKeeper.
+## MATERIALIZE INDEX

+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] MATERIALIZE INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Rebuilds the secondary index `name` for the specified `partition_name`. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations). If `IN PARTITION` part is omitted then it rebuilds the index for the whole table data.

+## CLEAR INDEX

+`ALTER TABLE [db.]table_name [ON CLUSTER cluster] CLEAR INDEX [IF EXISTS] name [IN PARTITION partition_name]` - Deletes the secondary index files from disk without removing description. Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

+The commands `ADD`, `DROP`, and `CLEAR` are lightweight in the sense that they only change metadata or remove files.
+Also, they are replicated, syncing indices metadata via ClickHouse Keeper or ZooKeeper.

 :::note
 Index manipulation is supported only for tables with [`*MergeTree`](/docs/en/engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](/docs/en/engines/table-engines/mergetree-family/replication.md) variants).
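A hedged lifecycle sketch of the commands above (hypothetical table, column, and index names):

```sql
ALTER TABLE test_table ADD INDEX IF NOT EXISTS idx_value value TYPE minmax GRANULARITY 4;
-- Build the index for data inserted before ADD INDEX:
ALTER TABLE test_table MATERIALIZE INDEX idx_value;
-- Delete the index files but keep the description:
ALTER TABLE test_table CLEAR INDEX idx_value;
```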
@@ -82,6 +82,35 @@ LIFETIME(MIN 0 MAX 1000)
 LAYOUT(FLAT())
 ```

+:::note
+When using the SQL console in [ClickHouse Cloud](https://clickhouse.com), you must specify a user (`default` or any other user with the role `default_role`) and password when creating a dictionary.
+:::
+
+```sql
+CREATE USER IF NOT EXISTS clickhouse_admin
+IDENTIFIED WITH sha256_password BY 'passworD43$x';
+
+GRANT default_role TO clickhouse_admin;
+
+CREATE DATABASE foo_db;
+
+CREATE TABLE foo_db.source_table (
+    id UInt64,
+    value String
+) ENGINE = MergeTree
+PRIMARY KEY id;
+
+CREATE DICTIONARY foo_db.id_value_dictionary
+(
+    id UInt64,
+    value String
+)
+PRIMARY KEY id
+SOURCE(CLICKHOUSE(TABLE 'source_table' USER 'clickhouse_admin' PASSWORD 'passworD43$x' DB 'foo_db' ))
+LAYOUT(FLAT())
+LIFETIME(MIN 0 MAX 1000);
+```
+
 ### Create a dictionary from a table in a remote ClickHouse service

 Input table (in the remote ClickHouse service) `source_table`:
@@ -380,11 +380,15 @@ High compression levels are useful for asymmetric scenarios, like compress once,

 `DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library. Some limitations apply:

-- DEFLATE_QPL is experimental and can only be used after setting configuration parameter `allow_experimental_codecs=1`.
+- DEFLATE_QPL is disabled by default and can only be used after setting configuration parameter `enable_deflate_qpl_codec = 1`.
 - DEFLATE_QPL requires a ClickHouse build compiled with SSE 4.2 instructions (by default, this is the case). Refer to [Build Clickhouse with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Build-Clickhouse-with-DEFLATE_QPL) for more details.
 - DEFLATE_QPL works best if the system has an Intel® IAA (In-Memory Analytics Accelerator) offloading device. Refer to [Accelerator Configuration](https://intel.github.io/qpl/documentation/get_started_docs/installation.html#accelerator-configuration) and [Benchmark with DEFLATE_QPL](/docs/en/development/building_and_benchmarking_deflate_qpl.md/#Run-Benchmark-with-DEFLATE_QPL) for more details.
 - DEFLATE_QPL-compressed data can only be transferred between ClickHouse nodes compiled with SSE 4.2 enabled.

+:::note
+DEFLATE_QPL is not available in ClickHouse Cloud.
+:::
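A hedged usage sketch once the setting is enabled (table and column names are hypothetical):

```sql
SET enable_deflate_qpl_codec = 1;

CREATE TABLE qpl_demo
(
    payload String CODEC(DEFLATE_QPL)
)
ENGINE = MergeTree
ORDER BY tuple();
```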
 ### Specialized Codecs

 These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themselves. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation.
@@ -31,7 +31,7 @@ sidebar_label: Decimal

 ## Internal Representation {#vnutrennee-predstavlenie}

 Internally, data is represented as ordinary signed integers of the corresponding bit width. The real value ranges that can be stored in memory are somewhat larger than those declared; the declared Decimal ranges are checked only when a number is read from its string representation.

-Because modern CPUs do not support 128-bit numbers natively, operations on Decimal128 are emulated in software. Decimal128 is therefore several times slower than Decimal32/Decimal64.
+Because modern CPUs do not support 128-bit and 256-bit numbers natively, operations on Decimal128 and Decimal256 are emulated in software. These types are several times slower than Decimal32/Decimal64.

 ## Operations and Result Types {#operatsii-i-tipy-rezultata}
@@ -59,6 +59,10 @@ sidebar_label: Decimal

 When performing operations on the Decimal type, integer overflows may occur. Excess digits in the fractional part are discarded (not rounded). Excess digits in the integer part lead to an exception.

+:::warning
+Overflow checks are not implemented for Decimal128 and Decimal256. If an overflow occurs, an incorrect result is returned and no exception is thrown.
+:::

 ``` sql
 SELECT toDecimal32(2, 4) AS x, x / 3
 ```
@@ -2,6 +2,8 @@

 #include <base/types.h>

 #include <vector>

 namespace DB
 {
@@ -8,9 +8,7 @@
 #include <Poco/Logger.h>
 #include <Poco/NullChannel.h>
 #include <Poco/SimpleFileChannel.h>
-#include <Databases/DatabaseFilesystem.h>
 #include <Databases/DatabaseMemory.h>
-#include <Databases/DatabasesOverlay.h>
 #include <Storages/System/attachSystemTables.h>
 #include <Storages/System/attachInformationSchemaTables.h>
 #include <Interpreters/DatabaseCatalog.h>
@@ -52,8 +50,6 @@
 #include <base/argsToConfig.h>
 #include <filesystem>

 #include "config.h"

 #if defined(FUZZING_MODE)
 #include <Functions/getFuzzerData.h>
 #endif
@@ -174,13 +170,6 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
     return system_database;
 }

-static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
-{
-    auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
-    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
-    databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
-    return databaseCombiner;
-}

 /// If path is specified and not empty, will try to setup server environment and load existing metadata
 void LocalServer::tryInitPath()
@@ -680,7 +669,7 @@ void LocalServer::processConfig()
      * if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
      */
     std::string default_database = config().getString("default_database", "_local");
-    DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
+    DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, global_context));
     global_context->setCurrentDatabase(default_database);
     applyCmdOptions(global_context);
@@ -4,6 +4,7 @@
 #include <bitset>
 #include <cstring>
 #include <vector>
 #include <unordered_map>

 namespace DB
@@ -333,7 +333,7 @@ void ContextAccess::calculateAccessRights() const
             boost::algorithm::join(roles_info->getCurrentRolesNames(), ", "),
             boost::algorithm::join(roles_info->getEnabledRolesNames(), ", "));
     }
-    LOG_TRACE(trace_log, "Settings: readonly={}, allow_ddl={}, allow_introspection_functions={}", params.readonly, params.allow_ddl, params.allow_introspection);
+    LOG_TRACE(trace_log, "Settings: readonly = {}, allow_ddl = {}, allow_introspection_functions = {}", params.readonly, params.allow_ddl, params.allow_introspection);
     LOG_TRACE(trace_log, "List of all grants: {}", access->toString());
     LOG_TRACE(trace_log, "List of all grants including implicit: {}", access_with_implicit->toString());
 }
@@ -146,8 +146,8 @@ public:
         for (const auto & argument : this->argument_types)
             can_be_compiled &= canBeNativeType(*argument);

-        auto return_type = this->getResultType();
-        can_be_compiled &= canBeNativeType(*return_type);
+        const auto & result_type = this->getResultType();
+        can_be_compiled &= canBeNativeType(*result_type);

         return can_be_compiled;
     }
@@ -198,8 +198,8 @@ public:
         auto * denominator_ptr = b.CreateConstGEP1_32(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);
         auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);

-        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, b.getDoubleTy());
-        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, b.getDoubleTy());
+        auto * double_numerator = nativeCast<Numerator>(b, numerator_value, this->getResultType());
+        auto * double_denominator = nativeCast<Denominator>(b, denominator_value, this->getResultType());

         return b.CreateFDiv(double_numerator, double_denominator);
     }
@@ -308,7 +308,7 @@ public:

 #if USE_EMBEDDED_COMPILER

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@@ -316,7 +316,7 @@ public:

         auto * numerator_ptr = aggregate_data_ptr;
         auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);
-        auto * value_cast_to_numerator = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
+        auto * value_cast_to_numerator = nativeCast(b, arguments[0], toNativeDataType<Numerator>());
         auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_cast_to_numerator) : b.CreateFAdd(numerator_value, value_cast_to_numerator);
         b.CreateStore(numerator_result_value, numerator_ptr);
@@ -30,7 +30,7 @@ public:

     using Numerator = typename Base::Numerator;
     using Denominator = typename Base::Denominator;
-    using Fraction = typename Base::Fraction;
+    using Fraction = typename Base::Fraction;

     void NO_SANITIZE_UNDEFINED add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override
     {
@@ -55,7 +55,7 @@ public:
         return can_be_compiled;
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@@ -63,8 +63,9 @@ public:
         auto * numerator_ptr = aggregate_data_ptr;
         auto * numerator_value = b.CreateLoad(numerator_type, numerator_ptr);

-        auto * argument = nativeCast(b, arguments_types[0], argument_values[0], numerator_type);
-        auto * weight = nativeCast(b, arguments_types[1], argument_values[1], numerator_type);
+        auto numerator_data_type = toNativeDataType<Numerator>();
+        auto * argument = nativeCast(b, arguments[0], numerator_data_type);
+        auto * weight = nativeCast(b, arguments[1], numerator_data_type);

         llvm::Value * value_weight_multiplication = argument->getType()->isIntegerTy() ? b.CreateMul(argument, weight) : b.CreateFMul(argument, weight);
         auto * numerator_result_value = numerator_type->isIntegerTy() ? b.CreateAdd(numerator_value, value_weight_multiplication) : b.CreateFAdd(numerator_value, value_weight_multiplication);
@@ -75,7 +76,7 @@ public:
         static constexpr size_t denominator_offset = offsetof(Fraction, denominator);
         auto * denominator_ptr = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, denominator_offset);

-        auto * weight_cast_to_denominator = nativeCast(b, arguments_types[1], argument_values[1], denominator_type);
+        auto * weight_cast_to_denominator = nativeCast(b, arguments[1], toNativeDataType<Denominator>());

         auto * denominator_value = b.CreateLoad(denominator_type, denominator_ptr);
         auto * denominator_value_updated = denominator_type->isIntegerTy() ? b.CreateAdd(denominator_value, weight_cast_to_denominator) : b.CreateFAdd(denominator_value, weight_cast_to_denominator);
@@ -148,7 +148,7 @@ public:
         Data::compileCreate(builder, value_ptr);
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@@ -157,8 +157,7 @@ public:
         auto * value_ptr = aggregate_data_ptr;
         auto * value = b.CreateLoad(return_type, value_ptr);

-        const auto & argument_value = argument_values[0];
-        auto * result_value = Data::compileUpdate(builder, value, argument_value);
+        auto * result_value = Data::compileUpdate(builder, value, arguments[0].value);

         b.CreateStore(result_value, value_ptr);
     }
@@ -165,7 +165,7 @@ public:
         b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> &) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType &) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@@ -309,13 +309,13 @@ public:
         b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), sizeof(AggregateFunctionCountData), llvm::assumeAligned(this->alignOfData()));
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

         auto * return_type = toNativeType(b, this->getResultType());

-        auto * is_null_value = b.CreateExtractValue(values[0], {1});
+        auto * is_null_value = b.CreateExtractValue(arguments[0].value, {1});
         auto * increment_value = b.CreateSelect(is_null_value, llvm::ConstantInt::get(return_type, 0), llvm::ConstantInt::get(return_type, 1));

         auto * count_value_ptr = aggregate_data_ptr;
@@ -188,18 +188,18 @@ public:
         return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        const auto & nullable_type = arguments_types[0];
-        const auto & nullable_value = argument_values[0];
+        const auto & nullable_type = arguments[0].type;
+        const auto & nullable_value = arguments[0].value;

         auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
         auto * is_null_value = b.CreateExtractValue(nullable_value, {1});

-        const auto & predicate_type = arguments_types[argument_values.size() - 1];
-        auto * predicate_value = argument_values[argument_values.size() - 1];
+        const auto & predicate_type = arguments.back().type;
+        auto * predicate_value = arguments.back().value;
         auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

         auto * head = b.GetInsertBlock();
@@ -219,7 +219,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
@@ -370,38 +370,31 @@ public:
         return canBeNativeType(*this->argument_types.back()) && this->nested_function->isCompilable();
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         /// TODO: Check

         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        size_t arguments_size = arguments_types.size();
+        size_t arguments_size = arguments.size();

+        ValuesWithType wrapped_arguments;
+        wrapped_arguments.reserve(arguments_size);

-        DataTypes non_nullable_types;
-        std::vector<llvm::Value * > wrapped_values;
         std::vector<llvm::Value * > is_null_values;

-        non_nullable_types.resize(arguments_size);
-        wrapped_values.resize(arguments_size);
-        is_null_values.resize(arguments_size);

         for (size_t i = 0; i < arguments_size; ++i)
         {
-            const auto & argument_value = argument_values[i];
+            const auto & argument_value = arguments[i].value;
+            const auto & argument_type = arguments[i].type;

             if (is_nullable[i])
             {
                 auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
-                is_null_values[i] = b.CreateExtractValue(argument_value, {1});
-
-                wrapped_values[i] = wrapped_value;
-                non_nullable_types[i] = removeNullable(arguments_types[i]);
+                is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
+                wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
             }
             else
             {
-                wrapped_values[i] = argument_value;
-                non_nullable_types[i] = arguments_types[i];
+                wrapped_arguments.emplace_back(argument_value, argument_type);
             }
         }
@@ -415,9 +408,6 @@ public:

         for (auto * is_null_value : is_null_values)
         {
-            if (!is_null_value)
-                continue;
-
             auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
             b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
         }
@@ -426,8 +416,8 @@ public:

         b.SetInsertPoint(join_block_after_null_checks);

-        const auto & predicate_type = arguments_types[argument_values.size() - 1];
-        auto * predicate_value = argument_values[argument_values.size() - 1];
+        const auto & predicate_type = arguments.back().type;
+        auto * predicate_value = arguments.back().value;
         auto * is_predicate_true = nativeBoolCast(b, predicate_type, predicate_value);

         auto * if_true = llvm::BasicBlock::Create(head->getContext(), "if_true", head->getParent());
@@ -444,7 +434,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, non_nullable_types, wrapped_values);
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
@@ -223,12 +223,12 @@ public:
         nested_func->compileCreate(builder, aggregate_data_ptr);
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        const auto & predicate_type = arguments_types[argument_values.size() - 1];
-        auto * predicate_value = argument_values[argument_values.size() - 1];
+        const auto & predicate_type = arguments.back().type;
+        auto * predicate_value = arguments.back().value;

         auto * head = b.GetInsertBlock();
@@ -242,21 +242,9 @@ public:

         b.SetInsertPoint(if_true);

-        size_t arguments_size_without_predicate = arguments_types.size() - 1;
-
-        DataTypes argument_types_without_predicate;
-        std::vector<llvm::Value *> argument_values_without_predicate;
-
-        argument_types_without_predicate.resize(arguments_size_without_predicate);
-        argument_values_without_predicate.resize(arguments_size_without_predicate);
-
-        for (size_t i = 0; i < arguments_size_without_predicate; ++i)
-        {
-            argument_types_without_predicate[i] = arguments_types[i];
-            argument_values_without_predicate[i] = argument_values[i];
-        }
-
-        nested_func->compileAdd(builder, aggregate_data_ptr, argument_types_without_predicate, argument_values_without_predicate);
+        ValuesWithType arguments_without_predicate = arguments;
+        arguments_without_predicate.pop_back();
+        nested_func->compileAdd(builder, aggregate_data_ptr, arguments_without_predicate);

         b.CreateBr(join_block);
@@ -1459,11 +1459,11 @@ public:
         b.CreateMemSet(aggregate_data_ptr, llvm::ConstantInt::get(b.getInt8Ty(), 0), this->sizeOfData(), llvm::assumeAligned(this->alignOfData()));
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes &, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         if constexpr (Data::is_compilable)
         {
-            Data::compileChangeIfBetter(builder, aggregate_data_ptr, argument_values[0]);
+            Data::compileChangeIfBetter(builder, aggregate_data_ptr, arguments[0].value);
         }
         else
         {
@@ -378,12 +378,12 @@ public:

 #if USE_EMBEDDED_COMPILER

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        const auto & nullable_type = arguments_types[0];
-        const auto & nullable_value = argument_values[0];
+        const auto & nullable_type = arguments[0].type;
+        const auto & nullable_value = arguments[0].value;

         auto * wrapped_value = b.CreateExtractValue(nullable_value, {0});
         auto * is_null_value = b.CreateExtractValue(nullable_value, {1});
@@ -405,7 +405,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { removeNullable(nullable_type) }, { wrapped_value });
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, { ValueWithType(wrapped_value, removeNullable(nullable_type)) });
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
@@ -568,36 +568,32 @@ public:

 #if USE_EMBEDDED_COMPILER

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
    {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);

-        size_t arguments_size = arguments_types.size();
+        size_t arguments_size = arguments.size();

-        DataTypes non_nullable_types;
-        std::vector<llvm::Value * > wrapped_values;
-        std::vector<llvm::Value * > is_null_values;
+        ValuesWithType wrapped_arguments;
+        wrapped_arguments.reserve(arguments_size);

-        non_nullable_types.resize(arguments_size);
-        wrapped_values.resize(arguments_size);
-        is_null_values.resize(arguments_size);
+        std::vector<llvm::Value *> is_null_values;
+        is_null_values.reserve(arguments_size);

         for (size_t i = 0; i < arguments_size; ++i)
         {
-            const auto & argument_value = argument_values[i];
+            const auto & argument_value = arguments[i].value;
+            const auto & argument_type = arguments[i].type;

             if (is_nullable[i])
             {
                 auto * wrapped_value = b.CreateExtractValue(argument_value, {0});
-                is_null_values[i] = b.CreateExtractValue(argument_value, {1});
-
-                wrapped_values[i] = wrapped_value;
-                non_nullable_types[i] = removeNullable(arguments_types[i]);
+                is_null_values.emplace_back(b.CreateExtractValue(argument_value, {1}));
+                wrapped_arguments.emplace_back(wrapped_value, removeNullable(argument_type));
             }
             else
             {
-                wrapped_values[i] = argument_value;
-                non_nullable_types[i] = arguments_types[i];
+                wrapped_arguments.emplace_back(argument_value, argument_type);
             }
         }
@@ -612,9 +608,6 @@ public:

         for (auto * is_null_value : is_null_values)
         {
-            if (!is_null_value)
-                continue;
-
             auto * values_have_null = b.CreateLoad(b.getInt1Ty(), values_have_null_ptr);
             b.CreateStore(b.CreateOr(values_have_null, is_null_value), values_have_null_ptr);
         }
@@ -630,7 +623,7 @@ public:
         b.CreateStore(llvm::ConstantInt::get(b.getInt8Ty(), 1), aggregate_data_ptr);

         auto * aggregate_data_ptr_with_prefix_size_offset = b.CreateConstInBoundsGEP1_64(b.getInt8Ty(), aggregate_data_ptr, this->prefix_size);
-        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, arguments_types, wrapped_values);
+        this->nested_function->compileAdd(b, aggregate_data_ptr_with_prefix_size_offset, wrapped_arguments);
         b.CreateBr(join_block);

         b.SetInsertPoint(join_block);
@@ -588,7 +588,7 @@ public:
         b.CreateStore(llvm::Constant::getNullValue(return_type), aggregate_sum_ptr);
     }

-    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
+    void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const ValuesWithType & arguments) const override
     {
         llvm::IRBuilder<> & b = static_cast<llvm::IRBuilder<> &>(builder);
@@ -597,10 +597,7 @@ public:
         auto * sum_value_ptr = aggregate_data_ptr;
         auto * sum_value = b.CreateLoad(return_type, sum_value_ptr);

-        const auto & argument_type = arguments_types[0];
-        const auto & argument_value = argument_values[0];
-
-        auto * value_cast_to_result = nativeCast(b, argument_type, argument_value, return_type);
+        auto * value_cast_to_result = nativeCast(b, arguments[0], this->getResultType());
         auto * sum_result_value = sum_value->getType()->isIntegerTy() ? b.CreateAdd(sum_value, value_cast_to_result) : b.CreateFAdd(sum_value, value_cast_to_result);

         b.CreateStore(sum_result_value, sum_value_ptr);
@@ -6,6 +6,7 @@
 #include <Core/Block.h>
 #include <Core/ColumnNumbers.h>
 #include <Core/Field.h>
+#include <Core/ValuesWithType.h>
 #include <Interpreters/Context_fwd.h>
 #include <base/types.h>
 #include <Common/Exception.h>
@@ -389,7 +390,7 @@ public:
     }

     /// compileAdd should generate code for updating aggregate function state stored in aggregate_data_ptr
-    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const DataTypes & /*arguments_types*/, const std::vector<llvm::Value *> & /*arguments_values*/) const
+    virtual void compileAdd(llvm::IRBuilderBase & /*builder*/, llvm::Value * /*aggregate_data_ptr*/, const ValuesWithType & /*arguments*/) const
     {
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not JIT-compilable", getName());
     }
@@ -588,7 +588,7 @@ void Connection::sendQuery(
         if (method == "ZSTD")
             level = settings->network_zstd_compression_level;

-        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs);
+        CompressionCodecFactory::instance().validateCodec(method, level, !settings->allow_suspicious_codecs, settings->allow_experimental_codecs, settings->enable_deflate_qpl_codec);
         compression_codec = CompressionCodecFactory::instance().get(method, level);
     }
     else
@@ -18,7 +18,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
     String client_name,
     Protocol::Compression compression,
     Protocol::Secure secure,
-    Int64 priority)
+    Priority priority)
 {
     Key key{
         max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
@@ -74,7 +74,7 @@ size_t ConnectionPoolFactory::KeyHash::operator()(const ConnectionPoolFactory::K
     hash_combine(seed, hash_value(k.client_name));
     hash_combine(seed, hash_value(k.compression));
     hash_combine(seed, hash_value(k.secure));
-    hash_combine(seed, hash_value(k.priority));
+    hash_combine(seed, hash_value(k.priority.value));
     return seed;
 }
@@ -1,6 +1,7 @@
 #pragma once

 #include <Common/PoolBase.h>
+#include <Common/Priority.h>
 #include <Client/Connection.h>
 #include <IO/ConnectionTimeouts.h>
 #include <Core/Settings.h>
@@ -34,7 +35,7 @@ public:
         const Settings * settings = nullptr,
         bool force_connected = true) = 0;

-    virtual Int64 getPriority() const { return 1; }
+    virtual Priority getPriority() const { return Priority{1}; }
 };

 using ConnectionPoolPtr = std::shared_ptr<IConnectionPool>;
@@ -60,7 +61,7 @@ public:
         const String & client_name_,
         Protocol::Compression compression_,
         Protocol::Secure secure_,
-        Int64 priority_ = 1)
+        Priority priority_ = Priority{1})
         : Base(max_connections_,
             &Poco::Logger::get("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
         host(host_),
@@ -103,7 +104,7 @@ public:
         return host + ":" + toString(port);
     }

-    Int64 getPriority() const override
+    Priority getPriority() const override
     {
         return priority;
     }
@@ -134,7 +135,7 @@ private:
     String client_name;
     Protocol::Compression compression; /// Whether to compress data when interacting with the server.
     Protocol::Secure secure; /// Whether to encrypt data when interacting with the server.
-    Int64 priority; /// priority from <remote_servers>
+    Priority priority; /// priority from <remote_servers>
 };

 /**
@@ -157,7 +158,7 @@ public:
         String client_name;
         Protocol::Compression compression;
         Protocol::Secure secure;
-        Int64 priority;
+        Priority priority;
     };

     struct KeyHash
@@ -180,7 +181,7 @@ public:
         String client_name,
         Protocol::Compression compression,
         Protocol::Secure secure,
-        Int64 priority);
+        Priority priority);
 private:
     mutable std::mutex mutex;
     using ConnectionPoolWeakPtr = std::weak_ptr<IConnectionPool>;
@@ -71,7 +71,7 @@ IConnectionPool::Entry ConnectionPoolWithFailover::get(const ConnectionTimeouts
     return Base::get(max_ignored_errors, fallback_to_stale_replicas, try_get_entry, get_priority);
 }

-Int64 ConnectionPoolWithFailover::getPriority() const
+Priority ConnectionPoolWithFailover::getPriority() const
 {
     return (*std::max_element(nested_pools.begin(), nested_pools.end(), [](const auto & a, const auto & b)
     {
@@ -48,7 +48,7 @@ public:
         const Settings * settings,
         bool force_connected) override; /// From IConnectionPool

-    Int64 getPriority() const override; /// From IConnectionPool
+    Priority getPriority() const override; /// From IConnectionPool

     /** Allocates up to the specified number of connections to work.
      * Connections provide access to different replicas of one shard.
@@ -1,4 +1,5 @@
 #include <Common/GetPriorityForLoadBalancing.h>
+#include <Common/Priority.h>

 namespace DB
 {
@@ -8,23 +9,23 @@ namespace ErrorCodes
     extern const int LOGICAL_ERROR;
 }

-std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
+std::function<Priority(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const
 {
-    std::function<size_t(size_t index)> get_priority;
+    std::function<Priority(size_t index)> get_priority;
     switch (load_balance)
     {
         case LoadBalancing::NEAREST_HOSTNAME:
             if (hostname_differences.empty())
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "It's a bug: hostname_differences is not initialized");
-            get_priority = [this](size_t i) { return hostname_differences[i]; };
+            get_priority = [this](size_t i) { return Priority{static_cast<Int64>(hostname_differences[i])}; };
             break;
         case LoadBalancing::IN_ORDER:
-            get_priority = [](size_t i) { return i; };
+            get_priority = [](size_t i) { return Priority{static_cast<Int64>(i)}; };
             break;
         case LoadBalancing::RANDOM:
             break;
         case LoadBalancing::FIRST_OR_RANDOM:
-            get_priority = [offset](size_t i) -> size_t { return i != offset; };
+            get_priority = [offset](size_t i) { return i != offset ? Priority{1} : Priority{0}; };
             break;
         case LoadBalancing::ROUND_ROBIN:
             if (last_used >= pool_size)
@@ -38,8 +39,8 @@ std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc
              * */
             get_priority = [this, pool_size](size_t i)
             {
-                ++i;
-                return i < last_used ? pool_size - i : i - last_used;
+                ++i; // To make `i` indexing start with 1 instead of 0 as `last_used` does
+                return Priority{static_cast<Int64>(i < last_used ? pool_size - i : i - last_used)};
             };
             break;
     }
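These priority functors back the user-facing `load_balancing` setting; per the `GetPriorityFunc` contract, pools with lower values are tried first. A hedged sketch of the corresponding user-visible knob (`dist_table` is a hypothetical Distributed table):

```sql
SET load_balancing = 'first_or_random';  -- the replica at `offset` gets Priority{0}, all others Priority{1}
SELECT count() FROM dist_table;
```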
@@ -21,7 +21,7 @@ public:
         return !(*this == other);
     }

-    std::function<size_t(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;
+    std::function<Priority(size_t index)> getPriorityFunc(LoadBalancing load_balance, size_t offset, size_t pool_size) const;

     std::vector<size_t> hostname_differences; /// Distances from name of this host to the names of hosts of pools.
@@ -13,6 +13,7 @@
 #include <Common/NetException.h>
 #include <Common/Exception.h>
 #include <Common/randomSeed.h>
+#include <Common/Priority.h>

 namespace DB
@@ -34,7 +35,7 @@ namespace ProfileEvents
 /// This class provides a pool with fault tolerance. It is used for pooling of connections to replicated DB.
 /// Initialized by several PoolBase objects.
 /// When a connection is requested, tries to create or choose an alive connection from one of the nested pools.
-/// Pools are tried in the order consistent with lexicographical order of (error count, priority, random number) tuples.
+/// Pools are tried in the order consistent with lexicographical order of (error count, slowdown count, config priority, priority, random number) tuples.
 /// Number of tries for a single pool is limited by max_tries parameter.
 /// The client can set nested pool priority by passing a GetPriority functor.
 ///
@@ -113,7 +114,7 @@ public:

     /// The client can provide this functor to affect load balancing - the index of a pool is passed to
     /// this functor. The pools with lower result value will be tried first.
-    using GetPriorityFunc = std::function<size_t(size_t index)>;
+    using GetPriorityFunc = std::function<Priority(size_t index)>;

     /// Returns at least min_entries and at most max_entries connections (at most one connection per nested pool).
     /// The method will throw if it is unable to get min_entries alive connections or
@@ -336,9 +337,9 @@ struct PoolWithFailoverBase<TNestedPool>::PoolState
     /// The number of slowdowns that led to changing replica in HedgedRequestsFactory
     UInt64 slowdown_count = 0;
     /// Priority from the <remote_server> configuration.
-    Int64 config_priority = 1;
+    Priority config_priority{1};
     /// Priority from the GetPriorityFunc.
-    Int64 priority = 0;
+    Priority priority{0};
     UInt64 random = 0;

     void randomize()
@@ -381,11 +381,25 @@ The server successfully detected this situation and will download merged part fr
     M(CachedReadBufferReadFromCacheBytes, "Bytes read from filesystem cache") \
     M(CachedReadBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
     M(CachedReadBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
+    M(CachedReadBufferCreateBufferMicroseconds, "Prepare buffer time") \
     M(CachedWriteBufferCacheWriteBytes, "Bytes written from source (remote fs, etc) to filesystem cache") \
     M(CachedWriteBufferCacheWriteMicroseconds, "Time spent writing data into filesystem cache") \
     \
     M(FilesystemCacheEvictedBytes, "Number of bytes evicted from filesystem cache") \
     M(FilesystemCacheEvictedFileSegments, "Number of file segments evicted from filesystem cache") \
+    M(FilesystemCacheLockKeyMicroseconds, "Lock cache key time") \
+    M(FilesystemCacheLockMetadataMicroseconds, "Lock filesystem cache metadata time") \
+    M(FilesystemCacheLockCacheMicroseconds, "Lock filesystem cache time") \
+    M(FilesystemCacheReserveMicroseconds, "Filesystem cache space reservation time") \
+    M(FilesystemCacheGetOrSetMicroseconds, "Filesystem cache getOrSet() time") \
+    M(FilesystemCacheGetMicroseconds, "Filesystem cache get() time") \
+    M(FileSegmentWaitMicroseconds, "Wait on DOWNLOADING state") \
+    M(FileSegmentCompleteMicroseconds, "Duration of FileSegment::complete() in filesystem cache") \
+    M(FileSegmentLockMicroseconds, "Lock file segment time") \
+    M(FileSegmentWriteMicroseconds, "File segment write() time") \
+    M(FileSegmentUseMicroseconds, "File segment use() time") \
+    M(FileSegmentRemoveMicroseconds, "File segment remove() time") \
+    M(FileSegmentHolderCompleteMicroseconds, "File segments holder complete() time") \
     \
     M(RemoteFSSeeks, "Total number of seeks for async buffer") \
     M(RemoteFSPrefetches, "Number of prefetches made with asynchronous reading from remote filesystem") \
@ -407,7 +421,6 @@ The server successfully detected this situation and will download merged part fr
|
||||
\
|
||||
M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \
|
||||
M(FileSegmentReadMicroseconds, "Metric per file segment. Time spend reading from file") \
|
||||
M(FileSegmentWriteMicroseconds, "Metric per file segment. Time spend writing cache") \
|
||||
M(FileSegmentCacheWriteMicroseconds, "Metric per file segment. Time spend writing data to cache") \
|
||||
M(FileSegmentPredownloadMicroseconds, "Metric per file segment. Time spent predownloading data to cache (predownloading - finishing file segment download (after someone who failed to do that) up to the point current thread was requested to do)") \
|
||||
M(FileSegmentUsedBytes, "Metric per file segment. How many bytes were actually used from current file segment") \
|
||||
|
@ -179,7 +179,7 @@ ZooKeeper::ZooKeeper(const Poco::Util::AbstractConfiguration & config, const std

std::vector<ShuffleHost> ZooKeeper::shuffleHosts() const
{
    std::function<size_t(size_t index)> get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size());
    std::function<Priority(size_t index)> get_priority = args.get_priority_load_balancing.getPriorityFunc(args.get_priority_load_balancing.load_balancing, 0, args.hosts.size());
    std::vector<ShuffleHost> shuffle_hosts;
    for (size_t i = 0; i < args.hosts.size(); ++i)
    {

@ -49,7 +49,7 @@ constexpr size_t MULTI_BATCH_SIZE = 100;
struct ShuffleHost
{
    String host;
    Int64 priority = 0;
    Priority priority;
    UInt64 random = 0;

    void randomize()
@ -526,6 +526,7 @@ public:
    String getConnectedZooKeeperHost() const { return connected_zk_host; }
    UInt16 getConnectedZooKeeperPort() const { return connected_zk_port; }
    size_t getConnectedZooKeeperIndex() const { return connected_zk_index; }
    UInt64 getConnectedTime() const { return connected_time; }

private:
    void init(ZooKeeperArgs args_);
@ -593,6 +594,7 @@ private:
    String connected_zk_host;
    UInt16 connected_zk_port;
    size_t connected_zk_index;
    UInt64 connected_time = timeInSeconds(std::chrono::system_clock::now());

    std::mutex mutex;
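As a rough illustration of how such shuffled hosts end up ordered (assuming `Priority` compares by its wrapped value; the struct and sort below are a sketch, not the commit's code):

    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <tuple>
    #include <vector>

    struct Priority { int64_t value = 0; };                        // assumed stand-in
    struct ShuffleHost { std::string host; Priority priority; uint64_t random = 0; };

    // Lower priority wins; the randomized value breaks ties between equals.
    void orderHosts(std::vector<ShuffleHost> & hosts)
    {
        std::sort(hosts.begin(), hosts.end(), [](const auto & a, const auto & b)
        {
            return std::tie(a.priority.value, a.random) < std::tie(b.priority.value, b.random);
        });
    }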
@ -1,5 +1,7 @@
#pragma once

#include <string>
#include <functional>

namespace DB
{
@ -8,6 +8,7 @@
#include <Poco/Logger.h>
#include <Common/logger_useful.h>
#include "libaccel_config.h"
#include <Common/MemorySanitizer.h>

namespace DB
{
@ -382,6 +383,11 @@ UInt32 CompressionCodecDeflateQpl::getMaxCompressedDataSize(UInt32 uncompressed_

UInt32 CompressionCodecDeflateQpl::doCompressData(const char * source, UInt32 source_size, char * dest) const
{
    /// The QPL library uses AVX-512 with some shuffle operations.
    /// Memory sanitizer doesn't understand that uninitialized memory in a SIMD register is harmless if it is not used in the result of the shuffle.
#if defined(MEMORY_SANITIZER)
    __msan_unpoison(dest, getMaxCompressedDataSize(source_size));
#endif
    Int32 res = HardwareCodecDeflateQpl::RET_ERROR;
    if (DeflateQplJobHWPool::instance().isJobPoolReady())
        res = hw_codec->doCompressData(source, source_size, dest, getMaxCompressedDataSize(source_size));
@ -392,6 +398,11 @@ UInt32 CompressionCodecDeflateQpl::doCompressData(const char * source, UInt32 so

void CompressionCodecDeflateQpl::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const
{
    /// The QPL library uses AVX-512 with some shuffle operations.
    /// Memory sanitizer doesn't understand that uninitialized memory in a SIMD register is harmless if it is not used in the result of the shuffle.
#if defined(MEMORY_SANITIZER)
    __msan_unpoison(dest, uncompressed_size);
#endif
    switch (getDecompressMode())
    {
        case CodecMode::Synchronous:

@ -98,7 +98,7 @@ public:
protected:
    bool isCompression() const override { return true; }
    bool isGenericCompression() const override { return true; }
    bool isExperimental() const override { return true; }
    bool isDeflateQpl() const override { return true; }

    UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const override;
    void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const override;
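The `__msan_unpoison` calls above follow a general pattern: when SIMD code initializes a buffer in a way MemorySanitizer cannot track, the buffer is explicitly marked as initialized before use. A self-contained sketch of that pattern (the `simd_fill` routine is a hypothetical stand-in):

    #include <cstddef>
    #if defined(MEMORY_SANITIZER)
    #include <sanitizer/msan_interface.h>   // declares __msan_unpoison
    #endif

    void simd_fill(char * dest, size_t size);   // hypothetical routine MSan cannot see through

    void fillChecked(char * dest, size_t size)
    {
    #if defined(MEMORY_SANITIZER)
        /// Declare the whole output range initialized, so later reads of `dest`
        /// are not reported as uses of uninitialized memory.
        __msan_unpoison(dest, size);
    #endif
        simd_fill(dest, size);
    }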
@ -40,10 +40,10 @@ public:
    CompressionCodecPtr getDefaultCodec() const;

    /// Validate codecs AST specified by user and parse codecs description (substitute default parameters)
    ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs) const;
    ASTPtr validateCodecAndGetPreprocessedAST(const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const;

    /// Validate codecs AST specified by user
    void validateCodec(const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs) const;
    void validateCodec(const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const;

    /// Get codec by AST and possible column_type. Some codecs can use
    /// information about type to improve inner settings, but every codec should
@ -34,7 +34,7 @@ namespace ErrorCodes


void CompressionCodecFactory::validateCodec(
    const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs) const
    const String & family_name, std::optional<int> level, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const
{
    if (family_name.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Compression codec name cannot be empty");
@ -43,13 +43,13 @@ void CompressionCodecFactory::validateCodec(
    {
        auto literal = std::make_shared<ASTLiteral>(static_cast<UInt64>(*level));
        validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", makeASTFunction(Poco::toUpper(family_name), literal)),
            {}, sanity_check, allow_experimental_codecs);
            {}, sanity_check, allow_experimental_codecs, enable_deflate_qpl_codec);
    }
    else
    {
        auto identifier = std::make_shared<ASTIdentifier>(Poco::toUpper(family_name));
        validateCodecAndGetPreprocessedAST(makeASTFunction("CODEC", identifier),
            {}, sanity_check, allow_experimental_codecs);
            {}, sanity_check, allow_experimental_codecs, enable_deflate_qpl_codec);
    }
}

@ -77,7 +77,7 @@ bool innerDataTypeIsFloat(const DataTypePtr & type)
}

ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
    const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs) const
    const ASTPtr & ast, const DataTypePtr & column_type, bool sanity_check, bool allow_experimental_codecs, bool enable_deflate_qpl_codec) const
{
    if (const auto * func = ast->as<ASTFunction>())
    {
@ -159,6 +159,12 @@ ASTPtr CompressionCodecFactory::validateCodecAndGetPreprocessedAST(
                " You can enable it with the 'allow_experimental_codecs' setting.",
                codec_family_name);

            if (!enable_deflate_qpl_codec && result_codec->isDeflateQpl())
                throw Exception(ErrorCodes::BAD_ARGUMENTS,
                    "Codec {} is disabled by default."
                    " You can enable it with the 'enable_deflate_qpl_codec' setting.",
                    codec_family_name);

            codecs_descriptions->children.emplace_back(result_codec->getCodecDesc());
        }
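The new flag follows the same opt-in pattern as `allow_experimental_codecs`: a boolean from the query settings is threaded down into validation. A hypothetical call site (the `settings` object is illustrative):

    // Illustration only: `settings` stands for the caller's resolved query settings.
    factory.validateCodec(
        "DEFLATE_QPL",
        /* level = */ std::nullopt,
        settings.allow_suspicious_codecs,
        settings.allow_experimental_codecs,
        settings.enable_deflate_qpl_codec);   // throws BAD_ARGUMENTS when the codec is disabled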
@ -109,6 +109,9 @@ public:
    /// It is not allowed to be used unless the user turns off the safety switch.
    virtual bool isExperimental() const { return false; }

    /// Is this the DEFLATE_QPL codec?
    virtual bool isDeflateQpl() const { return false; }

    /// If it does nothing.
    virtual bool isNone() const { return false; }
@ -2,6 +2,8 @@

#include <Core/SettingsFields.h>
#include <Common/SettingsChanges.h>
#include <Common/FieldVisitorToString.h>
#include <IO/Operators.h>
#include <base/range.h>
#include <boost/blank.hpp>
#include <unordered_map>
@ -547,14 +549,16 @@ void BaseSettings<TTraits>::read(ReadBuffer & in, SettingsWriteFormat format)
template <typename TTraits>
String BaseSettings<TTraits>::toString() const
{
    String res;
    for (const auto & field : *this)
    WriteBufferFromOwnString out;
    bool first = true;
    for (const auto & setting : *this)
    {
        if (!res.empty())
            res += ", ";
        res += field.getName() + " = " + field.getValueString();
        if (!first)
            out << ", ";
        out << setting.getName() << " = " << applyVisitor(FieldVisitorToString(), setting.getValue());
        first = false;
    }
    return res;
    return out.str();
}

template <typename TTraits>
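The rewritten `toString()` is the classic first-element comma join, writing into a buffer instead of repeatedly concatenating strings. The same shape in plain standard C++, as a sketch independent of the ClickHouse types:

    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    // Emits the separator before every element except the first.
    std::string settingsToString(const std::vector<std::pair<std::string, std::string>> & settings)
    {
        std::ostringstream out;
        bool first = true;
        for (const auto & [name, value] : settings)
        {
            if (!first)
                out << ", ";
            out << name << " = " << value;
            first = false;
        }
        return out.str();   // e.g. "max_threads = 8, readonly = 1"
    }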
@ -53,7 +53,7 @@
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing in common with VERSION_REVISION;
/// the latter is just a number for the server version (one number instead of a commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
#define DBMS_TCP_PROTOCOL_VERSION 54462
#define DBMS_TCP_PROTOCOL_VERSION 54463

#define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449

@ -73,3 +73,5 @@
#define DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES 54461

#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 54462

#define DBMS_MIN_PROTOCOL_VERSION_WITH_TOTAL_BYTES_IN_PROGRESS 54463
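Such `DBMS_MIN_PROTOCOL_VERSION_WITH_*` constants are typically consumed by gating wire-format features on the negotiated peer revision; a hedged sketch (the `client_revision` variable is illustrative):

    // Only send the new progress field to peers that advertised a new enough revision.
    if (client_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_TOTAL_BYTES_IN_PROGRESS)
    {
        /// ... write the additional total-bytes progress counter to the socket ...
    }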
@ -127,7 +127,7 @@ namespace fmt
    template <typename FormatContext>
    auto format(const DB::QualifiedTableName & name, FormatContext & ctx)
    {
        return format_to(ctx.out(), "{}.{}", DB::backQuoteIfNeed(name.database), DB::backQuoteIfNeed(name.table));
        return fmt::format_to(ctx.out(), "{}.{}", DB::backQuoteIfNeed(name.database), DB::backQuoteIfNeed(name.table));
    }
};
}
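With a `fmt::formatter` specialization like the one fixed above, a qualified table name can be passed straight to fmt. A self-contained analog using a stand-in struct (illustration, not the ClickHouse type):

    #include <fmt/format.h>
    #include <string>

    struct TableName { std::string database, table; };   // stand-in for DB::QualifiedTableName

    template <> struct fmt::formatter<TableName> : fmt::formatter<std::string>
    {
        template <typename FormatContext>
        auto format(const TableName & name, FormatContext & ctx) const
        {
            /// Qualify the call as fmt::format_to, exactly as the fixed line does,
            /// so ADL cannot pick up an unrelated format_to overload.
            return fmt::format_to(ctx.out(), "{}.{}", name.database, name.table);
        }
    };

    // fmt::format("reading from {}", TableName{"db", "events"}) == "reading from db.events"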
@ -199,6 +199,7 @@ class IColumn;
    M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \
    \
    M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \
    M(Bool, use_mysql_types_in_show_columns, false, "Use MySQL converted types when connected via MySQL compatibility for show columns query", 0) \
    \
    M(UInt64, optimize_min_equality_disjunction_chain_length, 3, "The minimum length of the expression `expr = x1 OR ... expr = xN` for optimization ", 0) \
    \
@ -327,6 +328,7 @@ class IColumn;
    M(Bool, allow_distributed_ddl, true, "If it is set to true, then a user is allowed to execute distributed DDL queries.", 0) \
    M(Bool, allow_suspicious_codecs, false, "If it is set to true, allow to specify meaningless compression codecs.", 0) \
    M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \
    M(Bool, enable_deflate_qpl_codec, false, "Enable/disable the DEFLATE_QPL codec.", 0) \
    M(UInt64, query_profiler_real_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for real clock timer of query profiler (in nanoseconds). Set 0 value to turn off the real clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
    M(UInt64, query_profiler_cpu_time_period_ns, QUERY_PROFILER_DEFAULT_SAMPLE_RATE_NS, "Period for CPU clock timer of query profiler (in nanoseconds). Set 0 value to turn off the CPU clock query profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
    M(Bool, metrics_perf_events_enabled, false, "If enabled, some of the perf events will be measured throughout queries' execution.", 0) \
src/Core/ValueWithType.h (new file, 26 lines)
@ -0,0 +1,26 @@
#pragma once

#include <DataTypes/IDataType.h>

namespace llvm
{
class Value;
}

namespace DB
{

/// LLVM value with its data type
struct ValueWithType
{
    llvm::Value * value = nullptr;
    DataTypePtr type;

    ValueWithType() = default;
    ValueWithType(llvm::Value * value_, DataTypePtr type_)
        : value(value_)
        , type(std::move(type_))
    {}
};

}
src/Core/ValuesWithType.h (new file, 13 lines)
@ -0,0 +1,13 @@
#pragma once

#include <vector>

#include <Core/ValueWithType.h>


namespace DB
{

using ValuesWithType = std::vector<ValueWithType>;

}
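A small sketch of how the two new aliases compose during JIT codegen; `collectArgs` is hypothetical, and the null `llvm::Value` is used only to keep the snippet self-contained:

    #include <Core/ValuesWithType.h>

    /// Gather (value, type) pairs produced during code generation.
    DB::ValuesWithType collectArgs(const DB::DataTypes & types)
    {
        DB::ValuesWithType arguments;
        for (const auto & type : types)
            arguments.emplace_back(nullptr /* llvm::Value * from codegen */, type);
        return arguments;
    }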
@ -54,6 +54,7 @@
#include <Common/Elf.h>
#include <Common/setThreadName.h>
#include <Common/logger_useful.h>
#include <Interpreters/Context.h>
#include <filesystem>

#include <Loggers/OwnFormattingChannel.h>
@ -80,7 +81,9 @@ namespace DB
}
}

DB::PipeFDs signal_pipe;
using namespace DB;

PipeFDs signal_pipe;


/** Reset signal handler to the default and send signal to itself.
@ -89,10 +92,10 @@ DB::PipeFDs signal_pipe;
static void call_default_signal_handler(int sig)
{
    if (SIG_ERR == signal(sig, SIG_DFL))
        DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
        throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);

    if (0 != raise(sig))
        DB::throwFromErrno("Cannot send signal.", DB::ErrorCodes::CANNOT_SEND_SIGNAL);
        throwFromErrno("Cannot send signal.", ErrorCodes::CANNOT_SEND_SIGNAL);
}

static const size_t signal_pipe_buf_size =
@ -110,8 +113,8 @@ static void writeSignalIDtoSignalPipe(int sig)
    auto saved_errno = errno;   /// We must restore previous value of errno in signal handler.

    char buf[signal_pipe_buf_size];
    DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
    DB::writeBinary(sig, out);
    WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
    writeBinary(sig, out);
    out.next();

    errno = saved_errno;
@ -141,17 +144,17 @@ static void signalHandler(int sig, siginfo_t * info, void * context)
    auto saved_errno = errno;   /// We must restore previous value of errno in signal handler.

    char buf[signal_pipe_buf_size];
    DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
    WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);

    const ucontext_t * signal_context = reinterpret_cast<ucontext_t *>(context);
    const StackTrace stack_trace(*signal_context);

    DB::writeBinary(sig, out);
    DB::writePODBinary(*info, out);
    DB::writePODBinary(signal_context, out);
    DB::writePODBinary(stack_trace, out);
    DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
    DB::writePODBinary(DB::current_thread, out);
    writeBinary(sig, out);
    writePODBinary(*info, out);
    writePODBinary(signal_context, out);
    writePODBinary(stack_trace, out);
    writeBinary(static_cast<UInt32>(getThreadId()), out);
    writePODBinary(current_thread, out);

    out.next();

@ -203,12 +206,12 @@ public:
        static_assert(PIPE_BUF >= 512);
        static_assert(signal_pipe_buf_size <= PIPE_BUF, "Only write of PIPE_BUF to pipe is atomic and the minimal known PIPE_BUF across supported platforms is 512");
        char buf[signal_pipe_buf_size];
        DB::ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);
        ReadBufferFromFileDescriptor in(signal_pipe.fds_rw[0], signal_pipe_buf_size, buf);

        while (!in.eof())
        {
            int sig = 0;
            DB::readBinary(sig, in);
            readBinary(sig, in);
            // We may log some specific signals afterwards, with different log
            // levels and more info, but for completeness we log all signals
            // here at trace level.
@ -231,8 +234,8 @@ public:
            UInt32 thread_num;
            std::string message;

            DB::readBinary(thread_num, in);
            DB::readBinary(message, in);
            readBinary(thread_num, in);
            readBinary(message, in);

            onTerminate(message, thread_num);
        }
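The code above relies on a classic trick: a signal handler may only do async-signal-safe work, so it serializes a small fixed-size record into a pipe and lets a dedicated listener thread do the unsafe part (logging, symbolization). Writes of at most PIPE_BUF bytes are atomic, which is what the static_assert guards. A stripped-down sketch of the same pattern in raw POSIX:

    #include <cerrno>
    #include <unistd.h>

    static int pipe_write_fd = -1;   // write end of a pipe created at startup

    // Async-signal-safe: one small atomic write; errno is restored afterwards.
    extern "C" void crashSignalHandler(int sig)
    {
        int saved_errno = errno;
        (void)write(pipe_write_fd, &sig, sizeof(sig));
        errno = saved_errno;
    }

    // A normal thread reads the records and may safely log, allocate, etc.
    void listenLoop(int pipe_read_fd)
    {
        int sig = 0;
        while (read(pipe_read_fd, &sig, sizeof(sig)) == sizeof(sig))
        {
            /// handle the reported signal outside of signal context
        }
    }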
@ -248,17 +251,17 @@ public:
            ucontext_t * context{};
            StackTrace stack_trace(NoCapture{});
            UInt32 thread_num{};
            DB::ThreadStatus * thread_ptr{};
            ThreadStatus * thread_ptr{};

            if (sig != SanitizerTrap)
            {
                DB::readPODBinary(info, in);
                DB::readPODBinary(context, in);
                readPODBinary(info, in);
                readPODBinary(context, in);
            }

            DB::readPODBinary(stack_trace, in);
            DB::readBinary(thread_num, in);
            DB::readPODBinary(thread_ptr, in);
            readPODBinary(stack_trace, in);
            readBinary(thread_num, in);
            readPODBinary(thread_ptr, in);

            /// This allows receiving more signals if a failure happens inside the onFault function.
            /// Example: segfault while symbolizing stack trace.
@ -298,9 +301,9 @@ private:
        ucontext_t * context,
        const StackTrace & stack_trace,
        UInt32 thread_num,
        DB::ThreadStatus * thread_ptr) const
        ThreadStatus * thread_ptr) const
    {
        DB::ThreadStatus thread_status;
        ThreadStatus thread_status;

        String query_id;
        String query;
@ -314,7 +317,7 @@ private:

            if (auto logs_queue = thread_ptr->getInternalTextLogsQueue())
            {
                DB::CurrentThread::attachInternalTextLogsQueue(logs_queue, DB::LogsLevel::trace);
                CurrentThread::attachInternalTextLogsQueue(logs_queue, LogsLevel::trace);
            }
        }

@ -358,12 +361,12 @@ private:
        /// NOTE: This still requires memory allocations and a mutex lock inside the logger.
        /// BTW we can also print it to stderr using write syscalls.

        DB::WriteBufferFromOwnString bare_stacktrace;
        DB::writeString("Stack trace:", bare_stacktrace);
        WriteBufferFromOwnString bare_stacktrace;
        writeString("Stack trace:", bare_stacktrace);
        for (size_t i = stack_trace.getOffset(); i < stack_trace.getSize(); ++i)
        {
            DB::writeChar(' ', bare_stacktrace);
            DB::writePointerHex(stack_trace.getFramePointers()[i], bare_stacktrace);
            writeChar(' ', bare_stacktrace);
            writePointerHex(stack_trace.getFramePointers()[i], bare_stacktrace);
        }

        LOG_FATAL(log, fmt::runtime(bare_stacktrace.str()));
@ -411,8 +414,48 @@ private:

        /// Send crash report to developers (if configured)
        if (sig != SanitizerTrap)
        {
            SentryWriter::onFault(sig, error_message, stack_trace);

            /// Advise the user to send it manually.
            if constexpr (std::string_view(VERSION_OFFICIAL).contains("official build"))
            {
                const auto & date_lut = DateLUT::instance();

                /// Approximate support period, upper bound.
                if (time(nullptr) - date_lut.makeDate(2000 + VERSION_MAJOR, VERSION_MINOR, 1) < (365 + 30) * 86400)
                {
                    LOG_FATAL(log, "Report this error to https://github.com/ClickHouse/ClickHouse/issues");
                }
                else
                {
                    LOG_FATAL(log, "ClickHouse version {} is old and should be upgraded to the latest version.", VERSION_STRING);
                }
            }
            else
            {
                LOG_FATAL(log, "This ClickHouse version is not official and should be upgraded to the official build.");
            }
        }

        /// ClickHouse Keeper does not link to some part of Settings.
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
        /// List changed settings.
        if (!query_id.empty())
        {
            ContextPtr query_context = thread_ptr->getQueryContext();
            if (query_context)
            {
                String changed_settings = query_context->getSettingsRef().toString();

                if (changed_settings.empty())
                    LOG_FATAL(log, "No settings were changed");
                else
                    LOG_FATAL(log, "Changed settings: {}", changed_settings);
            }
        }
#endif

        /// When everything is done, we will try to send these error messages to client.
        if (thread_ptr)
            thread_ptr->onFatalError();
@ -436,15 +479,15 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
    /// Also need to send data via pipe. Otherwise it may lead to deadlocks or failures in printing diagnostic info.

    char buf[signal_pipe_buf_size];
    DB::WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);
    WriteBufferFromFileDescriptorDiscardOnFailure out(signal_pipe.fds_rw[1], signal_pipe_buf_size, buf);

    const StackTrace stack_trace;

    int sig = SignalListener::SanitizerTrap;
    DB::writeBinary(sig, out);
    DB::writePODBinary(stack_trace, out);
    DB::writeBinary(UInt32(getThreadId()), out);
    DB::writePODBinary(DB::current_thread, out);
    writeBinary(sig, out);
    writePODBinary(stack_trace, out);
    writeBinary(UInt32(getThreadId()), out);
    writePODBinary(current_thread, out);

    out.next();

@ -470,7 +513,7 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
    std::string log_message;

    if (std::current_exception())
        log_message = "Terminate called for uncaught exception:\n" + DB::getCurrentExceptionMessage(true);
        log_message = "Terminate called for uncaught exception:\n" + getCurrentExceptionMessage(true);
    else
        log_message = "Terminate called without an active exception";

@ -482,11 +525,11 @@ static DISABLE_SANITIZER_INSTRUMENTATION void sanitizerDeathCallback()
        log_message.resize(buf_size - 16);

    char buf[buf_size];
    DB::WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);
    WriteBufferFromFileDescriptor out(signal_pipe.fds_rw[1], buf_size, buf);

    DB::writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
    DB::writeBinary(static_cast<UInt32>(getThreadId()), out);
    DB::writeBinary(log_message, out);
    writeBinary(static_cast<int>(SignalListener::StdTerminate), out);
    writeBinary(static_cast<UInt32>(getThreadId()), out);
    writeBinary(log_message, out);
    out.next();

    abort();
@ -512,7 +555,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path
    }
    catch (...)
    {
        LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, DB::getCurrentExceptionMessage(true));
        LOG_WARNING(logger, "{}: when creating {}, {}", __PRETTY_FUNCTION__, path, getCurrentExceptionMessage(true));
    }
    return false;
}
@ -527,7 +570,7 @@ void BaseDaemon::reloadConfiguration()
     * (It's convenient to log in console when you start server without any command line parameters.)
     */
    config_path = config().getString("config-file", getDefaultConfigFileName());
    DB::ConfigProcessor config_processor(config_path, false, true);
    ConfigProcessor config_processor(config_path, false, true);
    config_processor.setConfigPath(fs::path(config_path).parent_path());
    loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);

@ -548,7 +591,7 @@ BaseDaemon::~BaseDaemon()
    /// Reset signals to SIG_DFL to avoid trying to write to the signal_pipe that will be closed after.
    for (int sig : handled_signals)
        if (SIG_ERR == signal(sig, SIG_DFL))
            DB::throwFromErrno("Cannot set signal handler.", DB::ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
            throwFromErrno("Cannot set signal handler.", ErrorCodes::CANNOT_SET_SIGNAL_HANDLER);
    signal_pipe.close();
}

@ -592,7 +635,7 @@ void BaseDaemon::closeFDs()
    /// Iterate directory separately from closing fds to avoid closing iterated directory fd.
    std::vector<int> fds;
    for (const auto & path : fs::directory_iterator(proc_path))
        fds.push_back(DB::parse<int>(path.path().filename()));
        fds.push_back(parse<int>(path.path().filename()));

    for (const auto & fd : fds)
    {
@ -662,7 +705,7 @@ void BaseDaemon::initialize(Application & self)
    }
    umask(umask_num);

    DB::ConfigProcessor(config_path).savePreprocessedConfig(loaded_config, "");
    ConfigProcessor(config_path).savePreprocessedConfig(loaded_config, "");

    /// Write core dump on crash.
    {
@ -713,12 +756,12 @@ void BaseDaemon::initialize(Application & self)
    /// {
    ///     try
    ///     {
    ///         DB::SomeApp app;
    ///         SomeApp app;
    ///         return app.run(argc, argv);
    ///     }
    ///     catch (...)
    ///     {
    ///         std::cerr << DB::getCurrentExceptionMessage(true) << "\n";
    ///         std::cerr << getCurrentExceptionMessage(true) << "\n";
    ///         return 1;
    ///     }
    /// }
@ -772,7 +815,7 @@ void BaseDaemon::initialize(Application & self)

    /// Create pid file.
    if (config().has("pid"))
        pid_file.emplace(config().getString("pid"), DB::StatusFile::write_pid);
        pid_file.emplace(config().getString("pid"), StatusFile::write_pid);

    if (is_daemon)
    {
@ -799,7 +842,7 @@ void BaseDaemon::initialize(Application & self)
    initializeTerminationAndSignalProcessing();
    logRevision();

    for (const auto & key : DB::getMultipleKeysFromConfig(config(), "", "graphite"))
    for (const auto & key : getMultipleKeysFromConfig(config(), "", "graphite"))
    {
        graphite_writers.emplace(key, std::make_unique<GraphiteWriter>(key));
    }
@ -887,7 +930,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
    signal_listener_thread.start(*signal_listener);

#if defined(__ELF__) && !defined(OS_FREEBSD)
    String build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
    String build_id_hex = SymbolIndex::instance()->getBuildIDHex();
    if (build_id_hex.empty())
        build_id = "";
    else
@ -902,7 +945,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
    std::string executable_path = getExecutablePath();

    if (!executable_path.empty())
        stored_binary_hash = DB::Elf(executable_path).getStoredBinaryHash();
        stored_binary_hash = Elf(executable_path).getStoredBinaryHash();
#endif
}

@ -963,7 +1006,7 @@ void BaseDaemon::handleSignal(int signal_id)
        onInterruptSignals(signal_id);
    }
    else
        throw DB::Exception::createDeprecated(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
        throw Exception::createDeprecated(std::string("Unsupported signal: ") + strsignal(signal_id), 0); // NOLINT(concurrency-mt-unsafe) // it is not thread-safe but ok in this context
}

void BaseDaemon::onInterruptSignals(int signal_id)
@ -1020,7 +1063,7 @@ void BaseDaemon::setupWatchdog()
    pid = fork();

    if (-1 == pid)
        DB::throwFromErrno("Cannot fork", DB::ErrorCodes::SYSTEM_ERROR);
        throwFromErrno("Cannot fork", ErrorCodes::SYSTEM_ERROR);

    if (0 == pid)
    {
@ -1073,13 +1116,13 @@ void BaseDaemon::setupWatchdog()
            pf = new OwnJSONPatternFormatter(config());
        else
            pf = new OwnPatternFormatter;
        Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
        Poco::AutoPtr<OwnFormattingChannel> log = new OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
        logger().setChannel(log);
    }

    /// Concurrent writing of logs to the same file from two threads is questionable on its own,
    /// but rotating them from two threads is disastrous.
    if (auto * channel = dynamic_cast<DB::OwnSplitChannel *>(logger().getChannel()))
    if (auto * channel = dynamic_cast<OwnSplitChannel *>(logger().getChannel()))
    {
        channel->setChannelProperty("log", Poco::FileChannel::PROP_ROTATION, "never");
        channel->setChannelProperty("log", Poco::FileChannel::PROP_ROTATEONOPEN, "false");
@ -1191,7 +1234,7 @@ void systemdNotify(const std::string_view & command)
    int s = socket(AF_UNIX, SOCK_DGRAM | SOCK_CLOEXEC, 0);

    if (s == -1)
        DB::throwFromErrno("Can't create UNIX socket for systemd notify.", DB::ErrorCodes::SYSTEM_ERROR);
        throwFromErrno("Can't create UNIX socket for systemd notify.", ErrorCodes::SYSTEM_ERROR);

    SCOPE_EXIT({ close(s); });

@ -1202,7 +1245,7 @@ void systemdNotify(const std::string_view & command)
    addr.sun_family = AF_UNIX;

    if (len < 2 || len > sizeof(addr.sun_path) - 1)
        throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);
        throw Exception(ErrorCodes::SYSTEM_ERROR, "NOTIFY_SOCKET env var value \"{}\" is wrong.", path);

    memcpy(addr.sun_path, path, len + 1); /// write last zero as well.

@ -1214,7 +1257,7 @@ void systemdNotify(const std::string_view & command)
    else if (path[0] == '/')
        addrlen += 1; /// non-abstract-addresses should be zero terminated.
    else
        throw DB::Exception(DB::ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);
        throw Exception(ErrorCodes::SYSTEM_ERROR, "Wrong UNIX path \"{}\" in NOTIFY_SOCKET env var", path);

    const struct sockaddr * sock_addr = reinterpret_cast<const struct sockaddr *>(&addr);

@ -1227,7 +1270,7 @@ void systemdNotify(const std::string_view & command)
        if (errno == EINTR)
            continue;
        else
            DB::throwFromErrno("Failed to notify systemd, sendto returned error.", DB::ErrorCodes::SYSTEM_ERROR);
            throwFromErrno("Failed to notify systemd, sendto returned error.", ErrorCodes::SYSTEM_ERROR);
    }
    else
        sent_bytes_total += sent_bytes;
@ -45,6 +45,7 @@ public:
    String doGetName() const override;
    String getNameWithoutVersion() const;
    const char * getFamilyName() const override { return "AggregateFunction"; }
    String getSQLCompatibleName() const override { return "TEXT"; }
    TypeIndex getTypeId() const override { return TypeIndex::AggregateFunction; }

    Array getParameters() const { return parameters; }
@ -30,6 +30,10 @@ public:
    {
        return "Array";
    }
    String getSQLCompatibleName() const override
    {
        return "TEXT";
    }

    bool canBeInsideNullable() const override
    {
@ -13,6 +13,7 @@ public:

    TypeIndex getTypeId() const override { return TypeIndex::Date; }
    const char * getFamilyName() const override { return family_name; }
    String getSQLCompatibleName() const override { return "DATE"; }

    bool canBeUsedAsVersion() const override { return true; }
    bool canBeInsideNullable() const override { return true; }
@ -13,6 +13,7 @@ public:

    TypeIndex getTypeId() const override { return TypeIndex::Date32; }
    const char * getFamilyName() const override { return family_name; }
    String getSQLCompatibleName() const override { return "DATE"; }

    Field getDefault() const override
    {
@ -36,6 +36,7 @@ public:
    static constexpr auto family_name = "DateTime";

    const char * getFamilyName() const override { return family_name; }
    String getSQLCompatibleName() const override { return "DATETIME"; }
    String doGetName() const override;
    TypeIndex getTypeId() const override { return TypeIndex::DateTime; }

@ -28,6 +28,7 @@ public:
    DataTypeDateTime64(UInt32 scale_, const TimezoneMixin & time_zone_info);

    const char * getFamilyName() const override { return family_name; }
    String getSQLCompatibleName() const override { return "DATETIME"; }
    std::string doGetName() const override;
    TypeIndex getTypeId() const override { return type_id; }

@ -37,6 +38,8 @@ public:

    bool canBeUsedAsVersion() const override { return true; }

    bool isSummable() const override { return false; }

protected:
    SerializationPtr doGetDefaultSerialization() const override;
};
@ -36,6 +36,29 @@ const char * DataTypeEnum<Type>::getFamilyName() const
    return EnumName<FieldType>::value;
}

template <typename Type>
std::string DataTypeEnum<Type>::generateMySQLName(const Values & values)
{
    WriteBufferFromOwnString out;

    writeString("ENUM", out);
    writeChar('(', out);

    auto first = true;
    for (const auto & name_and_value : values)
    {
        if (!first)
            writeString(", ", out);

        first = false;

        writeQuotedString(name_and_value.first, out);
    }

    writeChar(')', out);

    return out.str();
}

template <typename Type>
std::string DataTypeEnum<Type>::generateName(const Values & values)
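Concretely, a ClickHouse `Enum8('sent' = 1, 'failed' = 2)` would be reported to MySQL clients as `ENUM('sent', 'failed')`: the names are kept and quoted, the numeric values are dropped. A standalone analog of the joining logic (illustration, not the ClickHouse buffers):

    #include <string>
    #include <utility>
    #include <vector>

    std::string mysqlEnumName(const std::vector<std::pair<std::string, int>> & values)
    {
        std::string out = "ENUM(";
        bool first = true;
        for (const auto & [name, value] : values)
        {
            if (!first)
                out += ", ";
            first = false;
            out += "'" + name + "'";   // keep the quoted name, drop the numeric value
        }
        return out + ")";
    }

    // mysqlEnumName({{"sent", 1}, {"failed", 2}}) == "ENUM('sent', 'failed')"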
@ -46,12 +46,14 @@ public:
private:
    std::string type_name;
    static std::string generateName(const Values & values);
    static std::string generateMySQLName(const Values & values);

public:
    explicit DataTypeEnum(const Values & values_);

    std::string doGetName() const override { return type_name; }
    const char * getFamilyName() const override;
    String getSQLCompatibleName() const override { return generateMySQLName(this->getValues()); }

    TypeIndex getTypeId() const override { return type_id; }

@ -42,6 +42,8 @@ public:
    TypeIndex getTypeId() const override { return type_id; }

    const char * getFamilyName() const override { return "FixedString"; }
    /// Use TEXT for compatibility with MySQL to allow arbitrary bytes.
    String getSQLCompatibleName() const override { return "TEXT"; }

    size_t getN() const
    {
@ -24,6 +24,7 @@ public:

    std::string doGetName() const override;
    const char * getFamilyName() const override { return "Function"; }
    String getSQLCompatibleName() const override { return "TEXT"; }
    TypeIndex getTypeId() const override { return TypeIndex::Function; }

    const DataTypes & getArgumentTypes() const
@ -19,6 +19,8 @@ public:
    static constexpr auto type_id = TypeToTypeIndex<IPv4>;

    const char * getFamilyName() const override { return TypeName<IPv4>.data(); }
    String getSQLCompatibleName() const override { return "TEXT"; }

    TypeIndex getTypeId() const override { return type_id; }

    Field getDefault() const override { return IPv4{}; }
@ -59,6 +61,8 @@ public:
    static constexpr auto type_id = TypeToTypeIndex<IPv6>;

    const char * getFamilyName() const override { return TypeName<IPv6>.data(); }
    String getSQLCompatibleName() const override { return "TEXT"; }

    TypeIndex getTypeId() const override { return type_id; }

    Field getDefault() const override { return IPv6{}; }
@ -26,6 +26,7 @@ public:

    std::string doGetName() const override { return fmt::format("Interval{}", kind.toString()); }
    const char * getFamilyName() const override { return "Interval"; }
    String getSQLCompatibleName() const override { return "TEXT"; }
    TypeIndex getTypeId() const override { return TypeIndex::Interval; }

    bool equals(const IDataType & rhs) const override;
@ -12,6 +12,7 @@ class DataTypeLowCardinality : public IDataType
private:
    DataTypePtr dictionary_type;


public:
    explicit DataTypeLowCardinality(DataTypePtr dictionary_type_);

@ -22,6 +23,8 @@ public:
        return "LowCardinality(" + dictionary_type->getName() + ")";
    }
    const char * getFamilyName() const override { return "LowCardinality"; }
    String getSQLCompatibleName() const override { return dictionary_type->getSQLCompatibleName(); }

    TypeIndex getTypeId() const override { return TypeIndex::LowCardinality; }

    MutableColumnPtr createColumn() const override;
@ -30,6 +30,7 @@ public:
    TypeIndex getTypeId() const override { return TypeIndex::Map; }
    std::string doGetName() const override;
    const char * getFamilyName() const override { return "Map"; }
    String getSQLCompatibleName() const override { return "JSON"; }

    bool canBeInsideNullable() const override { return false; }

@ -16,6 +16,8 @@ public:
    static constexpr bool is_parametric = false;

    const char * getFamilyName() const override { return "Nothing"; }
    String getSQLCompatibleName() const override { return "TEXT"; }

    TypeIndex getTypeId() const override { return TypeIndex::Nothing; }

    MutableColumnPtr createColumn() const override;
@ -16,6 +16,7 @@ public:
    explicit DataTypeNullable(const DataTypePtr & nested_data_type_);
    std::string doGetName() const override { return "Nullable(" + nested_data_type->getName() + ")"; }
    const char * getFamilyName() const override { return "Nullable"; }
    String getSQLCompatibleName() const override { return nested_data_type->getSQLCompatibleName(); }
    TypeIndex getTypeId() const override { return TypeIndex::Nullable; }

    MutableColumnPtr createColumn() const override;
@ -11,6 +11,33 @@ Field DataTypeNumberBase<T>::getDefault() const
{
    return NearestFieldType<FieldType>();
}
template <typename T>
String DataTypeNumberBase<T>::getSQLCompatibleName() const
{
    if constexpr (std::is_same_v<T, Int8>)
        return "TINYINT";
    else if constexpr (std::is_same_v<T, Int16>)
        return "SMALLINT";
    else if constexpr (std::is_same_v<T, Int32>)
        return "INTEGER";
    else if constexpr (std::is_same_v<T, Int64>)
        return "BIGINT";
    else if constexpr (std::is_same_v<T, UInt8>)
        return "TINYINT UNSIGNED";
    else if constexpr (std::is_same_v<T, UInt16>)
        return "SMALLINT UNSIGNED";
    else if constexpr (std::is_same_v<T, UInt32>)
        return "INTEGER UNSIGNED";
    else if constexpr (std::is_same_v<T, UInt64>)
        return "BIGINT UNSIGNED";
    else if constexpr (std::is_same_v<T, Float32>)
        return "FLOAT";
    else if constexpr (std::is_same_v<T, Float64>)
        return "DOUBLE";
    /// Unsupported types are converted to TEXT
    else
        return "TEXT";
}

template <typename T>
MutableColumnPtr DataTypeNumberBase<T>::createColumn() const
@ -30,7 +57,6 @@ bool DataTypeNumberBase<T>::isValueRepresentedByUnsignedInteger() const
    return is_integer<T> && is_unsigned_v<T>;
}


/// Explicit template instantiations - to avoid code bloat in headers.
template class DataTypeNumberBase<UInt8>;
template class DataTypeNumberBase<UInt16>;
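A toy standalone analog of the mapping, useful for seeing what a few representative instantiations yield (illustration only; the real calls go through DataTypeNumberBase<T>):

    #include <cstdint>
    #include <string_view>
    #include <type_traits>

    template <typename T>
    constexpr std::string_view sqlCompatibleName()
    {
        if constexpr (std::is_same_v<T, int8_t>)
            return "TINYINT";
        else if constexpr (std::is_same_v<T, uint32_t>)
            return "INTEGER UNSIGNED";
        else if constexpr (std::is_same_v<T, double>)
            return "DOUBLE";
        else
            return "TEXT";   // unsupported types fall back to TEXT
    }

    static_assert(sqlCompatibleName<uint32_t>() == "INTEGER UNSIGNED");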
@ -25,6 +25,7 @@ public:
    using ColumnType = ColumnVector<T>;

    const char * getFamilyName() const override { return TypeName<T>.data(); }
    String getSQLCompatibleName() const override;
    TypeIndex getTypeId() const override { return TypeToTypeIndex<T>; }

    Field getDefault() const override;
@ -23,6 +23,7 @@ public:
    DataTypeObject(const String & schema_format_, bool is_nullable_);

    const char * getFamilyName() const override { return "Object"; }
    String getSQLCompatibleName() const override { return "JSON"; }
    String doGetName() const override;
    TypeIndex getTypeId() const override { return TypeIndex::Object; }

@ -15,6 +15,8 @@ class DataTypeSet final : public IDataTypeDummy
public:
    static constexpr bool is_parametric = true;
    const char * getFamilyName() const override { return "Set"; }
    String getSQLCompatibleName() const override { return "TEXT"; }

    TypeIndex getTypeId() const override { return TypeIndex::Set; }
    bool equals(const IDataType & rhs) const override { return typeid(rhs) == typeid(*this); }
    bool isParametric() const override { return true; }
@ -21,6 +21,8 @@ public:
        return "String";
    }

    String getSQLCompatibleName() const override { return "BLOB"; }

    TypeIndex getTypeId() const override { return type_id; }

    MutableColumnPtr createColumn() const override;
@ -33,6 +33,7 @@ public:
    TypeIndex getTypeId() const override { return TypeIndex::Tuple; }
    std::string doGetName() const override;
    const char * getFamilyName() const override { return "Tuple"; }
    String getSQLCompatibleName() const override { return "JSON"; }

    bool canBeInsideNullable() const override { return false; }
    bool supportsSparseSerialization() const override { return true; }
@ -18,6 +18,8 @@ public:
    static constexpr auto type_id = TypeIndex::UUID;

    const char * getFamilyName() const override { return "UUID"; }
    String getSQLCompatibleName() const override { return "CHAR"; }

    TypeIndex getTypeId() const override { return type_id; }

    Field getDefault() const override;
@ -28,6 +28,11 @@ std::string DataTypeDecimal<T>::doGetName() const
    return fmt::format("Decimal({}, {})", this->precision, this->scale);
}

template <is_decimal T>
std::string DataTypeDecimal<T>::getSQLCompatibleName() const
{
    return fmt::format("DECIMAL({}, {})", this->precision, this->scale);
}

template <is_decimal T>
bool DataTypeDecimal<T>::equals(const IDataType & rhs) const
@ -39,6 +39,8 @@ public:
    static constexpr auto family_name = "Decimal";

    const char * getFamilyName() const override { return family_name; }
    String getSQLCompatibleName() const override;

    std::string doGetName() const override;
    TypeIndex getTypeId() const override { return TypeToTypeIndex<T>; }
    bool canBePromoted() const override { return true; }
@ -75,6 +75,8 @@ public:

    /// Name of data type family (example: FixedString, Array).
    virtual const char * getFamilyName() const = 0;
    /// Name of corresponding data type in MySQL (example: Bigint, Blob, etc)
    virtual String getSQLCompatibleName() const = 0;

    /// Data type id. It's used for runtime type checks.
    virtual TypeIndex getTypeId() const = 0;
@ -532,11 +534,6 @@ inline bool isNotDecimalButComparableToDecimal(const DataTypePtr & data_type)
    return which.isInt() || which.isUInt() || which.isFloat();
}

inline bool isCompilableType(const DataTypePtr & data_type)
{
    return data_type->isValueRepresentedByNumber() && !isDecimal(data_type);
}

inline bool isBool(const DataTypePtr & data_type)
{
    return data_type->getName() == "Bool";
src/DataTypes/Native.cpp (new file, 200 lines)
@ -0,0 +1,200 @@
#include <DataTypes/Native.h>

#if USE_EMBEDDED_COMPILER
# include <DataTypes/DataTypeNullable.h>
# include <Columns/ColumnConst.h>
# include <Columns/ColumnNullable.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
    extern const int LOGICAL_ERROR;
}

bool typeIsSigned(const IDataType & type)
{
    WhichDataType data_type(type);
    return data_type.isNativeInt() || data_type.isFloat() || data_type.isEnum() || data_type.isDate32();
}

llvm::Type * toNullableType(llvm::IRBuilderBase & builder, llvm::Type * type)
{
    auto * is_null_type = builder.getInt1Ty();
    return llvm::StructType::get(type, is_null_type);
}

bool canBeNativeType(const IDataType & type)
{
    WhichDataType data_type(type);

    if (data_type.isNullable())
    {
        const auto & data_type_nullable = static_cast<const DataTypeNullable &>(type);
        return canBeNativeType(*data_type_nullable.getNestedType());
    }

    return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate()
        || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum();
}

bool canBeNativeType(const DataTypePtr & type)
{
    return canBeNativeType(*type);
}

llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type)
{
    WhichDataType data_type(type);

    if (data_type.isNullable())
    {
        const auto & data_type_nullable = static_cast<const DataTypeNullable &>(type);
        auto * nested_type = toNativeType(builder, *data_type_nullable.getNestedType());
        return toNullableType(builder, nested_type);
    }

    /// LLVM doesn't have unsigned types, it has unsigned instructions.
    if (data_type.isInt8() || data_type.isUInt8())
        return builder.getInt8Ty();
    else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate())
        return builder.getInt16Ty();
    else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDate32() || data_type.isDateTime())
        return builder.getInt32Ty();
    else if (data_type.isInt64() || data_type.isUInt64())
        return builder.getInt64Ty();
    else if (data_type.isFloat32())
        return builder.getFloatTy();
    else if (data_type.isFloat64())
        return builder.getDoubleTy();
    else if (data_type.isEnum8())
        return builder.getInt8Ty();
    else if (data_type.isEnum16())
        return builder.getInt16Ty();

    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid cast to native type");
}

llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
{
    return toNativeType(builder, *type);
}

llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value)
{
    if (from_type->isNullable())
    {
        auto * inner = nativeBoolCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}));
        return b.CreateAnd(b.CreateNot(b.CreateExtractValue(value, {1})), inner);
    }

    auto * zero = llvm::Constant::getNullValue(value->getType());

    if (value->getType()->isIntegerTy())
        return b.CreateICmpNE(value, zero);
    else if (value->getType()->isFloatingPointTy())
        return b.CreateFCmpUNE(value, zero);

    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from_type->getName());
}

llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const ValueWithType & value_with_type)
{
    return nativeBoolCast(b, value_with_type.type, value_with_type.value);
}

llvm::Value * nativeCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value, const DataTypePtr & to_type)
{
    if (from_type->equals(*to_type))
    {
        return value;
    }
    else if (from_type->isNullable() && to_type->isNullable())
    {
        auto * inner = nativeCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}), to_type);
        return b.CreateInsertValue(inner, b.CreateExtractValue(value, {1}), {1});
    }
    else if (from_type->isNullable())
    {
        return nativeCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}), to_type);
    }
    else if (to_type->isNullable())
    {
        auto * from_native_type = toNativeType(b, from_type);
        auto * inner = nativeCast(b, from_type, value, removeNullable(to_type));
        return b.CreateInsertValue(llvm::Constant::getNullValue(from_native_type), inner, {0});
    }
    else
    {
        auto * from_native_type = toNativeType(b, from_type);
        auto * to_native_type = toNativeType(b, to_type);

        if (from_native_type == to_native_type)
            return value;
        else if (from_native_type->isIntegerTy() && to_native_type->isFloatingPointTy())
            return typeIsSigned(*from_type) ? b.CreateSIToFP(value, to_native_type) : b.CreateUIToFP(value, to_native_type);
        else if (from_native_type->isFloatingPointTy() && to_native_type->isIntegerTy())
            return typeIsSigned(*to_type) ? b.CreateFPToSI(value, to_native_type) : b.CreateFPToUI(value, to_native_type);
        else if (from_native_type->isIntegerTy() && to_native_type->isIntegerTy())
            return b.CreateIntCast(value, to_native_type, typeIsSigned(*from_type));
        else if (from_native_type->isFloatingPointTy() && to_native_type->isFloatingPointTy())
            return b.CreateFPCast(value, to_native_type);
    }

    throw Exception(ErrorCodes::LOGICAL_ERROR,
        "Invalid cast to native value from type {} to type {}",
        from_type->getName(),
        to_type->getName());
}

llvm::Value * nativeCast(llvm::IRBuilderBase & b, const ValueWithType & value, const DataTypePtr & to_type)
{
    return nativeCast(b, value.type, value.value, to_type);
}

llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index)
{
    if (const auto * constant = typeid_cast<const ColumnConst *>(&column))
        return getColumnNativeValue(builder, column_type, constant->getDataColumn(), 0);

    auto * type = toNativeType(builder, column_type);

    WhichDataType column_data_type(column_type);
    if (column_data_type.isNullable())
    {
        const auto & nullable_data_type = assert_cast<const DataTypeNullable &>(*column_type);
        const auto & nullable_column = assert_cast<const ColumnNullable &>(column);

        auto * value = getColumnNativeValue(builder, nullable_data_type.getNestedType(), nullable_column.getNestedColumn(), index);
        auto * is_null = llvm::ConstantInt::get(type->getContainedType(1), nullable_column.isNullAt(index));

        return llvm::ConstantStruct::get(static_cast<llvm::StructType *>(type), value, is_null);
    }
    else if (column_data_type.isFloat32())
    {
        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float32> &>(column).getElement(index));
    }
    else if (column_data_type.isFloat64())
    {
        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float64> &>(column).getElement(index));
    }
    else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime())
    {
        return llvm::ConstantInt::get(type, column.getUInt(index));
    }
    else if (column_data_type.isNativeInt() || column_data_type.isEnum() || column_data_type.isDate32())
    {
        return llvm::ConstantInt::get(type, column.getInt(index));
    }

    throw Exception(ErrorCodes::LOGICAL_ERROR,
        "Cannot get native value for column with type {}",
        column_type->getName());
}

}

#endif
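A hedged sketch of how these helpers are driven from a JIT compilation loop; the surrounding scaffolding is generic pseudocode, not code from this commit:

    // `b` is an llvm::IRBuilderBase positioned inside the function being emitted;
    // `arg` is an llvm::Value of ClickHouse type `from`, produced earlier in codegen.
    //
    //     if (!canBeNativeType(from) || !canBeNativeType(to))
    //         ... fall back to the interpreted path ...
    //
    //     llvm::Value * converted = nativeCast(b, from, arg, to);
    //     llvm::Value * truthy    = nativeBoolCast(b, to, converted);   // e.g. for a WHERE mask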
@ -4,65 +4,53 @@

#if USE_EMBEDDED_COMPILER
#    include <Common/Exception.h>

#    include <Core/ValueWithType.h>
#    include <DataTypes/IDataType.h>
#    include <DataTypes/DataTypeNullable.h>
#    include <Columns/ColumnConst.h>
#    include <Columns/ColumnNullable.h>
#    include <llvm/IR/IRBuilder.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
    extern const int LOGICAL_ERROR;
}

static inline bool typeIsSigned(const IDataType & type)
/// Returns true if type is signed, false otherwise
bool typeIsSigned(const IDataType & type);

/// Cast LLVM type to nullable LLVM type
llvm::Type * toNullableType(llvm::IRBuilderBase & builder, llvm::Type * type);

/// Returns true if type can be native LLVM type, false otherwise
bool canBeNativeType(const IDataType & type);

/// Returns true if type can be native LLVM type, false otherwise
bool canBeNativeType(const DataTypePtr & type);

template <typename Type>
static inline bool canBeNativeType()
{
    WhichDataType data_type(type);
    return data_type.isNativeInt() || data_type.isFloat() || data_type.isEnum();
    if constexpr (std::is_same_v<Type, Int8> || std::is_same_v<Type, UInt8>)
        return true;
    else if constexpr (std::is_same_v<Type, Int16> || std::is_same_v<Type, UInt16>)
        return true;
    else if constexpr (std::is_same_v<Type, Int32> || std::is_same_v<Type, UInt32>)
        return true;
    else if constexpr (std::is_same_v<Type, Int64> || std::is_same_v<Type, UInt64>)
        return true;
    else if constexpr (std::is_same_v<Type, Float32> || std::is_same_v<Type, Float64>)
        return true;

    return false;
}

static inline llvm::Type * toNullableType(llvm::IRBuilderBase & builder, llvm::Type * type)
{
    auto * is_null_type = builder.getInt1Ty();
    return llvm::StructType::get(type, is_null_type);
}
/// Cast type to native LLVM type
llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type);

static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const IDataType & type)
{
    WhichDataType data_type(type);

    if (data_type.isNullable())
    {
        const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
        auto * wrapped = toNativeType(builder, *data_type_nullable.getNestedType());
        auto * is_null_type = builder.getInt1Ty();
        return wrapped ? llvm::StructType::get(wrapped, is_null_type) : nullptr;
    }

    /// LLVM doesn't have unsigned types, it has unsigned instructions.
    if (data_type.isInt8() || data_type.isUInt8())
        return builder.getInt8Ty();
    else if (data_type.isInt16() || data_type.isUInt16() || data_type.isDate())
        return builder.getInt16Ty();
    else if (data_type.isInt32() || data_type.isUInt32() || data_type.isDate32() || data_type.isDateTime())
        return builder.getInt32Ty();
    else if (data_type.isInt64() || data_type.isUInt64())
        return builder.getInt64Ty();
    else if (data_type.isFloat32())
        return builder.getFloatTy();
    else if (data_type.isFloat64())
        return builder.getDoubleTy();
    else if (data_type.isEnum8())
        return builder.getInt8Ty();
    else if (data_type.isEnum16())
        return builder.getInt16Ty();

    return nullptr;
}
/// Cast type to native LLVM type
llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type);

template <typename ToType>
static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder)
@ -80,203 +68,43 @@ static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder)
    else if constexpr (std::is_same_v<ToType, Float64>)
        return builder.getDoubleTy();

    return nullptr;
    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid cast to native type");
}

template <typename Type>
static inline bool canBeNativeType()
template <typename ToType>
static inline DataTypePtr toNativeDataType()
{
    if constexpr (std::is_same_v<Type, Int8> || std::is_same_v<Type, UInt8>)
        return true;
    else if constexpr (std::is_same_v<Type, Int16> || std::is_same_v<Type, UInt16>)
        return true;
    else if constexpr (std::is_same_v<Type, Int32> || std::is_same_v<Type, UInt32>)
        return true;
    else if constexpr (std::is_same_v<Type, Int64> || std::is_same_v<Type, UInt64>)
        return true;
    else if constexpr (std::is_same_v<Type, Float32>)
        return true;
    else if constexpr (std::is_same_v<Type, Float64>)
        return true;
    if constexpr (std::is_same_v<ToType, Int8> || std::is_same_v<ToType, UInt8> ||
        std::is_same_v<ToType, Int16> || std::is_same_v<ToType, UInt16> ||
        std::is_same_v<ToType, Int32> || std::is_same_v<ToType, UInt32> ||
        std::is_same_v<ToType, Int64> || std::is_same_v<ToType, UInt64> ||
        std::is_same_v<ToType, Float32> || std::is_same_v<ToType, Float64>)
        return std::make_shared<DataTypeNumber<ToType>>();

    return false;
    throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid cast to native data type");
}

static inline bool canBeNativeType(const IDataType & type)
{
    WhichDataType data_type(type);
/// Cast LLVM value with type to bool
llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value);

    if (data_type.isNullable())
    {
        const auto & data_type_nullable = static_cast<const DataTypeNullable&>(type);
        return canBeNativeType(*data_type_nullable.getNestedType());
    }
/// Cast LLVM value with type to bool
llvm::Value * nativeBoolCast(llvm::IRBuilderBase & b, const ValueWithType & value_with_type);

    return data_type.isNativeInt() || data_type.isNativeUInt() || data_type.isFloat() || data_type.isDate()
        || data_type.isDate32() || data_type.isDateTime() || data_type.isEnum();
}
/// Cast LLVM value with type to specified type
llvm::Value * nativeCast(llvm::IRBuilderBase & b, const DataTypePtr & from_type, llvm::Value * value, const DataTypePtr & to_type);

static inline llvm::Type * toNativeType(llvm::IRBuilderBase & builder, const DataTypePtr & type)
{
    return toNativeType(builder, *type);
}

static inline llvm::Value * nativeBoolCast(llvm::IRBuilder<> & b, const DataTypePtr & from_type, llvm::Value * value)
{
    if (from_type->isNullable())
    {
        auto * inner = nativeBoolCast(b, removeNullable(from_type), b.CreateExtractValue(value, {0}));
        return b.CreateAnd(b.CreateNot(b.CreateExtractValue(value, {1})), inner);
    }
    auto * zero = llvm::Constant::getNullValue(value->getType());

    if (value->getType()->isIntegerTy())
        return b.CreateICmpNE(value, zero);
    if (value->getType()->isFloatingPointTy())
        return b.CreateFCmpUNE(value, zero);

    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast non-number {} to bool", from_type->getName());
}

static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, llvm::Type * to_type)
{
    auto * from_type = value->getType();

    if (from_type == to_type)
        return value;
    else if (from_type->isIntegerTy() && to_type->isFloatingPointTy())
        return typeIsSigned(*from) ? b.CreateSIToFP(value, to_type) : b.CreateUIToFP(value, to_type);
    else if (from_type->isFloatingPointTy() && to_type->isIntegerTy())
        return typeIsSigned(*from) ? b.CreateFPToSI(value, to_type) : b.CreateFPToUI(value, to_type);
    else if (from_type->isIntegerTy() && to_type->isIntegerTy())
        return b.CreateIntCast(value, to_type, typeIsSigned(*from));
    else if (from_type->isFloatingPointTy() && to_type->isFloatingPointTy())
        return b.CreateFPCast(value, to_type);

    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", from->getName());
}
/// Cast LLVM value with type to specified type
llvm::Value * nativeCast(llvm::IRBuilderBase & b, const ValueWithType & value, const DataTypePtr & to_type);

template <typename FromType>
static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, llvm::Value * value, llvm::Type * to_type)
static inline llvm::Value * nativeCast(llvm::IRBuilderBase & b, llvm::Value * value, const DataTypePtr & to)
{
    auto * from_type = value->getType();

    static constexpr bool from_type_is_signed = std::numeric_limits<FromType>::is_signed;

    if (from_type == to_type)
        return value;
    else if (from_type->isIntegerTy() && to_type->isFloatingPointTy())
        return from_type_is_signed ? b.CreateSIToFP(value, to_type) : b.CreateUIToFP(value, to_type);
    else if (from_type->isFloatingPointTy() && to_type->isIntegerTy())
        return from_type_is_signed ? b.CreateFPToSI(value, to_type) : b.CreateFPToUI(value, to_type);
    else if (from_type->isIntegerTy() && to_type->isIntegerTy())
        return b.CreateIntCast(value, to_type, from_type_is_signed);
    else if (from_type->isFloatingPointTy() && to_type->isFloatingPointTy())
        return b.CreateFPCast(value, to_type);

    throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot cast {} to requested type", TypeName<FromType>);
    auto native_data_type = toNativeDataType<FromType>();
    return nativeCast(b, native_data_type, value, to);
}

static inline llvm::Value * nativeCast(llvm::IRBuilder<> & b, const DataTypePtr & from, llvm::Value * value, const DataTypePtr & to)
{
    auto * n_to = toNativeType(b, to);

    if (value->getType() == n_to)
    {
        return value;
    }
    else if (from->isNullable() && to->isNullable())
    {
        auto * inner = nativeCast(b, removeNullable(from), b.CreateExtractValue(value, {0}), to);
        return b.CreateInsertValue(inner, b.CreateExtractValue(value, {1}), {1});
    }
    else if (from->isNullable())
    {
        return nativeCast(b, removeNullable(from), b.CreateExtractValue(value, {0}), to);
    }
    else if (to->isNullable())
    {
        auto * inner = nativeCast(b, from, value, removeNullable(to));
        return b.CreateInsertValue(llvm::Constant::getNullValue(n_to), inner, {0});
    }

    return nativeCast(b, from, value, n_to);
}

static inline std::pair<llvm::Value *, llvm::Value *> nativeCastToCommon(llvm::IRBuilder<> & b, const DataTypePtr & lhs_type, llvm::Value * lhs, const DataTypePtr & rhs_type, llvm::Value * rhs) /// NOLINT
{
    llvm::Type * common;

    bool lhs_is_signed = typeIsSigned(*lhs_type);
    bool rhs_is_signed = typeIsSigned(*rhs_type);

    if (lhs->getType()->isIntegerTy() && rhs->getType()->isIntegerTy())
    {
        /// if one integer has a sign bit, make sure the other does as well. llvm generates optimal code
        /// (e.g. uses overflow flag on x86) for (word size + 1)-bit integer operations.

        size_t lhs_bit_width = lhs->getType()->getIntegerBitWidth() + (!lhs_is_signed && rhs_is_signed);
        size_t rhs_bit_width = rhs->getType()->getIntegerBitWidth() + (!rhs_is_signed && lhs_is_signed);

        size_t max_bit_width = std::max(lhs_bit_width, rhs_bit_width);
        common = b.getIntNTy(static_cast<unsigned>(max_bit_width));
    }
    else
    {
        /// TODO: Check
        /// (double, float) or (double, int_N where N <= double's mantissa width) -> double
        common = b.getDoubleTy();
    }

    auto * cast_lhs_to_common = nativeCast(b, lhs_type, lhs, common);
    auto * cast_rhs_to_common = nativeCast(b, rhs_type, rhs, common);

    return std::make_pair(cast_lhs_to_common, cast_rhs_to_common);
}

static inline llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index)
{
    if (const auto * constant = typeid_cast<const ColumnConst *>(&column))
    {
        return getColumnNativeValue(builder, column_type, constant->getDataColumn(), 0);
    }

    WhichDataType column_data_type(column_type);

    auto * type = toNativeType(builder, column_type);

    if (!type || column.size() <= index)
        return nullptr;

    if (column_data_type.isNullable())
    {
        const auto & nullable_data_type = assert_cast<const DataTypeNullable &>(*column_type);
        const auto & nullable_column = assert_cast<const ColumnNullable &>(column);

        auto * value = getColumnNativeValue(builder, nullable_data_type.getNestedType(), nullable_column.getNestedColumn(), index);
        auto * is_null = llvm::ConstantInt::get(type->getContainedType(1), nullable_column.isNullAt(index));

        return value ? llvm::ConstantStruct::get(static_cast<llvm::StructType *>(type), value, is_null) : nullptr;
    }
    else if (column_data_type.isFloat32())
    {
        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float32> &>(column).getElement(index));
    }
    else if (column_data_type.isFloat64())
    {
        return llvm::ConstantFP::get(type, assert_cast<const ColumnVector<Float64> &>(column).getElement(index));
    }
    else if (column_data_type.isNativeUInt() || column_data_type.isDate() || column_data_type.isDateTime())
    {
        return llvm::ConstantInt::get(type, column.getUInt(index));
    }
    else if (column_data_type.isNativeInt() || column_data_type.isEnum() || column_data_type.isDate32())
    {
        return llvm::ConstantInt::get(type, column.getInt(index));
    }

    return nullptr;
}
/// Get column value for specified index as LLVM constant
llvm::Constant * getColumnNativeValue(llvm::IRBuilderBase & builder, const DataTypePtr & column_type, const IColumn & column, size_t index);

}

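The bit-width rule in nativeCastToCommon above is easy to check by hand. A small self-contained sketch of the same arithmetic (the helper name is invented for illustration):

#include <algorithm>
#include <cstdio>

/// Re-implements the widening rule from nativeCastToCommon for illustration:
/// if exactly one operand is signed, the unsigned one needs an extra bit
/// so that every value of both operands fits in a common signed width.
static unsigned commonIntegerBits(unsigned lhs_bits, bool lhs_signed, unsigned rhs_bits, bool rhs_signed)
{
    unsigned lhs = lhs_bits + (!lhs_signed && rhs_signed);
    unsigned rhs = rhs_bits + (!rhs_signed && lhs_signed);
    return std::max(lhs, rhs);
}

int main()
{
    std::printf("%u\n", commonIntegerBits(8, false, 16, true));  /// UInt8 vs Int16 -> i16
    std::printf("%u\n", commonIntegerBits(32, false, 32, true)); /// UInt32 vs Int32 -> i33
    std::printf("%u\n", commonIntegerBits(64, true, 64, true));  /// Int64 vs Int64 -> i64
    return 0;
}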
@ -3,7 +3,6 @@
#include <filesystem>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabaseDictionary.h>
#include <Databases/DatabaseFilesystem.h>
#include <Databases/DatabaseLazy.h>
#include <Databases/DatabaseMemory.h>
#include <Databases/DatabaseOrdinary.h>
@ -48,14 +47,6 @@
#include <Databases/SQLite/DatabaseSQLite.h>
#endif

#if USE_AWS_S3
#include <Databases/DatabaseS3.h>
#endif

#if USE_HDFS
#include <Databases/DatabaseHDFS.h>
#endif

namespace fs = std::filesystem;

namespace DB
@ -140,13 +131,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String

    static const std::unordered_set<std::string_view> database_engines{"Ordinary", "Atomic", "Memory",
        "Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL",
        "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};
        "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};

    if (!database_engines.contains(engine_name))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name);

    static const std::unordered_set<std::string_view> engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL",
        "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};
        "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};

    static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"};
    bool engine_may_have_arguments = engines_with_arguments.contains(engine_name);
@ -441,63 +432,6 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
    }
#endif

    else if (engine_name == "Filesystem")
    {
        const ASTFunction * engine = engine_define->engine;

        /// If init_path is empty, then the current path will be used
        std::string init_path;

        if (engine->arguments && !engine->arguments->children.empty())
        {
            if (engine->arguments->children.size() != 1)
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path");

            const auto & arguments = engine->arguments->children;
            init_path = safeGetLiteralValue<String>(arguments[0], engine_name);
        }

        return std::make_shared<DatabaseFilesystem>(database_name, init_path, context);
    }

#if USE_AWS_S3
    else if (engine_name == "S3")
    {
        const ASTFunction * engine = engine_define->engine;

        DatabaseS3::Configuration config;

        if (engine->arguments && !engine->arguments->children.empty())
        {
            ASTs & engine_args = engine->arguments->children;
            config = DatabaseS3::parseArguments(engine_args, context);
        }

        return std::make_shared<DatabaseS3>(database_name, config, context);
    }
#endif

#if USE_HDFS
    else if (engine_name == "HDFS")
    {
        const ASTFunction * engine = engine_define->engine;

        /// If source_url is empty, then table name must contain full url
        std::string source_url;

        if (engine->arguments && !engine->arguments->children.empty())
        {
            if (engine->arguments->children.size() != 1)
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url");

            const auto & arguments = engine->arguments->children;
            source_url = safeGetLiteralValue<String>(arguments[0], engine_name);
        }

        return std::make_shared<DatabaseHDFS>(database_name, source_url, context);
    }
#endif

    throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name);
}

@ -1,247 +0,0 @@
#include <Databases/DatabaseFilesystem.h>

#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/parseQuery.h>
#include <Storages/IStorage.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Common/filesystemHelpers.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int DATABASE_ACCESS_DENIED;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
}

DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_)
    : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")"))
{
    bool is_local = context_->getApplicationType() == Context::ApplicationType::LOCAL;
    fs::path user_files_path = is_local ? "" : fs::canonical(getContext()->getUserFilesPath());

    if (fs::path(path).is_relative())
    {
        path = user_files_path / path;
    }
    else if (!is_local && !pathStartsWith(fs::path(path), user_files_path))
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS,
                        "Path must be inside user-files path: {}", user_files_path.string());
    }

    path = fs::absolute(path).lexically_normal();
    if (!fs::exists(path))
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path does not exist: {}", path);
}

std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const
{
    fs::path table_path = fs::path(path) / table_name;
    return table_path.lexically_normal().string();
}

void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const
{
    /// If run in Local mode, no need for path checking.
    bool check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL;
    const auto & user_files_path = context_->getUserFilesPath();

    /// Check access for the file before checking its existence.
    if (check_path && !fileOrSymlinkPathStartsWith(table_path, user_files_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::DATABASE_ACCESS_DENIED, "File is not inside {}", user_files_path);
        else
            return false;
    }

    /// Check if the corresponding file exists.
    if (!fs::exists(table_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "File does not exist: {}", table_path);
        else
            return false;
    }

    if (!fs::is_regular_file(table_path))
    {
        if (throw_on_error)
            throw Exception(ErrorCodes::FILE_DOESNT_EXIST,
                            "File is a directory, but expected a file: {}", table_path);
        else
            return false;
    }

    return true;
}

StoragePtr DatabaseFilesystem::tryGetTableFromCache(const std::string & name) const
{
    StoragePtr table = nullptr;
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            table = it->second;
    }

    /// Invalidate the cache if the file no longer exists.
    if (table && !fs::exists(getTablePath(name)))
    {
        std::lock_guard lock(mutex);
        loaded_tables.erase(name);
        return nullptr;
    }

    return table;
}

bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const
{
    if (tryGetTableFromCache(name))
        return true;

    return checkTableFilePath(getTablePath(name), context_, /* throw_on_error */false);
}

StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    if (auto table = tryGetTableFromCache(name))
        return table;

    auto table_path = getTablePath(name);
    checkTableFilePath(table_path, context_, /* throw_on_error */true);

    /// If the file exists, create a new table using TableFunctionFile and return it.
    auto args = makeASTFunction("file", std::make_shared<ASTLiteral>(table_path));

    auto table_function = TableFunctionFactory::instance().get(args, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionFile throws exceptions if the table cannot be created.
    auto table_storage = table_function->execute(args, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_) const
{
    /// getTableImpl can throw exceptions; do not catch them here, so the user sees the correct error.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionFile, which indicate that there is no table;
        /// see tests/02722_database_filesystem.sh for more details.
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::DATABASE_ACCESS_DENIED
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST)
        {
            return nullptr;
        }
        throw;
    }
}

bool DatabaseFilesystem::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem('{}')", backQuoteIfNeed(getDatabaseName()), path);

    ParserCreateQuery parser;
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseFilesystem::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables
 * But only caches them for quick access
 */
DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

}
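The constructor and getTablePath above lean entirely on std::filesystem for sandboxing: relative paths are anchored under user_files and everything is normalized before comparison. A compilable sketch of that normalization (the paths are made up for illustration):

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

int main()
{
    /// Hypothetical user_files directory and a relative database path.
    fs::path user_files = "/var/lib/clickhouse/user_files";
    fs::path db_path = "data/csv";

    /// Relative paths are joined onto user_files, as in the constructor above.
    fs::path resolved = (user_files / db_path).lexically_normal();
    std::cout << resolved << "\n"; /// "/var/lib/clickhouse/user_files/data/csv"

    /// A table name with ".." normalizes to a path that escapes the sandbox,
    /// which is why the result is checked against user_files again afterwards.
    fs::path table = resolved / "../../../secret.txt";
    std::cout << table.lexically_normal() << "\n"; /// "/var/lib/clickhouse/secret.txt"
    return 0;
}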
@ -1,67 +0,0 @@
#pragma once

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseFilesystem allows to interact with files stored on the local filesystem.
 * Uses TableFunctionFile to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 *
 * Used in clickhouse-local to access local files.
 * For clickhouse-server, access is allowed only to files inside the user_files directory.
 */
class DatabaseFilesystem : public IDatabase, protected WithContext
{
public:
    DatabaseFilesystem(const String & name, const String & path, ContextPtr context);

    String getEngineName() const override { return "Filesystem"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;

    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    StoragePtr tryGetTableFromCache(const std::string & name) const;

    std::string getTablePath(const std::string & table_name) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const;

private:
    String path;
    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}
@ -1,234 +0,0 @@
#include "config.h"

#if USE_HDFS

#include <Databases/DatabaseHDFS.h>

#include <Interpreters/Context.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Storages/HDFS/HDFSCommon.h>
#include <Storages/IStorage.h>
#include <TableFunctions/TableFunctionFactory.h>

#include <Poco/URI.h>
#include <re2/re2.h>

#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int UNACCEPTABLE_URL;
    extern const int ACCESS_DENIED;
    extern const int DATABASE_ACCESS_DENIED;
    extern const int HDFS_ERROR;
    extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
}

static constexpr std::string_view HDFS_HOST_REGEXP = "^hdfs://[^/]*";

DatabaseHDFS::DatabaseHDFS(const String & name_, const String & source_url, ContextPtr context_)
    : IDatabase(name_)
    , WithContext(context_->getGlobalContext())
    , source(source_url)
    , log(&Poco::Logger::get("DatabaseHDFS(" + name_ + ")"))
{
    if (!source.empty())
    {
        if (!re2::RE2::FullMatch(source, std::string(HDFS_HOST_REGEXP)))
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs host: {}. "
                            "It should have structure 'hdfs://<host_name>:<port>'", source);

        context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(source));
    }
}

void DatabaseHDFS::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

std::string DatabaseHDFS::getTablePath(const std::string & table_name) const
{
    if (table_name.starts_with("hdfs://"))
        return table_name;

    if (source.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. "
                        "It should have structure 'hdfs://<host_name>:<port>/path'", table_name);

    return fs::path(source) / table_name;
}

bool DatabaseHDFS::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
{
    try
    {
        checkHDFSURL(url);
        context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(url));
    }
    catch (...)
    {
        if (throw_on_error)
            throw;
        return false;
    }

    return true;
}

bool DatabaseHDFS::isTableExist(const String & name, ContextPtr context_) const
{
    std::lock_guard lock(mutex);
    if (loaded_tables.find(name) != loaded_tables.end())
        return true;

    return checkUrl(name, context_, false);
}

StoragePtr DatabaseHDFS::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            return it->second;
    }

    auto url = getTablePath(name);

    checkUrl(url, context_, true);

    auto args = makeASTFunction("hdfs", std::make_shared<ASTLiteral>(url));

    auto table_function = TableFunctionFactory::instance().get(args, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionHDFS throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(args, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseHDFS::getTable(const String & name, ContextPtr context_) const
{
    /// Rethrow all exceptions from TableFunctionHDFS to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseHDFS::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        // Ignore exceptions thrown by TableFunctionHDFS, which indicate that there is no table
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::ACCESS_DENIED
            || e.code() == ErrorCodes::DATABASE_ACCESS_DENIED
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST
            || e.code() == ErrorCodes::UNACCEPTABLE_URL
            || e.code() == ErrorCodes::HDFS_ERROR
            || e.code() == ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE)
        {
            return nullptr;
        }
        throw;
    }
    catch (const Poco::URISyntaxException &)
    {
        return nullptr;
    }
}

bool DatabaseHDFS::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    ParserCreateQuery parser;

    const String query = fmt::format("CREATE DATABASE {} ENGINE = HDFS('{}')", backQuoteIfNeed(getDatabaseName()), source);
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseHDFS::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseHDFS::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables
 * But only caches them for quick access
 */
DatabaseTablesIteratorPtr DatabaseHDFS::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

} // DB

#endif
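The HDFS_HOST_REGEXP check above only pins down the scheme and host part of the URL; anything after a slash fails validation. A small sketch of which sources pass it (assumes the re2 library is available, as it is in the build above; the harness is illustrative):

#include <re2/re2.h>
#include <cstdio>

int main()
{
    /// Same pattern as HDFS_HOST_REGEXP above: scheme plus host, no path.
    re2::RE2 host_regexp("^hdfs://[^/]*");

    /// FullMatch requires the *entire* string to match, so a trailing
    /// path component makes validation fail.
    std::printf("%d\n", re2::RE2::FullMatch("hdfs://namenode:9000", host_regexp));      /// 1
    std::printf("%d\n", re2::RE2::FullMatch("hdfs://namenode:9000/path", host_regexp)); /// 0
    std::printf("%d\n", re2::RE2::FullMatch("file:///tmp", host_regexp));               /// 0
    return 0;
}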
@ -1,68 +0,0 @@
#pragma once

#include "config.h"

#if USE_HDFS

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseHDFS allows to interact with files stored on HDFS.
 * Uses TableFunctionHDFS to implicitly load a file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 */
class DatabaseHDFS : public IDatabase, protected WithContext
{
public:
    DatabaseHDFS(const String & name, const String & source_url, ContextPtr context);

    String getEngineName() const override { return "HDFS"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;

    std::string getTablePath(const std::string & table_name) const;

private:
    const String source;

    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}

#endif
@ -253,7 +253,7 @@ ClusterPtr DatabaseReplicated::getClusterImpl() const
        treat_local_as_remote,
        treat_local_port_as_remote,
        cluster_auth_info.cluster_secure_connection,
        /*priority=*/ 1,
        Priority{1},
        TSA_SUPPRESS_WARNING_FOR_READ(database_name), /// FIXME
        cluster_auth_info.cluster_secret};

@ -1,312 +0,0 @@
#include "config.h"

#if USE_AWS_S3

#include <Databases/DatabaseS3.h>

#include <Interpreters/Context.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <IO/S3/URI.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTLiteral.h>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/IStorage.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <TableFunctions/TableFunctionFactory.h>

#include <boost/algorithm/string.hpp>
#include <filesystem>

namespace fs = std::filesystem;

namespace DB
{

static const std::unordered_set<std::string_view> optional_configuration_keys = {
    "url",
    "access_key_id",
    "secret_access_key",
    "no_sign_request"
};

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int UNKNOWN_TABLE;
    extern const int BAD_ARGUMENTS;
    extern const int FILE_DOESNT_EXIST;
    extern const int UNACCEPTABLE_URL;
    extern const int S3_ERROR;

    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

DatabaseS3::DatabaseS3(const String & name_, const Configuration& config_, ContextPtr context_)
    : IDatabase(name_)
    , WithContext(context_->getGlobalContext())
    , config(config_)
    , log(&Poco::Logger::get("DatabaseS3(" + name_ + ")"))
{
}

void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_storage) const
{
    std::lock_guard lock(mutex);
    auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
    if (!inserted)
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Table with name `{}` already exists in database `{}` (engine {})",
            table_name, getDatabaseName(), getEngineName());
}

std::string DatabaseS3::getFullUrl(const std::string & name) const
{
    if (!config.url_prefix.empty())
        return fs::path(config.url_prefix) / name;

    return name;
}

bool DatabaseS3::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
{
    try
    {
        S3::URI uri(url);
        context_->getGlobalContext()->getRemoteHostFilter().checkURL(uri.uri);
    }
    catch (...)
    {
        if (throw_on_error)
            throw;
        return false;
    }
    return true;
}

bool DatabaseS3::isTableExist(const String & name, ContextPtr context_) const
{
    std::lock_guard lock(mutex);
    if (loaded_tables.find(name) != loaded_tables.end())
        return true;

    return checkUrl(getFullUrl(name), context_, false);
}

StoragePtr DatabaseS3::getTableImpl(const String & name, ContextPtr context_) const
{
    /// Check if the table exists in the loaded tables map.
    {
        std::lock_guard lock(mutex);
        auto it = loaded_tables.find(name);
        if (it != loaded_tables.end())
            return it->second;
    }

    auto url = getFullUrl(name);
    checkUrl(url, context_, /* throw_on_error */true);

    auto function = std::make_shared<ASTFunction>();
    function->name = "s3";
    function->arguments = std::make_shared<ASTExpressionList>();
    function->children.push_back(function->arguments);

    function->arguments->children.push_back(std::make_shared<ASTLiteral>(url));
    if (config.no_sign_request)
    {
        function->arguments->children.push_back(std::make_shared<ASTLiteral>("NOSIGN"));
    }
    else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
    {
        function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.access_key_id.value()));
        function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.secret_access_key.value()));
    }

    auto table_function = TableFunctionFactory::instance().get(function, context_);
    if (!table_function)
        return nullptr;

    /// TableFunctionS3 throws exceptions, if table cannot be created.
    auto table_storage = table_function->execute(function, context_, name);
    if (table_storage)
        addTable(name, table_storage);

    return table_storage;
}

StoragePtr DatabaseS3::getTable(const String & name, ContextPtr context_) const
{
    /// Rethrow all exceptions from TableFunctionS3 to show correct error to user.
    if (auto storage = getTableImpl(name, context_))
        return storage;

    throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
                    backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
}

StoragePtr DatabaseS3::tryGetTable(const String & name, ContextPtr context_) const
{
    try
    {
        return getTableImpl(name, context_);
    }
    catch (const Exception & e)
    {
        /// Ignore exceptions thrown by TableFunctionS3, which indicate that there is no table.
        if (e.code() == ErrorCodes::BAD_ARGUMENTS
            || e.code() == ErrorCodes::S3_ERROR
            || e.code() == ErrorCodes::FILE_DOESNT_EXIST
            || e.code() == ErrorCodes::UNACCEPTABLE_URL)
        {
            return nullptr;
        }
        throw;
    }
    catch (const Poco::URISyntaxException &)
    {
        return nullptr;
    }
}

bool DatabaseS3::empty() const
{
    std::lock_guard lock(mutex);
    return loaded_tables.empty();
}

ASTPtr DatabaseS3::getCreateDatabaseQuery() const
{
    const auto & settings = getContext()->getSettingsRef();
    ParserCreateQuery parser;

    std::string creation_args;
    creation_args += fmt::format("'{}'", config.url_prefix);
    if (config.no_sign_request)
        creation_args += ", 'NOSIGN'";
    else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
        creation_args += fmt::format(", '{}', '{}'", config.access_key_id.value(), config.secret_access_key.value());

    const String query = fmt::format("CREATE DATABASE {} ENGINE = S3({})", backQuoteIfNeed(getDatabaseName()), creation_args);
    ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);

    if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
    {
        auto & ast_create_query = ast->as<ASTCreateQuery &>();
        ast_create_query.set(ast_create_query.comment, std::make_shared<ASTLiteral>(database_comment));
    }

    return ast;
}

void DatabaseS3::shutdown()
{
    Tables tables_snapshot;
    {
        std::lock_guard lock(mutex);
        tables_snapshot = loaded_tables;
    }

    for (const auto & kv : tables_snapshot)
    {
        auto table_id = kv.second->getStorageID();
        kv.second->flushAndShutdown();
    }

    std::lock_guard lock(mutex);
    loaded_tables.clear();
}

DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPtr context_)
{
    Configuration result;

    if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context_))
    {
        auto & collection = *named_collection;

        validateNamedCollection(collection, {}, optional_configuration_keys);

        result.url_prefix = collection.getOrDefault<String>("url", "");
        result.no_sign_request = collection.getOrDefault<bool>("no_sign_request", false);

        auto key_id = collection.getOrDefault<String>("access_key_id", "");
        auto secret_key = collection.getOrDefault<String>("secret_access_key", "");

        if (!key_id.empty())
            result.access_key_id = key_id;

        if (!secret_key.empty())
            result.secret_access_key = secret_key;
    }
    else
    {
        const std::string supported_signature =
            " - S3()\n"
            " - S3('url')\n"
            " - S3('url', 'NOSIGN')\n"
            " - S3('url', 'access_key_id', 'secret_access_key')\n";
        const auto error_message =
            fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature);

        for (auto & arg : engine_args)
            arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_);

        if (engine_args.size() > 3)
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str());

        if (engine_args.empty())
            return result;

        result.url_prefix = checkAndGetLiteralArgument<String>(engine_args[0], "url");

        // url, NOSIGN
        if (engine_args.size() == 2)
        {
            auto second_arg = checkAndGetLiteralArgument<String>(engine_args[1], "NOSIGN");
            if (boost::iequals(second_arg, "NOSIGN"))
                result.no_sign_request = true;
            else
                throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());
        }

        // url, access_key_id, secret_access_key
        if (engine_args.size() == 3)
        {
            auto key_id = checkAndGetLiteralArgument<String>(engine_args[1], "access_key_id");
            auto secret_key = checkAndGetLiteralArgument<String>(engine_args[2], "secret_access_key");

            if (key_id.empty() || secret_key.empty() || boost::iequals(key_id, "NOSIGN"))
                throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());

            result.access_key_id = key_id;
            result.secret_access_key = secret_key;
        }
    }

    return result;
}

/**
 * Returns an empty vector because the database is read-only and no tables can be backed up
 */
std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseS3::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
{
    return {};
}

/**
 * Returns an empty iterator because the database does not have its own tables
 * But only caches them for quick access
 */
DatabaseTablesIteratorPtr DatabaseS3::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
{
    return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
}

}

#endif
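parseArguments above accepts exactly the four signatures listed in its error message. A plain-C++ sketch of the same decision table, detached from the AST machinery (types are simplified to strings for illustration, and unlike the real code the NOSIGN comparison here is case-sensitive):

#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

struct S3Config
{
    std::string url_prefix;
    bool no_sign_request = false;
    std::optional<std::string> access_key_id;
    std::optional<std::string> secret_access_key;
};

/// Mirrors the positional branch of DatabaseS3::parseArguments:
/// S3(), S3('url'), S3('url', 'NOSIGN'), S3('url', 'key_id', 'secret').
static S3Config parseS3Args(const std::vector<std::string> & args)
{
    if (args.size() > 3)
        throw std::invalid_argument("at most 3 arguments are supported");

    S3Config config;
    if (args.empty())
        return config;

    config.url_prefix = args[0];

    if (args.size() == 2)
    {
        if (args[1] != "NOSIGN")
            throw std::invalid_argument("second of two arguments must be NOSIGN");
        config.no_sign_request = true;
    }
    else if (args.size() == 3)
    {
        config.access_key_id = args[1];
        config.secret_access_key = args[2];
    }
    return config;
}

int main()
{
    auto config = parseS3Args({"https://bucket.s3.amazonaws.com", "NOSIGN"});
    return config.no_sign_request ? 0 : 1;
}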
@ -1,81 +0,0 @@
#pragma once

#include "config.h"

#if USE_AWS_S3

#include <mutex>
#include <Databases/IDatabase.h>
#include <Parsers/IAST.h>
#include <Storages/IStorage_fwd.h>
#include <base/types.h>

namespace DB
{

class Context;

/**
 * DatabaseS3 provides access to data stored in S3.
 * Uses TableFunctionS3 to implicitly load file when a user requests the table,
 * and provides read-only access to the data in the file.
 * Tables are cached inside the database for quick access.
 */
class DatabaseS3 : public IDatabase, protected WithContext
{
public:
    struct Configuration
    {
        std::string url_prefix;

        bool no_sign_request = false;

        std::optional<std::string> access_key_id;
        std::optional<std::string> secret_access_key;
    };

    DatabaseS3(const String & name, const Configuration& config, ContextPtr context);

    String getEngineName() const override { return "S3"; }

    bool isTableExist(const String & name, ContextPtr context) const override;

    StoragePtr getTable(const String & name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & name, ContextPtr context) const override;

    // Contains only temporary tables
    bool shouldBeEmptyOnDetach() const override { return false; }

    bool empty() const override;

    bool isReadOnly() const override { return true; }

    ASTPtr getCreateDatabaseQuery() const override;

    void shutdown() override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;

    static Configuration parseArguments(ASTs engine_args, ContextPtr context);

protected:
    StoragePtr getTableImpl(const String & name, ContextPtr context) const;

    void addTable(const std::string & table_name, StoragePtr table_storage) const;

    bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;

    std::string getFullUrl(const std::string & name) const;

private:
    const Configuration config;

    mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
    Poco::Logger * log;
};

}

#endif
@ -1,266 +0,0 @@
|
||||
#include <Databases/DatabasesOverlay.h>
|
||||
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/InterpreterCreateQuery.h>
|
||||
#include <Parsers/ASTCreateQuery.h>
|
||||
|
||||
#include <Storages/IStorage_fwd.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
extern const int CANNOT_GET_CREATE_TABLE_QUERY;
|
||||
}
|
||||
|
||||
DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_)
|
||||
: IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get("DatabaseOverlay(" + name_ + ")"))
|
||||
{
|
||||
}
|
||||
|
||||
DatabasesOverlay & DatabasesOverlay::registerNextDatabase(DatabasePtr database)
|
||||
{
|
||||
databases.push_back(std::move(database));
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool DatabasesOverlay::isTableExist(const String & table_name, ContextPtr context_) const
|
||||
{
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
if (db->isTableExist(table_name, context_))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
StoragePtr DatabasesOverlay::tryGetTable(const String & table_name, ContextPtr context_) const
|
||||
{
|
||||
StoragePtr result = nullptr;
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
result = db->tryGetTable(table_name, context_);
|
||||
if (result)
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void DatabasesOverlay::createTable(ContextPtr context_, const String & table_name, const StoragePtr & table, const ASTPtr & query)
|
||||
{
|
||||
for (auto & db : databases)
|
||||
{
|
||||
if (!db->isReadOnly())
|
||||
{
|
||||
db->createTable(context_, table_name, table, query);
|
||||
return;
|
||||
}
|
||||
}
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"There is no databases for CREATE TABLE `{}` query in database `{}` (engine {})",
|
||||
table_name,
|
||||
getDatabaseName(),
|
||||
getEngineName());
|
||||
}
|
||||
|
||||
void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, bool sync)
|
||||
{
|
||||
for (auto & db : databases)
|
||||
{
|
||||
if (db->isTableExist(table_name, context_))
|
||||
{
|
||||
db->dropTable(context_, table_name, sync);
|
||||
return;
|
||||
}
|
||||
}
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"There is no databases for DROP TABLE `{}` query in database `{}` (engine {})",
|
||||
table_name,
|
||||
getDatabaseName(),
|
||||
getEngineName());
|
||||
}
|
||||
|
||||
void DatabasesOverlay::attachTable(
|
||||
ContextPtr context_, const String & table_name, const StoragePtr & table, const String & relative_table_path)
|
||||
{
|
||||
for (auto & db : databases)
|
||||
{
|
||||
try
|
||||
{
|
||||
db->attachTable(context_, table_name, table, relative_table_path);
|
||||
return;
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
}
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"There is no databases for ATTACH TABLE `{}` query in database `{}` (engine {})",
|
||||
table_name,
|
||||
getDatabaseName(),
|
||||
getEngineName());
|
||||
}
|
||||
|
||||
StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & table_name)
|
||||
{
|
||||
StoragePtr result = nullptr;
|
||||
for (auto & db : databases)
|
||||
{
|
||||
if (db->isTableExist(table_name, context_))
|
||||
return db->detachTable(context_, table_name);
|
||||
}
|
||||
throw Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"There is no databases for DETACH TABLE `{}` query in database `{}` (engine {})",
|
||||
table_name,
|
||||
getDatabaseName(),
|
||||
getEngineName());
|
||||
}
|
||||
|
||||
ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const
|
||||
{
|
||||
ASTPtr result = nullptr;
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
result = db->tryGetCreateTableQuery(name, context_);
|
||||
if (result)
|
||||
break;
|
||||
}
|
||||
if (!result && throw_on_error)
|
||||
throw Exception(
|
||||
ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY,
|
||||
"There is no metadata of table `{}` in database `{}` (engine {})",
|
||||
name,
|
||||
getDatabaseName(),
|
||||
getEngineName());
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* DatabaseOverlay cannot be constructed by "CREATE DATABASE" query, as it is not a traditional ClickHouse database
|
||||
* To use DatabaseOverlay, it must be constructed programmatically in code
|
||||
*/
|
||||
ASTPtr DatabasesOverlay::getCreateDatabaseQuery() const
|
||||
{
|
||||
return std::make_shared<ASTCreateQuery>();
|
||||
}
|
||||
|
||||
String DatabasesOverlay::getTableDataPath(const String & table_name) const
|
||||
{
|
||||
String result;
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
result = db->getTableDataPath(table_name);
|
||||
if (!result.empty())
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const
|
||||
{
|
||||
String result;
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
result = db->getTableDataPath(query);
|
||||
if (!result.empty())
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const
|
||||
{
|
||||
UUID result = UUIDHelpers::Nil;
|
||||
for (const auto & db : databases)
|
||||
{
|
||||
result = db->tryGetTableUUID(table_name);
|
||||
if (result != UUIDHelpers::Nil)
|
||||
break;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void DatabasesOverlay::drop(ContextPtr context_)
|
||||
{
|
||||
for (auto & db : databases)
|
||||
db->drop(context_);
|
||||
}
|
||||
|
||||
void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
{
    for (auto & db : databases)
    {
        if (!db->isReadOnly() && db->isTableExist(table_id.table_name, local_context))
        {
            db->alterTable(local_context, table_id, metadata);
            return;
        }
    }
    throw Exception(
        ErrorCodes::LOGICAL_ERROR,
        "There is no database for ALTER TABLE `{}` query in database `{}` (engine {})",
        table_id.table_name,
        getDatabaseName(),
        getEngineName());
}

std::vector<std::pair<ASTPtr, StoragePtr>>
DatabasesOverlay::getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const
{
    std::vector<std::pair<ASTPtr, StoragePtr>> result;
    for (const auto & db : databases)
    {
        auto db_backup = db->getTablesForBackup(filter, local_context);
        result.insert(result.end(), std::make_move_iterator(db_backup.begin()), std::make_move_iterator(db_backup.end()));
    }
    return result;
}

void DatabasesOverlay::createTableRestoredFromBackup(
    const ASTPtr & create_table_query,
    ContextMutablePtr local_context,
    std::shared_ptr<IRestoreCoordination> /*restore_coordination*/,
    UInt64 /*timeout_ms*/)
{
    /// Creates a table by executing a "CREATE TABLE" query.
    InterpreterCreateQuery interpreter{create_table_query, local_context};
    interpreter.setInternal(true);
    interpreter.execute();
}

bool DatabasesOverlay::empty() const
{
    for (const auto & db : databases)
    {
        if (!db->empty())
            return false;
    }
    return true;
}

void DatabasesOverlay::shutdown()
{
    for (auto & db : databases)
        db->shutdown();
}

DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context_, const FilterByNameFunction & filter_by_table_name) const
{
    Tables tables;
    for (const auto & db : databases)
    {
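        /// Note (an observation, not part of the diff): Tables is a name-keyed
        /// map and insert() keeps the existing entry on a duplicate key, so a
        /// table from an earlier-registered database shadows same-named tables
        /// from later layers in this snapshot.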
        for (auto table_it = db->getTablesIterator(context_, filter_by_table_name); table_it->isValid(); table_it->next())
            tables.insert({table_it->name(), table_it->table()});
    }
    return std::make_unique<DatabaseTablesSnapshotIterator>(std::move(tables), getDatabaseName());
}

}
@ -1,66 +0,0 @@
#pragma once

#include <Storages/IStorage_fwd.h>
#include <Databases/IDatabase.h>

namespace DB
{

/**
 * Implements the IDatabase interface and combines multiple other databases.
 * Searches the databases in registration order and delegates each operation
 * to the first one that can handle it (see the delegation sketch below).
 *
 * Used in clickhouse-local to combine DatabaseFileSystem and DatabaseMemory.
 */
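/// The delegation pattern, sketched on tryGetTable (a simplified illustration
/// of the idea, not the literal implementation): the first layer that knows
/// the table answers, and a miss falls through to the next layer.
///
///     StoragePtr DatabasesOverlay::tryGetTable(const String & name, ContextPtr context) const
///     {
///         for (const auto & db : databases)
///             if (auto table = db->tryGetTable(name, context))
///                 return table;
///         return nullptr;
///     }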
class DatabasesOverlay : public IDatabase, protected WithContext
{
public:
    DatabasesOverlay(const String & name_, ContextPtr context_);

    /// Not thread-safe. Use only as a factory to initialize the database.
    DatabasesOverlay & registerNextDatabase(DatabasePtr database);

    String getEngineName() const override { return "Overlay"; }

public:
    bool isTableExist(const String & table_name, ContextPtr context) const override;

    StoragePtr tryGetTable(const String & table_name, ContextPtr context) const override;

    void createTable(ContextPtr context, const String & table_name, const StoragePtr & table, const ASTPtr & query) override;

    void dropTable(ContextPtr context, const String & table_name, bool sync) override;

    void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override;

    StoragePtr detachTable(ContextPtr context, const String & table_name) override;

    ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override;
    ASTPtr getCreateDatabaseQuery() const override;

    String getTableDataPath(const String & table_name) const override;
    String getTableDataPath(const ASTCreateQuery & query) const override;

    UUID tryGetTableUUID(const String & table_name) const override;

    void drop(ContextPtr context) override;

    void alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) override;

    std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override;

    void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr local_context, std::shared_ptr<IRestoreCoordination> restore_coordination, UInt64 timeout_ms) override;

    DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override;

    bool empty() const override;

    void shutdown() override;

protected:
    std::vector<DatabasePtr> databases;
    Poco::Logger * log;
};

}
@ -170,7 +170,7 @@ public:
    /// Get the table for work. Returns nullptr if there is no such table.
    virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0;

    virtual StoragePtr getTable(const String & name, ContextPtr context) const;
    StoragePtr getTable(const String & name, ContextPtr context) const;

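    /// With this change getTable is a plain convenience wrapper rather than an
    /// override point: implementations customize tryGetTable only, and getTable
    /// throws when the table is absent (inferred from the declaration pair above).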
    virtual UUID tryGetTableUUID(const String & /*table_name*/) const { return UUIDHelpers::Nil; }

@ -183,8 +183,6 @@ public:
    /// Is the database empty?
    virtual bool empty() const = 0;

    virtual bool isReadOnly() const { return false; }
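    /// isReadOnly() gives layered databases a way to skip immutable layers:
    /// DatabasesOverlay::alterTable (earlier in this diff) applies an ALTER to
    /// the first writable database that actually holds the table.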

    /// Add the table to the database. Record its presence in the metadata.
    virtual void createTable(
        ContextPtr /*context*/,

@ -8,6 +8,7 @@
#include <IO/BoundedReadBuffer.h>
#include <Common/getRandomASCIIString.h>
#include <Common/logger_useful.h>
#include <Common/ElapsedTimeProfileEventIncrement.h>
#include <base/hex.h>
#include <Interpreters/Context.h>

@ -26,6 +27,7 @@ extern const Event CachedReadBufferCacheWriteMicroseconds;
extern const Event CachedReadBufferReadFromSourceBytes;
extern const Event CachedReadBufferReadFromCacheBytes;
extern const Event CachedReadBufferCacheWriteBytes;
extern const Event CachedReadBufferCreateBufferMicroseconds;
}

namespace DB
@ -145,6 +147,8 @@ void CachedOnDiskReadBufferFromFile::initialize(size_t offset, size_t size)
CachedOnDiskReadBufferFromFile::ImplementationBufferPtr
CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segment) const
{
    ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::CachedReadBufferCreateBufferMicroseconds);

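    /// ProfileEventTimeIncrement is a scope guard: it starts timing at
    /// construction and adds the elapsed time to the given profile event on
    /// destruction. A minimal sketch of the idea (names simplified, assumed,
    /// not the actual ClickHouse implementation):
    ///
    ///     struct ScopedMicrosecondCounter
    ///     {
    ///         explicit ScopedMicrosecondCounter(std::atomic<UInt64> & counter_)
    ///             : counter(counter_), start(std::chrono::steady_clock::now()) {}
    ///         ~ScopedMicrosecondCounter()
    ///         {
    ///             auto elapsed = std::chrono::steady_clock::now() - start;
    ///             counter += std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
    ///         }
    ///         std::atomic<UInt64> & counter;
    ///         std::chrono::steady_clock::time_point start;
    ///     };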
    /// Use the is_persistent flag from the in-memory state of the file segment,
    /// because it is consistent with what is written on disk.
    auto path = file_segment.getPathInLocalCache();
@ -167,6 +171,8 @@ CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segm
CachedOnDiskReadBufferFromFile::ImplementationBufferPtr
CachedOnDiskReadBufferFromFile::getRemoteReadBuffer(FileSegment & file_segment, ReadType read_type_)
{
    ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::CachedReadBufferCreateBufferMicroseconds);

    switch (read_type_)
    {
        case ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE:
@ -401,6 +407,8 @@ CachedOnDiskReadBufferFromFile::getImplementationBuffer(FileSegment & file_segme
    current_file_segment_counters.increment(
        ProfileEvents::FileSegmentWaitReadBufferMicroseconds, watch.elapsedMicroseconds());

    ProfileEvents::increment(ProfileEvents::FileSegmentWaitReadBufferMicroseconds, watch.elapsedMicroseconds());

    [[maybe_unused]] auto download_current_segment = read_type == ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE;
    chassert(download_current_segment == file_segment.isDownloader());

@ -936,9 +944,10 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()

    LOG_TEST(
        log,
        "Read {} bytes, read type {}, file offset: {}, impl offset: {}/{}, segment: {}",
        "Read {} bytes, read type {}, file offset: {}, impl offset: {}/{}, impl position: {}, segment: {}",
        size, toString(read_type), file_offset_of_buffer_end,
        implementation_buffer->getFileOffsetOfBufferEnd(), read_until_position, file_segment.range().toString());
        implementation_buffer->getFileOffsetOfBufferEnd(), read_until_position,
        implementation_buffer->getPosition(), file_segment.range().toString());

    if (read_type == ReadType::CACHED)
    {
@ -947,7 +956,7 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()

        const size_t new_file_offset = file_offset_of_buffer_end + size;
        const size_t file_segment_write_offset = file_segment.getCurrentWriteOffset(true);
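        /// The merged condition below folds two previously separate sanity
        /// checks into one: a cached read may neither run past the right edge
        /// of the segment's range nor past the bytes written to it so far.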
        if (new_file_offset > file_segment.range().right + 1)
        if (new_file_offset > file_segment.range().right + 1 || new_file_offset > file_segment_write_offset)
        {
            auto file_segment_path = file_segment.getPathInLocalCache();
            throw Exception(
@ -955,13 +964,6 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep()
                "Read unexpected size. File size: {}, file path: {}, file segment info: {}",
                fs::file_size(file_segment_path), file_segment_path, file_segment.getInfoForLog());
        }
        if (new_file_offset > file_segment_write_offset)
        {
            throw Exception(
                ErrorCodes::LOGICAL_ERROR,
                "Read unexpected size. Read {} bytes, file offset: {}, segment: {}, segment write offset: {}",
                size, file_offset_of_buffer_end, file_segment.range().toString(), file_segment_write_offset);
        }
    }
    else
    {

@ -138,6 +138,7 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c

void CachedObjectStorage::removeObject(const StoredObject & object)
{
    removeCacheIfExists(object.remote_path);
    object_storage->removeObject(object);
}

@ -2046,51 +2046,68 @@ ColumnPtr executeStringInteger(const ColumnsWithTypeAndName & arguments, const A
    }

#if USE_EMBEDDED_COMPILER
    bool isCompilableImpl(const DataTypes & arguments) const override
    bool isCompilableImpl(const DataTypes & arguments, const DataTypePtr & result_type) const override
    {
        if (2 != arguments.size())
            return false;

        if (!canBeNativeType(*arguments[0]) || !canBeNativeType(*arguments[1]) || !canBeNativeType(*result_type))
            return false;

        WhichDataType data_type_lhs(arguments[0]);
        WhichDataType data_type_rhs(arguments[1]);
        if ((data_type_lhs.isDateOrDate32() || data_type_lhs.isDateTime()) ||
            (data_type_rhs.isDateOrDate32() || data_type_rhs.isDateTime()))
            return false;

        return castBothTypes(arguments[0].get(), arguments[1].get(), [&](const auto & left, const auto & right)
        {
            using LeftDataType = std::decay_t<decltype(left)>;
            using RightDataType = std::decay_t<decltype(right)>;
            if constexpr (std::is_same_v<DataTypeFixedString, LeftDataType> || std::is_same_v<DataTypeFixedString, RightDataType> || std::is_same_v<DataTypeString, LeftDataType> || std::is_same_v<DataTypeString, RightDataType>)
                return false;
            else
            if constexpr (!std::is_same_v<DataTypeFixedString, LeftDataType> &&
                !std::is_same_v<DataTypeFixedString, RightDataType> &&
                !std::is_same_v<DataTypeString, LeftDataType> &&
                !std::is_same_v<DataTypeString, RightDataType>)
            {
                using ResultDataType = typename BinaryOperationTraits<Op, LeftDataType, RightDataType>::ResultDataType;
                using OpSpec = Op<typename LeftDataType::FieldType, typename RightDataType::FieldType>;
                return !std::is_same_v<ResultDataType, InvalidType> && !IsDataTypeDecimal<ResultDataType> && OpSpec::compilable;
                if constexpr (!std::is_same_v<ResultDataType, InvalidType> && !IsDataTypeDecimal<ResultDataType> && OpSpec::compilable)
                    return true;
            }
            return false;
        });
    }

    llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const DataTypes & types, Values values) const override
    llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const ValuesWithType & arguments, const DataTypePtr & result_type) const override
    {
        assert(2 == types.size() && 2 == values.size());
        assert(2 == arguments.size());

        llvm::Value * result = nullptr;
        castBothTypes(types[0].get(), types[1].get(), [&](const auto & left, const auto & right)
        castBothTypes(arguments[0].type.get(), arguments[1].type.get(), [&](const auto & left, const auto & right)
        {
            using LeftDataType = std::decay_t<decltype(left)>;
            using RightDataType = std::decay_t<decltype(right)>;
            if constexpr (!std::is_same_v<DataTypeFixedString, LeftDataType> && !std::is_same_v<DataTypeFixedString, RightDataType> && !std::is_same_v<DataTypeString, LeftDataType> && !std::is_same_v<DataTypeString, RightDataType>)
            if constexpr (!std::is_same_v<DataTypeFixedString, LeftDataType> &&
                !std::is_same_v<DataTypeFixedString, RightDataType> &&
                !std::is_same_v<DataTypeString, LeftDataType> &&
                !std::is_same_v<DataTypeString, RightDataType>)
            {
                using ResultDataType = typename BinaryOperationTraits<Op, LeftDataType, RightDataType>::ResultDataType;
                using OpSpec = Op<typename LeftDataType::FieldType, typename RightDataType::FieldType>;
                if constexpr (!std::is_same_v<ResultDataType, InvalidType> && !IsDataTypeDecimal<ResultDataType> && OpSpec::compilable)
                {
                    auto & b = static_cast<llvm::IRBuilder<> &>(builder);
                    auto type = std::make_shared<ResultDataType>();
                    auto * lval = nativeCast(b, types[0], values[0], type);
                    auto * rval = nativeCast(b, types[1], values[1], type);
                    auto * lval = nativeCast(b, arguments[0], result_type);
                    auto * rval = nativeCast(b, arguments[1], result_type);
                    result = OpSpec::compile(b, lval, rval, std::is_signed_v<typename ResultDataType::FieldType>);

                    return true;
                }
            }

            return false;
        });

        return result;
    }
#endif

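/// castBothTypes drives the pattern above: it double-dispatches over the
/// closed set of arithmetic DataTypes and invokes the lambda with the concrete
/// (left, right) pair, letting `if constexpr` prune non-compilable
/// combinations at compile time. A minimal sketch of the mechanism (assumed,
/// reduced to a single-type dispatcher composed with itself; castTypeSketch is
/// a hypothetical helper, not a real ClickHouse function):
///
///     template <typename F>
///     static bool castBothTypesSketch(const IDataType * left, const IDataType * right, F && f)
///     {
///         return castTypeSketch(left, [&](const auto & l)
///         {
///             return castTypeSketch(right, [&](const auto & r) { return f(l, r); });
///         });
///     }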
@ -2,6 +2,7 @@

#include <Functions/IFunction.h>
#include <DataTypes/Native.h>
#include <DataTypes/DataTypeNullable.h>

#include "config.h"

@ -12,8 +13,11 @@ class FunctionIfBase : public IFunction
{
#if USE_EMBEDDED_COMPILER
public:
    bool isCompilableImpl(const DataTypes & types) const override
    bool isCompilableImpl(const DataTypes & types, const DataTypePtr & result_type) const override
    {
        if (!canBeNativeType(result_type))
            return false;

        /// It's difficult to compare Date and DateTime, so JIT compilation is not used when both are present.
        bool has_date = false;
        bool has_datetime = false;
@ -31,43 +35,43 @@ public:
            if (has_date && has_datetime)
                return false;

            if (!isCompilableType(type_removed_nullable))
            if (!canBeNativeType(type_removed_nullable))
                return false;
        }

        return true;
    }

    llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const DataTypes & types, Values values) const override
    llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const ValuesWithType & arguments, const DataTypePtr & result_type) const override
    {
        auto & b = static_cast<llvm::IRBuilder<> &>(builder);
        auto return_type = getReturnTypeImpl(types);

        auto * head = b.GetInsertBlock();
        auto * join = llvm::BasicBlock::Create(head->getContext(), "join_block", head->getParent());

        std::vector<std::pair<llvm::BasicBlock *, llvm::Value *>> returns;
        for (size_t i = 0; i + 1 < types.size(); i += 2)
        for (size_t i = 0; i + 1 < arguments.size(); i += 2)
        {
            auto * then = llvm::BasicBlock::Create(head->getContext(), "then_" + std::to_string(i), head->getParent());
            auto * next = llvm::BasicBlock::Create(head->getContext(), "next_" + std::to_string(i), head->getParent());
            auto * cond = values[i];
            const auto & cond = arguments[i];

            b.CreateCondBr(nativeBoolCast(b, types[i], cond), then, next);
            b.CreateCondBr(nativeBoolCast(b, cond), then, next);
            b.SetInsertPoint(then);

            auto * value = nativeCast(b, types[i + 1], values[i + 1], return_type);
            auto * value = nativeCast(b, arguments[i + 1], result_type);
            returns.emplace_back(b.GetInsertBlock(), value);
            b.CreateBr(join);
            b.SetInsertPoint(next);
        }

        auto * else_value = nativeCast(b, types.back(), values.back(), return_type);
        auto * else_value = nativeCast(b, arguments.back(), result_type);
        returns.emplace_back(b.GetInsertBlock(), else_value);
        b.CreateBr(join);

        b.SetInsertPoint(join);

        auto * phi = b.CreatePHI(toNativeType(b, return_type), static_cast<unsigned>(returns.size()));
        auto * phi = b.CreatePHI(toNativeType(b, result_type), static_cast<unsigned>(returns.size()));
        for (const auto & [block, value] : returns)
            phi->addIncoming(value, block);

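    /// For if(c0, v0, c1, v1, ..., vElse) the code above emits a chain of
    /// conditional branches feeding a single PHI node; the resulting control
    /// flow graph looks roughly like this (a sketch, not literal IR output):
    ///
    ///     head:       br %c0, label %then_0, label %next_0
    ///     then_0:     %r0 = cast(v0 -> result_type); br label %join_block
    ///     next_0:     br %c1, label %then_2, label %next_2
    ///     ...
    ///     next_n:     %re = cast(vElse -> result_type); br label %join_block
    ///     join_block: %result = phi [%r0, %then_0], ..., [%re, %next_n]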
@ -477,31 +477,45 @@ public:
    }

#if USE_EMBEDDED_COMPILER
    bool isCompilableImpl(const DataTypes & arguments) const override
    bool isCompilableImpl(const DataTypes & arguments, const DataTypePtr & result_type) const override
    {
        if (1 != arguments.size())
            return false;

        if (!canBeNativeType(*arguments[0]) || !canBeNativeType(*result_type))
            return false;

        return castType(arguments[0].get(), [&](const auto & type)
        {
            using DataType = std::decay_t<decltype(type)>;
            if constexpr (std::is_same_v<DataTypeFixedString, DataType> || std::is_same_v<DataTypeString, DataType>)
            {
                return false;
            }
            else
                return !IsDataTypeDecimal<DataType> && Op<typename DataType::FieldType>::compilable;
            {
                using T0 = typename DataType::FieldType;
                using T1 = typename Op<T0>::ResultType;
                if constexpr (!std::is_same_v<T1, InvalidType> && !IsDataTypeDecimal<DataType> && Op<T0>::compilable)
                    return true;
            }

            return false;
        });
    }

    llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const DataTypes & types, Values values) const override
    llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const ValuesWithType & arguments, const DataTypePtr & result_type) const override
    {
        assert(1 == types.size() && 1 == values.size());
        assert(1 == arguments.size());

        llvm::Value * result = nullptr;
        castType(types[0].get(), [&](const auto & type)
        castType(arguments[0].type.get(), [&](const auto & type)
        {
            using DataType = std::decay_t<decltype(type)>;
            if constexpr (std::is_same_v<DataTypeFixedString, DataType> || std::is_same_v<DataTypeString, DataType>)
            {
                return false;
            }
            else
            {
                using T0 = typename DataType::FieldType;
@ -509,13 +523,16 @@ public:
                if constexpr (!std::is_same_v<T1, InvalidType> && !IsDataTypeDecimal<DataType> && Op<T0>::compilable)
                {
                    auto & b = static_cast<llvm::IRBuilder<> &>(builder);
                    auto * v = nativeCast(b, types[0], values[0], std::make_shared<DataTypeNumber<T1>>());
                    auto * v = nativeCast(b, arguments[0], result_type);
                    result = Op<T0>::compile(b, v, is_signed_v<T1>);

                    return true;
                }
            }

            return false;
        });

        return result;
    }
#endif

return executeGeneric(col_with_type_and_name_left, col_with_type_and_name_right);
|
||||
}
|
||||
}
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
bool isCompilableImpl(const DataTypes & types) const override
|
||||
{
|
||||
if (2 != types.size())
|
||||
return false;
|
||||
|
||||
WhichDataType data_type_lhs(types[0]);
|
||||
WhichDataType data_type_rhs(types[1]);
|
||||
|
||||
auto is_big_integer = [](WhichDataType type) { return type.isUInt64() || type.isInt64(); };
|
||||
|
||||
if ((is_big_integer(data_type_lhs) && data_type_rhs.isFloat())
|
||||
|| (is_big_integer(data_type_rhs) && data_type_lhs.isFloat())
|
||||
|| (data_type_lhs.isDate() && data_type_rhs.isDateTime())
|
||||
|| (data_type_rhs.isDate() && data_type_lhs.isDateTime()))
|
||||
return false; /// TODO: implement (double, int_N where N > double's mantissa width)
|
||||
|
||||
return isCompilableType(types[0]) && isCompilableType(types[1]);
|
||||
}
|
||||
|
||||
llvm::Value * compileImpl(llvm::IRBuilderBase & builder, const DataTypes & types, Values values) const override
|
||||
{
|
||||
assert(2 == types.size() && 2 == values.size());
|
||||
|
||||
auto & b = static_cast<llvm::IRBuilder<> &>(builder);
|
||||
auto [x, y] = nativeCastToCommon(b, types[0], values[0], types[1], values[1]);
|
||||
auto * result = CompileOp<Op>::compile(b, x, y, typeIsSigned(*types[0]) || typeIsSigned(*types[1]));
|
||||
return b.CreateSelect(result, b.getInt8(1), b.getInt8(0));
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
}
|
||||
|