diff --git a/.github/workflows/backport.yml b/.github/workflows/cherry_pick.yml similarity index 98% rename from .github/workflows/backport.yml rename to .github/workflows/cherry_pick.yml index da42bbae78a..e6a10479c7e 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/cherry_pick.yml @@ -8,7 +8,7 @@ concurrency: group: cherry-pick on: # yamllint disable-line rule:truthy schedule: - - cron: '0 */3 * * *' + - cron: '0 * * * *' workflow_dispatch: jobs: diff --git a/.github/workflows/docs_check.yml b/.github/workflows/docs_check.yml index 0c657a245cb..b50584a2c01 100644 --- a/.github/workflows/docs_check.yml +++ b/.github/workflows/docs_check.yml @@ -102,6 +102,9 @@ jobs: run: | cat >> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{ runner.temp }}/style_check + ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_ENV" << 'EOF' TEMP_PATH=${{ runner.temp }}/style_check + ROBOT_CLICKHOUSE_SSH_KEY< **[ClickHouse release v22.6, 2022-06-16](#226)**
**[ClickHouse release v22.5, 2022-05-19](#225)**
**[ClickHouse release v22.4, 2022-04-20](#224)**
@@ -7,6 +8,173 @@ **[ClickHouse release v22.1, 2022-01-18](#221)**
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**
+### ClickHouse release 22.7, 2022-07-21 + +#### Upgrade Notes +* Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1 and 2 are references to the SELECT clause. If you need the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Disable `format_csv_allow_single_quotes` by default. See [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). ([Kruglov Pavel](https://github.com/Avogar)). +* `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new databases with `Ordinary` engine. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have old behavior set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). If you face any incompatibilities, you can revert this setting. + +#### New Feature +* Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). [#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)). +* Add new `direct` join algorithm for `EmbeddedRocksDB` tables, see [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). [#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)). +* Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)). +* Implement NATS table engine, which allows publishing and subscribing to NATS. Closes [#32388](https://github.com/ClickHouse/ClickHouse/issues/32388). [#37171](https://github.com/ClickHouse/ClickHouse/pull/37171) ([tchepavel](https://github.com/tchepavel)). ([Kseniia Sumarokova](https://github.com/kssenii)) +* Implement table function `mongodb`. Allow writes into `MongoDB` storage / table function. [#37213](https://github.com/ClickHouse/ClickHouse/pull/37213) ([aaapetrenko](https://github.com/aaapetrenko)). ([Kseniia Sumarokova](https://github.com/kssenii)) +* Add `SQLInsert` output format. Closes [#38441](https://github.com/ClickHouse/ClickHouse/issues/38441). [#38477](https://github.com/ClickHouse/ClickHouse/pull/38477) ([Kruglov Pavel](https://github.com/Avogar)). +* Introduced setting `additional_table_filters`. Using this setting, you can specify an additional filtering condition for a table which will be applied directly after reading. Example: `select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}`. Introduced setting `additional_result_filter` which specifies an additional filtering condition for the query result. Closes [#37918](https://github.com/ClickHouse/ClickHouse/issues/37918).
[#38475](https://github.com/ClickHouse/ClickHouse/pull/38475) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add `compatibility` setting and `system.settings_changes` system table that contains information about changes in settings through ClickHouse versions. Closes [#35972](https://github.com/ClickHouse/ClickHouse/issues/35972). [#38957](https://github.com/ClickHouse/ClickHouse/pull/38957) ([Kruglov Pavel](https://github.com/Avogar)). +* Add functions `translate(string, from_string, to_string)` and `translateUTF8(string, from_string, to_string)`. They replace characters from `from_string` with the corresponding characters from `to_string`. [#38935](https://github.com/ClickHouse/ClickHouse/pull/38935) ([Nikolay Degterinsky](https://github.com/evillique)). +* Support `parseTimeDelta` function. The characters ` ;-+,:` can be used as separators, e.g. `1yr-2mo`, `2m:6s`: `SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds')`. [#39071](https://github.com/ClickHouse/ClickHouse/pull/39071) ([jiahui-97](https://github.com/jiahui-97)). +* Added `CREATE TABLE ... EMPTY AS SELECT` query. It automatically deduces table structure from the SELECT query, but does not fill the table after creation. Resolves [#38049](https://github.com/ClickHouse/ClickHouse/issues/38049). [#38272](https://github.com/ClickHouse/ClickHouse/pull/38272) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Added options to limit IO operations with remote storage: `max_remote_read_network_bandwidth_for_server` and `max_remote_write_network_bandwidth_for_server`. [#39095](https://github.com/ClickHouse/ClickHouse/pull/39095) ([Sergei Trifonov](https://github.com/serxa)). +* Add `group_by_use_nulls` setting to make aggregation key columns nullable in the case of ROLLUP, CUBE and GROUPING SETS. Closes [#37359](https://github.com/ClickHouse/ClickHouse/issues/37359). [#38642](https://github.com/ClickHouse/ClickHouse/pull/38642) ([Dmitry Novik](https://github.com/novikd)). +* Add the ability to specify compression level during data export. [#38907](https://github.com/ClickHouse/ClickHouse/pull/38907) ([Nikolay Degterinsky](https://github.com/evillique)). +* Add an option to require explicit grants to SELECT from the `system` database. Details: [#38970](https://github.com/ClickHouse/ClickHouse/pull/38970) ([Vitaly Baranov](https://github.com/vitlibar)). +* Functions `multiMatchAny`, `multiMatchAnyIndex`, `multiMatchAllIndices` and their fuzzy variants now accept a non-const pattern array argument. [#38485](https://github.com/ClickHouse/ClickHouse/pull/38485) ([Robert Schulze](https://github.com/rschu1ze)). SQL function `multiSearchAllPositions` now accepts non-const needle arguments. [#39167](https://github.com/ClickHouse/ClickHouse/pull/39167) ([Robert Schulze](https://github.com/rschu1ze)). +* Add a setting `zstd_window_log_max` to configure the maximum memory usage for zstd decoding when importing external files. Closes [#35693](https://github.com/ClickHouse/ClickHouse/issues/35693). [#37015](https://github.com/ClickHouse/ClickHouse/pull/37015) ([wuxiaobai24](https://github.com/wuxiaobai24)). +* Add `send_logs_source_regexp` setting. Send only server text logs whose source name matches the specified regexp. Empty means all sources. [#39161](https://github.com/ClickHouse/ClickHouse/pull/39161) ([Amos Bird](https://github.com/amosbird)). +* Support `ALTER` for `Hive` tables. [#38214](https://github.com/ClickHouse/ClickHouse/pull/38214) ([lgbo](https://github.com/lgbo-ustc)). +* Support `isNullable` function.
This function checks whether its argument is nullable and returns 1 or 0. Closes [#38611](https://github.com/ClickHouse/ClickHouse/issues/38611). [#38841](https://github.com/ClickHouse/ClickHouse/pull/38841) ([lokax](https://github.com/lokax)). +* Added functions for base58 encoding/decoding. [#38159](https://github.com/ClickHouse/ClickHouse/pull/38159) ([Andrey Zvonov](https://github.com/zvonand)). +* Add chart visualization to Play UI. [#38197](https://github.com/ClickHouse/ClickHouse/pull/38197) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Added L2 Squared distance and norm functions for both arrays and tuples. [#38545](https://github.com/ClickHouse/ClickHouse/pull/38545) ([Julian Gilyadov](https://github.com/israelg99)). +* Add ability to pass HTTP headers to the `url` table function / storage via SQL. Closes [#37897](https://github.com/ClickHouse/ClickHouse/issues/37897). [#38176](https://github.com/ClickHouse/ClickHouse/pull/38176) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Experimental Feature +* Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)). + +#### Performance Improvement +* Distinct optimization for sorted columns. Use a specialized distinct transformation if the input stream is sorted by the column(s) used in distinct. The optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)). +* Improve performance of `ORDER BY`, `MergeTree` merges, and window functions using a batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)). +* More parallel execution for queries with `FINAL`. [#36396](https://github.com/ClickHouse/ClickHouse/pull/36396) ([Nikita Taranov](https://github.com/nickitat)). +* Fix significant join performance regression which was introduced in [#35616](https://github.com/ClickHouse/ClickHouse/pull/35616). It's interesting that common join queries such as ssb queries were 10 times slower for almost 3 months while no one complained. [#38052](https://github.com/ClickHouse/ClickHouse/pull/38052) ([Amos Bird](https://github.com/amosbird)). +* Migrate from the Intel hyperscan library to vectorscan; this speeds up string matching on non-x86 platforms. [#38171](https://github.com/ClickHouse/ClickHouse/pull/38171) ([Robert Schulze](https://github.com/rschu1ze)). +* Increased parallelism of query plan steps executed after aggregation. [#38295](https://github.com/ClickHouse/ClickHouse/pull/38295) ([Nikita Taranov](https://github.com/nickitat)). +* Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)). +* Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)). +* Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493).
[#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)). +* Norm and Distance functions for arrays sped up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)). +* Add AVX-512 VBMI optimized `copyOverlap32Shuffle` for LZ4 decompression. In other words, LZ4 decompression performance is improved. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)). +* `ORDER BY (a, b)` will use all the same benefits as `ORDER BY a, b`. [#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)). +* Align branches within a 32B boundary to make benchmarks more stable. [#38988](https://github.com/ClickHouse/ClickHouse/pull/38988) ([Guo Wangyang](https://github.com/guowangy)). It improves performance by 1-2% on average for Intel. +* Executable UDFs, executable dictionaries, and Executable tables will avoid wasting one second while waiting for subprocess termination. [#38929](https://github.com/ClickHouse/ClickHouse/pull/38929) ([Constantine Peresypkin](https://github.com/pkit)). +* Optimize accesses to the `system.stack_trace` table if not all columns are selected. [#39177](https://github.com/ClickHouse/ClickHouse/pull/39177) ([Azat Khuzhin](https://github.com/azat)). +* Improve isNullable/isConstant/isNull/isNotNull performance for a LowCardinality argument. [#39192](https://github.com/ClickHouse/ClickHouse/pull/39192) ([Kruglov Pavel](https://github.com/Avogar)). +* Optimized processing of ORDER BY in window functions. [#34632](https://github.com/ClickHouse/ClickHouse/pull/34632) ([Vladimir Chebotarev](https://github.com/excitoon)). +* The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Improvement +* Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)). +* Implement in-order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)). +* Remove subprocess run for kerberos initialization. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)). +* Add setting `multiple_joins_try_to_keep_original_names` to not rewrite identifier names on multiple JOINs rewrite. Closes [#34697](https://github.com/ClickHouse/ClickHouse/issues/34697).
[#38149](https://github.com/ClickHouse/ClickHouse/pull/38149) ([Vladimir C](https://github.com/vdimir)). +* Improved trace-visualizer UX. [#38169](https://github.com/ClickHouse/ClickHouse/pull/38169) ([Sergei Trifonov](https://github.com/serxa)). +* Enable stack trace collection and query profiler for AArch64. [#38181](https://github.com/ClickHouse/ClickHouse/pull/38181) ([Maksim Kita](https://github.com/kitaisreal)). +* Do not skip symlinks in the `user_defined` directory during SQL user defined functions loading. Closes [#38042](https://github.com/ClickHouse/ClickHouse/issues/38042). [#38184](https://github.com/ClickHouse/ClickHouse/pull/38184) ([Maksim Kita](https://github.com/kitaisreal)). +* Added background cleanup of subdirectories in `store/`. In some cases clickhouse-server might leave garbage subdirectories in `store/` (for example, on unsuccessful table creation), and those directories were never removed. Fixes [#33710](https://github.com/ClickHouse/ClickHouse/issues/33710). [#38265](https://github.com/ClickHouse/ClickHouse/pull/38265) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add `DESCRIBE CACHE` query to show cache settings from config. Add `SHOW CACHES` query to show the list of available filesystem caches. [#38279](https://github.com/ClickHouse/ClickHouse/pull/38279) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add access check for `system drop filesystem cache`. Support ON CLUSTER. [#38319](https://github.com/ClickHouse/ClickHouse/pull/38319) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix PostgreSQL database engine incompatibility on upgrade from 21.3 to 22.3. Closes [#36659](https://github.com/ClickHouse/ClickHouse/issues/36659). [#38369](https://github.com/ClickHouse/ClickHouse/pull/38369) ([Kseniia Sumarokova](https://github.com/kssenii)). +* `filesystemAvailable` and similar functions now work in `clickhouse-local`. This closes [#38423](https://github.com/ClickHouse/ClickHouse/issues/38423). [#38424](https://github.com/ClickHouse/ClickHouse/pull/38424) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add `revision` function. [#38555](https://github.com/ClickHouse/ClickHouse/pull/38555) ([Azat Khuzhin](https://github.com/azat)). +* Fix GCS via proxy tunnel usage. [#38726](https://github.com/ClickHouse/ClickHouse/pull/38726) ([Azat Khuzhin](https://github.com/azat)). +* Support `\i file` in clickhouse client / local (similar to psql \i). [#38813](https://github.com/ClickHouse/ClickHouse/pull/38813) ([Kseniia Sumarokova](https://github.com/kssenii)). +* New option `optimize = 1` in `EXPLAIN AST`. If enabled, it shows the AST after it is rewritten, otherwise the AST of the original query. Disabled by default. [#38910](https://github.com/ClickHouse/ClickHouse/pull/38910) ([Igor Nikonov](https://github.com/devcrafter)). +* Allow trailing comma in columns list. Closes [#38425](https://github.com/ClickHouse/ClickHouse/issues/38425). [#38440](https://github.com/ClickHouse/ClickHouse/pull/38440) ([chen](https://github.com/xiedeyantu)). +* Bugfixes and performance improvements for the `parallel_hash` JOIN method. [#37648](https://github.com/ClickHouse/ClickHouse/pull/37648) ([Vladimir C](https://github.com/vdimir)). +* Support hadoop secure RPC transfer (hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#37852](https://github.com/ClickHouse/ClickHouse/pull/37852) ([Peng Liu](https://github.com/michael1589)). +* Add struct type support in `StorageHive`.
[#38118](https://github.com/ClickHouse/ClickHouse/pull/38118) ([lgbo](https://github.com/lgbo-ustc)). +* S3 single objects are now removed with `RemoveObjectRequest`. Implement compatibility with GCP, which did not allow using `removeFileIfExists`, effectively breaking approximately half of the `remove` functionality. Automatic detection of the `DeleteObjects` S3 API, which is not supported by GCS. This allows using GCS without an explicit `support_batch_delete=0` in the configuration. [#37882](https://github.com/ClickHouse/ClickHouse/pull/37882) ([Vladimir Chebotarev](https://github.com/excitoon)). +* Expose basic ClickHouse Keeper related monitoring data (via ProfileEvents and CurrentMetrics). [#38072](https://github.com/ClickHouse/ClickHouse/pull/38072) ([lingpeng0314](https://github.com/lingpeng0314)). +* Support `auto_close` option for PostgreSQL engine connection. Closes [#31486](https://github.com/ClickHouse/ClickHouse/issues/31486). [#38363](https://github.com/ClickHouse/ClickHouse/pull/38363) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Allow `NULL` modifier in columns declaration for table functions. [#38816](https://github.com/ClickHouse/ClickHouse/pull/38816) ([Kruglov Pavel](https://github.com/Avogar)). +* Deactivate `mutations_finalizing_task` before shutdown to avoid benign `TABLE_IS_READ_ONLY` errors during shutdown. [#38851](https://github.com/ClickHouse/ClickHouse/pull/38851) ([Raúl Marín](https://github.com/Algunenano)). +* Eliminate unnecessary waiting of SELECT queries after ALTER queries in the presence of INSERT queries if you use deprecated Ordinary databases. [#38864](https://github.com/ClickHouse/ClickHouse/pull/38864) ([Azat Khuzhin](https://github.com/azat)). +* Stop reporting Zookeeper "Node exists" exceptions in system.errors when they are expected. [#38961](https://github.com/ClickHouse/ClickHouse/pull/38961) ([Raúl Marín](https://github.com/Algunenano)). +* `clickhouse-keeper`: add support for real-time digest calculation and verification. It is disabled by default. [#37555](https://github.com/ClickHouse/ClickHouse/pull/37555) ([Antonio Andelic](https://github.com/antonio2368)). +* Allow specifying globs `*` or `{expr1, expr2, expr3}` inside a key for the `clickhouse-extract-from-config` tool. [#38966](https://github.com/ClickHouse/ClickHouse/pull/38966) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* clearOldLogs: Don't report KEEPER_EXCEPTION on concurrent deletes. [#39016](https://github.com/ClickHouse/ClickHouse/pull/39016) ([Raúl Marín](https://github.com/Algunenano)). +* clickhouse-keeper improvement: persist meta-information about keeper servers to disk. [#39069](https://github.com/ClickHouse/ClickHouse/pull/39069) ([Antonio Andelic](https://github.com/antonio2368)). This will make it easier to operate if you shut down or restart all keeper nodes at the same time. +* Continue without exception when running out of disk space when using filesystem cache. [#39106](https://github.com/ClickHouse/ClickHouse/pull/39106) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Handle SIGTERM signals from k8s. [#39130](https://github.com/ClickHouse/ClickHouse/pull/39130) ([Timur Solodovnikov](https://github.com/tsolodov)). +* Add `merge_algorithm` column (Undecided, Horizontal, Vertical) to system.part_log.
[#39181](https://github.com/ClickHouse/ClickHouse/pull/39181) ([Azat Khuzhin](https://github.com/azat)). +* Don't increment a counter in `system.errors` when the disk is not rotational. [#39216](https://github.com/ClickHouse/ClickHouse/pull/39216) ([Raúl Marín](https://github.com/Algunenano)). +* The metric `result_bytes` for `INSERT` queries in `system.query_log` shows the number of bytes inserted. Previously the value was incorrect and stored the same value as `result_rows`. [#39225](https://github.com/ClickHouse/ClickHouse/pull/39225) ([Ilya Yatsishin](https://github.com/qoega)). +* The CPU usage metric in clickhouse-client will be displayed in a better way. Fixes [#38756](https://github.com/ClickHouse/ClickHouse/issues/38756). [#39280](https://github.com/ClickHouse/ClickHouse/pull/39280) ([Sergei Trifonov](https://github.com/serxa)). +* Rethrow exception on filesystem cache initialization on server startup, better error message. [#39386](https://github.com/ClickHouse/ClickHouse/pull/39386) ([Kseniia Sumarokova](https://github.com/kssenii)). +* OpenTelemetry now collects traces without Processors spans by default (there are too many). To enable collection of Processors spans, use the `opentelemetry_trace_processors` setting. [#39170](https://github.com/ClickHouse/ClickHouse/pull/39170) ([Ilya Yatsishin](https://github.com/qoega)). +* Functions `multiMatch[Fuzzy](AllIndices/Any/AnyIndex)` - don't throw a logical error if the needle argument is empty. [#39012](https://github.com/ClickHouse/ClickHouse/pull/39012) ([Robert Schulze](https://github.com/rschu1ze)). +* Allow declaring a `RabbitMQ` queue without the default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)). + +#### Build/Testing/Packaging Improvement +* Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)). +* Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Preparation for building on `s390x` platform. [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Fix a bug in the `jemalloc` library. [#38757](https://github.com/ClickHouse/ClickHouse/pull/38757) ([Azat Khuzhin](https://github.com/azat)). +* Hardware benchmark now has support for automatic results uploading. [#38427](https://github.com/ClickHouse/ClickHouse/pull/38427) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* System table "system.licenses" is now correctly populated on Mac (Darwin). [#38294](https://github.com/ClickHouse/ClickHouse/pull/38294) ([Robert Schulze](https://github.com/rschu1ze)). +* Change `all|noarch` packages to architecture-dependent, fix some documentation for it, and push aarch64|arm64 packages to artifactory and release assets. Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) +* Fix rounding for `Decimal128/Decimal256` with a scale longer than 19 digits. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)). +* Fixed crash caused by data race in storage `Hive` (integration table engine).
[#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)). +* Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)). +* Correct glob expansion in case of `{0..10}` forms. Fixes [#38498](https://github.com/ClickHouse/ClickHouse/issues/38498). The current implementation is similar to what the shell does, as mentioned by @rschu1ze [here](https://github.com/ClickHouse/ClickHouse/pull/38502#issuecomment-1169057723). [#38502](https://github.com/ClickHouse/ClickHouse/pull/38502) ([Heena Bansal](https://github.com/HeenaBansal2009)). +* Fix crash for `mapUpdate`, `mapFilter` functions when used with a constant map argument. Closes [#38547](https://github.com/ClickHouse/ClickHouse/issues/38547). [#38553](https://github.com/ClickHouse/ClickHouse/pull/38553) ([hexiaoting](https://github.com/hexiaoting)). +* Fix `toHour` monotonicity information for query optimization which can lead to incorrect query results (incorrect index analysis). This fixes [#38333](https://github.com/ClickHouse/ClickHouse/issues/38333). [#38675](https://github.com/ClickHouse/ClickHouse/pull/38675) ([Amos Bird](https://github.com/amosbird)). +* Fix checking whether s3 storage supports parallel writes. It resulted in s3 parallel writes not working. [#38792](https://github.com/ClickHouse/ClickHouse/pull/38792) ([chen](https://github.com/xiedeyantu)). +* Fix s3 seekable reads with parallel read buffer. (Affected memory usage during query). Closes [#38258](https://github.com/ClickHouse/ClickHouse/issues/38258). [#38802](https://github.com/ClickHouse/ClickHouse/pull/38802) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Update `simdjson`. This fixes [#38621](https://github.com/ClickHouse/ClickHouse/issues/38621) - a buffer overflow on machines with the latest Intel CPUs with AVX-512 VBMI. [#38838](https://github.com/ClickHouse/ClickHouse/pull/38838) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix possible logical error for Vertical merges. [#38859](https://github.com/ClickHouse/ClickHouse/pull/38859) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix settings profile with seconds unit. [#38896](https://github.com/ClickHouse/ClickHouse/pull/38896) ([Raúl Marín](https://github.com/Algunenano)). +* Fix incorrect partition pruning when there is a nullable partition key. Note: most likely you don't use nullable partition keys - this is an obscure feature you should not use. Nullable keys are nonsense and this feature is only needed for some crazy use-cases. This fixes [#38941](https://github.com/ClickHouse/ClickHouse/issues/38941). [#38946](https://github.com/ClickHouse/ClickHouse/pull/38946) ([Amos Bird](https://github.com/amosbird)). +* Improve `fsync_part_directory` for fetches. [#38993](https://github.com/ClickHouse/ClickHouse/pull/38993) ([Azat Khuzhin](https://github.com/azat)). +* Fix possible deadlock inside `OvercommitTracker`. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)). +* Fix a bug in the filesystem cache that could happen in some corner cases when the cache capacity hit the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066).
[#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix some corner cases of interpretation of the arguments of window expressions. Fixes [#38538](https://github.com/ClickHouse/ClickHouse/issues/38538). Allows using higher-order functions in window expressions. [#39112](https://github.com/ClickHouse/ClickHouse/pull/39112) ([Dmitry Novik](https://github.com/novikd)). +* Keep `LowCardinality` type in the `tuple` function. Previously the `LowCardinality` type was dropped and elements of the created tuple had the underlying type of `LowCardinality`. [#39113](https://github.com/ClickHouse/ClickHouse/pull/39113) ([Anton Popov](https://github.com/CurtizJ)). +* Fix error `Block structure mismatch` which could happen for INSERT into a table with an attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix unexpected query result when both `optimize_trivial_count_query` and `empty_result_for_aggregation_by_empty_set` are set to true. This fixes [#39140](https://github.com/ClickHouse/ClickHouse/issues/39140). [#39155](https://github.com/ClickHouse/ClickHouse/pull/39155) ([Amos Bird](https://github.com/amosbird)). +* Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix extremely rare race condition during hardlink creation for remote filesystems. The only way to reproduce it is a concurrent run of backups. [#39190](https://github.com/ClickHouse/ClickHouse/pull/39190) ([alesapin](https://github.com/alesapin)). +* (zero-copy replication is an experimental feature that should not be used in production) Fix fetch of in-memory part with `allow_remote_fs_zero_copy_replication`. [#39214](https://github.com/ClickHouse/ClickHouse/pull/39214) ([Azat Khuzhin](https://github.com/azat)). +* (MaterializedPostgreSQL - experimental feature). Fix segmentation fault in MaterializedPostgreSQL database engine, which could happen if some exception occurred at replication initialisation. Closes [#36939](https://github.com/ClickHouse/ClickHouse/issues/36939). [#39272](https://github.com/ClickHouse/ClickHouse/pull/39272) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix incorrect fetch of table metadata from PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix projection exception when aggregation keys are wrapped inside other functions. This fixes [#37151](https://github.com/ClickHouse/ClickHouse/issues/37151). [#37155](https://github.com/ClickHouse/ClickHouse/pull/37155) ([Amos Bird](https://github.com/amosbird)). +* Fix possible logical error `... with argument with type Nothing and default implementation for Nothing is expected to return result with type Nothing, got ...` in some functions. Closes [#37610](https://github.com/ClickHouse/ClickHouse/issues/37610). Closes [#37741](https://github.com/ClickHouse/ClickHouse/issues/37741). [#37759](https://github.com/ClickHouse/ClickHouse/pull/37759) ([Kruglov Pavel](https://github.com/Avogar)).
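A minimal SQL sketch of two of the items above (an expression layered on top of a window function, and `LowCardinality` preserved by `tuple`); the queries are illustrative only, and the expected type name assumes a 22.7 server:

```sql
-- A higher-order function applied to the result of a window function.
SELECT number,
       arrayMap(x -> x * 2, groupArray(number) OVER (ORDER BY number)) AS doubled_prefix
FROM numbers(5);

-- tuple() now keeps LowCardinality instead of dropping it.
-- Expected type: Tuple(LowCardinality(String))
SELECT toTypeName(tuple(toLowCardinality('x')));
```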
+* Fix incorrect columns order in subqueries of UNION (duplicated columns in subselects may produce an incorrect result). [#37887](https://github.com/ClickHouse/ClickHouse/pull/37887) ([Azat Khuzhin](https://github.com/azat)). +* Fix incorrect work of MODIFY ALTER Column with column names that contain dots. Closes [#37907](https://github.com/ClickHouse/ClickHouse/issues/37907). [#37971](https://github.com/ClickHouse/ClickHouse/pull/37971) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)). +* Fix possible crash in `Distributed` async insert in case of removing a replica from config. [#38029](https://github.com/ClickHouse/ClickHouse/pull/38029) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix "Missing columns" for GLOBAL JOIN with CTE without alias. [#38056](https://github.com/ClickHouse/ClickHouse/pull/38056) ([Azat Khuzhin](https://github.com/azat)). +* Rewrite tuple functions as literals in backwards-compatibility mode. [#38096](https://github.com/ClickHouse/ClickHouse/pull/38096) ([Anton Kozlov](https://github.com/tonickkozlov)). +* Fix redundant memory reservation for output block during `ORDER BY`. [#38127](https://github.com/ClickHouse/ClickHouse/pull/38127) ([iyupeng](https://github.com/iyupeng)). +* Fix possible logical error `Bad cast from type DB::IColumn* to DB::ColumnNullable*` in array mapped functions. Closes [#38006](https://github.com/ClickHouse/ClickHouse/issues/38006). [#38132](https://github.com/ClickHouse/ClickHouse/pull/38132) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix temporary name clash in partial merge join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#38135](https://github.com/ClickHouse/ClickHouse/pull/38135) ([Vladimir C](https://github.com/vdimir)). +* Fix a minor issue with queries like `CREATE TABLE nested_name_tuples (`a` Tuple(x String, y Tuple(i Int32, j String))) ENGINE = Memory;` [#38136](https://github.com/ClickHouse/ClickHouse/pull/38136) ([lgbo](https://github.com/lgbo-ustc)). +* Fix bug with nested short-circuit functions that led to execution of arguments even if the condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)). +* (Window View is an experimental feature) Fix LOGICAL_ERROR for WINDOW VIEW with incorrect structure. [#38205](https://github.com/ClickHouse/ClickHouse/pull/38205) ([Azat Khuzhin](https://github.com/azat)). +* Update librdkafka submodule to fix crash when an OAUTHBEARER refresh callback is set. [#38225](https://github.com/ClickHouse/ClickHouse/pull/38225) ([Rafael Acevedo](https://github.com/racevedoo)). +* Fix INSERT into Distributed hanging due to ProfileEvents. [#38307](https://github.com/ClickHouse/ClickHouse/pull/38307) ([Azat Khuzhin](https://github.com/azat)). +* Fix retries in PostgreSQL engine. [#38310](https://github.com/ClickHouse/ClickHouse/pull/38310) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix optimization in PartialSortingTransform (SIGSEGV and possible incorrect result). [#38324](https://github.com/ClickHouse/ClickHouse/pull/38324) ([Azat Khuzhin](https://github.com/azat)). +* Fix RabbitMQ with formats based on PeekableReadBuffer. Closes [#38061](https://github.com/ClickHouse/ClickHouse/issues/38061).
[#38356](https://github.com/ClickHouse/ClickHouse/pull/38356) ([Kseniia Sumarokova](https://github.com/kssenii)). +* MaterializedPostgreSQL - experimental feature. Fix possible `Invalid number of rows in Chunk` in MaterializedPostgreSQL. Closes [#37323](https://github.com/ClickHouse/ClickHouse/issues/37323). [#38360](https://github.com/ClickHouse/ClickHouse/pull/38360) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix RabbitMQ configuration with connection string setting. Closes [#36531](https://github.com/ClickHouse/ClickHouse/issues/36531). [#38365](https://github.com/ClickHouse/ClickHouse/pull/38365) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix PostgreSQL engine not using PostgreSQL schema when retrieving array dimension size. Closes [#36755](https://github.com/ClickHouse/ClickHouse/issues/36755). Closes [#36772](https://github.com/ClickHouse/ClickHouse/issues/36772). [#38366](https://github.com/ClickHouse/ClickHouse/pull/38366) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix possibly incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)). +* Fix wrong results of countSubstrings() & position() on patterns with 0-bytes. [#38589](https://github.com/ClickHouse/ClickHouse/pull/38589) ([Robert Schulze](https://github.com/rschu1ze)). +* Now it's possible to start a clickhouse-server and attach/detach tables even for tables with incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)). +* `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)). +* Fix use-after-free for aggregate functions with `Map` combinator that led to an incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)). + ### ClickHouse release 22.6, 2022-06-16 #### Backward Incompatible Change diff --git a/CMakeLists.txt b/CMakeLists.txt index 367a88140e5..05f88f3530e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.14) +cmake_minimum_required(VERSION 3.15) project(ClickHouse LANGUAGES C CXX ASM) @@ -74,18 +74,13 @@ message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) option(USE_STATIC_LIBRARIES "Disable to use shared libraries" ON) +# DEVELOPER ONLY. +# Faster linking if turned on. +option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files" OFF) +option(CLICKHOUSE_SPLIT_BINARY "Make several binaries (clickhouse-server, clickhouse-client etc.) instead of one bundled" OFF) -if (NOT USE_STATIC_LIBRARIES) - # DEVELOPER ONLY. - # Faster linking if turned on.
- option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files") - - option(CLICKHOUSE_SPLIT_BINARY - "Make several binaries (clickhouse-server, clickhouse-client etc.) instead of one bundled") -endif () - -if (USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES) - message(FATAL_ERROR "Defining SPLIT_SHARED_LIBRARIES=1 without USE_STATIC_LIBRARIES=0 has no effect.") +if (USE_STATIC_LIBRARIES AND (SPLIT_SHARED_LIBRARIES OR CLICKHOUSE_SPLIT_BINARY)) + message(FATAL_ERROR "SPLIT_SHARED_LIBRARIES=1 or CLICKHOUSE_SPLIT_BINARY=1 must not be used together with USE_STATIC_LIBRARIES=1") endif() if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES) diff --git a/base/base/defines.h b/base/base/defines.h index 5a646f4dca2..c8c408b9c93 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -93,7 +93,6 @@ # define NO_SANITIZE_ADDRESS __attribute__((__no_sanitize__("address"))) # define NO_SANITIZE_THREAD __attribute__((__no_sanitize__("thread"))) # define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED __attribute__((__always_inline__, __no_sanitize__("undefined"))) -# define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation)) #else /// It does not work in GCC. GCC 7 cannot recognize this attribute and GCC 8 simply ignores it. # define NO_SANITIZE_UNDEFINED # define NO_SANITIZE_ADDRESS @@ -101,6 +100,13 @@ # define ALWAYS_INLINE_NO_SANITIZE_UNDEFINED ALWAYS_INLINE #endif +#if defined(__clang__) && defined(__clang_major__) && __clang_major__ >= 14 +# define DISABLE_SANITIZER_INSTRUMENTATION __attribute__((disable_sanitizer_instrumentation)) +#else +# define DISABLE_SANITIZER_INSTRUMENTATION +#endif + + #if !__has_include() || !defined(ADDRESS_SANITIZER) # define ASAN_UNPOISON_MEMORY_REGION(a, b) # define ASAN_POISON_MEMORY_REGION(a, b) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index e6c60e74c36..edc7805150b 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54464) +SET(VERSION_REVISION 54465) SET(VERSION_MAJOR 22) -SET(VERSION_MINOR 7) +SET(VERSION_MINOR 8) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 7000c4e0033bb9e69050ab8ef73e8e7465f78059) -SET(VERSION_DESCRIBE v22.7.1.1-testing) -SET(VERSION_STRING 22.7.1.1) +SET(VERSION_GITHASH f4f05ec786a8b8966dd0ea2a2d7e39a8c7db24f4) +SET(VERSION_DESCRIBE v22.8.1.1-testing) +SET(VERSION_STRING 22.8.1.1) # end of autochange diff --git a/cmake/cpu_features.cmake b/cmake/cpu_features.cmake index 7b966e1acac..1fc3c2db804 100644 --- a/cmake/cpu_features.cmake +++ b/cmake/cpu_features.cmake @@ -19,6 +19,7 @@ option (ENABLE_POPCNT "Use popcnt instructions on x86_64" 1) option (ENABLE_AVX "Use AVX instructions on x86_64" 0) option (ENABLE_AVX2 "Use AVX2 instructions on x86_64" 0) option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0) +option (ENABLE_AVX512_VBMI "Use AVX512_VBMI instruction on x86_64 (depends on ENABLE_AVX512)" 0) option (ENABLE_BMI "Use BMI instructions on x86_64" 0) option (ENABLE_AVX2_FOR_SPEC_OP "Use avx2 instructions for specific operations on x86_64" 0) option (ENABLE_AVX512_FOR_SPEC_OP "Use avx512 instructions for specific operations on x86_64" 0) @@ -151,6 +152,20 @@ elseif (ARCH_AMD64) set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") endif () + set (TEST_FLAG "-mavx512vbmi") + set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") + check_cxx_source_compiles(" + #include + int main() { + auto a = _mm512_permutexvar_epi8(__m512i(), __m512i()); + (void)a; + return 0; + } + " HAVE_AVX512_VBMI) + if (HAVE_AVX512 AND ENABLE_AVX512 AND HAVE_AVX512_VBMI AND ENABLE_AVX512_VBMI) + set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}") + endif () + set (TEST_FLAG "-mbmi") set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0") check_cxx_source_compiles(" diff --git a/cmake/linux/toolchain-x86_64.cmake b/cmake/linux/toolchain-x86_64.cmake index 965ea024ab7..bdcfcfa013a 100644 --- a/cmake/linux/toolchain-x86_64.cmake +++ b/cmake/linux/toolchain-x86_64.cmake @@ -1,3 +1,19 @@ +if (_CLICKHOUSE_TOOLCHAIN_FILE_LOADED) + # During first run of cmake the toolchain file will be loaded twice, + # - /usr/share/cmake-3.23/Modules/CMakeDetermineSystem.cmake + # - /bld/CMakeFiles/3.23.2/CMakeSystem.cmake + # + # But once you already have non-empty cmake cache it will be loaded only + # once: + # - /bld/CMakeFiles/3.23.2/CMakeSystem.cmake + # + # This has no harm except for double load of toolchain will add + # --gcc-toolchain multiple times that will not allow ccache to reuse the + # cache. + return() +endif() +set (_CLICKHOUSE_TOOLCHAIN_FILE_LOADED ON) + set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) set (CMAKE_SYSTEM_NAME "Linux") diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake index b8fc4229ad9..994f14c6149 100644 --- a/cmake/warnings.cmake +++ b/cmake/warnings.cmake @@ -20,13 +20,9 @@ if (COMPILER_CLANG) # We want to get everything out of the compiler for code quality. 
add_warning(everything) add_warning(pedantic) - no_warning(vla-extension) no_warning(zero-length-array) - no_warning(c11-extensions) - no_warning(unused-command-line-argument) no_warning(c++98-compat-pedantic) no_warning(c++98-compat) - no_warning(c99-extensions) no_warning(conversion) no_warning(ctad-maybe-unsupported) # clang 9+, linux-only no_warning(disabled-macro-expansion) @@ -37,12 +33,7 @@ if (COMPILER_CLANG) no_warning(global-constructors) no_warning(missing-prototypes) no_warning(missing-variable-declarations) - no_warning(nested-anon-types) - no_warning(packed) no_warning(padded) - no_warning(return-std-move-in-c++11) # clang 7+ - no_warning(shift-sign-overflow) - no_warning(sign-conversion) no_warning(switch-enum) no_warning(undefined-func-template) no_warning(unused-template) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index d4a3f164214..0eba4da4a89 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -157,6 +157,8 @@ endif() add_contrib (sqlite-cmake sqlite-amalgamation) add_contrib (s2geometry-cmake s2geometry) add_contrib (base-x-cmake base-x) +add_contrib (c-ares-cmake c-ares) +add_contrib (qpl-cmake qpl) # Put all targets defined here and in subdirectories under "contrib/" folders in GUI-based IDEs. # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear diff --git a/contrib/c-ares b/contrib/c-ares new file mode 160000 index 00000000000..afee6748b0b --- /dev/null +++ b/contrib/c-ares @@ -0,0 +1 @@ +Subproject commit afee6748b0b99acf4509d42fa37ac8422262f91b diff --git a/contrib/c-ares-cmake/CMakeLists.txt b/contrib/c-ares-cmake/CMakeLists.txt new file mode 100644 index 00000000000..603c1f8b65c --- /dev/null +++ b/contrib/c-ares-cmake/CMakeLists.txt @@ -0,0 +1,35 @@ +# Choose to build static or shared library for c-ares. +if (USE_STATIC_LIBRARIES) + set(CARES_STATIC ON CACHE BOOL "" FORCE) + set(CARES_SHARED OFF CACHE BOOL "" FORCE) +else () + set(CARES_STATIC OFF CACHE BOOL "" FORCE) + set(CARES_SHARED ON CACHE BOOL "" FORCE) +endif () + +# Disable looking for libnsl on platforms that have gethostbyname in glibc +# +# c-ares searches for gethostbyname in the libnsl library; however, in the +# version that shipped with gRPC it does this incorrectly [1], since it uses +# CHECK_LIBRARY_EXISTS(), which will return TRUE even if the function exists in +# another dependent library. The upstream already contains the correct macro [2], +# but it is not included in gRPC (even upstream gRPC, not the one that is +# shipped with clickhouse). +# +# [1]: https://github.com/c-ares/c-ares/blob/e982924acee7f7313b4baa4ee5ec000c5e373c30/CMakeLists.txt#L125 +# [2]: https://github.com/c-ares/c-ares/blob/44fbc813685a1fa8aa3f27fcd7544faf612d376a/CMakeLists.txt#L146 +# +# And because of this, if you for some reason have libnsl [3] installed, clickhouse will +# refuse to start without it, while this is a completely different library.
+# +# [3]: https://packages.debian.org/bullseye/libnsl2 +if (NOT CMAKE_SYSTEM_NAME STREQUAL "SunOS") + set(HAVE_LIBNSL OFF CACHE BOOL "" FORCE) +endif() + +# Force use of c-ares inet_net_pton instead of libresolv one +set(HAVE_INET_NET_PTON OFF CACHE BOOL "" FORCE) + +add_subdirectory("../c-ares/" "../c-ares/") + +add_library(ch_contrib::c-ares ALIAS c-ares) \ No newline at end of file diff --git a/contrib/googletest-cmake/CMakeLists.txt b/contrib/googletest-cmake/CMakeLists.txt index f116eddc337..90fdde0c185 100644 --- a/contrib/googletest-cmake/CMakeLists.txt +++ b/contrib/googletest-cmake/CMakeLists.txt @@ -2,7 +2,7 @@ set (SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest") add_library(_gtest "${SRC_DIR}/src/gtest-all.cc") set_target_properties(_gtest PROPERTIES VERSION "1.0.0") -target_compile_definitions (_gtest INTERFACE GTEST_HAS_POSIX_RE=0) +target_compile_definitions (_gtest PUBLIC GTEST_HAS_POSIX_RE=0) target_include_directories(_gtest SYSTEM PUBLIC "${SRC_DIR}/include") target_include_directories(_gtest PRIVATE "${SRC_DIR}") diff --git a/contrib/grpc-cmake/CMakeLists.txt b/contrib/grpc-cmake/CMakeLists.txt index 520e04d198e..b1ed7e464b6 100644 --- a/contrib/grpc-cmake/CMakeLists.txt +++ b/contrib/grpc-cmake/CMakeLists.txt @@ -45,38 +45,11 @@ set(_gRPC_SSL_LIBRARIES OpenSSL::Crypto OpenSSL::SSL) # Use abseil-cpp from ClickHouse contrib, not from gRPC third_party. set(gRPC_ABSL_PROVIDER "clickhouse" CACHE STRING "" FORCE) -# Choose to build static or shared library for c-ares. -if (USE_STATIC_LIBRARIES) - set(CARES_STATIC ON CACHE BOOL "" FORCE) - set(CARES_SHARED OFF CACHE BOOL "" FORCE) -else () - set(CARES_STATIC OFF CACHE BOOL "" FORCE) - set(CARES_SHARED ON CACHE BOOL "" FORCE) -endif () - -# Disable looking for libnsl on a platforms that has gethostbyname in glibc -# -# c-ares searching for gethostbyname in the libnsl library, however in the -# version that shipped with gRPC it doing it wrong [1], since it uses -# CHECK_LIBRARY_EXISTS(), which will return TRUE even if the function exists in -# another dependent library. The upstream already contains correct macro [2], -# but it is not included in gRPC (even upstream gRPC, not the one that is -# shipped with clickhousee). -# -# [1]: https://github.com/c-ares/c-ares/blob/e982924acee7f7313b4baa4ee5ec000c5e373c30/CMakeLists.txt#L125 -# [2]: https://github.com/c-ares/c-ares/blob/44fbc813685a1fa8aa3f27fcd7544faf612d376a/CMakeLists.txt#L146 -# -# And because if you by some reason have libnsl [3] installed, clickhouse will -# reject to start w/o it. While this is completelly different library. -# -# [3]: https://packages.debian.org/bullseye/libnsl2 -if (NOT CMAKE_SYSTEM_NAME STREQUAL "SunOS") - set(HAVE_LIBNSL OFF CACHE BOOL "" FORCE) -endif() - # We don't want to build C# extensions. 
set(gRPC_BUILD_CSHARP_EXT OFF) +set(_gRPC_CARES_LIBRARIES ch_contrib::c-ares) +set(gRPC_CARES_PROVIDER "clickhouse" CACHE STRING "" FORCE) add_subdirectory("${_gRPC_SOURCE_DIR}" "${_gRPC_BINARY_DIR}") # The contrib/grpc/CMakeLists.txt redefined the PROTOBUF_GENERATE_GRPC_CPP() function for its own purposes, diff --git a/contrib/llvm-cmake/CMakeLists.txt b/contrib/llvm-cmake/CMakeLists.txt index a108e6537c9..4a4a5cef62e 100644 --- a/contrib/llvm-cmake/CMakeLists.txt +++ b/contrib/llvm-cmake/CMakeLists.txt @@ -93,6 +93,18 @@ set (CMAKE_CXX_STANDARD 17) set (LLVM_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm/llvm") set (LLVM_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/llvm/llvm") add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}") +set_directory_properties (PROPERTIES + # due to llvm crosscompile cmake does not know how to clean it, and on clean + # will lead to the following error: + # + # ninja: error: remove(contrib/llvm/llvm/NATIVE): Directory not empty + # + ADDITIONAL_CLEAN_FILES "${LLVM_BINARY_DIR}" + # llvm's cmake configuring this file only when cmake runs, + # and after clean cmake will not know that it should re-run, + # add explicitly depends from llvm-config.h + CMAKE_CONFIGURE_DEPENDS "${LLVM_BINARY_DIR}/include/llvm/Config/llvm-config.h" +) add_library (_llvm INTERFACE) target_link_libraries (_llvm INTERFACE ${REQUIRED_LLVM_LIBRARIES}) diff --git a/contrib/qpl b/contrib/qpl new file mode 160000 index 00000000000..cdc8442f7a5 --- /dev/null +++ b/contrib/qpl @@ -0,0 +1 @@ +Subproject commit cdc8442f7a5e7a6ff6eea39c69665e0c5034d85d diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt new file mode 100644 index 00000000000..dc90f07a9bc --- /dev/null +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -0,0 +1,322 @@ +## The Intel® QPL provides high performance implementations of data processing functions for existing hardware accelerator, and/or software path in case if hardware accelerator is not available. +if (OS_LINUX AND ARCH_AMD64 AND (ENABLE_AVX2 OR ENABLE_AVX512)) + option (ENABLE_QPL "Enable Intel® Query Processing Library" ${ENABLE_LIBRARIES}) +elseif(ENABLE_QPL) + message (${RECONFIGURE_MESSAGE_LEVEL} "QPL library is only supported on x86_64 arch with avx2/avx512 support") +endif() + +if (NOT ENABLE_QPL) + message(STATUS "Not using QPL") + return() +endif() + +set (QPL_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl") +set (QPL_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl/sources") +set (QPL_BINARY_DIR "${ClickHouse_BINARY_DIR}/build/contrib/qpl") +set (UUID_DIR "${ClickHouse_SOURCE_DIR}/contrib/qpl-cmake") + +set (EFFICIENT_WAIT ON) +set (BLOCK_ON_FAULT ON) +set (LOG_HW_INIT OFF) +set (SANITIZE_MEMORY OFF) +set (SANITIZE_THREADS OFF) +set (LIB_FUZZING_ENGINE OFF) + +function(GetLibraryVersion _content _outputVar) + string(REGEX MATCHALL "Qpl VERSION (.+) LANGUAGES" VERSION_REGEX "${_content}") + SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE) +endfunction() + +FILE(READ "${QPL_PROJECT_DIR}/CMakeLists.txt" HEADER_CONTENT) +GetLibraryVersion("${HEADER_CONTENT}" QPL_VERSION) + +message(STATUS "Intel QPL version: ${QPL_VERSION}") + +# There are 5 source subdirectories under $QPL_SRC_DIR: isal, c_api, core-sw, middle-layer, c_api. +# Generate 7 library targets: middle_layer_lib, isal, isal_asm, qplcore_px, qplcore_avx512, core_iaa, middle_layer_lib. +# Output ch_contrib::qpl by linking with 7 library targets. 
+ +include("${QPL_PROJECT_DIR}/cmake/CompileOptions.cmake") + +# check nasm compiler +include(CheckLanguage) +check_language(ASM_NASM) +if(NOT CMAKE_ASM_NASM_COMPILER) + message(FATAL_ERROR "Please install NASM from 'https://www.nasm.us/' because NASM compiler can not be found!") +endif() + +# [SUBDIR]isal +enable_language(ASM_NASM) + +set(ISAL_C_SRC ${QPL_SRC_DIR}/isal/igzip/adler32_base.c + ${QPL_SRC_DIR}/isal/igzip/huff_codes.c + ${QPL_SRC_DIR}/isal/igzip/hufftables_c.c + ${QPL_SRC_DIR}/isal/igzip/igzip.c + ${QPL_SRC_DIR}/isal/igzip/igzip_base.c + ${QPL_SRC_DIR}/isal/igzip/flatten_ll.c + ${QPL_SRC_DIR}/isal/igzip/encode_df.c + ${QPL_SRC_DIR}/isal/igzip/igzip_icf_base.c + ${QPL_SRC_DIR}/isal/igzip/igzip_inflate.c + ${QPL_SRC_DIR}/isal/igzip/igzip_icf_body.c + ${QPL_SRC_DIR}/isal/crc/crc_base.c + ${QPL_SRC_DIR}/isal/crc/crc64_base.c) + +set(ISAL_ASM_SRC ${QPL_SRC_DIR}/isal/igzip/igzip_body.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_gen_icf_map_lh1_04.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_gen_icf_map_lh1_06.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_decode_block_stateless_04.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_finish.asm + ${QPL_SRC_DIR}/isal/igzip/encode_df_04.asm + ${QPL_SRC_DIR}/isal/igzip/encode_df_06.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_decode_block_stateless_01.asm + ${QPL_SRC_DIR}/isal/igzip/proc_heap.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_icf_body_h1_gr_bt.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_icf_finish.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_inflate_multibinary.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_update_histogram_01.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_update_histogram_04.asm + ${QPL_SRC_DIR}/isal/igzip/rfc1951_lookup.asm + ${QPL_SRC_DIR}/isal/igzip/adler32_sse.asm + ${QPL_SRC_DIR}/isal/igzip/adler32_avx2_4.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_deflate_hash.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_04.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_set_long_icf_fg_06.asm + ${QPL_SRC_DIR}/isal/igzip/igzip_multibinary.asm + ${QPL_SRC_DIR}/isal/igzip/stdmac.asm + ${QPL_SRC_DIR}/isal/crc/crc_multibinary.asm + ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8.asm + ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by8_02.asm + ${QPL_SRC_DIR}/isal/crc/crc32_gzip_refl_by16_10.asm + ${QPL_SRC_DIR}/isal/crc/crc32_ieee_01.asm + ${QPL_SRC_DIR}/isal/crc/crc32_ieee_02.asm + ${QPL_SRC_DIR}/isal/crc/crc32_ieee_by4.asm + ${QPL_SRC_DIR}/isal/crc/crc32_ieee_by16_10.asm + ${QPL_SRC_DIR}/isal/crc/crc32_iscsi_00.asm + ${QPL_SRC_DIR}/isal/crc/crc32_iscsi_01.asm + ${QPL_SRC_DIR}/isal/crc/crc32_iscsi_by16_10.asm) + +# Adding ISA-L library target +add_library(isal OBJECT ${ISAL_C_SRC}) +add_library(isal_asm OBJECT ${ISAL_ASM_SRC}) + +# Setting external and internal interfaces for ISA-L library +target_include_directories(isal + PUBLIC $ + PRIVATE ${QPL_SRC_DIR}/isal/include + PUBLIC ${QPL_SRC_DIR}/isal/igzip) + +target_compile_options(isal PRIVATE + "$<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}>" + "$<$:>" + "$<$:>") + +target_compile_options(isal_asm PUBLIC "-I${QPL_SRC_DIR}/isal/include/" + PUBLIC "-I${QPL_SRC_DIR}/isal/igzip/" + PUBLIC "-I${QPL_SRC_DIR}/isal/crc/" + PUBLIC "-DQPL_LIB") + +# AS_FEATURE_LEVEL=10 means "Check SIMD capabilities of the target system at runtime and use up to AVX512 if available". +# AS_FEATURE_LEVEL=5 means "Check SIMD capabilities of the target system at runtime and use up to AVX2 if available". +# HAVE_KNOWS_AVX512 means rely on AVX512 being available on the target system. 
+if (ENABLE_AVX512) + target_compile_options(isal_asm PUBLIC "-DHAVE_AS_KNOWS_AVX512" "-DAS_FEATURE_LEVEL=10") +else() + target_compile_options(isal_asm PUBLIC "-DAS_FEATURE_LEVEL=5") +endif() + +# Here must remove "-fno-sanitize=undefined" from COMPILE_OPTIONS. +# Otherwise nasm compiler would fail to proceed due to unrecognition of "-fno-sanitize=undefined" +if (SANITIZE STREQUAL "undefined") + get_target_property(target_options isal_asm COMPILE_OPTIONS) + list(REMOVE_ITEM target_options "-fno-sanitize=undefined") + set_property(TARGET isal_asm PROPERTY COMPILE_OPTIONS ${target_options}) +endif() + +target_compile_definitions(isal PUBLIC + QPL_LIB + NDEBUG) + +# [SUBDIR]core-sw +# Two libraries:qplcore_avx512/qplcore_px for SW fallback will be created which are implemented by AVX512 and non-AVX512 instructions respectively. +# The upper level QPL API will check SIMD capabilities of the target system at runtime and decide to call AVX512 function or non-AVX512 function. +# Hence, here we don't need put qplcore_avx512 under an ENABLE_AVX512 CMake switch. +# Actually, if we do that, some undefined symbols errors would happen because both of AVX512 function and non-AVX512 function are referenced by QPL API. +# PLATFORM=2 means AVX512 implementation; PLATFORM=0 means non-AVX512 implementation. + +# Find Core Sources +file(GLOB SOURCES + ${QPL_SRC_DIR}/core-sw/src/checksums/*.c + ${QPL_SRC_DIR}/core-sw/src/filtering/*.c + ${QPL_SRC_DIR}/core-sw/src/other/*.c + ${QPL_SRC_DIR}/core-sw/src/compression/*.c) + +file(GLOB DATA_SOURCES + ${QPL_SRC_DIR}/core-sw/src/data/*.c) + +# Create avx512 library +add_library(qplcore_avx512 OBJECT ${SOURCES}) + +target_compile_definitions(qplcore_avx512 PRIVATE PLATFORM=2) + +target_include_directories(qplcore_avx512 + PUBLIC $ + PUBLIC $ + PUBLIC $ + PRIVATE $) + +set_target_properties(qplcore_avx512 PROPERTIES + $<$:C_STANDARD 17>) + +target_link_libraries(qplcore_avx512 ${CMAKE_DL_LIBS} isal) + +target_compile_options(qplcore_avx512 + PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS} + PRIVATE -march=skylake-avx512 + PRIVATE "$<$:>" + PRIVATE "$<$:-O3;-D_FORTIFY_SOURCE=2>") + + +target_compile_definitions(qplcore_avx512 PUBLIC QPL_BADARG_CHECK) + +# +# Create px library +# +#set(CMAKE_INCLUDE_CURRENT_DIR ON) + +# Create library +add_library(qplcore_px OBJECT ${SOURCES} ${DATA_SOURCES}) + +target_compile_definitions(qplcore_px PRIVATE PLATFORM=0) + +target_include_directories(qplcore_px + PUBLIC $ + PUBLIC $ + PUBLIC $ + PRIVATE $) + +set_target_properties(qplcore_px PROPERTIES + $<$:C_STANDARD 17>) + +target_link_libraries(qplcore_px isal ${CMAKE_DL_LIBS}) + +target_compile_options(qplcore_px + PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS} + PRIVATE "$<$:>" + PRIVATE "$<$:-O3;-D_FORTIFY_SOURCE=2>") + +target_compile_definitions(qplcore_px PUBLIC QPL_BADARG_CHECK) + +# [SUBDIR]core-iaa +file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c + ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.cpp + ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.c + ${QPL_SRC_DIR}/core-iaa/sources/driver_loader/*.cpp + ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.c + ${QPL_SRC_DIR}/core-iaa/sources/descriptors/*.cpp + ${QPL_SRC_DIR}/core-iaa/sources/bit_rev.c) + +# Create library +add_library(core_iaa OBJECT ${HW_PATH_SRC}) + +target_include_directories(core_iaa + PRIVATE ${UUID_DIR} + PUBLIC $ + PRIVATE $ + PRIVATE $) + +target_compile_options(core_iaa + PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; + $<$:-O3;-D_FORTIFY_SOURCE=2>>) + +target_compile_features(core_iaa PRIVATE 
c_std_11) + +target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK + PRIVATE $<$: BLOCK_ON_FAULT_ENABLED> + PRIVATE $<$:LOG_HW_INIT>) + +# [SUBDIR]middle-layer +generate_unpack_kernel_arrays(${QPL_BINARY_DIR}) + +file(GLOB MIDDLE_LAYER_SRC + ${QPL_SRC_DIR}/middle-layer/analytics/*.cpp + ${QPL_SRC_DIR}/middle-layer/c_wrapper/*.cpp + ${QPL_SRC_DIR}/middle-layer/checksum/*.cpp + ${QPL_SRC_DIR}/middle-layer/common/*.cpp + ${QPL_SRC_DIR}/middle-layer/compression/*.cpp + ${QPL_SRC_DIR}/middle-layer/compression/*/*.cpp + ${QPL_SRC_DIR}/middle-layer/compression/*/*/*.cpp + ${QPL_SRC_DIR}/middle-layer/dispatcher/*.cpp + ${QPL_SRC_DIR}/middle-layer/other/*.cpp + ${QPL_SRC_DIR}/middle-layer/util/*.cpp + ${QPL_SRC_DIR}/middle-layer/inflate/*.cpp + ${QPL_SRC_DIR}/core-iaa/sources/accelerator/*.cpp) # todo + +file(GLOB GENERATED_PX_TABLES_SRC ${QPL_BINARY_DIR}/generated/px_*.cpp) +file(GLOB GENERATED_AVX512_TABLES_SRC ${QPL_BINARY_DIR}/generated/avx512_*.cpp) + +add_library(middle_layer_lib OBJECT + ${GENERATED_PX_TABLES_SRC} + ${GENERATED_AVX512_TABLES_SRC} + ${MIDDLE_LAYER_SRC}) + +target_compile_options(middle_layer_lib + PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; + ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; + $<$:-O3;-D_FORTIFY_SOURCE=2>> + PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + +target_compile_definitions(middle_layer_lib + PUBLIC QPL_VERSION="${QPL_VERSION}" + PUBLIC $<$:LOG_HW_INIT> + PUBLIC $<$:QPL_EFFICIENT_WAIT> + PUBLIC QPL_BADARG_CHECK) + +set_source_files_properties(${GENERATED_PX_TABLES_SRC} PROPERTIES COMPILE_DEFINITIONS PLATFORM=0) +set_source_files_properties(${GENERATED_AVX512_TABLES_SRC} PROPERTIES COMPILE_DEFINITIONS PLATFORM=2) + +target_include_directories(middle_layer_lib + PRIVATE ${UUID_DIR} + PUBLIC $ + PUBLIC $ + PUBLIC $ + PUBLIC $ + PUBLIC $ + PUBLIC $) + +target_compile_definitions(middle_layer_lib PUBLIC -DQPL_LIB) + +# [SUBDIR]c_api +file(GLOB_RECURSE QPL_C_API_SRC + ${QPL_SRC_DIR}/c_api/*.c + ${QPL_SRC_DIR}/c_api/*.cpp) + +add_library(_qpl STATIC ${QPL_C_API_SRC} + $ + $ + $ + $ + $ + $ + $) + +target_include_directories(_qpl + PUBLIC $ + PRIVATE $ + PRIVATE $) + +target_compile_options(_qpl + PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}; + ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS}; + $<$:-O3;-D_FORTIFY_SOURCE=2>> + PRIVATE $<$:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>) + +target_compile_definitions(_qpl + PRIVATE -DQPL_LIB + PRIVATE -DQPL_BADARG_CHECK + PUBLIC -DENABLE_QPL_COMPRESSION) + +target_link_libraries(_qpl + PRIVATE ${CMAKE_DL_LIBS}) + +add_library (ch_contrib::qpl ALIAS _qpl) +target_include_directories(_qpl SYSTEM BEFORE PUBLIC "${QPL_PROJECT_DIR}/include") diff --git a/contrib/qpl-cmake/uuid/uuid.h b/contrib/qpl-cmake/uuid/uuid.h new file mode 100644 index 00000000000..bf108ba0d29 --- /dev/null +++ b/contrib/qpl-cmake/uuid/uuid.h @@ -0,0 +1,4 @@ +#ifndef _QPL_UUID_UUID_H +#define _QPL_UUID_UUID_H +typedef unsigned char uuid_t[16]; +#endif /* _QPL_UUID_UUID_H */ diff --git a/contrib/thrift b/contrib/thrift index 010ccf0a0c7..2a93df80f27 160000 --- a/contrib/thrift +++ b/contrib/thrift @@ -1 +1 @@ -Subproject commit 010ccf0a0c7023fea0f6bf4e4078ebdff7e61982 +Subproject commit 2a93df80f27739ccabb5b885cb12a8dc7595ecdf diff --git a/contrib/thrift-cmake/CMakeLists.txt b/contrib/thrift-cmake/CMakeLists.txt index 6f94c1ebdc0..d6aa6b9e5f2 100644 --- a/contrib/thrift-cmake/CMakeLists.txt +++ b/contrib/thrift-cmake/CMakeLists.txt @@ -15,7 +15,6 @@ set(thriftcpp_SOURCES 
"${LIBRARY_DIR}/src/thrift/async/TConcurrentClientSyncInfo.cpp" "${LIBRARY_DIR}/src/thrift/concurrency/ThreadManager.cpp" "${LIBRARY_DIR}/src/thrift/concurrency/TimerManager.cpp" - "${LIBRARY_DIR}/src/thrift/concurrency/Util.cpp" "${LIBRARY_DIR}/src/thrift/processor/PeekProcessor.cpp" "${LIBRARY_DIR}/src/thrift/protocol/TBase64Utils.cpp" "${LIBRARY_DIR}/src/thrift/protocol/TDebugProtocol.cpp" @@ -33,6 +32,8 @@ set(thriftcpp_SOURCES "${LIBRARY_DIR}/src/thrift/transport/TServerSocket.cpp" "${LIBRARY_DIR}/src/thrift/transport/TTransportUtils.cpp" "${LIBRARY_DIR}/src/thrift/transport/TBufferTransports.cpp" + "${LIBRARY_DIR}/src/thrift/transport/SocketCommon.cpp" + "${LIBRARY_DIR}/src/thrift/transport/TWebSocketServer.cpp" "${LIBRARY_DIR}/src/thrift/server/TConnectedClient.cpp" "${LIBRARY_DIR}/src/thrift/server/TServerFramework.cpp" "${LIBRARY_DIR}/src/thrift/server/TSimpleServer.cpp" @@ -92,4 +93,4 @@ include_directories("${CMAKE_CURRENT_BINARY_DIR}") add_library(_thrift ${thriftcpp_SOURCES} ${thriftcpp_threads_SOURCES}) add_library(ch_contrib::thrift ALIAS _thrift) target_include_directories(_thrift SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/thrift/lib/cpp/src" ${CMAKE_CURRENT_BINARY_DIR}) -target_link_libraries (_thrift PUBLIC boost::headers_only) +target_link_libraries (_thrift PUBLIC OpenSSL::SSL boost::headers_only) diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 1dff4b1a2d4..d57c447e2af 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -51,6 +51,7 @@ RUN apt-get update \ rename \ software-properties-common \ tzdata \ + nasm \ --yes --no-install-recommends \ && apt-get clean diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 270c93c105c..87f98df2ad8 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -29,17 +29,21 @@ env if [ -n "$MAKE_DEB" ]; then rm -rf /build/packages/root - if [ -z "$SANITIZER" ]; then - # We need to check if clickhouse-diagnostics is fine and build it - ( - cd /build/programs/diagnostics - make test-no-docker - GOARCH="${DEB_ARCH}" CGO_ENABLED=0 make VERSION="$VERSION_STRING" build - mv clickhouse-diagnostics .. - ) - else - echo -e "#!/bin/sh\necho 'Not implemented for this type of package'" > /build/programs/clickhouse-diagnostics - chmod +x /build/programs/clickhouse-diagnostics + # NOTE: this is for backward compatibility with previous releases, + # that does not diagnostics tool (only script). + if [ -d /build/programs/diagnostics ]; then + if [ -z "$SANITIZER" ]; then + # We need to check if clickhouse-diagnostics is fine and build it + ( + cd /build/programs/diagnostics + make test-no-docker + GOARCH="${DEB_ARCH}" CGO_ENABLED=0 make VERSION="$VERSION_STRING" build + mv clickhouse-diagnostics .. 
+ ) + else + echo -e "#!/bin/sh\necho 'Not implemented for this type of package'" > /build/programs/clickhouse-diagnostics + chmod +x /build/programs/clickhouse-diagnostics + fi fi fi diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index ca44354620f..a1ae77343cb 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -55,6 +55,7 @@ RUN apt-get update \ pkg-config \ tzdata \ pv \ + nasm \ --yes --no-install-recommends # Sanitizer options for services (clickhouse-server) diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 03a79b45a10..699e2c7ceb9 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -71,6 +71,8 @@ RUN apt-get update \ software-properties-common \ tzdata \ unixodbc \ + file \ + nasm \ --yes --no-install-recommends RUN pip3 install numpy scipy pandas Jinja2 diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index 2bbdd978e5e..6b8109a15b2 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -135,6 +135,7 @@ function clone_submodules contrib/replxx contrib/wyhash contrib/hashidsxx + contrib/c-ares ) git submodule sync diff --git a/docker/test/integration/runner/compose/docker_compose_coredns.yml b/docker/test/integration/runner/compose/docker_compose_coredns.yml new file mode 100644 index 00000000000..b329d4e0a46 --- /dev/null +++ b/docker/test/integration/runner/compose/docker_compose_coredns.yml @@ -0,0 +1,9 @@ +version: "2.3" + +services: + coredns: + image: coredns/coredns:latest + restart: always + volumes: + - ${COREDNS_CONFIG_DIR}/example.com:/example.com + - ${COREDNS_CONFIG_DIR}/Corefile:/Corefile diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 96c06845812..9141e89d744 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -34,6 +34,7 @@ RUN apt-get update -y \ unixodbc \ wget \ zstd \ + file \ && apt-get clean diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index e34195cdd32..ffa0b12b8a3 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -322,7 +322,11 @@ else clickhouse-client --query="SELECT 'Server version: ', version()" # Install new package before running stress test because we should use new clickhouse-client and new clickhouse-test + # But we should leave old binary in /usr/bin/ for gdb (so it will print sane stacktarces) + mv /usr/bin/clickhouse previous_release_package_folder/ install_packages package_folder + mv /usr/bin/clickhouse package_folder/ + mv previous_release_package_folder/clickhouse /usr/bin/ mkdir tmp_stress_output @@ -337,6 +341,7 @@ else mv /var/log/clickhouse-server/clickhouse-server.log /var/log/clickhouse-server/clickhouse-server.backward.stress.log # Start new server + mv package_folder/clickhouse /usr/bin/ configure start 500 clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \ @@ -356,6 +361,9 @@ else # FIXME https://github.com/ClickHouse/ClickHouse/issues/39174 ("Cannot parse string 'Hello' as UInt64") # FIXME Not sure if it's expected, but some tests from BC check may not be finished yet when we restarting server. 
# Let's just ignore all errors from queries ("} TCPHandler: Code:", "} executeQuery: Code:") + # FIXME https://github.com/ClickHouse/ClickHouse/issues/39197 ("Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'") + # NOTE Incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/39263, it's expected + # ("This engine is deprecated and is not supported in transactions", "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part") echo "Check for Error messages in server log:" zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \ -e "Code: 236. DB::Exception: Cancelled mutating parts" \ @@ -382,6 +390,9 @@ else -e "Cannot parse string 'Hello' as UInt64" \ -e "} TCPHandler: Code:" \ -e "} executeQuery: Code:" \ + -e "Missing columns: 'v3' while processing query: 'v3, k, v1, v2, p'" \ + -e "This engine is deprecated and is not supported in transactions" \ + -e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \ /var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "" > /test_output/bc_check_error_messages.txt \ && echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \ || echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv diff --git a/docker/test/stress/stress b/docker/test/stress/stress index ab25d13695b..6d90b9d5437 100755 --- a/docker/test/stress/stress +++ b/docker/test/stress/stress @@ -46,6 +46,9 @@ def get_options(i, backward_compatibility_check): if i == 13: client_options.append("memory_tracker_fault_probability=0.001") + if i % 2 == 1 and not backward_compatibility_check: + client_options.append("group_by_use_nulls=1") + if client_options: options.append(" --client-option " + " ".join(client_options)) diff --git a/docker/test/style/Dockerfile b/docker/test/style/Dockerfile index c2ed47a1392..0ec3f09ab7f 100644 --- a/docker/test/style/Dockerfile +++ b/docker/test/style/Dockerfile @@ -17,7 +17,9 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ python3-pip \ shellcheck \ yamllint \ - && pip3 install black boto3 codespell dohq-artifactory PyGithub unidiff pylint==2.6.2 + && pip3 install black boto3 codespell dohq-artifactory PyGithub unidiff pylint==2.6.2 \ + && apt-get clean \ + && rm -rf /root/.cache/pip # Architecture of the image when BuildKit/buildx is used ARG TARGETARCH diff --git a/docker/test/style/process_style_check_result.py b/docker/test/style/process_style_check_result.py index fd544f3e9c1..8c2110d64e5 100755 --- a/docker/test/style/process_style_check_result.py +++ b/docker/test/style/process_style_check_result.py @@ -40,10 +40,10 @@ def process_result(result_folder): def write_results(results_file, status_file, results, status): - with open(results_file, "w") as f: + with open(results_file, "w", encoding="utf-8") as f: out = csv.writer(f, delimiter="\t") out.writerows(results) - with open(status_file, "w") as f: + with open(status_file, "w", encoding="utf-8") as f: out = csv.writer(f, delimiter="\t") out.writerow(status) @@ -53,9 +53,10 @@ if __name__ == "__main__": parser = argparse.ArgumentParser( description="ClickHouse script for parsing results of style check" ) - parser.add_argument("--in-results-dir", default="/test_output/") - parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") - parser.add_argument("--out-status-file", 
default="/test_output/check_status.tsv") + default_dir = "/test_output" + parser.add_argument("--in-results-dir", default=default_dir) + parser.add_argument("--out-results-file", default=f"{default_dir}/test_results.tsv") + parser.add_argument("--out-status-file", default=f"{default_dir}/check_status.tsv") args = parser.parse_args() state, description, test_results = process_result(args.in_results_dir) diff --git a/docs/README.md b/docs/README.md index b328a3ee125..fa8b6bed85c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -38,9 +38,9 @@ Writing the docs is extremely useful for project's users and developers, and gro The documentation contains information about all the aspects of the ClickHouse lifecycle: developing, testing, installing, operating, and using. The base language of the documentation is English. The English version is the most actual. All other languages are supported as much as they can by contributors from different countries. -At the moment, [documentation](https://clickhouse.com/docs) exists in English, Russian, Chinese, Japanese. We store the documentation besides the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs). +At the moment, [documentation](https://clickhouse.com/docs) exists in English, Russian, and Chinese. We store the reference documentation besides the ClickHouse source code in the [GitHub repository](https://github.com/ClickHouse/ClickHouse/tree/master/docs), and user guides in a separate repo [Clickhouse/clickhouse-docs](https://github.com/ClickHouse/clickhouse-docs). -Each language lays in the corresponding folder. Files that are not translated from English are the symbolic links to the English ones. +Each language lies in the corresponding folder. Files that are not translated from English are symbolic links to the English ones. @@ -48,9 +48,9 @@ Each language lays in the corresponding folder. Files that are not translated fr You can contribute to the documentation in many ways, for example: -- Fork the ClickHouse repository, edit, commit, push, and open a pull request. +- Fork the ClickHouse and ClickHouse-docs repositories, edit, commit, push, and open a pull request. - Add the `documentation` label to this pull request for proper automatic checks applying. If you have no permissions for adding labels, the reviewer of your PR adds it. + Add the `pr-documentation` label to this pull request for proper automatic checks applying. If you do not have permission to add labels, then the reviewer of your PR will add it. - Open a required file in the ClickHouse repository and edit it from the GitHub web interface. @@ -158,15 +158,15 @@ When everything is ready, we will add the new language to the website. -### Documentation for Different Audience +### Documentation for Different Audiences -When writing documentation, think about people who read it. Each audience has specific requirements for terms they use in communications. +When writing documentation, think about the people who read it. Each audience has specific requirements for terms they use in communications. -ClickHouse documentation can be divided by the audience for the following parts: +ClickHouse documentation can be divided up by the audience for the following parts: -- Conceptual topics in [Introduction](https://clickhouse.com/docs/en/), tutorials and overviews, changelog. +- Conceptual topics like tutorials and overviews. - These topics are for the most common auditory. 
When editing text in them, use the most common terms that are comfortable for the audience with basic technical skills. + These topics are for the most common audience. When editing text in them, use the most common terms that are comfortable for the audience with basic technical skills. - Query language reference and related topics. diff --git a/docs/_includes/install/universal.sh b/docs/_includes/install/universal.sh index 59b814abf32..c2970924fb0 100755 --- a/docs/_includes/install/universal.sh +++ b/docs/_includes/install/universal.sh @@ -7,31 +7,31 @@ DIR= if [ "${OS}" = "Linux" ] then - if [ "${ARCH}" = "x86_64" ] + if [ "${ARCH}" = "x86_64" -o "${ARCH}" = "amd64" ] then DIR="amd64" - elif [ "${ARCH}" = "aarch64" ] + elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ] then DIR="aarch64" - elif [ "${ARCH}" = "powerpc64le" ] || [ "${ARCH}" = "ppc64le" ] + elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ] then DIR="powerpc64le" fi elif [ "${OS}" = "FreeBSD" ] then - if [ "${ARCH}" = "x86_64" ] + if [ "${ARCH}" = "x86_64" -o "${ARCH}" = "amd64" ] then DIR="freebsd" - elif [ "${ARCH}" = "aarch64" ] + elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ] then DIR="freebsd-aarch64" - elif [ "${ARCH}" = "powerpc64le" ] || [ "${ARCH}" = "ppc64le" ] + elif [ "${ARCH}" = "powerpc64le" -o "${ARCH}" = "ppc64le" ] then DIR="freebsd-powerpc64le" fi elif [ "${OS}" = "Darwin" ] then - if [ "${ARCH}" = "x86_64" ] + if [ "${ARCH}" = "x86_64" -o "${ARCH}" = "amd64" ] then DIR="macos" elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ] diff --git a/docs/changelogs/v22.7.1.2484-stable.md b/docs/changelogs/v22.7.1.2484-stable.md new file mode 100644 index 00000000000..0343568658b --- /dev/null +++ b/docs/changelogs/v22.7.1.2484-stable.md @@ -0,0 +1,468 @@ +--- +sidebar_position: 1 +sidebar_label: 2022 +--- + +# 2022 Changelog + +### ClickHouse release v22.7.1.2484-stable (f4f05ec786a) FIXME as compared to v22.6.1.1985-stable (7000c4e0033) + +#### Backward Incompatible Change +* Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1, 2 are the references to the select clause. If you need to return the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new ones. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)). +* * Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have old behavior set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). +* Disable format_csv_allow_single_quotes by default. [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). [#39423](https://github.com/ClickHouse/ClickHouse/pull/39423) ([Kruglov Pavel](https://github.com/Avogar)). + +#### New Feature +* Add new `direct` join algorithm for RocksDB, ref [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). 
[#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)). +* * Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)). +* Add a setting `zstd_window_log_max` to configure max memory usage on zstd decoding when importing external files. Closes [#35693](https://github.com/ClickHouse/ClickHouse/issues/35693). [#37015](https://github.com/ClickHouse/ClickHouse/pull/37015) ([wuxiaobai24](https://github.com/wuxiaobai24)). +* Implement NatsStorage - table engine, which allows to pub/sub to NATS. Closes [#32388](https://github.com/ClickHouse/ClickHouse/issues/32388). [#37171](https://github.com/ClickHouse/ClickHouse/pull/37171) ([tchepavel](https://github.com/tchepavel)). +* Implement table function MongoDB. Allow writes into MongoDB storage / table function. [#37213](https://github.com/ClickHouse/ClickHouse/pull/37213) ([aaapetrenko](https://github.com/aaapetrenko)). +* `clickhouse-keeper` new feature: add support for real-time digest calculation and verification. [#37555](https://github.com/ClickHouse/ClickHouse/pull/37555) ([Antonio Andelic](https://github.com/antonio2368)). +* In [#17202](https://github.com/ClickHouse/ClickHouse/issues/17202) was reported that host_regexp was being tested against only one of the possible PTR responses. This PR makes the necessary changes so that host_regexp is applied against all possible PTR responses and validate if any matches. [#37827](https://github.com/ClickHouse/ClickHouse/pull/37827) ([Arthur Passos](https://github.com/arthurpassos)). +* Support hadoop secure rpc transfer(hadoop.rpc.protection=privacy and hadoop.rpc.protection=integrity). [#37852](https://github.com/ClickHouse/ClickHouse/pull/37852) ([Peng Liu](https://github.com/michael1589)). +* Add struct type support in `StorageHive`. [#38118](https://github.com/ClickHouse/ClickHouse/pull/38118) ([lgbo](https://github.com/lgbo-ustc)). +* Added Base58 encoding/decoding. [#38159](https://github.com/ClickHouse/ClickHouse/pull/38159) ([Andrey Zvonov](https://github.com/zvonand)). +* Add chart visualization to Play UI. [#38197](https://github.com/ClickHouse/ClickHouse/pull/38197) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* support `alter` command on `StorageHive` table. [#38214](https://github.com/ClickHouse/ClickHouse/pull/38214) ([lgbo](https://github.com/lgbo-ustc)). +* Added `CREATE TABLE ... EMPTY AS SELECT` query. It automatically deduces table structure from the SELECT query, but does not fill the table after creation. Resolves [#38049](https://github.com/ClickHouse/ClickHouse/issues/38049). [#38272](https://github.com/ClickHouse/ClickHouse/pull/38272) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)). +* Allow trailing comma in columns list. closes [#38425](https://github.com/ClickHouse/ClickHouse/issues/38425). [#38440](https://github.com/ClickHouse/ClickHouse/pull/38440) ([chen](https://github.com/xiedeyantu)). +* Compress clickhouse into self-extracting executable (path programs/self-extracting). New build target 'self-extracting' is added. 
[#38447](https://github.com/ClickHouse/ClickHouse/pull/38447) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Introduced settings `additional_table_filters`. Using this setting, you can specify an additional filtering condition for a table which will be applied directly after reading. Example: `select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}`. Introduced setting `additional_result_filter` which specifies an additional filtering condition for the query result. Closes [#37918](https://github.com/ClickHouse/ClickHouse/issues/37918). [#38475](https://github.com/ClickHouse/ClickHouse/pull/38475) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Add SQLInsert output format. Closes [#38441](https://github.com/ClickHouse/ClickHouse/issues/38441). [#38477](https://github.com/ClickHouse/ClickHouse/pull/38477) ([Kruglov Pavel](https://github.com/Avogar)). +* The downloadable clickhouse executable is now a compressed, self-extracting binary. [#38653](https://github.com/ClickHouse/ClickHouse/pull/38653) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Support the `isNullable` function. It checks whether its argument is nullable and returns true (1) or false (0); see the usage sketch after this section. Closes [#38611](https://github.com/ClickHouse/ClickHouse/issues/38611). [#38841](https://github.com/ClickHouse/ClickHouse/pull/38841) ([lokax](https://github.com/lokax)). +* Add functions `translate(string, from_string, to_string)` and `translateUTF8(string, from_string, to_string)`. [#38935](https://github.com/ClickHouse/ClickHouse/pull/38935) ([Nikolay Degterinsky](https://github.com/evillique)). +* Add `compatibility` setting and `system.settings_changes` system table that contains information about changes in settings through ClickHouse versions. Closes [#35972](https://github.com/ClickHouse/ClickHouse/issues/35972). [#38957](https://github.com/ClickHouse/ClickHouse/pull/38957) ([Kruglov Pavel](https://github.com/Avogar)). +* Add a 3rd parameter to the `tupleElement` function, which is returned if the tuple doesn't have such a member. Only works if the 2nd parameter is of type String. Closes [#38872](https://github.com/ClickHouse/ClickHouse/issues/38872). [#38989](https://github.com/ClickHouse/ClickHouse/pull/38989) ([lokax](https://github.com/lokax)). +* Support the `parseTimeDelta` function. ' ', ';', '-', '+', ',' and ':' can be used as separators, e.g. "1yr-2mo", "2m:6s": `SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds');`. [#39071](https://github.com/ClickHouse/ClickHouse/pull/39071) ([jiahui-97](https://github.com/jiahui-97)). +* Added options to limit IO operations with remote storage: `max_remote_read_network_bandwidth_for_server` and `max_remote_write_network_bandwidth_for_server`. [#39095](https://github.com/ClickHouse/ClickHouse/pull/39095) ([Sergei Trifonov](https://github.com/serxa)). +* Add `send_logs_source_regexp` setting. Send server text logs whose source name matches the specified regexp; an empty value means all sources. [#39161](https://github.com/ClickHouse/ClickHouse/pull/39161) ([Amos Bird](https://github.com/amosbird)). +* OpenTelemetry now collects traces without Processors spans by default. To enable collection of Processors spans, use the `opentelemetry_trace_processors` setting. [#39170](https://github.com/ClickHouse/ClickHouse/pull/39170) ([Ilya Yatsishin](https://github.com/qoega)).
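The entries above only name the new functions; below is a brief, hedged usage sketch. The queries follow the signatures described in the entries, and any results hinted at in the comments are what those descriptions imply, not values copied from the release notes.

```sql
-- isNullable: 1 if the argument's type is Nullable, 0 otherwise
SELECT isNullable(toNullable(1)), isNullable(1);

-- translate: character-wise mapping of from_string onto to_string
SELECT translate('clickhouse', 'ck', 'KC');

-- tupleElement with a third argument: returned when the tuple has no such member
SELECT tupleElement(CAST((1, 'a') AS Tuple(x Int32, y String)), 'z', 'none');

-- parseTimeDelta with the separators shown in its entry
SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds');
```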
+ +#### Performance Improvement +* Add new `local_filesystem_read_method` method `io_uring` based on the asynchronous Linux [io_uring](https://kernel.dk/io_uring.pdf) subsystem, improving read performance almost universally compared to the default `pread` method. [#36103](https://github.com/ClickHouse/ClickHouse/pull/36103) ([Saulius Valatka](https://github.com/sauliusvl)). +* Distinct optimization for sorted columns. Use a specialized distinct transformation when the input stream is sorted by the column(s) in DISTINCT. The optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)). +* Add VBMI optimized copyOverlap32Shuffle for LZ4 decompression. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)). +* Improve performance of `ORDER BY`, `MergeTree` merges, and window functions using a batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix a significant join performance regression which was introduced in https://github.com/ClickHouse/ClickHouse/pull/35616 . It's interesting that common join queries such as the SSB queries had been 10 times slower for almost 3 months while no one complained. [#38052](https://github.com/ClickHouse/ClickHouse/pull/38052) ([Amos Bird](https://github.com/amosbird)). +* Migrate from the Intel hyperscan library to vectorscan; this speeds up many string-matching functions on non-x86 platforms. [#38171](https://github.com/ClickHouse/ClickHouse/pull/38171) ([Robert Schulze](https://github.com/rschu1ze)). +* Increased parallelism of query plan steps executed after aggregation. [#38295](https://github.com/ClickHouse/ClickHouse/pull/38295) ([Nikita Taranov](https://github.com/nickitat)). +* Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)). +* Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)). +* Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493). [#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve performance of column vector replicate using SIMD instructions. Author @zzachimed. [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)). +* Norm and distance functions for arrays are sped up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)). +* Previously, a less efficient execution plan could be generated for a query with `ORDER BY (a, b)` than for `ORDER BY a, b`; this is now fixed. [#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)). +* Executable UDFs, executable dictionaries, and executable storages: fix the 1-second wait on the polled subprocess during its termination. [#38929](https://github.com/ClickHouse/ClickHouse/pull/38929) ([Constantine Peresypkin](https://github.com/pkit)). +* Pushdown filter to the right side of sorting join. [#39123](https://github.com/ClickHouse/ClickHouse/pull/39123) ([Vladimir C](https://github.com/vdimir)). +* Optimize accesses to system.stack_trace.
[#39177](https://github.com/ClickHouse/ClickHouse/pull/39177) ([Azat Khuzhin](https://github.com/azat)). + +#### Improvement +* Optimized processing of ORDER BY in window functions. [#34632](https://github.com/ClickHouse/ClickHouse/pull/34632) ([Vladimir Chebotarev](https://github.com/excitoon)). +* Support SQL standard create index and drop index syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)). +* use simd to re-write the current column replicate funcion and got 2x performance boost in our unit benchmark test. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). +* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)). +* Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)). +* * Bugfixes and performance improvements for `parallel_hash`. [#37648](https://github.com/ClickHouse/ClickHouse/pull/37648) ([Vladimir C](https://github.com/vdimir)). +* Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). [#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)). +* S3 single objects are now removed with `RemoveObjectRequest` (sic). Fixed a bug with `S3ObjectStorage` on GCP which did not allow to use `removeFileIfExists` effectively breaking approximately half of `remove` functionality. Automatic detection for `DeleteObjects` S3 API, that is not supported by GCS. This will allow to use GCS without explicit `support_batch_delete=0` in configuration. [#37882](https://github.com/ClickHouse/ClickHouse/pull/37882) ([Vladimir Chebotarev](https://github.com/excitoon)). +* Fix refcnt for unused MergeTree parts in SELECT queries (may defer parts removal). [#37913](https://github.com/ClickHouse/ClickHouse/pull/37913) ([Azat Khuzhin](https://github.com/azat)). +* Expose basic Keeper related monitoring data (via ProfileEvents and CurrentMetrics). [#38072](https://github.com/ClickHouse/ClickHouse/pull/38072) ([lingpeng0314](https://github.com/lingpeng0314)). +* Added kerberosInit function and corresponding KerberosInit class as a replacement for kinit executable. Replaced all calls of kinit in Kafka and HDFS code by call of kerberosInit function. Added new integration test. Closes [#27651](https://github.com/ClickHouse/ClickHouse/issues/27651). [#38105](https://github.com/ClickHouse/ClickHouse/pull/38105) ([Roman Vasin](https://github.com/rvasin)). +* * Add setting `multiple_joins_try_to_keep_original_names` to not rewrite identifier name on multiple JOINs rewrite, close [#34697](https://github.com/ClickHouse/ClickHouse/issues/34697). [#38149](https://github.com/ClickHouse/ClickHouse/pull/38149) ([Vladimir C](https://github.com/vdimir)). +* improved trace-visualizer UX. [#38169](https://github.com/ClickHouse/ClickHouse/pull/38169) ([Sergei Trifonov](https://github.com/serxa)). +* Add ability to pass headers to url table function / storage via sql. Closes [#37897](https://github.com/ClickHouse/ClickHouse/issues/37897). [#38176](https://github.com/ClickHouse/ClickHouse/pull/38176) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Enable trace collection for AArch64. 
[#38181](https://github.com/ClickHouse/ClickHouse/pull/38181) ([Maksim Kita](https://github.com/kitaisreal)). +* Do not skip symlinks in `user_defined` directory during SQL user defined functions loading. Closes [#38042](https://github.com/ClickHouse/ClickHouse/issues/38042). [#38184](https://github.com/ClickHouse/ClickHouse/pull/38184) ([Maksim Kita](https://github.com/kitaisreal)). +* Improve the stability for hive storage integration test. Move the data prepare step into test.py. [#38260](https://github.com/ClickHouse/ClickHouse/pull/38260) ([lgbo](https://github.com/lgbo-ustc)). +* Added background cleanup of subdirectories in `store/`. In some cases clickhouse-server might left garbage subdirectories in `store/` (for example, on unsuccessful table creation) and those dirs were never been removed. Fixes [#33710](https://github.com/ClickHouse/ClickHouse/issues/33710). [#38265](https://github.com/ClickHouse/ClickHouse/pull/38265) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add `DESCRIBE CACHE` query to show cache settings from config. Add `SHOW CACHES` query to show available filesystem caches list. [#38279](https://github.com/ClickHouse/ClickHouse/pull/38279) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add access check for system drop fs cache. Support ON CLUSTER. [#38319](https://github.com/ClickHouse/ClickHouse/pull/38319) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Support `auto_close` option for postgres engine connection. Closes [#31486](https://github.com/ClickHouse/ClickHouse/issues/31486). [#38363](https://github.com/ClickHouse/ClickHouse/pull/38363) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix PostgreSQL database engine incompatibility on upgrade from 21.3 to 22.3. Closes [#36659](https://github.com/ClickHouse/ClickHouse/issues/36659). [#38369](https://github.com/ClickHouse/ClickHouse/pull/38369) ([Kseniia Sumarokova](https://github.com/kssenii)). +* `filesystemAvailable` and similar functions now work in `clickhouse-local`. This closes [#38423](https://github.com/ClickHouse/ClickHouse/issues/38423). [#38424](https://github.com/ClickHouse/ClickHouse/pull/38424) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Hardware benchmark now has support for automatic results uploading. [#38427](https://github.com/ClickHouse/ClickHouse/pull/38427) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Functions multiMatchAny(), multiMatchAnyIndex(), multiMatchAllIndices() and their fuzzy variants now accept non-const pattern array argument. [#38485](https://github.com/ClickHouse/ClickHouse/pull/38485) ([Robert Schulze](https://github.com/rschu1ze)). +* Added L2 Squared distance and norm for both arrays and tuples. [#38545](https://github.com/ClickHouse/ClickHouse/pull/38545) ([Julian Gilyadov](https://github.com/israelg99)). +* Add revision() function. [#38555](https://github.com/ClickHouse/ClickHouse/pull/38555) ([Azat Khuzhin](https://github.com/azat)). +* Add `group_by_use_nulls` setting to make aggregation key columns nullable in the case of ROLLUP, CUBE and GROUPING SETS. Closes [#37359](https://github.com/ClickHouse/ClickHouse/issues/37359). 
[#38642](https://github.com/ClickHouse/ClickHouse/pull/38642) ([Dmitry Novik](https://github.com/novikd)). +* Fix GCS via proxy tunnel usage. [#38726](https://github.com/ClickHouse/ClickHouse/pull/38726) ([Azat Khuzhin](https://github.com/azat)). +* Support `\i file` in clickhouse client / local (similar to psql \i). [#38813](https://github.com/ClickHouse/ClickHouse/pull/38813) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Allow null modifier in columns declaration for table functions. [#38816](https://github.com/ClickHouse/ClickHouse/pull/38816) ([Kruglov Pavel](https://github.com/Avogar)). +* - Deactivate `mutations_finalizing_task` before shutdown to avoid `TABLE_IS_READ_ONLY` errors. [#38851](https://github.com/ClickHouse/ClickHouse/pull/38851) ([Raúl Marín](https://github.com/Algunenano)). +* Fix waiting of shared lock after exclusive lock failure. [#38864](https://github.com/ClickHouse/ClickHouse/pull/38864) ([Azat Khuzhin](https://github.com/azat)). +* Add the ability to specify compression level during data export. [#38907](https://github.com/ClickHouse/ClickHouse/pull/38907) ([Nikolay Degterinsky](https://github.com/evillique)). +* New option `rewrite` in `EXPLAIN AST`. If enabled, it shows AST after it's rewritten, otherwise AST of original query. Disabled by default. [#38910](https://github.com/ClickHouse/ClickHouse/pull/38910) ([Igor Nikonov](https://github.com/devcrafter)). +* - Stop reporting Zookeeper "Node exists" exceptions in system.errors when they are expected. [#38961](https://github.com/ClickHouse/ClickHouse/pull/38961) ([Raúl Marín](https://github.com/Algunenano)). +* Allow to specify globs `* or {expr1, expr2, expr3}` inside a key for `clickhouse-extract-from-config` tool. [#38966](https://github.com/ClickHouse/ClickHouse/pull/38966) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Add option enabling that SELECT from the system database requires grant. Details:. [#38970](https://github.com/ClickHouse/ClickHouse/pull/38970) ([Vitaly Baranov](https://github.com/vitlibar)). +* - clearOldLogs: Don't report KEEPER_EXCEPTION on concurrent deletes. [#39016](https://github.com/ClickHouse/ClickHouse/pull/39016) ([Raúl Marín](https://github.com/Algunenano)). +* clickhouse-keeper improvement: persist metainformation about keeper servers to disk. [#39069](https://github.com/ClickHouse/ClickHouse/pull/39069) ([Antonio Andelic](https://github.com/antonio2368)). +* Continue without exception when running out of disk space when using filesystem cache. [#39106](https://github.com/ClickHouse/ClickHouse/pull/39106) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Handling SIGTERM signals from k8s. [#39130](https://github.com/ClickHouse/ClickHouse/pull/39130) ([Timur Solodovnikov](https://github.com/tsolodov)). +* SQL function multiStringAllPositions() now accepts non-const needle arguments. [#39167](https://github.com/ClickHouse/ClickHouse/pull/39167) ([Robert Schulze](https://github.com/rschu1ze)). +* Add merge_algorithm (Undecided, Horizontal, Vertical) to system.part_log. [#39181](https://github.com/ClickHouse/ClickHouse/pull/39181) ([Azat Khuzhin](https://github.com/azat)). +* Improve isNullable/isConstant/isNull/isNotNull performance for LowCardinality argument. [#39192](https://github.com/ClickHouse/ClickHouse/pull/39192) ([Kruglov Pavel](https://github.com/Avogar)). +* - Don't report system.errors when the disk is not rotational. [#39216](https://github.com/ClickHouse/ClickHouse/pull/39216) ([Raúl Marín](https://github.com/Algunenano)). 
+* Metric `result_bytes` for `INSERT` queries in `system.query_log` shows number of bytes inserted. Previously value was incorrect and stored the same value as `result_rows`. [#39225](https://github.com/ClickHouse/ClickHouse/pull/39225) ([Ilya Yatsishin](https://github.com/qoega)). +* The CPU usage metric in clickhouse-client will be displayed in a better way. Fixes [#38756](https://github.com/ClickHouse/ClickHouse/issues/38756). [#39280](https://github.com/ClickHouse/ClickHouse/pull/39280) ([Sergei Trifonov](https://github.com/serxa)). +* Rethrow exception on filesystem cache initialisation on server startup, better error message. [#39386](https://github.com/ClickHouse/ClickHouse/pull/39386) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Support milliseconds, microseconds and nanoseconds in `parseTimeDelta` function. [#39447](https://github.com/ClickHouse/ClickHouse/pull/39447) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Bug Fix +* Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)). +* * Fixed crash caused by IHiveFile be shared among threads. [#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)). + +#### Build/Testing/Packaging Improvement +* - Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)). +* - System table "system.licenses" is now correctly populated on Mac (Darwin). [#38294](https://github.com/ClickHouse/ClickHouse/pull/38294) ([Robert Schulze](https://github.com/rschu1ze)). +* Handle full queue exception in clickhouse-test. If it happened we need to collect debug info to understand what queries didn't finish. [#38490](https://github.com/ClickHouse/ClickHouse/pull/38490) ([Dmitry Novik](https://github.com/novikd)). +* - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Always print stacktraces if test queue is full. Follow up [#38490](https://github.com/ClickHouse/ClickHouse/issues/38490) cc @tavplubix. [#38662](https://github.com/ClickHouse/ClickHouse/pull/38662) ([Dmitry Novik](https://github.com/novikd)). +* Align branches within a 32B boundary to make benchmark more stable. [#38988](https://github.com/ClickHouse/ClickHouse/pull/38988) ([Guo Wangyang](https://github.com/guowangy)). +* Fix LSan by fixing getauxval(). [#39299](https://github.com/ClickHouse/ClickHouse/pull/39299) ([Azat Khuzhin](https://github.com/azat)). +* Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in official stable or prestable release) + +* Fix projection exception when aggregation keys are wrapped inside other functions. 
This fixes [#37151](https://github.com/ClickHouse/ClickHouse/issues/37151). [#37155](https://github.com/ClickHouse/ClickHouse/pull/37155) ([Amos Bird](https://github.com/amosbird)). +* Fix possible logical error `... with argument with type Nothing and default implementation for Nothing is expected to return result with type Nothing, got ...` in some functions. Closes: [#37610](https://github.com/ClickHouse/ClickHouse/issues/37610) Closes: [#37741](https://github.com/ClickHouse/ClickHouse/issues/37741). [#37759](https://github.com/ClickHouse/ClickHouse/pull/37759) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix incorrect columns order in subqueries of UNION (in case of duplicated columns in subselects may produce incorrect result). [#37887](https://github.com/ClickHouse/ClickHouse/pull/37887) ([Azat Khuzhin](https://github.com/azat)). +* Fix incorrect work of MODIFY ALTER Column with column names that contain dots. Closes [#37907](https://github.com/ClickHouse/ClickHouse/issues/37907). [#37971](https://github.com/ClickHouse/ClickHouse/pull/37971) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)). +* Fix rounding for `Decimal128/Decimal256` with more than 19-digits long scale. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix possible crash in `Distributed` async insert in case of removing a replica from config. [#38029](https://github.com/ClickHouse/ClickHouse/pull/38029) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix "Missing columns" for GLOBAL JOIN with CTE w/o alias. [#38056](https://github.com/ClickHouse/ClickHouse/pull/38056) ([Azat Khuzhin](https://github.com/azat)). +* Rewrite tuple functions as literals in backwards-compatibility mode. [#38096](https://github.com/ClickHouse/ClickHouse/pull/38096) ([Anton Kozlov](https://github.com/tonickkozlov)). +* - Fix redundant memory reservation for output block during `ORDER BY`. [#38127](https://github.com/ClickHouse/ClickHouse/pull/38127) ([iyupeng](https://github.com/iyupeng)). +* Fix possible logical error `Bad cast from type DB::IColumn* to DB::ColumnNullable*` in array mapped functions. Closes [#38006](https://github.com/ClickHouse/ClickHouse/issues/38006). [#38132](https://github.com/ClickHouse/ClickHouse/pull/38132) ([Kruglov Pavel](https://github.com/Avogar)). +* * Fix temporary name clash in partial merge join, close [#37928](https://github.com/ClickHouse/ClickHouse/issues/37928). [#38135](https://github.com/ClickHouse/ClickHouse/pull/38135) ([Vladimir C](https://github.com/vdimir)). +* With table ```SQL CREATE TABLE nested_name_tuples ( `a` Tuple(x String, y Tuple(i Int32, j String)) ) ENGINE = Memory; ```. [#38136](https://github.com/ClickHouse/ClickHouse/pull/38136) ([lgbo](https://github.com/lgbo-ustc)). +* Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)). +* (Window View is a experimental feature) Fix LOGICAL_ERROR for WINDOW VIEW with incorrect structure. [#38205](https://github.com/ClickHouse/ClickHouse/pull/38205) ([Azat Khuzhin](https://github.com/azat)). 
+* Update librdkafka submodule to fix crash when an OAUTHBEARER refresh callback is set. [#38225](https://github.com/ClickHouse/ClickHouse/pull/38225) ([Rafael Acevedo](https://github.com/racevedoo)). +* Do not allow recursive usage of OvercommitTracker during logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794) cc @tavplubix @davenger. [#38246](https://github.com/ClickHouse/ClickHouse/pull/38246) ([Dmitry Novik](https://github.com/novikd)). +* Fix INSERT into Distributed hung due to ProfileEvents. [#38307](https://github.com/ClickHouse/ClickHouse/pull/38307) ([Azat Khuzhin](https://github.com/azat)). +* Fix retries in PostgreSQL engine. [#38310](https://github.com/ClickHouse/ClickHouse/pull/38310) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix optimization in PartialSortingTransform (SIGSEGV and possible incorrect result). [#38324](https://github.com/ClickHouse/ClickHouse/pull/38324) ([Azat Khuzhin](https://github.com/azat)). +* Fix RabbitMQ with formats based on PeekableReadBuffer. Closes [#38061](https://github.com/ClickHouse/ClickHouse/issues/38061). [#38356](https://github.com/ClickHouse/ClickHouse/pull/38356) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix possible `Invalid number of rows in Chunk` in materialised pg. Closes [#37323](https://github.com/ClickHouse/ClickHouse/issues/37323). [#38360](https://github.com/ClickHouse/ClickHouse/pull/38360) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix RabbitMQ configuration with connection string setting. Closes [#36531](https://github.com/ClickHouse/ClickHouse/issues/36531). [#38365](https://github.com/ClickHouse/ClickHouse/pull/38365) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix PostgreSQL engine not using PostgreSQL schema when retrieving array dimension size. Closes [#36755](https://github.com/ClickHouse/ClickHouse/issues/36755). Closes [#36772](https://github.com/ClickHouse/ClickHouse/issues/36772). [#38366](https://github.com/ClickHouse/ClickHouse/pull/38366) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)). +* fix: expose new CH keeper port in Dockerfile clickhouse/clickhouse-keeper fix: use correct KEEPER_CONFIG filename in clickhouse/clickhouse-keeper docker image. [#38462](https://github.com/ClickHouse/ClickHouse/pull/38462) ([Evgeny Kruglov](https://github.com/nordluf)). +* Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)). +* Fixes [#38498](https://github.com/ClickHouse/ClickHouse/issues/38498) Current Implementation is similar to what shell does mentiond by @rschu1ze [here](https://github.com/ClickHouse/ClickHouse/pull/38502#issuecomment-1169057723). [#38502](https://github.com/ClickHouse/ClickHouse/pull/38502) ([Heena Bansal](https://github.com/HeenaBansal2009)). +* Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)). +* Fix crash for `mapUpdate`, `mapFilter` functions when using with constant map argument. Closes [#38547](https://github.com/ClickHouse/ClickHouse/issues/38547). 
[#38553](https://github.com/ClickHouse/ClickHouse/pull/38553) ([hexiaoting](https://github.com/hexiaoting)). +* Fix wrong results of countSubstrings() & position() on patterns with 0-bytes. [#38589](https://github.com/ClickHouse/ClickHouse/pull/38589) ([Robert Schulze](https://github.com/rschu1ze)). +* Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)). +* Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)). +* Fix toHour() monotonicity which can lead to incorrect query result (incorrect index analysis). This fixes [#38333](https://github.com/ClickHouse/ClickHouse/issues/38333). [#38675](https://github.com/ClickHouse/ClickHouse/pull/38675) ([Amos Bird](https://github.com/amosbird)). +* `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)). +* Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)). +* Fix throwing exception for seekable read from s3 (exception was not thrown). [#38773](https://github.com/ClickHouse/ClickHouse/pull/38773) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix checking whether s3 storage support parallel writes. It resulted in s3 parallel writes not working. [#38792](https://github.com/ClickHouse/ClickHouse/pull/38792) ([chen](https://github.com/xiedeyantu)). +* Fix s3 seekable reads with parallel read buffer. (Affected memory usage during query). Closes [#38258](https://github.com/ClickHouse/ClickHouse/issues/38258). [#38802](https://github.com/ClickHouse/ClickHouse/pull/38802) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Update `simdjson`. This fixes [#38621](https://github.com/ClickHouse/ClickHouse/issues/38621). [#38838](https://github.com/ClickHouse/ClickHouse/pull/38838) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* MergeTree fix possible logical error for Vertical merges. [#38859](https://github.com/ClickHouse/ClickHouse/pull/38859) ([Maksim Kita](https://github.com/kitaisreal)). +* - Fix settings profile with seconds unit. [#38896](https://github.com/ClickHouse/ClickHouse/pull/38896) ([Raúl Marín](https://github.com/Algunenano)). +* Fix incorrect partition pruning when there is a nullable partition. This fixes [#38941](https://github.com/ClickHouse/ClickHouse/issues/38941). [#38946](https://github.com/ClickHouse/ClickHouse/pull/38946) ([Amos Bird](https://github.com/amosbird)). +* Fix fsync_part_directory for fetches. [#38993](https://github.com/ClickHouse/ClickHouse/pull/38993) ([Azat Khuzhin](https://github.com/azat)). +* Functions multiMatch[Fuzzy](AllIndices/Any/AnyIndex)() no throw a logical error if the needle argument is empty. 
[#39012](https://github.com/ClickHouse/ClickHouse/pull/39012) ([Robert Schulze](https://github.com/rschu1ze)). +* Any allocations inside OvercommitTracker could lead to a deadlock. The logging was not very informative, so it is easier to just remove it. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)). +* Fix toHour() monotonicity which could lead to an incorrect query result (incorrect index analysis). This fixes [#38333](https://github.com/ClickHouse/ClickHouse/issues/38333). [#39037](https://github.com/ClickHouse/ClickHouse/pull/39037) ([Amos Bird](https://github.com/amosbird)). +* Fix a bug in the filesystem cache that could happen in a corner case when the cache capacity hit the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix ActionsDAG construction for arguments of window expressions. Fixes [#38538](https://github.com/ClickHouse/ClickHouse/issues/38538). Allows the use of higher-order functions in window expressions. [#39112](https://github.com/ClickHouse/ClickHouse/pull/39112) ([Dmitry Novik](https://github.com/novikd)). +* Keep `LowCardinality` type in `tuple()` function. Previously, the `LowCardinality` type was dropped and elements of the created tuple had the underlying type of `LowCardinality`. [#39113](https://github.com/ClickHouse/ClickHouse/pull/39113) ([Anton Popov](https://github.com/CurtizJ)). +* Fix error `Block structure mismatch` which could happen for INSERT into a table with an attached MATERIALIZED VIEW and the setting `extremes = 1` enabled. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix unexpected query result when both `optimize_trivial_count_query` and `empty_result_for_aggregation_by_empty_set` are set to true. This fixes [#39140](https://github.com/ClickHouse/ClickHouse/issues/39140). [#39155](https://github.com/ClickHouse/ClickHouse/pull/39155) ([Amos Bird](https://github.com/amosbird)). +* Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix an extremely rare race condition during hardlink creation for remote filesystems. The only way to reproduce it is a concurrent run of backups. [#39190](https://github.com/ClickHouse/ClickHouse/pull/39190) ([alesapin](https://github.com/alesapin)). +* Fix fetch of in-memory part with `allow_remote_fs_zero_copy_replication`. [#39214](https://github.com/ClickHouse/ClickHouse/pull/39214) ([Azat Khuzhin](https://github.com/azat)). +* Fix NOEXCEPT_SCOPE (previously it called std::terminate and lost the exception). [#39229](https://github.com/ClickHouse/ClickHouse/pull/39229) ([Azat Khuzhin](https://github.com/azat)). +* Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)). +* Fix segmentation fault in MaterializedPostgreSQL database engine, which could happen if some exception occurred during replication initialisation.
Closes [#36939](https://github.com/ClickHouse/ClickHouse/issues/36939). [#39272](https://github.com/ClickHouse/ClickHouse/pull/39272) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix an incorrect query for fetching PostgreSQL tables in the PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix possible UB in MergeTreeBackgroundExecutor (leads to SIGSEGV on race with DROP/DETACH). [#39342](https://github.com/ClickHouse/ClickHouse/pull/39342) ([Azat Khuzhin](https://github.com/azat)). +* Avoid possible abort() in CapnProto on exception destruction. Closes [#30706](https://github.com/ClickHouse/ClickHouse/issues/30706). [#39365](https://github.com/ClickHouse/ClickHouse/pull/39365) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix behaviour of dictHas for direct dictionaries when multiple lookups to the same key are made in a single action. [#39385](https://github.com/ClickHouse/ClickHouse/pull/39385) ([James Morrison](https://github.com/jawm)). +* Fix a crash which could happen while reading from a dictionary with a `DateTime64` attribute. Fixes [#38930](https://github.com/ClickHouse/ClickHouse/issues/38930). [#39391](https://github.com/ClickHouse/ClickHouse/pull/39391) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix WriteBuffer finalization in the destructor when a query is cancelled, which could lead to a stuck query or even termination. Closes [#38199](https://github.com/ClickHouse/ClickHouse/issues/38199). [#39396](https://github.com/ClickHouse/ClickHouse/pull/39396) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix UB (stack-use-after-scope) in extractAll(). [#39397](https://github.com/ClickHouse/ClickHouse/pull/39397) ([Azat Khuzhin](https://github.com/azat)). +* Fix incorrect query result when trivial count optimization is in effect with array join. This fixes [#39431](https://github.com/ClickHouse/ClickHouse/issues/39431). [#39444](https://github.com/ClickHouse/ClickHouse/pull/39444) ([Amos Bird](https://github.com/amosbird)). + +#### Bug Fix (user-visible misbehaviour in official stable or prestable release) + +* Disable send_logs_level for INSERT into Distributed to avoid a possible hang. [#35075](https://github.com/ClickHouse/ClickHouse/pull/35075) ([Azat Khuzhin](https://github.com/azat)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "Add a setting to use more memory for zstd decompression"'. [#38194](https://github.com/ClickHouse/ClickHouse/pull/38194) ([alesapin](https://github.com/alesapin)). +* NO CL ENTRY: 'Revert "Revert "Add a setting to use more memory for zstd decompression""'. [#38196](https://github.com/ClickHouse/ClickHouse/pull/38196) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "ClickHouse's boringssl module updated to the official version of the FIPS compliant."'. [#38201](https://github.com/ClickHouse/ClickHouse/pull/38201) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Fix optimization in PartialSortingTransform (SIGSEGV and possible incorrect result)"'. [#38361](https://github.com/ClickHouse/ClickHouse/pull/38361) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Add support for io_uring read method"'. [#38377](https://github.com/ClickHouse/ClickHouse/pull/38377) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* NO CL ENTRY: 'Revert "Revert "Fix optimization in PartialSortingTransform (SIGSEGV and possible incorrect result)""'. [#38449](https://github.com/ClickHouse/ClickHouse/pull/38449) ([Maksim Kita](https://github.com/kitaisreal)). +* NO CL ENTRY: 'Don't spoil return code of integration tests runner with redundant tee'. [#38548](https://github.com/ClickHouse/ClickHouse/pull/38548) ([Vladimir Chebotarev](https://github.com/excitoon)). +* NO CL ENTRY: 'Revert "Non Negative Derivative window function"'. [#38551](https://github.com/ClickHouse/ClickHouse/pull/38551) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Upload to S3 compressed self-extracting clickhouse"'. [#38788](https://github.com/ClickHouse/ClickHouse/pull/38788) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* NO CL ENTRY: 'Revert "Smallish updates of dev guide"'. [#38848](https://github.com/ClickHouse/ClickHouse/pull/38848) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Fix toHour() monotonicity which can lead to incorrect query result (incorrect index analysis)"'. [#39001](https://github.com/ClickHouse/ClickHouse/pull/39001) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Fix WriteBuffer finalize in destructor when cacnel query"'. [#39433](https://github.com/ClickHouse/ClickHouse/pull/39433) ([Kruglov Pavel](https://github.com/Avogar)). +* NO CL ENTRY: 'Revert "[RFC] Fix LSan by fixing getauxval()"'. [#39434](https://github.com/ClickHouse/ClickHouse/pull/39434) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Remove broken optimisation in Direct dictionary dictHas implementation"'. [#39461](https://github.com/ClickHouse/ClickHouse/pull/39461) ([Alexander Tokmakov](https://github.com/tavplubix)). +* NO CL ENTRY: 'Revert "Fix trivial count optimization with array join"'. [#39466](https://github.com/ClickHouse/ClickHouse/pull/39466) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Separate data storage abstraction for MergeTree [#36555](https://github.com/ClickHouse/ClickHouse/pull/36555) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Randomize settings related to in-order read/aggregation [#36914](https://github.com/ClickHouse/ClickHouse/pull/36914) ([Azat Khuzhin](https://github.com/azat)). +* Merge tree reader support for multiple read/filter steps: row level filter, prewhere, ... [#37165](https://github.com/ClickHouse/ClickHouse/pull/37165) ([Alexander Gololobov](https://github.com/davenger)). +* Backup Improvements 6 [#37358](https://github.com/ClickHouse/ClickHouse/pull/37358) ([Vitaly Baranov](https://github.com/vitlibar)). +* Move `updateInputStream` to `ITransformingStep` [#37393](https://github.com/ClickHouse/ClickHouse/pull/37393) ([Nikita Taranov](https://github.com/nickitat)). +* Proper wait of the clickhouse-server in tests [#37560](https://github.com/ClickHouse/ClickHouse/pull/37560) ([Azat Khuzhin](https://github.com/azat)). +* Upgrade curl to 7.83.1 [#37795](https://github.com/ClickHouse/ClickHouse/pull/37795) ([Suzy Wang](https://github.com/SuzyWangIBMer)). +* Try fix flaky tests with transactions [#37822](https://github.com/ClickHouse/ClickHouse/pull/37822) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Send perf tests results to ci database [#37841](https://github.com/ClickHouse/ClickHouse/pull/37841) ([Vladimir C](https://github.com/vdimir)). 
+* Remove duplicate peak mem log [#37860](https://github.com/ClickHouse/ClickHouse/pull/37860) ([Amos Bird](https://github.com/amosbird)). +* tests: fix log_comment (extra quotes) [#37932](https://github.com/ClickHouse/ClickHouse/pull/37932) ([Azat Khuzhin](https://github.com/azat)). +* Throw exception when xml user profile does not exist [#38024](https://github.com/ClickHouse/ClickHouse/pull/38024) ([nvartolomei](https://github.com/nvartolomei)). +* Add `SYNC` command to internal ZooKeeper client [#38047](https://github.com/ClickHouse/ClickHouse/pull/38047) ([Antonio Andelic](https://github.com/antonio2368)). +* Better support of GCP storage [#38069](https://github.com/ClickHouse/ClickHouse/pull/38069) ([Anton Popov](https://github.com/CurtizJ)). +* Build artifacts upload [#38086](https://github.com/ClickHouse/ClickHouse/pull/38086) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Extract some diff from pr [#36171](https://github.com/ClickHouse/ClickHouse/issues/36171) [#38088](https://github.com/ClickHouse/ClickHouse/pull/38088) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Aggregate functions added restrict into batch methods [#38090](https://github.com/ClickHouse/ClickHouse/pull/38090) ([Maksim Kita](https://github.com/kitaisreal)). +* Add perf checkers to all Jepsen tests [#38091](https://github.com/ClickHouse/ClickHouse/pull/38091) ([Antonio Andelic](https://github.com/antonio2368)). +* Some fixes for tests with tsan [#38106](https://github.com/ClickHouse/ClickHouse/pull/38106) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Bring back [#36396](https://github.com/ClickHouse/ClickHouse/issues/36396) [#38110](https://github.com/ClickHouse/ClickHouse/pull/38110) ([Nikita Taranov](https://github.com/nickitat)). +* More suppressions for backward compatibility check [#38131](https://github.com/ClickHouse/ClickHouse/pull/38131) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Cherry pick [#38137](https://github.com/ClickHouse/ClickHouse/pull/38137) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Disable parameters for non direct executable user defined functions [#38142](https://github.com/ClickHouse/ClickHouse/pull/38142) ([Maksim Kita](https://github.com/kitaisreal)). +* SortDescription compile fix typo [#38144](https://github.com/ClickHouse/ClickHouse/pull/38144) ([Maksim Kita](https://github.com/kitaisreal)). +* Update version after release [#38147](https://github.com/ClickHouse/ClickHouse/pull/38147) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* New changelog and versions updated [#38148](https://github.com/ClickHouse/ClickHouse/pull/38148) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Some fixes for clickhouse-disks [#38150](https://github.com/ClickHouse/ClickHouse/pull/38150) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Remove processor description from span attributes - it is not working [#38157](https://github.com/ClickHouse/ClickHouse/pull/38157) ([Ilya Yatsishin](https://github.com/qoega)). +* Bump minimum / maximum LLVM to 12 / 14 [#38170](https://github.com/ClickHouse/ClickHouse/pull/38170) ([Robert Schulze](https://github.com/rschu1ze)). +* Disk transaction [#38182](https://github.com/ClickHouse/ClickHouse/pull/38182) ([alesapin](https://github.com/alesapin)). +* Check row size to avoid out of bounds access in PostgreSQLSource [#38190](https://github.com/ClickHouse/ClickHouse/pull/38190) ([Alexander Gololobov](https://github.com/davenger)). 
+* tests: add no-backward-compatibility-check for 02067_lost_part_s3 [#38195](https://github.com/ClickHouse/ClickHouse/pull/38195) ([Azat Khuzhin](https://github.com/azat)). +* tests/stress: fix TSan detection (enables thread fuzzer for non-TSan builds) [#38207](https://github.com/ClickHouse/ClickHouse/pull/38207) ([Azat Khuzhin](https://github.com/azat)). +* tests: disable 01646_system_restart_replicas_smoke under stress tests [#38212](https://github.com/ClickHouse/ClickHouse/pull/38212) ([Azat Khuzhin](https://github.com/azat)). +* tests/stress: fix TSan detection [#38213](https://github.com/ClickHouse/ClickHouse/pull/38213) ([Azat Khuzhin](https://github.com/azat)). +* buffer's getFileSize small changes [#38227](https://github.com/ClickHouse/ClickHouse/pull/38227) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix test for system table count in diag tool [#38236](https://github.com/ClickHouse/ClickHouse/pull/38236) ([Dale McDiarmid](https://github.com/gingerwizard)). +* Update version_date.tsv after v22.3.7.28-lts [#38237](https://github.com/ClickHouse/ClickHouse/pull/38237) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Changelog attrs [#38238](https://github.com/ClickHouse/ClickHouse/pull/38238) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix reading from s3 in some corner cases [#38239](https://github.com/ClickHouse/ClickHouse/pull/38239) ([Anton Popov](https://github.com/CurtizJ)). +* use utility methods to access x509 struct fields. [#38251](https://github.com/ClickHouse/ClickHouse/pull/38251) ([larryluogit](https://github.com/larryluogit)). +* Don't try to kill empty list of containers in `integration/runner` II [#38269](https://github.com/ClickHouse/ClickHouse/pull/38269) ([Vladimir Chebotarev](https://github.com/excitoon)). +* Improve runners AMI and init scripts [#38273](https://github.com/ClickHouse/ClickHouse/pull/38273) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update thrift to 0.16.0 [#38280](https://github.com/ClickHouse/ClickHouse/pull/38280) ([Suzy Wang](https://github.com/SuzyWangIBMer)). +* Extract some diff from [#36171](https://github.com/ClickHouse/ClickHouse/issues/36171) [#38285](https://github.com/ClickHouse/ClickHouse/pull/38285) ([Kseniia Sumarokova](https://github.com/kssenii)). +* fix trace-viz zoom anomalies [#38287](https://github.com/ClickHouse/ClickHouse/pull/38287) ([Sergei Trifonov](https://github.com/serxa)). +* Integration tests volume [#38291](https://github.com/ClickHouse/ClickHouse/pull/38291) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* fix typo in view.md [#38292](https://github.com/ClickHouse/ClickHouse/pull/38292) ([Anton Petrov](https://github.com/gsenseless)). +* Backup improvements 7 [#38299](https://github.com/ClickHouse/ClickHouse/pull/38299) ([Vitaly Baranov](https://github.com/vitlibar)). +* Document why the submodule check does not halt the configuration [#38304](https://github.com/ClickHouse/ClickHouse/pull/38304) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix misleading error message while s3 schema inference [#38306](https://github.com/ClickHouse/ClickHouse/pull/38306) ([Kruglov Pavel](https://github.com/Avogar)). +* Update README.md [#38313](https://github.com/ClickHouse/ClickHouse/pull/38313) ([Yuko Takagi](https://github.com/yukotakagi)). +* Ban projections for zero-copy replication in a right way [#38322](https://github.com/ClickHouse/ClickHouse/pull/38322) ([alesapin](https://github.com/alesapin)). 
+* Checkout full repositories for performance tests [#38327](https://github.com/ClickHouse/ClickHouse/pull/38327) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fixed comments [#38331](https://github.com/ClickHouse/ClickHouse/pull/38331) ([Vladimir Chebotarev](https://github.com/excitoon)). +* Try to fix 02305_schema_inference_with_globs [#38337](https://github.com/ClickHouse/ClickHouse/pull/38337) ([Kruglov Pavel](https://github.com/Avogar)). +* Extend ZooKeeper list request with support for filtering persistent or ephemeral nodes only [#38338](https://github.com/ClickHouse/ClickHouse/pull/38338) ([Antonio Andelic](https://github.com/antonio2368)). +* Upload logs for getting all tests command [#38343](https://github.com/ClickHouse/ClickHouse/pull/38343) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Followup test fix for ban projections [#38351](https://github.com/ClickHouse/ClickHouse/pull/38351) ([alesapin](https://github.com/alesapin)). +* Added --recursive to clickhouse-disks list [#38354](https://github.com/ClickHouse/ClickHouse/pull/38354) ([Alexander Gololobov](https://github.com/davenger)). +* Adding TLS V13 Test [#38355](https://github.com/ClickHouse/ClickHouse/pull/38355) ([larryluogit](https://github.com/larryluogit)). +* Better exception messages on wrong table engines/functions argument types [#38362](https://github.com/ClickHouse/ClickHouse/pull/38362) ([Kruglov Pavel](https://github.com/Avogar)). +* Better error message for failed odbc query [#38364](https://github.com/ClickHouse/ClickHouse/pull/38364) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Simplify parts commit methods [#38380](https://github.com/ClickHouse/ClickHouse/pull/38380) ([alesapin](https://github.com/alesapin)). +* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Correct submodule after "base-x" commit [#38414](https://github.com/ClickHouse/ClickHouse/pull/38414) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Better hardware benchmark [#38419](https://github.com/ClickHouse/ClickHouse/pull/38419) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Refactoring to enable multi-match functions with non-const needles [#38434](https://github.com/ClickHouse/ClickHouse/pull/38434) ([Robert Schulze](https://github.com/rschu1ze)). +* more consistent work with paths in object storages [#38436](https://github.com/ClickHouse/ClickHouse/pull/38436) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Distinct sorted: calculate column positions once [#38438](https://github.com/ClickHouse/ClickHouse/pull/38438) ([Igor Nikonov](https://github.com/devcrafter)). +* Small improvement of the error message to hint at possible issue [#38458](https://github.com/ClickHouse/ClickHouse/pull/38458) ([Miel Donkers](https://github.com/mdonkers)). +* Fix comment [#38465](https://github.com/ClickHouse/ClickHouse/pull/38465) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Follow up for [#38436](https://github.com/ClickHouse/ClickHouse/issues/38436) [#38466](https://github.com/ClickHouse/ClickHouse/pull/38466) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add 22.7 release webinar. [#38481](https://github.com/ClickHouse/ClickHouse/pull/38481) ([Yuko Takagi](https://github.com/yukotakagi)). +* Add some TSA annotations [#38487](https://github.com/ClickHouse/ClickHouse/pull/38487) ([Alexander Tokmakov](https://github.com/tavplubix)). 
+* tests: cleanup tmp data in 02335_column_ttl_expired_column_optimization [#38488](https://github.com/ClickHouse/ClickHouse/pull/38488) ([Azat Khuzhin](https://github.com/azat)). +* Cleanup: local clang-tidy warnings founded during review [#38489](https://github.com/ClickHouse/ClickHouse/pull/38489) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix some clang-tidy warnings in headers [#38491](https://github.com/ClickHouse/ClickHouse/pull/38491) ([Robert Schulze](https://github.com/rschu1ze)). +* A tiny improvement in report logging [#38507](https://github.com/ClickHouse/ClickHouse/pull/38507) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* tests: fix 02305_schema_inference_with_globs flakiness [#38511](https://github.com/ClickHouse/ClickHouse/pull/38511) ([Azat Khuzhin](https://github.com/azat)). +* Try to fix flaky test [#38516](https://github.com/ClickHouse/ClickHouse/pull/38516) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix `_csv.Error: field larger than field limit` [#38518](https://github.com/ClickHouse/ClickHouse/pull/38518) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix application errors grep in stress test [#38520](https://github.com/ClickHouse/ClickHouse/pull/38520) ([Kruglov Pavel](https://github.com/Avogar)). +* Use of disk batch operations in MergeTree [#38531](https://github.com/ClickHouse/ClickHouse/pull/38531) ([alesapin](https://github.com/alesapin)). +* Backup Improvements 8 [#38537](https://github.com/ClickHouse/ClickHouse/pull/38537) ([Vitaly Baranov](https://github.com/vitlibar)). +* Update poco [#38540](https://github.com/ClickHouse/ClickHouse/pull/38540) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Don't use std::unique_lock unless we need to [#38542](https://github.com/ClickHouse/ClickHouse/pull/38542) ([Robert Schulze](https://github.com/rschu1ze)). +* Rename slightly weirdly named "BuilderBinTidy" to "BuilderBinClangTidy" [#38546](https://github.com/ClickHouse/ClickHouse/pull/38546) ([Robert Schulze](https://github.com/rschu1ze)). +* Don't rollback SessionID request in Keeper [#38556](https://github.com/ClickHouse/ClickHouse/pull/38556) ([Antonio Andelic](https://github.com/antonio2368)). +* Add logging in Epoll and TimerDescriptor in case of EINTR [#38559](https://github.com/ClickHouse/ClickHouse/pull/38559) ([Kruglov Pavel](https://github.com/Avogar)). +* SQL create drop index minor fixes [#38561](https://github.com/ClickHouse/ClickHouse/pull/38561) ([Maksim Kita](https://github.com/kitaisreal)). +* Update version_date.tsv and changelogs after v22.6.2.12-stable [#38563](https://github.com/ClickHouse/ClickHouse/pull/38563) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Allow Ordinary database in Stress Tests [#38568](https://github.com/ClickHouse/ClickHouse/pull/38568) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Make postgres integration test great again [#38582](https://github.com/ClickHouse/ClickHouse/pull/38582) ([Ilya Yatsishin](https://github.com/qoega)). +* Add check for empty proccessors in AggregatingTransform::expandPipeline [#38584](https://github.com/ClickHouse/ClickHouse/pull/38584) ([filimonov](https://github.com/filimonov)). +* quick fix for 02112_with_fill_interval [#38587](https://github.com/ClickHouse/ClickHouse/pull/38587) ([Nikita Taranov](https://github.com/nickitat)). +* Remove zlib in mariadb-connector-c [#38599](https://github.com/ClickHouse/ClickHouse/pull/38599) ([Suzy Wang](https://github.com/SuzyWangIBMer)). 
+* Dictionaries added TSA annotations [#38601](https://github.com/ClickHouse/ClickHouse/pull/38601) ([Maksim Kita](https://github.com/kitaisreal)). +* CacheDictionary simplify update queue [#38602](https://github.com/ClickHouse/ClickHouse/pull/38602) ([Maksim Kita](https://github.com/kitaisreal)). +* Add separate option to omit symbols from heavy contrib [#38617](https://github.com/ClickHouse/ClickHouse/pull/38617) ([Azat Khuzhin](https://github.com/azat)). +* Fix exception messages in clickhouse su [#38619](https://github.com/ClickHouse/ClickHouse/pull/38619) ([filimonov](https://github.com/filimonov)). +* Added Greenplum benchmark [#38622](https://github.com/ClickHouse/ClickHouse/pull/38622) ([Dmitry Pavlov](https://github.com/kapustor)). +* Fix typo [#38623](https://github.com/ClickHouse/ClickHouse/pull/38623) ([tiegen](https://github.com/loyispa)). +* Better diagnostics in ReplicatedMergeTreeQueue [#38641](https://github.com/ClickHouse/ClickHouse/pull/38641) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Clean out randomized integration volumes each run [#38644](https://github.com/ClickHouse/ClickHouse/pull/38644) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update README.md [#38651](https://github.com/ClickHouse/ClickHouse/pull/38651) ([Yuko Takagi](https://github.com/yukotakagi)). +* Better naming for stuff related to splitted debug symbols [#38654](https://github.com/ClickHouse/ClickHouse/pull/38654) ([Robert Schulze](https://github.com/rschu1ze)). +* Add test for keeper `mntr` command [#38656](https://github.com/ClickHouse/ClickHouse/pull/38656) ([alesapin](https://github.com/alesapin)). +* Update hardware benchmark script [#38672](https://github.com/ClickHouse/ClickHouse/pull/38672) ([Filatenkov Artur](https://github.com/FArthur-cmd)). +* Fix strange backport titles issues [#38679](https://github.com/ClickHouse/ClickHouse/pull/38679) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Follow-up to [#38568](https://github.com/ClickHouse/ClickHouse/issues/38568) [#38680](https://github.com/ClickHouse/ClickHouse/pull/38680) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix possible deadlocks with `MergeTreeData::Transaction` [#38702](https://github.com/ClickHouse/ClickHouse/pull/38702) ([alesapin](https://github.com/alesapin)). +* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix FillingTransform [#38705](https://github.com/ClickHouse/ClickHouse/pull/38705) ([Nikita Taranov](https://github.com/nickitat)). +* Try to improve backward compatibility check [#38717](https://github.com/ClickHouse/ClickHouse/pull/38717) ([Kruglov Pavel](https://github.com/Avogar)). +* SQL create drop index fix formatting [#38720](https://github.com/ClickHouse/ClickHouse/pull/38720) ([Maksim Kita](https://github.com/kitaisreal)). +* Provide sort description for output stream in ReadFromMergeTree step [#38721](https://github.com/ClickHouse/ClickHouse/pull/38721) ([Igor Nikonov](https://github.com/devcrafter)). +* Add exp_internal for expect tests [#38728](https://github.com/ClickHouse/ClickHouse/pull/38728) ([Azat Khuzhin](https://github.com/azat)). +* Fix CLICKHOUSE_TMP in tests (fixes broken CI) [#38733](https://github.com/ClickHouse/ClickHouse/pull/38733) ([Azat Khuzhin](https://github.com/azat)). +* Add SimpleCheck [#38744](https://github.com/ClickHouse/ClickHouse/pull/38744) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). 
+* Tiny tests cleanup [#38749](https://github.com/ClickHouse/ClickHouse/pull/38749) ([Azat Khuzhin](https://github.com/azat)). +* Fix replication after improper merge process [#38752](https://github.com/ClickHouse/ClickHouse/pull/38752) ([Raúl Marín](https://github.com/Algunenano)). +* tests: make aggregate_state_exception_memory_leak deterministic [#38754](https://github.com/ClickHouse/ClickHouse/pull/38754) ([Azat Khuzhin](https://github.com/azat)). +* Bump jemalloc to fix possible assertion [#38757](https://github.com/ClickHouse/ClickHouse/pull/38757) ([Azat Khuzhin](https://github.com/azat)). +* Reintroduce nonNegativeDerivative() [#38774](https://github.com/ClickHouse/ClickHouse/pull/38774) ([Andrey Zvonov](https://github.com/zvonand)). +* Temporarily disable 01710_projection_fetch_long in BC check [#38798](https://github.com/ClickHouse/ClickHouse/pull/38798) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Use native Map type for OpenTelemetry attributes [#38814](https://github.com/ClickHouse/ClickHouse/pull/38814) ([Ilya Yatsishin](https://github.com/qoega)). +* Add test for segfault in Map combinator [#38831](https://github.com/ClickHouse/ClickHouse/pull/38831) ([Kruglov Pavel](https://github.com/Avogar)). +* Update libprotobuf-mutator + fix build [#38834](https://github.com/ClickHouse/ClickHouse/pull/38834) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Get files changed in master since release is branched [#38836](https://github.com/ClickHouse/ClickHouse/pull/38836) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* update integration tests doc [#38837](https://github.com/ClickHouse/ClickHouse/pull/38837) ([Bharat Nallan](https://github.com/bharatnc)). +* Revert of revert of smallish devguide update [#38850](https://github.com/ClickHouse/ClickHouse/pull/38850) ([Robert Schulze](https://github.com/rschu1ze)). +* Do not override compiler if it had been already set [#38856](https://github.com/ClickHouse/ClickHouse/pull/38856) ([Azat Khuzhin](https://github.com/azat)). +* Move check for denied allocations [#38858](https://github.com/ClickHouse/ClickHouse/pull/38858) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Refactoring of code around object storages, added LocalObjectStorage (extracted this diff from PR [#36171](https://github.com/ClickHouse/ClickHouse/issues/36171)) [#38860](https://github.com/ClickHouse/ClickHouse/pull/38860) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backup Improvements 9 [#38861](https://github.com/ClickHouse/ClickHouse/pull/38861) ([Vitaly Baranov](https://github.com/vitlibar)). +* Simple cleanup: interpreters and parsers [#38876](https://github.com/ClickHouse/ClickHouse/pull/38876) ([Igor Nikonov](https://github.com/devcrafter)). +* Remove unnecessary log [#38892](https://github.com/ClickHouse/ClickHouse/pull/38892) ([Raúl Marín](https://github.com/Algunenano)). +* Update version_date.tsv and changelogs after v22.6.3.35-stable [#38894](https://github.com/ClickHouse/ClickHouse/pull/38894) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid false positive red sanitizer asserts check in stress test [#38901](https://github.com/ClickHouse/ClickHouse/pull/38901) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Interpreter cleanup: ContextPtr -> const ContextPtr & in parameters [#38902](https://github.com/ClickHouse/ClickHouse/pull/38902) ([Igor Nikonov](https://github.com/devcrafter)). +* Add a test for simdjson [#38933](https://github.com/ClickHouse/ClickHouse/pull/38933) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix a typo [#38938](https://github.com/ClickHouse/ClickHouse/pull/38938) ([Nikolay Degterinsky](https://github.com/evillique)). +* Avoid redundant join block transformation during planning. [#38943](https://github.com/ClickHouse/ClickHouse/pull/38943) ([Amos Bird](https://github.com/amosbird)). +* Rename NUMBER_OF_DIMENSIONS_MISMATHED const to NUMBER_OF_DIMENSIONS_MISMATCHED [#38947](https://github.com/ClickHouse/ClickHouse/pull/38947) ([Vladimir Galunshchikov](https://github.com/soyayaos)). +* More careful destructor in BackupImpl [#38949](https://github.com/ClickHouse/ClickHouse/pull/38949) ([Vitaly Baranov](https://github.com/vitlibar)). +* Avoid weird exception in Keeper [#38963](https://github.com/ClickHouse/ClickHouse/pull/38963) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update version_date.tsv after v22.3.8.39-lts [#38969](https://github.com/ClickHouse/ClickHouse/pull/38969) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Remove tag no-backward-compatibility-check for specific versions [#38971](https://github.com/ClickHouse/ClickHouse/pull/38971) ([Kruglov Pavel](https://github.com/Avogar)). +* add Hetzner benchmark [#38974](https://github.com/ClickHouse/ClickHouse/pull/38974) ([Tyler Hannan](https://github.com/tylerhannan)). +* Update version_date.tsv after v22.4.6.53-stable [#38975](https://github.com/ClickHouse/ClickHouse/pull/38975) ([github-actions[bot]](https://github.com/apps/github-actions)). +* Disable instrumentation of sanitizer death callback [#38977](https://github.com/ClickHouse/ClickHouse/pull/38977) ([Alexander Tokmakov](https://github.com/tavplubix)). +* add ryzen 9 5950 benchmark [#38979](https://github.com/ClickHouse/ClickHouse/pull/38979) ([Tyler Hannan](https://github.com/tylerhannan)). +* EXPLAIN AST rewrite: rename to optimize [#38980](https://github.com/ClickHouse/ClickHouse/pull/38980) ([Igor Nikonov](https://github.com/devcrafter)). +* add macbook pro core i7 2014 benchmark [#38981](https://github.com/ClickHouse/ClickHouse/pull/38981) ([Tyler Hannan](https://github.com/tylerhannan)). +* add Huawei TaiShan 920 Benchmark [#38982](https://github.com/ClickHouse/ClickHouse/pull/38982) ([Tyler Hannan](https://github.com/tylerhannan)). +* tests: unique ZooKeeper path for Replicated.*MergeTree tables [#38999](https://github.com/ClickHouse/ClickHouse/pull/38999) ([Azat Khuzhin](https://github.com/azat)). +* Try another suppression for [#38629](https://github.com/ClickHouse/ClickHouse/issues/38629) [#39009](https://github.com/ClickHouse/ClickHouse/pull/39009) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix flaky `test_system_merges/test.py::test_mutation_simple` [#39013](https://github.com/ClickHouse/ClickHouse/pull/39013) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix assertion in full soring merge join [#39014](https://github.com/ClickHouse/ClickHouse/pull/39014) ([Vladimir C](https://github.com/vdimir)). 
+* Fix flaky 00620_optimize_on_nonleader_replica_zookeeper [#39019](https://github.com/ClickHouse/ClickHouse/pull/39019) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Suppress [#38643](https://github.com/ClickHouse/ClickHouse/issues/38643) [#39024](https://github.com/ClickHouse/ClickHouse/pull/39024) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update url.md [#39025](https://github.com/ClickHouse/ClickHouse/pull/39025) ([Ilya Yatsishin](https://github.com/qoega)). +* Fix 'Tried to lock part ... for removal second time' [#39036](https://github.com/ClickHouse/ClickHouse/pull/39036) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add more settings for randomization [#39039](https://github.com/ClickHouse/ClickHouse/pull/39039) ([Anton Popov](https://github.com/CurtizJ)). +* add ScaleFlux CSD3000 Benchmark [#39040](https://github.com/ClickHouse/ClickHouse/pull/39040) ([Tyler Hannan](https://github.com/tylerhannan)). +* BACKUP/RESTORE ON CLUSTER use async mode on replicas now. [#39046](https://github.com/ClickHouse/ClickHouse/pull/39046) ([Vitaly Baranov](https://github.com/vitlibar)). +* More stable `test_s3_zero_copy_ttl`, weakened requirement to move data to S3 in 0-5 seconds [#39064](https://github.com/ClickHouse/ClickHouse/pull/39064) ([Vladimir Chebotaryov](https://github.com/quickhouse)). +* Parameter --decompressor added to utils/self-extracting-executable/compressor [#39065](https://github.com/ClickHouse/ClickHouse/pull/39065) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Follow-up to [#39036](https://github.com/ClickHouse/ClickHouse/issues/39036) [#39091](https://github.com/ClickHouse/ClickHouse/pull/39091) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Update registerDiskS3.cpp [#39092](https://github.com/ClickHouse/ClickHouse/pull/39092) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix code in getLeastSupertype function [#39101](https://github.com/ClickHouse/ClickHouse/pull/39101) ([Kruglov Pavel](https://github.com/Avogar)). +* Remove some debug logging [#39102](https://github.com/ClickHouse/ClickHouse/pull/39102) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Prefix overridden add_executable() command with "clickhouse_" [#39108](https://github.com/ClickHouse/ClickHouse/pull/39108) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix meilisearch tests [#39110](https://github.com/ClickHouse/ClickHouse/pull/39110) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Properly remove projection from part in case it was removed from table metadata. [#39119](https://github.com/ClickHouse/ClickHouse/pull/39119) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update cluster.py [#39120](https://github.com/ClickHouse/ClickHouse/pull/39120) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Tiny updates for tests. [#39127](https://github.com/ClickHouse/ClickHouse/pull/39127) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix data race in CompletedPipelineExecutor. [#39132](https://github.com/ClickHouse/ClickHouse/pull/39132) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix style again [#39133](https://github.com/ClickHouse/ClickHouse/pull/39133) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix path retrieval for Keeper's state [#39148](https://github.com/ClickHouse/ClickHouse/pull/39148) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). 
+* Slightly better interface of waitForMutation [#39154](https://github.com/ClickHouse/ClickHouse/pull/39154) ([Amos Bird](https://github.com/amosbird)). +* ThreadPool fixes [#39160](https://github.com/ClickHouse/ClickHouse/pull/39160) ([Azat Khuzhin](https://github.com/azat)). +* Add test for [#39132](https://github.com/ClickHouse/ClickHouse/issues/39132) [#39173](https://github.com/ClickHouse/ClickHouse/pull/39173) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Suppression for BC check (`Cannot parse string 'Hello' as UInt64`) [#39176](https://github.com/ClickHouse/ClickHouse/pull/39176) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix 01961_roaring_memory_tracking test [#39187](https://github.com/ClickHouse/ClickHouse/pull/39187) ([Dmitry Novik](https://github.com/novikd)). +* Cleanup: done during [#38719](https://github.com/ClickHouse/ClickHouse/issues/38719) (SortingStep: deduce way to sort based on … [#39191](https://github.com/ClickHouse/ClickHouse/pull/39191) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix exception in AsynchronousMetrics for s390x [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)). +* Optimize accesses to system.stack_trace (filter by name before sending signal) [#39212](https://github.com/ClickHouse/ClickHouse/pull/39212) ([Azat Khuzhin](https://github.com/azat)). +* Enable warning "-Wdeprecated-dynamic-exception-spec" [#39213](https://github.com/ClickHouse/ClickHouse/pull/39213) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove specialization global lock/unlock from ActionLocksManager [#39215](https://github.com/ClickHouse/ClickHouse/pull/39215) ([Azat Khuzhin](https://github.com/azat)). +* Turn some warnings on [#39223](https://github.com/ClickHouse/ClickHouse/pull/39223) ([Robert Schulze](https://github.com/rschu1ze)). +* Pass const std::string_view by value, not by reference [#39224](https://github.com/ClickHouse/ClickHouse/pull/39224) ([Kruglov Pavel](https://github.com/Avogar)). +* Minor fix for BC check [#39231](https://github.com/ClickHouse/ClickHouse/pull/39231) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Backport script [#39235](https://github.com/ClickHouse/ClickHouse/pull/39235) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Do not color logs on client if they are redirected to file [#39243](https://github.com/ClickHouse/ClickHouse/pull/39243) ([Anton Popov](https://github.com/CurtizJ)). +* Remove incorrect assertion [#39245](https://github.com/ClickHouse/ClickHouse/pull/39245) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Add X86 prefix to x86 performance tests [#39251](https://github.com/ClickHouse/ClickHouse/pull/39251) ([Robert Schulze](https://github.com/rschu1ze)). +* Check that the destination for a backup is not in use. [#39254](https://github.com/ClickHouse/ClickHouse/pull/39254) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix stacktraces in gdb in BC check [#39256](https://github.com/ClickHouse/ClickHouse/pull/39256) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Disable flaky test `test_s3_zero_copy_on_hybrid_storage` [#39258](https://github.com/ClickHouse/ClickHouse/pull/39258) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Enabled Wc99-extensions + Wsign-conversion [#39261](https://github.com/ClickHouse/ClickHouse/pull/39261) ([Robert Schulze](https://github.com/rschu1ze)). 
+* Pass const StringRef by value, not by reference [#39262](https://github.com/ClickHouse/ClickHouse/pull/39262) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix assertion in transactions [#39263](https://github.com/ClickHouse/ClickHouse/pull/39263) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix macosx compilation due to endian.h [#39265](https://github.com/ClickHouse/ClickHouse/pull/39265) ([Jordi Villar](https://github.com/jrdi)). +* Another supression for BC check [#39276](https://github.com/ClickHouse/ClickHouse/pull/39276) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix builder image for releases w/o diagnostics tool [#39281](https://github.com/ClickHouse/ClickHouse/pull/39281) ([Azat Khuzhin](https://github.com/azat)). +* [RFC] Remove superior atomic from MergeTreeBackgroundExecutor and annotations for TSA [#39285](https://github.com/ClickHouse/ClickHouse/pull/39285) ([Azat Khuzhin](https://github.com/azat)). +* Fix clang tidy [#39288](https://github.com/ClickHouse/ClickHouse/pull/39288) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix running cmake with predefined cache (for development only) [#39295](https://github.com/ClickHouse/ClickHouse/pull/39295) ([Azat Khuzhin](https://github.com/azat)). +* Fix googletest contrib compilation (due to GTEST_HAS_POSIX_RE=0) [#39298](https://github.com/ClickHouse/ClickHouse/pull/39298) ([Azat Khuzhin](https://github.com/azat)). +* First try at reducing the use of StringRef [#39300](https://github.com/ClickHouse/ClickHouse/pull/39300) ([Robert Schulze](https://github.com/rschu1ze)). +* Whitespaces [#39303](https://github.com/ClickHouse/ClickHouse/pull/39303) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add test for window function inside CASE [#39305](https://github.com/ClickHouse/ClickHouse/pull/39305) ([Dmitry Novik](https://github.com/novikd)). +* Simple Check should be updated on rerun [#39307](https://github.com/ClickHouse/ClickHouse/pull/39307) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix leaking of logger in clickhouse-disks [#39314](https://github.com/ClickHouse/ClickHouse/pull/39314) ([Azat Khuzhin](https://github.com/azat)). +* Update exception message [#39315](https://github.com/ClickHouse/ClickHouse/pull/39315) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix build clang-13 [#39318](https://github.com/ClickHouse/ClickHouse/pull/39318) ([alesapin](https://github.com/alesapin)). +* Auto set test name in integration tests [#39322](https://github.com/ClickHouse/ClickHouse/pull/39322) ([Vitaly Baranov](https://github.com/vitlibar)). +* Try fix flaky test_store_cleanup [#39334](https://github.com/ClickHouse/ClickHouse/pull/39334) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Do not start on unexpected Ordinary metadata [#39337](https://github.com/ClickHouse/ClickHouse/pull/39337) ([Alexander Tokmakov](https://github.com/tavplubix)). +* switch from mkdocs to Docusaurus [#39338](https://github.com/ClickHouse/ClickHouse/pull/39338) ([Dan Roscigno](https://github.com/DanRoscigno)). +* Fix flaky 01174_select_insert_isolation [#39339](https://github.com/ClickHouse/ClickHouse/pull/39339) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Better exception messages in schema inference [#39340](https://github.com/ClickHouse/ClickHouse/pull/39340) ([Kruglov Pavel](https://github.com/Avogar)). 
+* Fix memory exceptions with transactions [#39341](https://github.com/ClickHouse/ClickHouse/pull/39341) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix typo [#39360](https://github.com/ClickHouse/ClickHouse/pull/39360) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix typo [#39361](https://github.com/ClickHouse/ClickHouse/pull/39361) ([Kruglov Pavel](https://github.com/Avogar)). +* Do not enqueue uneeded parts for check [#39366](https://github.com/ClickHouse/ClickHouse/pull/39366) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Avoid loading toolchain file multiple times to avoid confusing ccache [#39387](https://github.com/ClickHouse/ClickHouse/pull/39387) ([Azat Khuzhin](https://github.com/azat)). +* Fix make clean (due to crosscompile of llvm) [#39392](https://github.com/ClickHouse/ClickHouse/pull/39392) ([Azat Khuzhin](https://github.com/azat)). +* Disable real-time digest in Keeper by default [#39393](https://github.com/ClickHouse/ClickHouse/pull/39393) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix interactive client with older server [#39413](https://github.com/ClickHouse/ClickHouse/pull/39413) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix BC check [#39414](https://github.com/ClickHouse/ClickHouse/pull/39414) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Fix flaky test test_login_as_dropped_user_xml. [#39415](https://github.com/ClickHouse/ClickHouse/pull/39415) ([Vitaly Baranov](https://github.com/vitlibar)). +* Introduce a dependency to libuv when building NATS [#39427](https://github.com/ClickHouse/ClickHouse/pull/39427) ([ltrk2](https://github.com/ltrk2)). +* Set default value cross_to_inner_join_rewrite = 1 [#39443](https://github.com/ClickHouse/ClickHouse/pull/39443) ([Vladimir C](https://github.com/vdimir)). +* Respect table alias for additional_table_filters. [#39456](https://github.com/ClickHouse/ClickHouse/pull/39456) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). + +#### Performance optimization and Bug Fix + +* Enabled `pread_threadpool` read method by default. It will increase read performance. Bug fix: if direct IO is enabled and the number of threads is large and `pread_threadpool` is used, it may cause a logical error. [#33653](https://github.com/ClickHouse/ClickHouse/pull/33653) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + diff --git a/docs/en/development/build.md b/docs/en/development/build.md index dbb90f8e537..e12884b61c4 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -75,7 +75,7 @@ This will create the `programs/clickhouse` executable, which can be used with `c The build requires the following components: - Git (is used only to checkout the sources, it’s not needed for the build) -- CMake 3.14 or newer +- CMake 3.15 or newer - Ninja - C++ compiler: clang-14 or newer - Linker: lld diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 5d8ed9cdacd..e499849426b 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -1632,6 +1632,8 @@ kafka_topic_list = 'topic1', kafka_group_name = 'group1', kafka_format = 'AvroConfluent'; +-- for debug purposes you can set format_avro_schema_registry_url in a session. 
+-- this approach should not be used in production SET format_avro_schema_registry_url = 'http://schema-registry'; SELECT * FROM topic1_stream; diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index d3a50969a39..fe4795d3798 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -45,7 +45,7 @@ Configuration template: - `min_part_size` – The minimum size of a data part. - `min_part_size_ratio` – The ratio of the data part size to the table size. -- `method` – Compression method. Acceptable values: `lz4`, `lz4hc`, `zstd`. +- `method` – Compression method. Acceptable values: `lz4`, `lz4hc`, `zstd`, `deflate_qpl`. - `level` – Compression level. See [Codecs](../../sql-reference/statements/create/table.md#create-query-general-purpose-codecs). You can configure multiple `<case>` sections. diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 75c2aa57b32..ed1f139f482 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -2626,7 +2626,7 @@ Possible values: - Any positive integer. - 0 - Disabled (infinite timeout). -Default value: 1800. +Default value: 180. ## http_receive_timeout {#http_receive_timeout} @@ -2637,7 +2637,7 @@ Possible values: - Any positive integer. - 0 - Disabled (infinite timeout). -Default value: 1800. +Default value: 180. ## check_query_single_value_result {#check_query_single_value_result} @@ -3329,6 +3329,15 @@ Read more about [memory overcommit](memory-overcommit.md). Default value: `1GiB`. +## compatibility {#compatibility} + +This setting changes other settings according to the provided ClickHouse version. +If a behaviour in ClickHouse was changed by using a different default value for some setting, this compatibility setting allows you to use default values from previous versions for all the settings that were not set by the user. + +This setting takes a ClickHouse version number as a string, like `21.3`, `21.8`. An empty value means that this setting is disabled. + +Disabled by default. + # Format settings {#format-settings} ## input_format_skip_unknown_fields {#input_format_skip_unknown_fields} diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index 9e6f0effcf9..6b01ee31501 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -681,6 +681,47 @@ SELECT └────────────┴─────────────────────────────────────────────────────────────────┘ ``` +## parseTimeDelta + +Parse a sequence of numbers followed by something resembling a time unit. + +**Syntax** + +```sql +parseTimeDelta(timestr) +``` + +**Arguments** + +- `timestr` — A sequence of numbers followed by something resembling a time unit. + + +**Returned value** + +- A floating-point number with the number of seconds. + +**Example** + +```sql +SELECT parseTimeDelta('11s+22min') +``` + +```text +┌─parseTimeDelta('11s+22min')─┐ +│ 1331 │ +└─────────────────────────────┘ +``` + +```sql +SELECT parseTimeDelta('1yr2mo') +``` + +```text +┌─parseTimeDelta('1yr2mo')─┐ +│ 36806400 │ +└──────────────────────────┘ +``` + ## least(a, b) Returns the smallest value from a and b.
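As a quick illustration of the `compatibility` setting documented in the settings.md hunk above (a minimal sketch only; the version string is an example and the exact set of affected settings depends on the server release), it can be set per session like any other setting:

```sql
-- Illustrative only: fall back to the default values from ClickHouse 21.8
-- for every setting that the user has not set explicitly in this session.
SET compatibility = '21.8';
```

After this, settings whose defaults changed in later releases behave as they did in the named version, unless the user overrides them explicitly.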
diff --git a/docs/en/sql-reference/statements/alter/partition.md b/docs/en/sql-reference/statements/alter/partition.md index 079d462a536..b7787fbef92 100644 --- a/docs/en/sql-reference/statements/alter/partition.md +++ b/docs/en/sql-reference/statements/alter/partition.md @@ -108,7 +108,8 @@ Note that data will be deleted neither from `table1` nor from `table2`. For the query to run successfully, the following conditions must be met: - Both tables must have the same structure. -- Both tables must have the same partition key. +- Both tables must have the same partition key, the same order by key and the same primary key. +- Both tables must have the same storage policy (a disk where the partition is stored should be available for both tables). ## REPLACE PARTITION @@ -121,7 +122,8 @@ This query copies the data partition from the `table1` to `table2` and replaces For the query to run successfully, the following conditions must be met: - Both tables must have the same structure. -- Both tables must have the same partition key. +- Both tables must have the same partition key, the same order by key and the same primary key. +- Both tables must have the same storage policy (a disk where the partition is stored should be available for both tables). ## MOVE PARTITION TO TABLE @@ -134,9 +136,9 @@ This query moves the data partition from the `table_source` to `table_dest` with For the query to run successfully, the following conditions must be met: - Both tables must have the same structure. -- Both tables must have the same partition key. +- Both tables must have the same partition key, the same order by key and the same primary key. +- Both tables must have the same storage policy (a disk where the partition is stored should be available for both tables). - Both tables must be the same engine family (replicated or non-replicated). -- Both tables must have the same storage policy. ## CLEAR COLUMN IN PARTITION diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index 2cf57cc2243..0c2e87fbcac 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -248,6 +248,13 @@ ClickHouse supports general purpose codecs and specialized codecs. High compression levels are useful for asymmetric scenarios, like compress once, decompress repeatedly. Higher levels mean better compression and higher CPU usage. +#### DEFLATE_QPL + +`DEFLATE_QPL` — [Deflate compression algorithm](https://github.com/intel/qpl) implemented by Intel® Query Processing Library, which has dependency on Intel Hardware: + +- DEFLATE_QPL is only supported on systems with AVX2/AVX512/IAA. +- DEFLATE_QPL-compressed data can only be transferred between nodes with AVX2/AVX512/IAA. + ### Specialized Codecs These codecs are designed to make compression more effective by using specific features of data. Some of these codecs do not compress data themself. Instead, they prepare the data for a common purpose codec, which compresses it better than without this preparation. diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 680e77dfb6c..0c0c7da330d 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -44,7 +44,7 @@ ClickHouse перезагружает встроенные словари с з - `min_part_size` - Минимальный размер части таблицы. 
- `min_part_size_ratio` - Отношение размера минимальной части таблицы к полному размеру таблицы. -- `method` - Метод сжатия. Возможные значения: `lz4`, `lz4hc`, `zstd`. +- `method` - Метод сжатия. Возможные значения: `lz4`, `lz4hc`, `zstd`,`deflate_qpl`. - `level` – Уровень сжатия. См. [Кодеки](../../sql-reference/statements/create/table/#create-query-common-purpose-codecs). Можно сконфигурировать несколько разделов ``. diff --git a/docs/ru/sql-reference/statements/alter/partition.md b/docs/ru/sql-reference/statements/alter/partition.md index 036f72fc951..aecf954a45a 100644 --- a/docs/ru/sql-reference/statements/alter/partition.md +++ b/docs/ru/sql-reference/statements/alter/partition.md @@ -106,7 +106,8 @@ ALTER TABLE table2 [ON CLUSTER cluster] ATTACH PARTITION partition_expr FROM tab Следует иметь в виду: - Таблицы должны иметь одинаковую структуру. -- Для таблиц должен быть задан одинаковый ключ партиционирования. +- Для таблиц должен быть задан одинаковый ключ партиционирования, одинаковый ключ сортировки и одинаковый первичный ключ. +- Для таблиц должна быть задана одинаковая политика хранения (диск, на котором хранится партиция, должен быть доступен для обеих таблиц). Подробнее о том, как корректно задать имя партиции, см. в разделе [Как задавать имя партиции в запросах ALTER](#alter-how-to-specify-part-expr). @@ -121,7 +122,8 @@ ALTER TABLE table2 [ON CLUSTER cluster] REPLACE PARTITION partition_expr FROM ta Следует иметь в виду: - Таблицы должны иметь одинаковую структуру. -- Для таблиц должен быть задан одинаковый ключ партиционирования. +- Для таблиц должен быть задан одинаковый ключ партиционирования, одинаковый ключ сортировки и одинаковый первичный ключ. +- Для таблиц должна быть задана одинаковая политика хранения (диск, на котором хранится партиция, должен быть доступен для обеих таблиц). Подробнее о том, как корректно задать имя партиции, см. в разделе [Как задавать имя партиции в запросах ALTER](#alter-how-to-specify-part-expr). @@ -136,9 +138,9 @@ ALTER TABLE table_source [ON CLUSTER cluster] MOVE PARTITION partition_expr TO T Следует иметь в виду: - Таблицы должны иметь одинаковую структуру. -- Для таблиц должен быть задан одинаковый ключ партиционирования. +- Для таблиц должен быть задан одинаковый ключ партиционирования, одинаковый ключ сортировки и одинаковый первичный ключ. +- Для таблиц должна быть задана одинаковая политика хранения (диск, на котором хранится партиция, должен быть доступен для обеих таблиц). - Движки таблиц должны быть одинакового семейства (реплицированные или нереплицированные). -- Для таблиц должна быть задана одинаковая политика хранения. ## CLEAR COLUMN IN PARTITION {#alter_clear-column-partition} diff --git a/docs/tools/README.md b/docs/tools/README.md index 163600804c6..7cf3540d108 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -1,50 +1,94 @@ -## How ClickHouse documentation is generated? {#how-clickhouse-documentation-is-generated} +## Generating ClickHouse documentation {#how-clickhouse-documentation-is-generated} -ClickHouse documentation is built using [build.py](build.py) script that uses [mkdocs](https://www.mkdocs.org) library and it’s dependencies to separately build all version of documentations (all languages in either single and multi page mode) as static HTMLs for each single page version. The results are then put in the correct directory structure. It is recommended to use Python 3.7 to run this script. +ClickHouse documentation is built using [Docusaurus](https://docusaurus.io). 
-[release.sh](release.sh) also pulls static files needed for [official ClickHouse website](https://clickhouse.com) from [../../website](../../website) folder then pushes to specified GitHub repo to be served via [GitHub Pages](https://pages.github.com). +## Check the look of your documentation changes {#how-to-check-if-the-documentation-will-look-fine} -## How to check if the documentation will look fine? {#how-to-check-if-the-documentation-will-look-fine} +There are a few options that are all useful depending on how large or complex your edits are. -There are few options that are all useful depending on how large or complex your edits are. +### Use the GitHub web interface to edit -### Use GitHub web interface to edit +Every page in the docs has an **Edit this page** link that opens the page in the GitHub editor. GitHub has Markdown support with a preview feature. The details of GitHub Markdown and the documentation Markdown are a bit different but generally this is close enough, and the person merging your PR will build the docs and check them. -GitHub has Markdown support with preview feature, but the details of GitHub Markdown dialect are a bit different in ClickHouse documentation. +### Install a Markdown editor or plugin for your IDE {#install-markdown-editor-or-plugin-for-your-ide} -### Install Markdown editor or plugin for your IDE {#install-markdown-editor-or-plugin-for-your-ide} +Usually, these plugins provide a preview of how the markdown will render, and they catch basic errors like unclosed tags very early. -Usually those also have some way to preview how Markdown will look like, which allows to catch basic errors like unclosed tags very early. -### Use build.py {#use-build-py} +## Build the docs locally {#use-build-py} -It’ll take some effort to go through, but the result will be very close to production documentation. +You can build the docs locally. It takes a few minutes to set up, but once you have done it the first time, the process is very simple. -For the first time you’ll need to: +### Clone the repos -#### 1. Set up virtualenv +The documentation is in two repos, clone both of them: +- [ClickHouse/ClickHouse](https://github.com/ClickHouse/ClickHouse) +- [ClickHouse/ClickHouse-docs](https://github.com/ClickHouse/clickhouse-docs) -``` bash -$ cd ClickHouse/docs/tools -$ mkdir venv -$ virtualenv -p $(which python3) venv -$ source venv/bin/activate -$ pip3 install -r requirements.txt +### Install Node.js + +The documentation is built with Docusaurus, which requires Node.js. We recommend version 16. Install [Node.js](https://nodejs.org/en/download/). + +### Copy files into place + +Docusaurus expects all of the markdown files to be located in the directory tree `clickhouse-docs/docs/`. This is not the way our repos are set up, so some copying of files is needed to build the docs: + +```bash +# from the parent directory of both the ClickHouse/ClickHouse and ClickHouse-clickhouse-docs repos: +cp -r ClickHouse/docs/en/development clickhouse-docs/docs/en/ +cp -r ClickHouse/docs/en/engines clickhouse-docs/docs/en/ +cp -r ClickHouse/docs/en/getting-started clickhouse-docs/docs/en/ +cp -r ClickHouse/docs/en/interfaces clickhouse-docs/docs/en/ +cp -r ClickHouse/docs/en/operations clickhouse-docs/docs/en/ +cp -r ClickHouse/docs/en/sql-reference clickhouse-docs/docs/en/ + +cp -r ClickHouse/docs/ru/* clickhouse-docs/docs/ru/ +cp -r ClickHouse/docs/zh clickhouse-docs/docs/ ``` -#### 2. Run build.py +#### Note: Symlinks will not work. 
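The copy step above can also be scripted. A minimal sketch, assuming both checkouts sit side by side in the same parent directory (the same layout the `cp` commands above assume):

```bash
# Same effect as the individual `cp -r` commands listed above:
# copy the English sections that Docusaurus expects under clickhouse-docs/docs/.
for section in development engines getting-started interfaces operations sql-reference
do
    cp -r "ClickHouse/docs/en/${section}" clickhouse-docs/docs/en/
done

cp -r ClickHouse/docs/ru/* clickhouse-docs/docs/ru/
cp -r ClickHouse/docs/zh clickhouse-docs/docs/
```

Re-run it after editing files under `ClickHouse/docs` so the copies stay in sync.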
+### Set up Docusaurus
-When all prerequisites are installed, running `build.py` without args (there are some, check `build.py --help`) will generate `ClickHouse/docs/build` folder with complete static html website.
+There are two commands that you may need to use with Docusaurus:
+- `yarn install`
+- `yarn start`
-The easiest way to see the result is to use `--livereload=8888` argument of build.py. Alternatively, you can manually launch a HTTP server to serve the docs, for example by running `cd ClickHouse/docs/build && python3 -m http.server 8888`. Then go to http://localhost:8888 in browser. Feel free to use any other port instead of 8888.
+#### Install Docusaurus and its dependencies:
+
+```bash
+cd clickhouse-docs
+yarn install
+```
+
+#### Start a development Docusaurus environment
+
+This command will start Docusaurus in development mode, which means that as you edit source files (for example, `.md` files), the changes will be rendered into HTML files and served by the Docusaurus development server.
+
+```bash
+yarn start
+```
+
+### Make your changes to the markdown files
+
+Edit your files. Remember that if you are editing files in the `ClickHouse/ClickHouse` repo then you should edit them
+in that repo and then copy the edited file into the `ClickHouse/clickhouse-docs/` directory structure so that they are updated in your development environment.
+
+`yarn start` probably opened a browser for you when you ran it; if not, open a browser to `http://localhost:3000/docs/en/intro` and navigate to the documentation that you are changing. If you have already made the changes, you can verify them here; if not, make them, and you will see the page update as you save the changes.
## How to change code highlighting? {#how-to-change-code-hl}
-ClickHouse does not use mkdocs `highlightjs` feature. It uses modified pygments styles instead.
-If you want to change code highlighting, edit the `website/css/highlight.css` file.
-Currently, an [eighties](https://github.com/idleberg/base16-pygments/blob/master/css/base16-eighties.dark.css) theme
-is used.
+Code highlighting is based on the language chosen for your code blocks. Specify the language when you start the code block:
+
+ +```sql +SELECT firstname from imdb.actors; +``` + +If you need a language supported then open an issue in [ClickHouse-docs](https://github.com/ClickHouse/clickhouse-docs/issues). ## How to subscribe on documentation changes? {#how-to-subscribe-on-documentation-changes} At the moment there’s no easy way to do just that, but you can consider: diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 4e3aa701d95..584806951cf 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -102,11 +102,38 @@ void Client::processError(const String & query) const } +void Client::showWarnings() +{ + try + { + std::vector messages = loadWarningMessages(); + if (!messages.empty()) + { + std::cout << "Warnings:" << std::endl; + for (const auto & message : messages) + std::cout << " * " << message << std::endl; + std::cout << std::endl; + } + } + catch (...) + { + /// Ignore exception + } +} + /// Make query to get all server warnings std::vector Client::loadWarningMessages() { + /// Older server versions cannot execute the query loading warnings. + constexpr UInt64 min_server_revision_to_load_warnings = DBMS_MIN_PROTOCOL_VERSION_WITH_VIEW_IF_PERMITTED; + + if (server_revision < min_server_revision_to_load_warnings) + return {}; + std::vector messages; - connection->sendQuery(connection_parameters.timeouts, "SELECT message FROM system.warnings", "" /* query_id */, + connection->sendQuery(connection_parameters.timeouts, + "SELECT * FROM viewIfPermitted(SELECT message FROM system.warnings ELSE null('message String'))", + "" /* query_id */, QueryProcessingStage::Complete, &global_context->getSettingsRef(), &global_context->getClientInfo(), false, {}); @@ -224,25 +251,9 @@ try connect(); - /// Load Warnings at the beginning of connection + /// Show warnings at the beginning of connection. if (is_interactive && !config().has("no-warnings")) - { - try - { - std::vector messages = loadWarningMessages(); - if (!messages.empty()) - { - std::cout << "Warnings:" << std::endl; - for (const auto & message : messages) - std::cout << " * " << message << std::endl; - std::cout << std::endl; - } - } - catch (...) - { - /// Ignore exception - } - } + showWarnings(); if (is_interactive && !delayed_interactive) { @@ -368,7 +379,7 @@ void Client::connect() } server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." 
+ toString(server_version_patch); - load_suggestions = is_interactive && (server_revision >= Suggest::MIN_SERVER_REVISION && !config().getBool("disable_suggestion", false)); + load_suggestions = is_interactive && (server_revision >= Suggest::MIN_SERVER_REVISION) && !config().getBool("disable_suggestion", false); if (server_display_name = connection->getServerDisplayName(connection_parameters.timeouts); server_display_name.empty()) server_display_name = config().getString("host", "localhost"); diff --git a/programs/client/Client.h b/programs/client/Client.h index 164b8e2ebaa..1fec282be51 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -45,6 +45,7 @@ protected: private: void printChangedSettings() const; + void showWarnings(); std::vector loadWarningMessages(); }; } diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index d0fc3528473..fe8debcee27 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -79,6 +79,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) ("block-size,b", po::value()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size") ("hc", "use LZ4HC instead of LZ4") ("zstd", "use ZSTD instead of LZ4") + ("deflate_qpl", "use deflate_qpl instead of LZ4") ("codec", po::value>()->multitoken(), "use codecs combination instead of LZ4") ("level", po::value(), "compression level for codecs specified via flags") ("none", "use no compression instead of LZ4") @@ -103,6 +104,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) bool decompress = options.count("decompress"); bool use_lz4hc = options.count("hc"); bool use_zstd = options.count("zstd"); + bool use_deflate_qpl = options.count("deflate_qpl"); bool stat_mode = options.count("stat"); bool use_none = options.count("none"); unsigned block_size = options["block-size"].as(); @@ -110,7 +112,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) if (options.count("codec")) codecs = options["codec"].as>(); - if ((use_lz4hc || use_zstd || use_none) && !codecs.empty()) + if ((use_lz4hc || use_zstd || use_deflate_qpl || use_none) && !codecs.empty()) throw Exception("Wrong options, codec flags like --zstd and --codec options are mutually exclusive", ErrorCodes::BAD_ARGUMENTS); if (!codecs.empty() && options.count("level")) @@ -122,6 +124,8 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) method_family = "LZ4HC"; else if (use_zstd) method_family = "ZSTD"; + else if (use_deflate_qpl) + method_family = "DEFLATE_QPL"; else if (use_none) method_family = "NONE"; diff --git a/programs/copier/ZooKeeperStaff.h b/programs/copier/ZooKeeperStaff.h index 66036ae2f27..a9e04578607 100644 --- a/programs/copier/ZooKeeperStaff.h +++ b/programs/copier/ZooKeeperStaff.h @@ -20,7 +20,7 @@ public: bool operator<=(const WrappingUInt32 & other) const { - const UInt32 HALF = 1 << 31; + const UInt32 HALF = static_cast(1) << 31; return (value <= other.value && other.value - value < HALF) || (value > other.value && value - other.value > HALF); } diff --git a/programs/disks/CommandCopy.cpp b/programs/disks/CommandCopy.cpp index f9cd7444287..1e5852fe651 100644 --- a/programs/disks/CommandCopy.cpp +++ b/programs/disks/CommandCopy.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandLink.cpp b/programs/disks/CommandLink.cpp index 6e9a7e64324..af48f0de097 100644 --- a/programs/disks/CommandLink.cpp +++ b/programs/disks/CommandLink.cpp @@ -1,6 +1,7 @@ 
#pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandList.cpp b/programs/disks/CommandList.cpp index 8c6bfac3a9b..e76bb9e65fb 100644 --- a/programs/disks/CommandList.cpp +++ b/programs/disks/CommandList.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandListDisks.cpp b/programs/disks/CommandListDisks.cpp index 2bcbb045d67..22cffdd21fd 100644 --- a/programs/disks/CommandListDisks.cpp +++ b/programs/disks/CommandListDisks.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandMove.cpp b/programs/disks/CommandMove.cpp index 4a377cc7225..6322cf4b47d 100644 --- a/programs/disks/CommandMove.cpp +++ b/programs/disks/CommandMove.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandRead.cpp b/programs/disks/CommandRead.cpp index aa472fa217e..6b77a27e918 100644 --- a/programs/disks/CommandRead.cpp +++ b/programs/disks/CommandRead.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandRemove.cpp b/programs/disks/CommandRemove.cpp index d9925fbd93e..c1d3129bb8d 100644 --- a/programs/disks/CommandRemove.cpp +++ b/programs/disks/CommandRemove.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/CommandWrite.cpp b/programs/disks/CommandWrite.cpp index c8ae91ea8d5..0b1c5823c81 100644 --- a/programs/disks/CommandWrite.cpp +++ b/programs/disks/CommandWrite.cpp @@ -1,6 +1,7 @@ #pragma once #include "ICommand.h" +#include namespace DB { diff --git a/programs/disks/DisksApp.cpp b/programs/disks/DisksApp.cpp index 99b4b099bd8..58a18f6ad2e 100644 --- a/programs/disks/DisksApp.cpp +++ b/programs/disks/DisksApp.cpp @@ -154,7 +154,7 @@ int DisksApp::main(const std::vector & /*args*/) Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level)); auto log_path = config().getString("logger.clickhouse-disks", "/var/log/clickhouse-server/clickhouse-disks.log"); - Poco::Logger::root().setChannel(new Poco::FileChannel(log_path)); + Poco::Logger::root().setChannel(Poco::AutoPtr(new Poco::FileChannel(log_path))); } if (config().has("config-file") || fs::exists(getDefaultConfigFileName())) diff --git a/programs/server/config.xml b/programs/server/config.xml index 203684a9e00..40e561c1880 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -604,9 +604,23 @@ if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows. By default this setting is false for compatibility with earlier access configurations. 
--> false + false + + + false + + + false diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index 7152820b5bc..c6729459988 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -101,7 +101,7 @@ public: registered_prefixes = prefixes_; } - bool isSettingNameAllowed(const std::string_view & setting_name) const + bool isSettingNameAllowed(std::string_view setting_name) const { if (Settings::hasBuiltin(setting_name)) return true; @@ -116,7 +116,7 @@ public: return false; } - void checkSettingNameIsAllowed(const std::string_view & setting_name) const + void checkSettingNameIsAllowed(std::string_view setting_name) const { if (isSettingNameAllowed(setting_name)) return; @@ -165,13 +165,12 @@ void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration setNoPasswordAllowed(config_.getBool("allow_no_password", true)); setPlaintextPasswordAllowed(config_.getBool("allow_plaintext_password", true)); - setEnabledUsersWithoutRowPoliciesCanReadRows(config_.getBool( - "access_control_improvements.users_without_row_policies_can_read_rows", - false /* false because we need to be compatible with earlier access configurations */)); - - setOnClusterQueriesRequireClusterGrant(config_.getBool( - "access_control_improvements.on_cluster_queries_require_cluster_grant", - false /* false because we need to be compatible with earlier access configurations */)); + /// Optional improvements in access control system. + /// The default values are false because we need to be compatible with earlier access configurations + setEnabledUsersWithoutRowPoliciesCanReadRows(config_.getBool("access_control_improvements.users_without_row_policies_can_read_rows", false)); + setOnClusterQueriesRequireClusterGrant(config_.getBool("access_control_improvements.on_cluster_queries_require_cluster_grant", false)); + setSelectFromSystemDatabaseRequiresGrant(config_.getBool("access_control_improvements.select_from_system_db_requires_grant", false)); + setSelectFromInformationSchemaRequiresGrant(config_.getBool("access_control_improvements.select_from_information_schema_requires_grant", false)); addStoragesFromMainConfig(config_, config_path_, get_zookeeper_function_); } diff --git a/src/Access/AccessControl.h b/src/Access/AccessControl.h index 22ff0a488f7..ab9cdba9ad1 100644 --- a/src/Access/AccessControl.h +++ b/src/Access/AccessControl.h @@ -152,6 +152,12 @@ public: void setOnClusterQueriesRequireClusterGrant(bool enable) { on_cluster_queries_require_cluster_grant = enable; } bool doesOnClusterQueriesRequireClusterGrant() const { return on_cluster_queries_require_cluster_grant; } + void setSelectFromSystemDatabaseRequiresGrant(bool enable) { select_from_system_db_requires_grant = enable; } + bool doesSelectFromSystemDatabaseRequireGrant() const { return select_from_system_db_requires_grant; } + + void setSelectFromInformationSchemaRequiresGrant(bool enable) { select_from_information_schema_requires_grant = enable; } + bool doesSelectFromInformationSchemaRequireGrant() const { return select_from_information_schema_requires_grant; } + std::shared_ptr getContextAccess( const UUID & user_id, const std::vector & current_roles, @@ -215,6 +221,8 @@ private: std::atomic_bool allow_no_password = true; std::atomic_bool users_without_row_policies_can_read_rows = false; std::atomic_bool on_cluster_queries_require_cluster_grant = false; + std::atomic_bool select_from_system_db_requires_grant = false; + std::atomic_bool select_from_information_schema_requires_grant = false; }; } diff 
--git a/src/Access/AccessRights.cpp b/src/Access/AccessRights.cpp index 7c3139dbb0f..20afc916901 100644 --- a/src/Access/AccessRights.cpp +++ b/src/Access/AccessRights.cpp @@ -252,7 +252,7 @@ public: } template - void grant(const AccessFlags & flags_, const std::string_view & name, const Args &... subnames) + void grant(const AccessFlags & flags_, std::string_view name, const Args &... subnames) { auto & child = getChild(name); child.grant(flags_, subnames...); @@ -279,7 +279,7 @@ public: } template - void revoke(const AccessFlags & flags_, const std::string_view & name, const Args &... subnames) + void revoke(const AccessFlags & flags_, std::string_view name, const Args &... subnames) { auto & child = getChild(name); @@ -306,7 +306,7 @@ public: } template - bool isGranted(const AccessFlags & flags_, const std::string_view & name, const Args &... subnames) const + bool isGranted(const AccessFlags & flags_, std::string_view name, const Args &... subnames) const { AccessFlags flags_to_check = flags_ - min_flags_with_children; if (!flags_to_check) @@ -388,11 +388,11 @@ public: return res; } - void modifyFlags(const ModifyFlagsFunction & function, bool & flags_added, bool & flags_removed) + void modifyFlags(const ModifyFlagsFunction & function, bool grant_option, bool & flags_added, bool & flags_removed) { flags_added = false; flags_removed = false; - modifyFlagsRec(function, flags_added, flags_removed); + modifyFlagsRec(function, grant_option, flags_added, flags_removed); if (flags_added || flags_removed) optimizeTree(); } @@ -415,7 +415,7 @@ private: AccessFlags getAllGrantableFlags() const { return ::DB::getAllGrantableFlags(level); } AccessFlags getChildAllGrantableFlags() const { return ::DB::getAllGrantableFlags(static_cast(level + 1)); } - Node * tryGetChild(const std::string_view & name) const + Node * tryGetChild(std::string_view name) const { if (!children) return nullptr; @@ -425,7 +425,7 @@ private: return &it->second; } - Node & getChild(const std::string_view & name) + Node & getChild(std::string_view name) { auto * child = tryGetChild(name); if (child) @@ -669,11 +669,11 @@ private: } template - void modifyFlagsRec(const ModifyFlagsFunction & function, bool & flags_added, bool & flags_removed, const ParentNames & ... parent_names) + void modifyFlagsRec(const ModifyFlagsFunction & function, bool grant_option, bool & flags_added, bool & flags_removed, const ParentNames & ... 
parent_names) { - auto invoke = [&function](const AccessFlags & flags_, const AccessFlags & min_flags_with_children_, const AccessFlags & max_flags_with_children_, std::string_view database_ = {}, std::string_view table_ = {}, std::string_view column_ = {}) -> AccessFlags + auto invoke = [function, grant_option](const AccessFlags & flags_, const AccessFlags & min_flags_with_children_, const AccessFlags & max_flags_with_children_, std::string_view database_ = {}, std::string_view table_ = {}, std::string_view column_ = {}) -> AccessFlags { - return function(flags_, min_flags_with_children_, max_flags_with_children_, database_, table_, column_); + return function(flags_, min_flags_with_children_, max_flags_with_children_, database_, table_, column_, grant_option); }; if constexpr (sizeof...(ParentNames) < 3) @@ -683,7 +683,7 @@ private: for (auto & child : *children | boost::adaptors::map_values) { const String & child_name = *child.node_name; - child.modifyFlagsRec(function, flags_added, flags_removed, parent_names..., child_name); + child.modifyFlagsRec(function, grant_option, flags_added, flags_removed, parent_names..., child_name); } } } @@ -819,20 +819,20 @@ void AccessRights::grantImpl(const AccessRightsElements & elements) } void AccessRights::grant(const AccessFlags & flags) { grantImpl(flags); } -void AccessRights::grant(const AccessFlags & flags, const std::string_view & database) { grantImpl(flags, database); } -void AccessRights::grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) { grantImpl(flags, database, table); } -void AccessRights::grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) { grantImpl(flags, database, table, column); } -void AccessRights::grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) { grantImpl(flags, database, table, columns); } -void AccessRights::grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) { grantImpl(flags, database, table, columns); } +void AccessRights::grant(const AccessFlags & flags, std::string_view database) { grantImpl(flags, database); } +void AccessRights::grant(const AccessFlags & flags, std::string_view database, std::string_view table) { grantImpl(flags, database, table); } +void AccessRights::grant(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) { grantImpl(flags, database, table, column); } +void AccessRights::grant(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) { grantImpl(flags, database, table, columns); } +void AccessRights::grant(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) { grantImpl(flags, database, table, columns); } void AccessRights::grant(const AccessRightsElement & element) { grantImpl(element); } void AccessRights::grant(const AccessRightsElements & elements) { grantImpl(elements); } void AccessRights::grantWithGrantOption(const AccessFlags & flags) { grantImpl(flags); } -void AccessRights::grantWithGrantOption(const AccessFlags & flags, const std::string_view & database) { grantImpl(flags, database); } -void AccessRights::grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) { grantImpl(flags, database, table); } 
-void AccessRights::grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) { grantImpl(flags, database, table, column); } -void AccessRights::grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) { grantImpl(flags, database, table, columns); } -void AccessRights::grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) { grantImpl(flags, database, table, columns); } +void AccessRights::grantWithGrantOption(const AccessFlags & flags, std::string_view database) { grantImpl(flags, database); } +void AccessRights::grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) { grantImpl(flags, database, table); } +void AccessRights::grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) { grantImpl(flags, database, table, column); } +void AccessRights::grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) { grantImpl(flags, database, table, columns); } +void AccessRights::grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) { grantImpl(flags, database, table, columns); } void AccessRights::grantWithGrantOption(const AccessRightsElement & element) { grantImpl(element); } void AccessRights::grantWithGrantOption(const AccessRightsElements & elements) { grantImpl(elements); } @@ -892,20 +892,20 @@ void AccessRights::revokeImpl(const AccessRightsElements & elements) } void AccessRights::revoke(const AccessFlags & flags) { revokeImpl(flags); } -void AccessRights::revoke(const AccessFlags & flags, const std::string_view & database) { revokeImpl(flags, database); } -void AccessRights::revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) { revokeImpl(flags, database, table); } -void AccessRights::revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) { revokeImpl(flags, database, table, column); } -void AccessRights::revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) { revokeImpl(flags, database, table, columns); } -void AccessRights::revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) { revokeImpl(flags, database, table, columns); } +void AccessRights::revoke(const AccessFlags & flags, std::string_view database) { revokeImpl(flags, database); } +void AccessRights::revoke(const AccessFlags & flags, std::string_view database, std::string_view table) { revokeImpl(flags, database, table); } +void AccessRights::revoke(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) { revokeImpl(flags, database, table, column); } +void AccessRights::revoke(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) { revokeImpl(flags, database, table, columns); } +void AccessRights::revoke(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) { revokeImpl(flags, database, table, columns); } 
void AccessRights::revoke(const AccessRightsElement & element) { revokeImpl(element); } void AccessRights::revoke(const AccessRightsElements & elements) { revokeImpl(elements); } void AccessRights::revokeGrantOption(const AccessFlags & flags) { revokeImpl(flags); } -void AccessRights::revokeGrantOption(const AccessFlags & flags, const std::string_view & database) { revokeImpl(flags, database); } -void AccessRights::revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) { revokeImpl(flags, database, table); } -void AccessRights::revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) { revokeImpl(flags, database, table, column); } -void AccessRights::revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) { revokeImpl(flags, database, table, columns); } -void AccessRights::revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) { revokeImpl(flags, database, table, columns); } +void AccessRights::revokeGrantOption(const AccessFlags & flags, std::string_view database) { revokeImpl(flags, database); } +void AccessRights::revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) { revokeImpl(flags, database, table); } +void AccessRights::revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) { revokeImpl(flags, database, table, column); } +void AccessRights::revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) { revokeImpl(flags, database, table, columns); } +void AccessRights::revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) { revokeImpl(flags, database, table, columns); } void AccessRights::revokeGrantOption(const AccessRightsElement & element) { revokeImpl(element); } void AccessRights::revokeGrantOption(const AccessRightsElements & elements) { revokeImpl(elements); } @@ -984,20 +984,20 @@ bool AccessRights::isGrantedImpl(const AccessRightsElements & elements) const } bool AccessRights::isGranted(const AccessFlags & flags) const { return isGrantedImpl(flags); } -bool AccessRights::isGranted(const AccessFlags & flags, const std::string_view & database) const { return isGrantedImpl(flags, database); } -bool AccessRights::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { return isGrantedImpl(flags, database, table); } -bool AccessRights::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return isGrantedImpl(flags, database, table, column); } -bool AccessRights::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return isGrantedImpl(flags, database, table, columns); } -bool AccessRights::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return isGrantedImpl(flags, database, table, columns); } +bool AccessRights::isGranted(const AccessFlags & flags, std::string_view database) const { return isGrantedImpl(flags, 
database); } +bool AccessRights::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const { return isGrantedImpl(flags, database, table); } +bool AccessRights::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return isGrantedImpl(flags, database, table, column); } +bool AccessRights::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return isGrantedImpl(flags, database, table, columns); } +bool AccessRights::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return isGrantedImpl(flags, database, table, columns); } bool AccessRights::isGranted(const AccessRightsElement & element) const { return isGrantedImpl(element); } bool AccessRights::isGranted(const AccessRightsElements & elements) const { return isGrantedImpl(elements); } bool AccessRights::hasGrantOption(const AccessFlags & flags) const { return isGrantedImpl(flags); } -bool AccessRights::hasGrantOption(const AccessFlags & flags, const std::string_view & database) const { return isGrantedImpl(flags, database); } -bool AccessRights::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { return isGrantedImpl(flags, database, table); } -bool AccessRights::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return isGrantedImpl(flags, database, table, column); } -bool AccessRights::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return isGrantedImpl(flags, database, table, columns); } -bool AccessRights::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return isGrantedImpl(flags, database, table, columns); } +bool AccessRights::hasGrantOption(const AccessFlags & flags, std::string_view database) const { return isGrantedImpl(flags, database); } +bool AccessRights::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { return isGrantedImpl(flags, database, table); } +bool AccessRights::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return isGrantedImpl(flags, database, table, column); } +bool AccessRights::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return isGrantedImpl(flags, database, table, columns); } +bool AccessRights::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return isGrantedImpl(flags, database, table, columns); } bool AccessRights::hasGrantOption(const AccessRightsElement & element) const { return isGrantedImpl(element); } bool AccessRights::hasGrantOption(const AccessRightsElements & elements) const { return isGrantedImpl(elements); } @@ -1062,24 +1062,21 @@ void AccessRights::modifyFlags(const ModifyFlagsFunction & function) { if (!root) return; + bool flags_added, flags_removed; - root->modifyFlags(function, flags_added, flags_removed); + root->modifyFlags(function, false, flags_added, flags_removed); if (flags_removed && root_with_grant_option) 
root_with_grant_option->makeIntersection(*root); -} - -void AccessRights::modifyFlagsWithGrantOption(const ModifyFlagsFunction & function) -{ - if (!root_with_grant_option) - return; - bool flags_added, flags_removed; - root_with_grant_option->modifyFlags(function, flags_added, flags_removed); - if (flags_added) + if (root_with_grant_option) { - if (!root) - root = std::make_unique(); - root->makeUnion(*root_with_grant_option); + root_with_grant_option->modifyFlags(function, true, flags_added, flags_removed); + if (flags_added) + { + if (!root) + root = std::make_unique(); + root->makeUnion(*root_with_grant_option); + } } } diff --git a/src/Access/AccessRights.h b/src/Access/AccessRights.h index b7499d69f70..5efffc0037a 100644 --- a/src/Access/AccessRights.h +++ b/src/Access/AccessRights.h @@ -39,59 +39,59 @@ public: /// Grants access on a specified database/table/column. /// Does nothing if the specified access has been already granted. void grant(const AccessFlags & flags); - void grant(const AccessFlags & flags, const std::string_view & database); - void grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table); - void grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void grant(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns); + void grant(const AccessFlags & flags, std::string_view database); + void grant(const AccessFlags & flags, std::string_view database, std::string_view table); + void grant(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column); + void grant(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns); + void grant(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns); void grant(const AccessRightsElement & element); void grant(const AccessRightsElements & elements); void grantWithGrantOption(const AccessFlags & flags); - void grantWithGrantOption(const AccessFlags & flags, const std::string_view & database); - void grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table); - void grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void grantWithGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns); + void grantWithGrantOption(const AccessFlags & flags, std::string_view database); + void grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table); + void grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column); + void grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns); + void grantWithGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns); void grantWithGrantOption(const 
AccessRightsElement & element); void grantWithGrantOption(const AccessRightsElements & elements); /// Revokes a specified access granted earlier on a specified database/table/column. /// For example, revoke(AccessType::ALL) revokes all grants at all, just like clear(); void revoke(const AccessFlags & flags); - void revoke(const AccessFlags & flags, const std::string_view & database); - void revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table); - void revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void revoke(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns); + void revoke(const AccessFlags & flags, std::string_view database); + void revoke(const AccessFlags & flags, std::string_view database, std::string_view table); + void revoke(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column); + void revoke(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns); + void revoke(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns); void revoke(const AccessRightsElement & element); void revoke(const AccessRightsElements & elements); void revokeGrantOption(const AccessFlags & flags); - void revokeGrantOption(const AccessFlags & flags, const std::string_view & database); - void revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table); - void revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column); - void revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns); - void revokeGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns); + void revokeGrantOption(const AccessFlags & flags, std::string_view database); + void revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table); + void revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column); + void revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns); + void revokeGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns); void revokeGrantOption(const AccessRightsElement & element); void revokeGrantOption(const AccessRightsElements & elements); /// Whether a specified access granted. 
bool isGranted(const AccessFlags & flags) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + bool isGranted(const AccessFlags & flags, std::string_view database) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; bool isGranted(const AccessRightsElement & element) const; bool isGranted(const AccessRightsElements & elements) const; bool hasGrantOption(const AccessFlags & flags) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; bool hasGrantOption(const AccessRightsElement & element) const; bool hasGrantOption(const AccessRightsElements & elements) const; @@ -107,11 +107,11 @@ public: const AccessFlags & flags, const AccessFlags & min_flags_with_children, const AccessFlags & max_flags_with_children, - const std::string_view & database, - const std::string_view & table, - const std::string_view & column)>; + std::string_view database, + std::string_view table, + std::string_view column, + bool grant_option)>; void modifyFlags(const ModifyFlagsFunction & function); - void modifyFlagsWithGrantOption(const ModifyFlagsFunction & function); friend bool operator ==(const AccessRights & left, const AccessRights & right); friend bool operator !=(const AccessRights & left, const AccessRights & right) { return !(left == right); } diff --git a/src/Access/Authentication.cpp 
b/src/Access/Authentication.cpp index 4f304cf5952..c6bbd421c77 100644 --- a/src/Access/Authentication.cpp +++ b/src/Access/Authentication.cpp @@ -26,17 +26,17 @@ namespace return (Util::stringToDigest(password) == password_plaintext); } - bool checkPasswordDoubleSHA1(const std::string_view & password, const Digest & password_double_sha1) + bool checkPasswordDoubleSHA1(std::string_view password, const Digest & password_double_sha1) { return (Util::encodeDoubleSHA1(password) == password_double_sha1); } - bool checkPasswordSHA256(const std::string_view & password, const Digest & password_sha256, const String & salt) + bool checkPasswordSHA256(std::string_view password, const Digest & password_sha256, const String & salt) { return Util::encodeSHA256(String(password).append(salt)) == password_sha256; } - bool checkPasswordDoubleSHA1MySQL(const std::string_view & scramble, const std::string_view & scrambled_password, const Digest & password_double_sha1) + bool checkPasswordDoubleSHA1MySQL(std::string_view scramble, std::string_view scrambled_password, const Digest & password_double_sha1) { /// scrambled_password = SHA1(password) XOR SHA1(scramble SHA1(SHA1(password))) @@ -61,7 +61,7 @@ namespace return calculated_password_double_sha1 == password_double_sha1; } - bool checkPasswordPlainTextMySQL(const std::string_view & scramble, const std::string_view & scrambled_password, const Digest & password_plaintext) + bool checkPasswordPlainTextMySQL(std::string_view scramble, std::string_view scrambled_password, const Digest & password_plaintext) { return checkPasswordDoubleSHA1MySQL(scramble, scrambled_password, Util::encodeDoubleSHA1(password_plaintext)); } diff --git a/src/Access/Common/AccessFlags.cpp b/src/Access/Common/AccessFlags.cpp index 82e1cbfb26b..305ae3f7cf5 100644 --- a/src/Access/Common/AccessFlags.cpp +++ b/src/Access/Common/AccessFlags.cpp @@ -35,7 +35,7 @@ namespace return access_type_to_flags_mapping[static_cast(type)]; } - Flags keywordToFlags(const std::string_view & keyword) const + Flags keywordToFlags(std::string_view keyword) const { auto it = keyword_to_flags_map.find(keyword); if (it == keyword_to_flags_map.end()) @@ -142,14 +142,14 @@ namespace } }; - static String replaceUnderscoreWithSpace(const std::string_view & str) + static String replaceUnderscoreWithSpace(std::string_view str) { String res{str}; boost::replace_all(res, "_", " "); return res; } - static Strings splitAliases(const std::string_view & str) + static Strings splitAliases(std::string_view str) { Strings aliases; boost::split(aliases, str, boost::is_any_of(",")); @@ -160,10 +160,10 @@ namespace static void makeNode( AccessType access_type, - const std::string_view & name, - const std::string_view & aliases, + std::string_view name, + std::string_view aliases, NodeType node_type, - const std::string_view & parent_group_name, + std::string_view parent_group_name, std::unordered_map & nodes, std::unordered_map & owned_nodes, size_t & next_flag) @@ -353,7 +353,7 @@ namespace AccessFlags::AccessFlags(AccessType type) : flags(Helper::instance().accessTypeToFlags(type)) {} -AccessFlags::AccessFlags(const std::string_view & keyword) : flags(Helper::instance().keywordToFlags(keyword)) {} +AccessFlags::AccessFlags(std::string_view keyword) : flags(Helper::instance().keywordToFlags(keyword)) {} AccessFlags::AccessFlags(const std::vector & keywords) : flags(Helper::instance().keywordsToFlags(keywords)) {} AccessFlags::AccessFlags(const Strings & keywords) : flags(Helper::instance().keywordsToFlags(keywords)) {} String 
AccessFlags::toString() const { return Helper::instance().flagsToString(flags); } diff --git a/src/Access/Common/AccessFlags.h b/src/Access/Common/AccessFlags.h index 51bf3cd19b0..5124f4ef332 100644 --- a/src/Access/Common/AccessFlags.h +++ b/src/Access/Common/AccessFlags.h @@ -21,7 +21,7 @@ public: AccessFlags() = default; /// Constructs from a string like "SELECT". - AccessFlags(const std::string_view & keyword); /// NOLINT + AccessFlags(std::string_view keyword); /// NOLINT /// Constructs from a list of strings like "SELECT, UPDATE, INSERT". AccessFlags(const std::vector & keywords); /// NOLINT diff --git a/src/Access/Common/AccessRightsElement.cpp b/src/Access/Common/AccessRightsElement.cpp index 9913fc02f4a..69a2354f25d 100644 --- a/src/Access/Common/AccessRightsElement.cpp +++ b/src/Access/Common/AccessRightsElement.cpp @@ -81,7 +81,7 @@ namespace } bool need_comma = false; - for (const std::string_view & keyword : keywords) + for (std::string_view keyword : keywords) { if (need_comma) result.append(", "); @@ -145,18 +145,18 @@ namespace } -AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_) +AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, std::string_view database_) : access_flags(access_flags_), database(database_), any_database(false) { } -AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_) +AccessRightsElement::AccessRightsElement(AccessFlags access_flags_, std::string_view database_, std::string_view table_) : access_flags(access_flags_), database(database_), table(table_), any_database(false), any_table(false) { } AccessRightsElement::AccessRightsElement( - AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_, const std::string_view & column_) + AccessFlags access_flags_, std::string_view database_, std::string_view table_, std::string_view column_) : access_flags(access_flags_) , database(database_) , table(table_) @@ -169,8 +169,8 @@ AccessRightsElement::AccessRightsElement( AccessRightsElement::AccessRightsElement( AccessFlags access_flags_, - const std::string_view & database_, - const std::string_view & table_, + std::string_view database_, + std::string_view table_, const std::vector & columns_) : access_flags(access_flags_), database(database_), table(table_), any_database(false), any_table(false), any_column(false) { @@ -180,7 +180,7 @@ AccessRightsElement::AccessRightsElement( } AccessRightsElement::AccessRightsElement( - AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_, const Strings & columns_) + AccessFlags access_flags_, std::string_view database_, std::string_view table_, const Strings & columns_) : access_flags(access_flags_) , database(database_) , table(table_) diff --git a/src/Access/Common/AccessRightsElement.h b/src/Access/Common/AccessRightsElement.h index e5bf76d0017..5f65b6bcd12 100644 --- a/src/Access/Common/AccessRightsElement.h +++ b/src/Access/Common/AccessRightsElement.h @@ -28,19 +28,19 @@ struct AccessRightsElement explicit AccessRightsElement(AccessFlags access_flags_) : access_flags(access_flags_) {} - AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_); - AccessRightsElement(AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_); + AccessRightsElement(AccessFlags access_flags_, std::string_view database_); + 
AccessRightsElement(AccessFlags access_flags_, std::string_view database_, std::string_view table_); AccessRightsElement( - AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_, const std::string_view & column_); + AccessFlags access_flags_, std::string_view database_, std::string_view table_, std::string_view column_); AccessRightsElement( AccessFlags access_flags_, - const std::string_view & database_, - const std::string_view & table_, + std::string_view database_, + std::string_view table_, const std::vector & columns_); AccessRightsElement( - AccessFlags access_flags_, const std::string_view & database_, const std::string_view & table_, const Strings & columns_); + AccessFlags access_flags_, std::string_view database_, std::string_view table_, const Strings & columns_); bool empty() const { return !access_flags || (!any_column && columns.empty()); } diff --git a/src/Access/Common/AccessType.cpp b/src/Access/Common/AccessType.cpp index d44d70d78b2..4df1e1bc77f 100644 --- a/src/Access/Common/AccessType.cpp +++ b/src/Access/Common/AccessType.cpp @@ -35,7 +35,7 @@ namespace #undef ACCESS_TYPE_TO_STRING_CONVERTER_ADD_TO_MAPPING } - void addToMapping(AccessType type, const std::string_view & str) + void addToMapping(AccessType type, std::string_view str) { String str2{str}; boost::replace_all(str2, "_", " "); diff --git a/src/Access/Common/AllowedClientHosts.cpp b/src/Access/Common/AllowedClientHosts.cpp index 85d7065d823..efbdf3924e8 100644 --- a/src/Access/Common/AllowedClientHosts.cpp +++ b/src/Access/Common/AllowedClientHosts.cpp @@ -110,18 +110,24 @@ namespace } /// Returns the host name by its address. - String getHostByAddress(const IPAddress & address) + Strings getHostsByAddress(const IPAddress & address) { - String host = DNSResolver::instance().reverseResolve(address); + auto hosts = DNSResolver::instance().reverseResolve(address); - /// Check that PTR record is resolved back to client address - if (!isAddressOfHost(address, host)) - throw Exception("Host " + String(host) + " isn't resolved back to " + address.toString(), ErrorCodes::DNS_ERROR); + if (hosts.empty()) + throw Exception(ErrorCodes::DNS_ERROR, "{} could not be resolved", address.toString()); - return host; + + for (const auto & host : hosts) + { + /// Check that PTR record is resolved back to client address + if (!isAddressOfHost(address, host)) + throw Exception(ErrorCodes::DNS_ERROR, "Host {} isn't resolved back to {}", host, address.toString()); + } + + return hosts; } - void parseLikePatternIfIPSubnet(const String & pattern, IPSubnet & subnet, IPAddress::Family address_family) { size_t slash = pattern.find('/'); @@ -520,20 +526,29 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const return true; /// Check `name_regexps`. 
- std::optional resolved_host; + std::optional resolved_hosts; auto check_name_regexp = [&](const String & name_regexp_) { try { if (boost::iequals(name_regexp_, "localhost")) return is_client_local(); - if (!resolved_host) - resolved_host = getHostByAddress(client_v6); - if (resolved_host->empty()) - return false; - Poco::RegularExpression re(name_regexp_); - Poco::RegularExpression::Match match; - return re.match(*resolved_host, match) != 0; + if (!resolved_hosts) + { + resolved_hosts = getHostsByAddress(client_address); + } + + for (const auto & host : resolved_hosts.value()) + { + Poco::RegularExpression re(name_regexp_); + Poco::RegularExpression::Match match; + if (re.match(host, match) != 0) + { + return true; + } + } + + return false; } catch (const Exception & e) { diff --git a/src/Access/Common/AuthenticationData.cpp b/src/Access/Common/AuthenticationData.cpp index db0a5d54a63..f3d3bb5b758 100644 --- a/src/Access/Common/AuthenticationData.cpp +++ b/src/Access/Common/AuthenticationData.cpp @@ -71,7 +71,7 @@ const AuthenticationTypeInfo & AuthenticationTypeInfo::get(AuthenticationType ty } -AuthenticationData::Digest AuthenticationData::Util::encodeSHA256(const std::string_view & text [[maybe_unused]]) +AuthenticationData::Digest AuthenticationData::Util::encodeSHA256(std::string_view text [[maybe_unused]]) { #if USE_SSL Digest hash; @@ -86,7 +86,7 @@ AuthenticationData::Digest AuthenticationData::Util::encodeSHA256(const std::str } -AuthenticationData::Digest AuthenticationData::Util::encodeSHA1(const std::string_view & text) +AuthenticationData::Digest AuthenticationData::Util::encodeSHA1(std::string_view text) { Poco::SHA1Engine engine; engine.update(text.data(), text.size()); diff --git a/src/Access/Common/AuthenticationData.h b/src/Access/Common/AuthenticationData.h index 2837e0f10a1..ced9fcd4b6d 100644 --- a/src/Access/Common/AuthenticationData.h +++ b/src/Access/Common/AuthenticationData.h @@ -96,11 +96,11 @@ public: struct Util { - static Digest stringToDigest(const std::string_view & text) { return Digest(text.data(), text.data() + text.size()); } - static Digest encodeSHA256(const std::string_view & text); - static Digest encodeSHA1(const std::string_view & text); + static Digest stringToDigest(std::string_view text) { return Digest(text.data(), text.data() + text.size()); } + static Digest encodeSHA256(std::string_view text); + static Digest encodeSHA1(std::string_view text); static Digest encodeSHA1(const Digest & text) { return encodeSHA1(std::string_view{reinterpret_cast(text.data()), text.size()}); } - static Digest encodeDoubleSHA1(const std::string_view & text) { return encodeSHA1(encodeSHA1(text)); } + static Digest encodeDoubleSHA1(std::string_view text) { return encodeSHA1(encodeSHA1(text)); } static Digest encodeDoubleSHA1(const Digest & text) { return encodeSHA1(encodeSHA1(text)); } }; diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp index 995a46d07ca..49736c76994 100644 --- a/src/Access/ContextAccess.cpp +++ b/src/Access/ContextAccess.cpp @@ -44,9 +44,17 @@ namespace } - AccessRights addImplicitAccessRights(const AccessRights & access) + AccessRights addImplicitAccessRights(const AccessRights & access, const AccessControl & access_control) { - auto modifier = [&](const AccessFlags & flags, const AccessFlags & min_flags_with_children, const AccessFlags & max_flags_with_children, const std::string_view & database, const std::string_view & table, const std::string_view & column) -> AccessFlags + AccessFlags max_flags; + + auto modifier = 
[&](const AccessFlags & flags, + const AccessFlags & min_flags_with_children, + const AccessFlags & max_flags_with_children, + std::string_view database, + std::string_view table, + std::string_view column, + bool /* grant_option */) -> AccessFlags { size_t level = !database.empty() + !table.empty() + !column.empty(); AccessFlags res = flags; @@ -115,17 +123,80 @@ namespace res |= show_databases; } + max_flags |= res; + return res; }; AccessRights res = access; res.modifyFlags(modifier); - res.modifyFlagsWithGrantOption(modifier); - /// Anyone has access to the "system" and "information_schema" database. - res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE); - res.grant(AccessType::SELECT, DatabaseCatalog::INFORMATION_SCHEMA); - res.grant(AccessType::SELECT, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE); + /// If "select_from_system_db_requires_grant" is enabled we provide implicit grants only for a few tables in the system database. + if (access_control.doesSelectFromSystemDatabaseRequireGrant()) + { + const char * always_accessible_tables[] = { + /// Constant tables + "one", + + /// "numbers", "numbers_mt", "zeros", "zeros_mt" were excluded because they can generate lots of values and + /// that can decrease performance in some cases. + + "contributors", + "licenses", + "time_zones", + "collations", + + "formats", + "privileges", + "data_type_families", + "table_engines", + "table_functions", + "aggregate_function_combinators", + + "functions", /// Can contain user-defined functions + + /// The following tables hide some rows if the current user doesn't have corresponding SHOW privileges. + "databases", + "tables", + "columns", + + /// Specific to the current session + "settings", + "current_roles", + "enabled_roles", + "quota_usage" + }; + + for (const auto * table_name : always_accessible_tables) + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE, table_name); + + if (max_flags.contains(AccessType::SHOW_USERS)) + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE, "users"); + + if (max_flags.contains(AccessType::SHOW_ROLES)) + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE, "roles"); + + if (max_flags.contains(AccessType::SHOW_ROW_POLICIES)) + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE, "row_policies"); + + if (max_flags.contains(AccessType::SHOW_SETTINGS_PROFILES)) + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE, "settings_profiles"); + + if (max_flags.contains(AccessType::SHOW_QUOTAS)) + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE, "quotas"); + } + else + { + res.grant(AccessType::SELECT, DatabaseCatalog::SYSTEM_DATABASE); + } + + /// If "select_from_information_schema_requires_grant" is enabled we don't provide implicit grants for the information_schema database. + if (!access_control.doesSelectFromInformationSchemaRequireGrant()) + { + res.grant(AccessType::SELECT, DatabaseCatalog::INFORMATION_SCHEMA); + res.grant(AccessType::SELECT, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE); + } + return res; } @@ -141,7 +212,7 @@ namespace std::string_view getDatabase() { return {}; } template - std::string_view getDatabase(const std::string_view & arg1, const OtherArgs &...) { return arg1; } + std::string_view getDatabase(std::string_view arg1, const OtherArgs &...) 
{ return arg1; } } @@ -247,7 +318,7 @@ void ContextAccess::setRolesInfo(const std::shared_ptr & void ContextAccess::calculateAccessRights() const { access = std::make_shared(mixAccessRightsFromUserAndRoles(*user, *roles_info)); - access_with_implicit = std::make_shared(addImplicitAccessRights(*access)); + access_with_implicit = std::make_shared(addImplicitAccessRights(*access, *access_control)); if (trace_log) { @@ -342,7 +413,7 @@ std::shared_ptr ContextAccess::getFullAccess() auto full_access = std::shared_ptr(new ContextAccess); full_access->is_full_access = true; full_access->access = std::make_shared(AccessRights::getFullAccess()); - full_access->access_with_implicit = std::make_shared(addImplicitAccessRights(*full_access->access)); + full_access->access_with_implicit = full_access->access; return full_access; }(); return res; @@ -413,7 +484,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg }; if (is_full_access) - return access_granted(); + return true; if (user_was_dropped) return access_denied("User has been dropped", ErrorCodes::UNKNOWN_USER); @@ -422,7 +493,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg flags &= ~AccessType::CLUSTER; if (!flags) - return access_granted(); + return true; /// Access to temporary tables is controlled in an unusual way, not like normal tables. /// Creating of temporary tables is controlled by AccessType::CREATE_TEMPORARY_TABLES grant, @@ -519,7 +590,7 @@ bool ContextAccess::checkAccessImpl(const AccessFlags & flags) const } template -bool ContextAccess::checkAccessImpl(const AccessFlags & flags, const std::string_view & database, const Args &... args) const +bool ContextAccess::checkAccessImpl(const AccessFlags & flags, std::string_view database, const Args &... args) const { return checkAccessImplHelper(flags, database.empty() ? 
params.current_database : database, args...); } @@ -564,38 +635,38 @@ bool ContextAccess::checkAccessImpl(const AccessRightsElements & elements) const } bool ContextAccess::isGranted(const AccessFlags & flags) const { return checkAccessImpl(flags); } -bool ContextAccess::isGranted(const AccessFlags & flags, const std::string_view & database) const { return checkAccessImpl(flags, database); } -bool ContextAccess::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { return checkAccessImpl(flags, database, table); } -bool ContextAccess::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return checkAccessImpl(flags, database, table, column); } -bool ContextAccess::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); } -bool ContextAccess::isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); } +bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database) const { return checkAccessImpl(flags, database); } +bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(flags, database, table); } +bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(flags, database, table, column); } +bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); } +bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); } bool ContextAccess::isGranted(const AccessRightsElement & element) const { return checkAccessImpl(element); } bool ContextAccess::isGranted(const AccessRightsElements & elements) const { return checkAccessImpl(elements); } bool ContextAccess::hasGrantOption(const AccessFlags & flags) const { return checkAccessImpl(flags); } -bool ContextAccess::hasGrantOption(const AccessFlags & flags, const std::string_view & database) const { return checkAccessImpl(flags, database); } -bool ContextAccess::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { return checkAccessImpl(flags, database, table); } -bool ContextAccess::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return checkAccessImpl(flags, database, table, column); } -bool ContextAccess::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); } -bool ContextAccess::hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); } +bool ContextAccess::hasGrantOption(const AccessFlags & flags, 
std::string_view database) const { return checkAccessImpl(flags, database); } +bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(flags, database, table); } +bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(flags, database, table, column); } +bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); } +bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); } bool ContextAccess::hasGrantOption(const AccessRightsElement & element) const { return checkAccessImpl(element); } bool ContextAccess::hasGrantOption(const AccessRightsElements & elements) const { return checkAccessImpl(elements); } void ContextAccess::checkAccess(const AccessFlags & flags) const { checkAccessImpl(flags); } -void ContextAccess::checkAccess(const AccessFlags & flags, const std::string_view & database) const { checkAccessImpl(flags, database); } -void ContextAccess::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { checkAccessImpl(flags, database, table); } -void ContextAccess::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { checkAccessImpl(flags, database, table, column); } -void ContextAccess::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { checkAccessImpl(flags, database, table, columns); } -void ContextAccess::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { checkAccessImpl(flags, database, table, columns); } +void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database) const { checkAccessImpl(flags, database); } +void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl(flags, database, table); } +void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl(flags, database, table, column); } +void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { checkAccessImpl(flags, database, table, columns); } +void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl(flags, database, table, columns); } void ContextAccess::checkAccess(const AccessRightsElement & element) const { checkAccessImpl(element); } void ContextAccess::checkAccess(const AccessRightsElements & elements) const { checkAccessImpl(elements); } void ContextAccess::checkGrantOption(const AccessFlags & flags) const { checkAccessImpl(flags); } -void ContextAccess::checkGrantOption(const AccessFlags & flags, const std::string_view & database) const { checkAccessImpl(flags, database); } -void ContextAccess::checkGrantOption(const 
AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { checkAccessImpl(flags, database, table); } -void ContextAccess::checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { checkAccessImpl(flags, database, table, column); } -void ContextAccess::checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { checkAccessImpl(flags, database, table, columns); } -void ContextAccess::checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { checkAccessImpl(flags, database, table, columns); } +void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database) const { checkAccessImpl(flags, database); } +void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl(flags, database, table); } +void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl(flags, database, table, column); } +void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { checkAccessImpl(flags, database, table, columns); } +void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl(flags, database, table, columns); } void ContextAccess::checkGrantOption(const AccessRightsElement & element) const { checkAccessImpl(element); } void ContextAccess::checkGrantOption(const AccessRightsElements & elements) const { checkAccessImpl(elements); } diff --git a/src/Access/ContextAccess.h b/src/Access/ContextAccess.h index 729574898aa..fa3523977e7 100644 --- a/src/Access/ContextAccess.h +++ b/src/Access/ContextAccess.h @@ -101,40 +101,40 @@ public: /// Checks if a specified access is granted, and throws an exception if not. /// Empty database means the current database. 
void checkAccess(const AccessFlags & flags) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + void checkAccess(const AccessFlags & flags, std::string_view database) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; void checkAccess(const AccessRightsElement & element) const; void checkAccess(const AccessRightsElements & elements) const; void checkGrantOption(const AccessFlags & flags) const; - void checkGrantOption(const AccessFlags & flags, const std::string_view & database) const; - void checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - void checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - void checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - void checkGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + void checkGrantOption(const AccessFlags & flags, std::string_view database) const; + void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const; + void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; void checkGrantOption(const AccessRightsElement & element) const; void checkGrantOption(const AccessRightsElements & elements) const; /// Checks if a specified access is granted, and returns false if not. /// Empty database means the current database. 
bool isGranted(const AccessFlags & flags) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool isGranted(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + bool isGranted(const AccessFlags & flags, std::string_view database) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; bool isGranted(const AccessRightsElement & element) const; bool isGranted(const AccessRightsElements & elements) const; bool hasGrantOption(const AccessFlags & flags) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - bool hasGrantOption(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; bool hasGrantOption(const AccessRightsElement & element) const; bool hasGrantOption(const AccessRightsElements & elements) const; @@ -180,7 +180,7 @@ private: bool checkAccessImpl(const AccessFlags & flags) const; template - bool checkAccessImpl(const AccessFlags & flags, const std::string_view & database, const Args &... args) const; + bool checkAccessImpl(const AccessFlags & flags, std::string_view database, const Args &... 
args) const; template bool checkAccessImpl(const AccessRightsElement & element) const; diff --git a/src/Access/DiskAccessStorage.cpp b/src/Access/DiskAccessStorage.cpp index 994abc7b53a..0cbe420f345 100644 --- a/src/Access/DiskAccessStorage.cpp +++ b/src/Access/DiskAccessStorage.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/src/Access/LDAPClient.cpp b/src/Access/LDAPClient.cpp index 3486be1de33..ff1ee6f3609 100644 --- a/src/Access/LDAPClient.cpp +++ b/src/Access/LDAPClient.cpp @@ -509,7 +509,6 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params) if (referrals) { SCOPE_EXIT({ -// ldap_value_free(referrals); ber_memvfree(reinterpret_cast(referrals)); referrals = nullptr; }); diff --git a/src/Access/SettingsConstraints.cpp b/src/Access/SettingsConstraints.cpp index 6084138f306..34f2e10dc83 100644 --- a/src/Access/SettingsConstraints.cpp +++ b/src/Access/SettingsConstraints.cpp @@ -36,12 +36,12 @@ void SettingsConstraints::clear() } -void SettingsConstraints::setMinValue(const std::string_view & setting_name, const Field & min_value) +void SettingsConstraints::setMinValue(std::string_view setting_name, const Field & min_value) { getConstraintRef(setting_name).min_value = Settings::castValueUtil(setting_name, min_value); } -Field SettingsConstraints::getMinValue(const std::string_view & setting_name) const +Field SettingsConstraints::getMinValue(std::string_view setting_name) const { const auto * ptr = tryGetConstraint(setting_name); if (ptr) @@ -51,12 +51,12 @@ Field SettingsConstraints::getMinValue(const std::string_view & setting_name) co } -void SettingsConstraints::setMaxValue(const std::string_view & setting_name, const Field & max_value) +void SettingsConstraints::setMaxValue(std::string_view setting_name, const Field & max_value) { getConstraintRef(setting_name).max_value = Settings::castValueUtil(setting_name, max_value); } -Field SettingsConstraints::getMaxValue(const std::string_view & setting_name) const +Field SettingsConstraints::getMaxValue(std::string_view setting_name) const { const auto * ptr = tryGetConstraint(setting_name); if (ptr) @@ -66,12 +66,12 @@ Field SettingsConstraints::getMaxValue(const std::string_view & setting_name) co } -void SettingsConstraints::setReadOnly(const std::string_view & setting_name, bool read_only) +void SettingsConstraints::setReadOnly(std::string_view setting_name, bool read_only) { getConstraintRef(setting_name).read_only = read_only; } -bool SettingsConstraints::isReadOnly(const std::string_view & setting_name) const +bool SettingsConstraints::isReadOnly(std::string_view setting_name) const { const auto * ptr = tryGetConstraint(setting_name); if (ptr) @@ -81,7 +81,7 @@ bool SettingsConstraints::isReadOnly(const std::string_view & setting_name) cons } -void SettingsConstraints::set(const std::string_view & setting_name, const Field & min_value, const Field & max_value, bool read_only) +void SettingsConstraints::set(std::string_view setting_name, const Field & min_value, const Field & max_value, bool read_only) { auto & ref = getConstraintRef(setting_name); ref.min_value = Settings::castValueUtil(setting_name, min_value); @@ -89,7 +89,7 @@ void SettingsConstraints::set(const std::string_view & setting_name, const Field ref.read_only = read_only; } -void SettingsConstraints::get(const std::string_view & setting_name, Field & min_value, Field & max_value, bool & read_only) const +void SettingsConstraints::get(std::string_view setting_name, Field & min_value, Field & 
max_value, bool & read_only) const { const auto * ptr = tryGetConstraint(setting_name); if (ptr) @@ -318,7 +318,7 @@ bool SettingsConstraints::checkImpl(const Settings & current_settings, SettingCh } -SettingsConstraints::Constraint & SettingsConstraints::getConstraintRef(const std::string_view & setting_name) +SettingsConstraints::Constraint & SettingsConstraints::getConstraintRef(std::string_view setting_name) { auto it = constraints.find(setting_name); if (it == constraints.end()) @@ -331,7 +331,7 @@ SettingsConstraints::Constraint & SettingsConstraints::getConstraintRef(const st return it->second; } -const SettingsConstraints::Constraint * SettingsConstraints::tryGetConstraint(const std::string_view & setting_name) const +const SettingsConstraints::Constraint * SettingsConstraints::tryGetConstraint(std::string_view setting_name) const { auto it = constraints.find(setting_name); if (it == constraints.end()) diff --git a/src/Access/SettingsConstraints.h b/src/Access/SettingsConstraints.h index f7bca1eafb3..645a690e051 100644 --- a/src/Access/SettingsConstraints.h +++ b/src/Access/SettingsConstraints.h @@ -61,17 +61,17 @@ public: void clear(); bool empty() const { return constraints.empty(); } - void setMinValue(const std::string_view & setting_name, const Field & min_value); - Field getMinValue(const std::string_view & setting_name) const; + void setMinValue(std::string_view setting_name, const Field & min_value); + Field getMinValue(std::string_view setting_name) const; - void setMaxValue(const std::string_view & setting_name, const Field & max_value); - Field getMaxValue(const std::string_view & setting_name) const; + void setMaxValue(std::string_view setting_name, const Field & max_value); + Field getMaxValue(std::string_view setting_name) const; - void setReadOnly(const std::string_view & setting_name, bool read_only); - bool isReadOnly(const std::string_view & setting_name) const; + void setReadOnly(std::string_view setting_name, bool read_only); + bool isReadOnly(std::string_view setting_name) const; - void set(const std::string_view & setting_name, const Field & min_value, const Field & max_value, bool read_only); - void get(const std::string_view & setting_name, Field & min_value, Field & max_value, bool & read_only) const; + void set(std::string_view setting_name, const Field & min_value, const Field & max_value, bool read_only); + void get(std::string_view setting_name, Field & min_value, Field & max_value, bool & read_only) const; void merge(const SettingsConstraints & other); @@ -105,8 +105,8 @@ private: }; bool checkImpl(const Settings & current_settings, SettingChange & change, ReactionOnViolation reaction) const; - Constraint & getConstraintRef(const std::string_view & setting_name); - const Constraint * tryGetConstraint(const std::string_view & setting_name) const; + Constraint & getConstraintRef(std::string_view setting_name); + const Constraint * tryGetConstraint(std::string_view setting_name) const; std::unordered_map constraints; const AccessControl * access_control = nullptr; diff --git a/src/AggregateFunctions/AggregateFunctionMap.h b/src/AggregateFunctions/AggregateFunctionMap.h index 5ccc9041c36..9ed4b48c281 100644 --- a/src/AggregateFunctions/AggregateFunctionMap.h +++ b/src/AggregateFunctions/AggregateFunctionMap.h @@ -132,7 +132,7 @@ public: key_ref = assert_cast(key_column).getDataAt(offset + i); #ifdef __cpp_lib_generic_unordered_lookup - key = static_cast(key_ref); + key = key_ref.toView(); #else key = key_ref.toString(); #endif diff --git 
a/src/AggregateFunctions/ThetaSketchData.h b/src/AggregateFunctions/ThetaSketchData.h index cc35597ba56..f46836ad189 100644 --- a/src/AggregateFunctions/ThetaSketchData.h +++ b/src/AggregateFunctions/ThetaSketchData.h @@ -43,7 +43,7 @@ public: ~ThetaSketchData() = default; /// Insert original value without hash, as `datasketches::update_theta_sketch.update` will do the hash internal. - void insertOriginal(const StringRef & value) + void insertOriginal(StringRef value) { getSkUpdate()->update(value.data, value.size); } diff --git a/src/AggregateFunctions/parseAggregateFunctionParameters.h b/src/AggregateFunctions/parseAggregateFunctionParameters.h index a67bc081303..41a04324f6d 100644 --- a/src/AggregateFunctions/parseAggregateFunctionParameters.h +++ b/src/AggregateFunctions/parseAggregateFunctionParameters.h @@ -8,6 +8,8 @@ namespace DB { +struct Array; + Array getAggregateFunctionParametersArray( const ASTPtr & expression_list, const std::string & error_context, diff --git a/src/Backups/BackupFactory.h b/src/Backups/BackupFactory.h index f9a97e3dfc5..9057d2cbfae 100644 --- a/src/Backups/BackupFactory.h +++ b/src/Backups/BackupFactory.h @@ -25,7 +25,6 @@ public: struct CreateParams { OpenMode open_mode = OpenMode::WRITE; - std::optional backup_uuid; BackupInfo backup_info; std::optional base_backup_info; String compression_method; @@ -34,6 +33,7 @@ public: ContextPtr context; bool is_internal_backup = false; std::shared_ptr backup_coordination; + std::optional backup_uuid; }; static BackupFactory & instance(); diff --git a/src/Backups/BackupIO.h b/src/Backups/BackupIO.h index ec0b2301800..433e81a70a2 100644 --- a/src/Backups/BackupIO.h +++ b/src/Backups/BackupIO.h @@ -23,8 +23,9 @@ class IBackupWriter /// BackupWriterFile, BackupWriterDisk, BackupWriterS3 public: virtual ~IBackupWriter() = default; virtual bool fileExists(const String & file_name) = 0; + virtual bool fileContentsEqual(const String & file_name, const String & expected_file_contents) = 0; virtual std::unique_ptr writeFile(const String & file_name) = 0; - virtual void removeFilesAfterFailure(const Strings & file_names) = 0; + virtual void removeFiles(const Strings & file_names) = 0; }; } diff --git a/src/Backups/BackupIO_Disk.cpp b/src/Backups/BackupIO_Disk.cpp index a5c26bdbed6..537bc667cd4 100644 --- a/src/Backups/BackupIO_Disk.cpp +++ b/src/Backups/BackupIO_Disk.cpp @@ -38,6 +38,25 @@ bool BackupWriterDisk::fileExists(const String & file_name) return disk->exists(path / file_name); } +bool BackupWriterDisk::fileContentsEqual(const String & file_name, const String & expected_file_contents) +{ + if (!disk->exists(path / file_name)) + return false; + + try + { + auto in = disk->readFile(path / file_name); + String actual_file_contents(expected_file_contents.size(), ' '); + return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size()) + && (actual_file_contents == expected_file_contents) && in->eof(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + return false; + } +} + std::unique_ptr BackupWriterDisk::writeFile(const String & file_name) { auto file_path = path / file_name; @@ -45,7 +64,7 @@ std::unique_ptr BackupWriterDisk::writeFile(const String & file_nam return disk->writeFile(file_path); } -void BackupWriterDisk::removeFilesAfterFailure(const Strings & file_names) +void BackupWriterDisk::removeFiles(const Strings & file_names) { for (const auto & file_name : file_names) disk->removeFileIfExists(path / file_name); diff --git a/src/Backups/BackupIO_Disk.h b/src/Backups/BackupIO_Disk.h index 88d70b0f1db..8ba99470938 100644 --- a/src/Backups/BackupIO_Disk.h +++ b/src/Backups/BackupIO_Disk.h @@ -30,8 +30,9 @@ public: ~BackupWriterDisk() override; bool fileExists(const String & file_name) override; + bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override; std::unique_ptr writeFile(const String & file_name) override; - void removeFilesAfterFailure(const Strings & file_names) override; + void removeFiles(const Strings & file_names) override; private: DiskPtr disk; diff --git a/src/Backups/BackupIO_File.cpp b/src/Backups/BackupIO_File.cpp index 8e7bfb5b83e..774d493ee38 100644 --- a/src/Backups/BackupIO_File.cpp +++ b/src/Backups/BackupIO_File.cpp @@ -39,6 +39,25 @@ bool BackupWriterFile::fileExists(const String & file_name) return fs::exists(path / file_name); } +bool BackupWriterFile::fileContentsEqual(const String & file_name, const String & expected_file_contents) +{ + if (!fs::exists(path / file_name)) + return false; + + try + { + auto in = createReadBufferFromFileBase(path / file_name, {}); + String actual_file_contents(expected_file_contents.size(), ' '); + return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size()) + && (actual_file_contents == expected_file_contents) && in->eof(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + return false; + } +} + std::unique_ptr BackupWriterFile::writeFile(const String & file_name) { auto file_path = path / file_name; @@ -46,7 +65,7 @@ std::unique_ptr BackupWriterFile::writeFile(const String & file_nam return std::make_unique(file_path); } -void BackupWriterFile::removeFilesAfterFailure(const Strings & file_names) +void BackupWriterFile::removeFiles(const Strings & file_names) { for (const auto & file_name : file_names) fs::remove(path / file_name); diff --git a/src/Backups/BackupIO_File.h b/src/Backups/BackupIO_File.h index c4aa20718a9..aebf2bdab73 100644 --- a/src/Backups/BackupIO_File.h +++ b/src/Backups/BackupIO_File.h @@ -27,8 +27,9 @@ public: ~BackupWriterFile() override; bool fileExists(const String & file_name) override; + bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override; std::unique_ptr writeFile(const String & file_name) override; - void removeFilesAfterFailure(const Strings & file_names) override; + void removeFiles(const Strings & file_names) override; private: std::filesystem::path path; diff --git a/src/Backups/BackupImpl.cpp b/src/Backups/BackupImpl.cpp index 20b7bf37cfc..d445ef9d52c 100644 --- a/src/Backups/BackupImpl.cpp +++ b/src/Backups/BackupImpl.cpp @@ -37,6 +37,7 @@ namespace ErrorCodes extern const int BACKUP_ENTRY_ALREADY_EXISTS; extern const int BACKUP_ENTRY_NOT_FOUND; extern const int BACKUP_IS_EMPTY; + extern const int FAILED_TO_SYNC_BACKUP_OR_RESTORE; extern const int LOGICAL_ERROR; } @@ -146,9 +147,9 @@ BackupImpl::BackupImpl( const std::optional & base_backup_info_, std::shared_ptr writer_, const ContextPtr & context_, - const std::optional & backup_uuid_, bool is_internal_backup_, - const std::shared_ptr & coordination_) + const std::shared_ptr & coordination_, + const std::optional & backup_uuid_) : backup_name(backup_name_) , archive_params(archive_params_) , use_archives(!archive_params.archive_name.empty()) @@ -177,42 +178,28 @@ BackupImpl::~BackupImpl() } } - void BackupImpl::open(const ContextPtr & context) { std::lock_guard lock{mutex}; - String file_name_to_check_existence; - if (use_archives) - file_name_to_check_existence = archive_params.archive_name; - else - file_name_to_check_existence = ".backup"; - bool backup_exists = (open_mode == OpenMode::WRITE) ? writer->fileExists(file_name_to_check_existence) : reader->fileExists(file_name_to_check_existence); - - if (open_mode == OpenMode::WRITE) - { - if (backup_exists) - throw Exception(ErrorCodes::BACKUP_ALREADY_EXISTS, "Backup {} already exists", backup_name); - } - else - { - if (!backup_exists) - throw Exception(ErrorCodes::BACKUP_NOT_FOUND, "Backup {} not found", backup_name); - } - if (open_mode == OpenMode::WRITE) { timestamp = std::time(nullptr); if (!uuid) uuid = UUIDHelpers::generateV4(); + lock_file_name = use_archives ? (archive_params.archive_name + ".lock") : ".lock"; writing_finalized = false; + + /// Check that we can write a backup there and create the lock file to own this destination. + checkBackupDoesntExist(); + if (!is_internal_backup) + createLockFile(); + checkLockFile(true); } if (open_mode == OpenMode::READ) readBackupMetadata(); - assert(uuid); /// Backup's UUID must be loaded or generated at this point. 
- if (base_backup_info) { BackupFactory::CreateParams params; @@ -253,6 +240,8 @@ time_t BackupImpl::getTimestamp() const void BackupImpl::writeBackupMetadata() { + assert(!is_internal_backup); + Poco::AutoPtr config{new Poco::Util::XMLConfiguration()}; config->setUInt("version", CURRENT_BACKUP_VERSION); config->setString("timestamp", toString(LocalDateTime{timestamp})); @@ -308,6 +297,8 @@ void BackupImpl::writeBackupMetadata() config->save(stream); String str = stream.str(); + checkLockFile(true); + std::unique_ptr out; if (use_archives) out = getArchiveWriter("")->writeFile(".backup"); @@ -321,9 +312,17 @@ void BackupImpl::readBackupMetadata() { std::unique_ptr in; if (use_archives) + { + if (!reader->fileExists(archive_params.archive_name)) + throw Exception(ErrorCodes::BACKUP_NOT_FOUND, "Backup {} not found", backup_name); in = getArchiveReader("")->readFile(".backup"); + } else + { + if (!reader->fileExists(".backup")) + throw Exception(ErrorCodes::BACKUP_NOT_FOUND, "Backup {} not found", backup_name); in = reader->readFile(".backup"); + } String str; readStringUntilEOF(str, *in); @@ -387,6 +386,59 @@ void BackupImpl::readBackupMetadata() } } +void BackupImpl::checkBackupDoesntExist() const +{ + String file_name_to_check_existence; + if (use_archives) + file_name_to_check_existence = archive_params.archive_name; + else + file_name_to_check_existence = ".backup"; + + if (writer->fileExists(file_name_to_check_existence)) + throw Exception(ErrorCodes::BACKUP_ALREADY_EXISTS, "Backup {} already exists", backup_name); + + /// Check that no other backup (excluding internal backups) is writing to the same destination. + if (!is_internal_backup) + { + assert(!lock_file_name.empty()); + if (writer->fileExists(lock_file_name)) + throw Exception(ErrorCodes::BACKUP_ALREADY_EXISTS, "Backup {} is being written already", backup_name); + } +} + +void BackupImpl::createLockFile() +{ + /// Internal backup must not create the lock file (it should be created by the initiator). + assert(!is_internal_backup); + + assert(uuid); + auto out = writer->writeFile(lock_file_name); + writeUUIDText(*uuid, *out); +} + +bool BackupImpl::checkLockFile(bool throw_if_failed) const +{ + if (!lock_file_name.empty() && uuid && writer->fileContentsEqual(lock_file_name, toString(*uuid))) + return true; + + if (throw_if_failed) + { + if (!writer->fileExists(lock_file_name)) + throw Exception(ErrorCodes::FAILED_TO_SYNC_BACKUP_OR_RESTORE, "Lock file {} suddenly disappeared while writing backup {}", lock_file_name, backup_name); + throw Exception(ErrorCodes::BACKUP_ALREADY_EXISTS, "A concurrent backup writing to the same destination {} detected", backup_name); + } + return false; +} + +void BackupImpl::removeLockFile() +{ + if (is_internal_backup) + return; /// Internal backup must not remove the lock file (it's still used by the initiator). + + if (checkLockFile(false)) + writer->removeFiles({lock_file_name}); +} + Strings BackupImpl::listFiles(const String & directory, bool recursive) const { std::lock_guard lock{mutex}; @@ -648,6 +700,9 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry) read_buffer = entry->getReadBuffer(); read_buffer->seek(copy_pos, SEEK_SET); + if (!num_files_written) + checkLockFile(true); + /// Copy the entry's data after `copy_pos`. 
std::unique_ptr out; if (use_archives) @@ -675,6 +730,7 @@ void BackupImpl::writeFile(const String & file_name, BackupEntryPtr entry) copyData(*read_buffer, *out); out->finalize(); + ++num_files_written; } @@ -694,6 +750,7 @@ void BackupImpl::finalizeWriting() { LOG_TRACE(log, "Finalizing backup {}", backup_name); writeBackupMetadata(); + removeLockFile(); LOG_TRACE(log, "Finalized backup {}", backup_name); } @@ -741,6 +798,9 @@ std::shared_ptr BackupImpl::getArchiveWriter(const String & suff void BackupImpl::removeAllFilesAfterFailure() { + if (is_internal_backup) + return; /// Let the initiator remove unnecessary files. + try { LOG_INFO(log, "Removing all files of backup {} after failure", backup_name); @@ -762,7 +822,11 @@ void BackupImpl::removeAllFilesAfterFailure() files_to_remove.push_back(file_info.data_file_name); } - writer->removeFilesAfterFailure(files_to_remove); + if (!checkLockFile(false)) + return; + + writer->removeFiles(files_to_remove); + removeLockFile(); } catch (...) { diff --git a/src/Backups/BackupImpl.h b/src/Backups/BackupImpl.h index f8c5bc0cf5f..ac0662c62c1 100644 --- a/src/Backups/BackupImpl.h +++ b/src/Backups/BackupImpl.h @@ -47,9 +47,9 @@ public: const std::optional & base_backup_info_, std::shared_ptr writer_, const ContextPtr & context_, - const std::optional & backup_uuid_ = {}, bool is_internal_backup_ = false, - const std::shared_ptr & coordination_ = {}); + const std::shared_ptr & coordination_ = {}, + const std::optional & backup_uuid_ = {}); ~BackupImpl() override; @@ -76,12 +76,25 @@ private: void open(const ContextPtr & context); void close(); + + /// Writes the file ".backup" containing backup's metadata. void writeBackupMetadata(); void readBackupMetadata(); + + /// Checks that a new backup doesn't exist yet. + void checkBackupDoesntExist() const; + + /// Lock file named ".lock" and containing the UUID of a backup is used to own the place where we're writing the backup. + /// Thus it will not be allowed to put any other backup to the same place (even if the BACKUP command is executed on a different node). 
+ void createLockFile(); + bool checkLockFile(bool throw_if_failed) const; + void removeLockFile(); + + void removeAllFilesAfterFailure(); + String getArchiveNameWithSuffix(const String & suffix) const; std::shared_ptr getArchiveReader(const String & suffix) const; std::shared_ptr getArchiveWriter(const String & suffix); - void removeAllFilesAfterFailure(); const String backup_name; const ArchiveParams archive_params; @@ -102,6 +115,8 @@ private: mutable std::unordered_map> archive_readers; std::pair> archive_writers[2]; String current_archive_suffix; + String lock_file_name; + size_t num_files_written = 0; bool writing_finalized = false; const Poco::Logger * log; }; diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index 05ba0676ab8..a9ba7cb5f74 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB @@ -15,6 +16,48 @@ namespace ErrorCodes extern const int WRONG_BACKUP_SETTINGS; } + +namespace +{ + struct SettingFieldOptionalUUID + { + std::optional value; + + explicit SettingFieldOptionalUUID(const std::optional & value_) : value(value_) {} + + explicit SettingFieldOptionalUUID(const Field & field) + { + if (field.getType() == Field::Types::Null) + { + value = std::nullopt; + return; + } + + if (field.getType() == Field::Types::String) + { + const String & str = field.get(); + if (str.empty()) + { + value = std::nullopt; + return; + } + + UUID id; + if (tryParse(id, str)) + { + value = id; + return; + } + } + + throw Exception(ErrorCodes::CANNOT_PARSE_BACKUP_SETTINGS, "Cannot parse uuid from {}", field); + } + + explicit operator Field() const { return Field(value ? toString(*value) : ""); } + }; +} + + /// List of backup settings except base_backup_name and cluster_host_ids. #define LIST_OF_BACKUP_SETTINGS(M) \ M(String, compression_method) \ @@ -26,7 +69,8 @@ namespace ErrorCodes M(UInt64, replica_num) \ M(Bool, internal) \ M(String, host_id) \ - M(String, coordination_zk_path) + M(String, coordination_zk_path) \ + M(OptionalUUID, backup_uuid) BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query) { diff --git a/src/Backups/BackupSettings.h b/src/Backups/BackupSettings.h index 8a606ffded8..4e2bad67fce 100644 --- a/src/Backups/BackupSettings.h +++ b/src/Backups/BackupSettings.h @@ -53,6 +53,10 @@ struct BackupSettings /// Path in Zookeeper used to coordinate a distributed backup created by BACKUP ON CLUSTER. String coordination_zk_path; + /// Internal, should not be specified by user. + /// UUID of the backup. If it's not set it will be generated randomly. 
+ std::optional backup_uuid; + static BackupSettings fromBackupQuery(const ASTBackupQuery & query); void copySettingsToQuery(ASTBackupQuery & query) const; diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index bf90d58d009..09614886f06 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -72,12 +72,15 @@ UUID BackupsWorker::start(const ASTPtr & backup_or_restore_query, ContextMutable UUID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & context) { - UUID backup_uuid = UUIDHelpers::generateV4(); auto backup_query = std::static_pointer_cast(query->clone()); auto backup_settings = BackupSettings::fromBackupQuery(*backup_query); auto backup_info = BackupInfo::fromAST(*backup_query->backup_name); bool on_cluster = !backup_query->cluster.empty(); + if (!backup_settings.backup_uuid) + backup_settings.backup_uuid = UUIDHelpers::generateV4(); + UUID backup_uuid = *backup_settings.backup_uuid; + /// Prepare context to use. ContextPtr context_in_use = context; ContextMutablePtr mutable_context; @@ -107,7 +110,7 @@ UUID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & c { if (async) { - query_scope.emplace(context_in_use); + query_scope.emplace(mutable_context); setThreadName("BackupWorker"); } @@ -151,9 +154,9 @@ UUID BackupsWorker::startMakingBackup(const ASTPtr & query, const ContextPtr & c backup_create_params.compression_method = backup_settings.compression_method; backup_create_params.compression_level = backup_settings.compression_level; backup_create_params.password = backup_settings.password; - backup_create_params.backup_uuid = backup_uuid; backup_create_params.is_internal_backup = backup_settings.internal; backup_create_params.backup_coordination = backup_coordination; + backup_create_params.backup_uuid = backup_uuid; BackupMutablePtr backup = BackupFactory::instance().createBackup(backup_create_params); /// Write the backup. 
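The BackupImpl changes above coordinate concurrent writers through a small lock file: the backup's UUID is written into the ".lock" file (or "<archive>.lock") before any data, and checkLockFile() later re-reads it via fileContentsEqual() so a writer can detect that another BACKUP command claimed the same destination. Below is a minimal standalone sketch of that claim-and-verify pattern, assuming plain local files and illustrative names (claimDestination, ownsDestination) rather than the actual IBackupWriter interface used in the diff.

// Minimal sketch (not the BackupImpl/IBackupWriter code above) of the lock-file
// ownership pattern this diff introduces: create "<name>.lock" containing our UUID
// before writing, and re-read it later to verify no concurrent writer took over.
// All names and paths here are illustrative.
#include <filesystem>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

namespace fs = std::filesystem;

// Returns true if `lock_path` exists and still contains `uuid`, i.e. we own the destination.
static bool ownsDestination(const fs::path & lock_path, const std::string & uuid)
{
    std::ifstream in(lock_path);
    if (!in)
        return false;
    std::string contents((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
    return contents == uuid;
}

// Tries to claim the destination: fails if the backup or a foreign lock file already exists.
static bool claimDestination(const fs::path & backup_path, const fs::path & lock_path, const std::string & uuid)
{
    if (fs::exists(backup_path) || fs::exists(lock_path))
        return false; // somebody else has written (or is writing) a backup here
    std::ofstream(lock_path) << uuid;
    return ownsDestination(lock_path, uuid); // re-check to detect a concurrent claim
}

int main()
{
    const std::string uuid = "7f2c3d1e-0000-4000-8000-000000000000"; // normally generated per backup
    const fs::path backup_path = "/tmp/backup_example/.backup";
    const fs::path lock_path = "/tmp/backup_example/.lock";
    fs::create_directories(backup_path.parent_path());

    if (!claimDestination(backup_path, lock_path, uuid))
    {
        std::cerr << "backup destination is already in use\n";
        return 1;
    }

    // ... write backup files here, re-checking ownsDestination() before expensive steps ...

    fs::remove(lock_path); // released only on successful finalization
    return 0;
}

This also explains why the diff threads `backup_uuid` through BackupSettings and BackupFactory::CreateParams: internal (ON CLUSTER) backups reuse the initiator's UUID and therefore neither create nor remove the lock file themselves, leaving ownership of the destination with the initiating node.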
diff --git a/src/Backups/registerBackupEnginesFileAndDisk.cpp b/src/Backups/registerBackupEnginesFileAndDisk.cpp index b1bccb6e914..380ae36a8e3 100644 --- a/src/Backups/registerBackupEnginesFileAndDisk.cpp +++ b/src/Backups/registerBackupEnginesFileAndDisk.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB @@ -180,7 +181,7 @@ void registerBackupEnginesFileAndDisk(BackupFactory & factory) writer = std::make_shared(path); else writer = std::make_shared(disk, path); - return std::make_unique(backup_name, archive_params, params.base_backup_info, writer, params.context, params.backup_uuid, params.is_internal_backup, params.backup_coordination); + return std::make_unique(backup_name, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid); } }; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2424f2c42d8..f4d3be14da6 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -346,6 +346,12 @@ set_source_files_properties( Columns/ColumnString.cpp PROPERTIES COMPILE_FLAGS "${X86_INTRINSICS_FLAGS}") +if (ENABLE_QPL) +set_source_files_properties( + Compression/CompressionCodecDeflateQpl.cpp + PROPERTIES COMPILE_FLAGS "-mwaitpkg") +endif () + target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::re2_st) target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::re2) @@ -376,7 +382,7 @@ if (TARGET ch_contrib::rdkafka) endif() if (TARGET ch_contrib::nats_io) - dbms_target_link_libraries(PRIVATE ch_contrib::nats_io) + dbms_target_link_libraries(PRIVATE ch_contrib::nats_io ch_contrib::uv) endif() if (TARGET ch_contrib::sasl2) @@ -447,6 +453,9 @@ if (TARGET ch_contrib::avrocpp) dbms_target_link_libraries(PRIVATE ch_contrib::avrocpp) endif () +set_source_files_properties(Common/CaresPTRResolver.cpp PROPERTIES COMPILE_FLAGS -Wno-reserved-identifier) +target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::c-ares) + if (TARGET OpenSSL::Crypto) dbms_target_link_libraries (PRIVATE OpenSSL::Crypto) target_link_libraries (clickhouse_common_io PRIVATE OpenSSL::Crypto) @@ -527,6 +536,10 @@ endif () target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4) +if (TARGET ch_contrib::qpl) +dbms_target_link_libraries(PUBLIC ch_contrib::qpl) +endif () + dbms_target_link_libraries(PRIVATE _boost_context) if (ENABLE_NLP) diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 0e243f97aaf..c6f14c7e865 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -152,7 +152,6 @@ static void incrementProfileEventsBlock(Block & dst, const Block & src) auto & dst_column_host_name = typeid_cast(*mutable_columns[name_pos["host_name"]]); auto & dst_array_current_time = typeid_cast(*mutable_columns[name_pos["current_time"]]).getData(); - // auto & dst_array_thread_id = typeid_cast(*mutable_columns[name_pos["thread_id"]]).getData(); auto & dst_array_type = typeid_cast(*mutable_columns[name_pos["type"]]).getData(); auto & dst_column_name = typeid_cast(*mutable_columns[name_pos["name"]]); auto & dst_array_value = typeid_cast(*mutable_columns[name_pos["value"]]).getData(); @@ -601,6 +600,7 @@ void ClientBase::initLogsOutputStream() { WriteBuffer * wb = out_logs_buf.get(); + bool color_logs = false; if (!out_logs_buf) { if (server_logs_file.empty()) @@ -608,11 +608,13 @@ void ClientBase::initLogsOutputStream() /// Use stderr by default out_logs_buf = std::make_unique(STDERR_FILENO); wb = out_logs_buf.get(); + color_logs = stderr_is_a_tty; } else if (server_logs_file 
== "-") { /// Use stdout if --server_logs_file=- specified wb = &std_out; + color_logs = stdout_is_a_tty; } else { @@ -622,7 +624,7 @@ void ClientBase::initLogsOutputStream() } } - logs_out_stream = std::make_unique(*wb, stdout_is_a_tty); + logs_out_stream = std::make_unique(*wb, color_logs); } } @@ -980,8 +982,7 @@ void ClientBase::onProfileEvents(Block & block) else if (event_name == MemoryTracker::USAGE_EVENT_NAME) thread_times[host_name][thread_id].memory_usage = value; } - auto elapsed_time = profile_events.watch.elapsedMicroseconds(); - progress_indication.updateThreadEventData(thread_times, elapsed_time); + progress_indication.updateThreadEventData(thread_times); if (need_render_progress) progress_indication.writeProgress(); @@ -2155,6 +2156,7 @@ void ClientBase::init(int argc, char ** argv) stdin_is_a_tty = isatty(STDIN_FILENO); stdout_is_a_tty = isatty(STDOUT_FILENO); + stderr_is_a_tty = isatty(STDERR_FILENO); terminal_width = getTerminalWidth(); Arguments common_arguments{""}; /// 0th argument is ignored. diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index ec2267a3be6..b012680fc3c 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -173,6 +173,7 @@ protected: bool stdin_is_a_tty = false; /// stdin is a terminal. bool stdout_is_a_tty = false; /// stdout is a terminal. + bool stderr_is_a_tty = false; /// stderr is a terminal. uint64_t terminal_width = 0; ServerConnectionPtr connection; diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index de09c07f4c1..1074adb2bd4 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -50,52 +50,58 @@ static String getLoadSuggestionQuery(Int32 suggestion_limit, bool basic_suggesti { /// NOTE: Once you will update the completion list, /// do not forget to update 01676_clickhouse_client_autocomplete.sh - WriteBufferFromOwnString query; - query << "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" - "SELECT name FROM system.functions" - " UNION ALL " - "SELECT name FROM system.table_engines" - " UNION ALL " - "SELECT name FROM system.formats" - " UNION ALL " - "SELECT name FROM system.table_functions" - " UNION ALL " - "SELECT name FROM system.data_type_families" - " UNION ALL " - "SELECT name FROM system.merge_tree_settings" - " UNION ALL " - "SELECT name FROM system.settings" - " UNION ALL "; + String query; + + auto add_subquery = [&](std::string_view select, std::string_view result_column_name) + { + if (!query.empty()) + query += " UNION ALL "; + query += fmt::format("SELECT * FROM viewIfPermitted({} ELSE null('{} String'))", select, result_column_name); + }; + + auto add_column = [&](std::string_view column_name, std::string_view table_name, bool distinct, std::optional limit) + { + add_subquery( + fmt::format( + "SELECT {}{} FROM system.{}{}", + (distinct ? "DISTINCT " : ""), + column_name, + table_name, + (limit ? 
(" LIMIT " + std::to_string(*limit)) : "")), + column_name); + }; + + add_column("name", "functions", false, {}); + add_column("name", "table_engines", false, {}); + add_column("name", "formats", false, {}); + add_column("name", "table_functions", false, {}); + add_column("name", "data_type_families", false, {}); + add_column("name", "merge_tree_settings", false, {}); + add_column("name", "settings", false, {}); + if (!basic_suggestion) { - query << "SELECT cluster FROM system.clusters" - " UNION ALL " - "SELECT macro FROM system.macros" - " UNION ALL " - "SELECT policy_name FROM system.storage_policies" - " UNION ALL "; + add_column("cluster", "clusters", false, {}); + add_column("macro", "macros", false, {}); + add_column("policy_name", "storage_policies", false, {}); } - query << "SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate"; + + add_subquery("SELECT concat(func.name, comb.name) AS x FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate", "x"); + /// The user may disable loading of databases, tables, columns by setting suggestion_limit to zero. if (suggestion_limit > 0) { - String limit_str = toString(suggestion_limit); - query << " UNION ALL " - "SELECT name FROM system.databases LIMIT " << limit_str - << " UNION ALL " - "SELECT DISTINCT name FROM system.tables LIMIT " << limit_str - << " UNION ALL "; - + add_column("name", "databases", false, suggestion_limit); + add_column("name", "tables", true, suggestion_limit); if (!basic_suggestion) { - query << "SELECT DISTINCT name FROM system.dictionaries LIMIT " << limit_str - << " UNION ALL "; + add_column("name", "dictionaries", true, suggestion_limit); } - query << "SELECT DISTINCT name FROM system.columns LIMIT " << limit_str; + add_column("name", "columns", true, suggestion_limit); } - query << ") WHERE notEmpty(res)"; - return query.str(); + query = "SELECT DISTINCT arrayJoin(extractAll(name, '[\\\\w_]{2,}')) AS res FROM (" + query + ") WHERE notEmpty(res)"; + return query; } template diff --git a/src/Client/Suggest.h b/src/Client/Suggest.h index 65b60ceffc4..25d45f7ffaf 100644 --- a/src/Client/Suggest.h +++ b/src/Client/Suggest.h @@ -28,8 +28,8 @@ public: template void load(ContextPtr context, const ConnectionParameters & connection_parameters, Int32 suggestion_limit); - /// Older server versions cannot execute the query above. - static constexpr int MIN_SERVER_REVISION = 54406; + /// Older server versions cannot execute the query loading suggestions. 
+ static constexpr int MIN_SERVER_REVISION = DBMS_MIN_PROTOCOL_VERSION_WITH_VIEW_IF_PERMITTED; private: void fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query); diff --git a/src/Columns/ColumnLowCardinality.cpp b/src/Columns/ColumnLowCardinality.cpp index 62fb69a47e1..17e9bd97669 100644 --- a/src/Columns/ColumnLowCardinality.cpp +++ b/src/Columns/ColumnLowCardinality.cpp @@ -132,14 +132,12 @@ namespace ColumnLowCardinality::ColumnLowCardinality(MutableColumnPtr && column_unique_, MutableColumnPtr && indexes_, bool is_shared) : dictionary(std::move(column_unique_), is_shared), idx(std::move(indexes_)) { - // idx.check(getDictionary().size()); } void ColumnLowCardinality::insert(const Field & x) { compactIfSharedDictionary(); idx.insertPosition(dictionary.getColumnUnique().uniqueInsert(x)); - // idx.check(getDictionary().size()); } void ColumnLowCardinality::insertDefault() @@ -167,15 +165,12 @@ void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n) const auto & nested = *low_cardinality_src->getDictionary().getNestedColumn(); idx.insertPosition(dictionary.getColumnUnique().uniqueInsertFrom(nested, position)); } - - // idx.check(getDictionary().size()); } void ColumnLowCardinality::insertFromFullColumn(const IColumn & src, size_t n) { compactIfSharedDictionary(); idx.insertPosition(dictionary.getColumnUnique().uniqueInsertFrom(src, n)); - // idx.check(getDictionary().size()); } void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, size_t length) @@ -205,7 +200,6 @@ void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, si auto inserted_indexes = dictionary.getColumnUnique().uniqueInsertRangeFrom(*used_keys, 0, used_keys->size()); idx.insertPositionsRange(*inserted_indexes->index(*sub_idx, 0), 0, length); } - // idx.check(getDictionary().size()); } void ColumnLowCardinality::insertRangeFromFullColumn(const IColumn & src, size_t start, size_t length) @@ -213,7 +207,6 @@ void ColumnLowCardinality::insertRangeFromFullColumn(const IColumn & src, size_t compactIfSharedDictionary(); auto inserted_indexes = dictionary.getColumnUnique().uniqueInsertRangeFrom(src, start, length); idx.insertPositionsRange(*inserted_indexes, 0, length); - // idx.check(getDictionary().size()); } static void checkPositionsAreLimited(const IColumn & positions, UInt64 limit) @@ -254,14 +247,12 @@ void ColumnLowCardinality::insertRangeFromDictionaryEncodedColumn(const IColumn compactIfSharedDictionary(); auto inserted_indexes = dictionary.getColumnUnique().uniqueInsertRangeFrom(keys, 0, keys.size()); idx.insertPositionsRange(*inserted_indexes->index(positions, 0), 0, positions.size()); - // idx.check(getDictionary().size()); } void ColumnLowCardinality::insertData(const char * pos, size_t length) { compactIfSharedDictionary(); idx.insertPosition(dictionary.getColumnUnique().uniqueInsertData(pos, length)); - // idx.check(getDictionary().size()); } StringRef ColumnLowCardinality::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const @@ -276,7 +267,6 @@ const char * ColumnLowCardinality::deserializeAndInsertFromArena(const char * po const char * new_pos; idx.insertPosition(dictionary.getColumnUnique().uniqueDeserializeAndInsertFromArena(pos, new_pos)); - // idx.check(getDictionary().size()); return new_pos; } diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index d8e98ec9406..809024316bf 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp 
@@ -273,14 +273,6 @@ llvm::Value * ColumnNullable::compileComparator(llvm::IRBuilderBase & builder, l b.CreateCondBr(lhs_or_rhs_are_null, lhs_or_rhs_are_null_block, lhs_rhs_are_not_null_block); - // if (unlikely(lval_is_null || rval_is_null)) - // { - // if (lval_is_null && rval_is_null) - // return 0; - // else - // return lval_is_null ? null_direction_hint : -null_direction_hint; - // } - b.SetInsertPoint(lhs_or_rhs_are_null_block); auto * lhs_equals_rhs_result = llvm::ConstantInt::getSigned(b.getInt8Ty(), 0); llvm::Value * lhs_and_rhs_are_null = b.CreateAnd(lhs_is_null_value, rhs_is_null_value); @@ -288,8 +280,6 @@ llvm::Value * ColumnNullable::compileComparator(llvm::IRBuilderBase & builder, l llvm::Value * lhs_or_rhs_are_null_block_result = b.CreateSelect(lhs_and_rhs_are_null, lhs_equals_rhs_result, lhs_is_null_result); b.CreateBr(join_block); - // getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint); - b.SetInsertPoint(lhs_rhs_are_not_null_block); llvm::Value * lhs_rhs_are_not_null_block_result = nested_column->compileComparator(builder, lhs_unwrapped_value, rhs_unwrapped_value, nan_direction_hint); @@ -793,4 +783,18 @@ ColumnPtr makeNullable(const ColumnPtr & column) return ColumnNullable::create(column, ColumnUInt8::create(column->size(), 0)); } +ColumnPtr makeNullableSafe(const ColumnPtr & column) +{ + if (isColumnNullable(*column)) + return column; + + if (isColumnConst(*column)) + return ColumnConst::create(makeNullableSafe(assert_cast(*column).getDataColumnPtr()), column->size()); + + if (column->canBeInsideNullable()) + return makeNullable(column); + + return column; +} + } diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index 52e57f7f0d0..e832f6d20e5 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -223,5 +223,6 @@ private: }; ColumnPtr makeNullable(const ColumnPtr & column); +ColumnPtr makeNullableSafe(const ColumnPtr & column); } diff --git a/src/Columns/ColumnUnique.h b/src/Columns/ColumnUnique.h index 33135224e11..58891e30e12 100644 --- a/src/Columns/ColumnUnique.h +++ b/src/Columns/ColumnUnique.h @@ -509,7 +509,7 @@ MutableColumnPtr ColumnUnique::uniqueInsertRangeImpl( if (secondary_index) next_position += secondary_index->size(); - auto insert_key = [&](const StringRef & ref, ReverseIndex & cur_index) -> MutableColumnPtr + auto insert_key = [&](StringRef ref, ReverseIndex & cur_index) -> MutableColumnPtr { auto inserted_pos = cur_index.insert(ref); positions[num_added_rows] = inserted_pos; @@ -548,7 +548,6 @@ MutableColumnPtr ColumnUnique::uniqueInsertRangeImpl( } } - // checkIndexes(*positions_column, column->size() + (overflowed_keys ? overflowed_keys->size() : 0)); return std::move(positions_column); } diff --git a/src/Columns/ReverseIndex.h b/src/Columns/ReverseIndex.h index 3f4427e17ad..ba6a014b49d 100644 --- a/src/Columns/ReverseIndex.h +++ b/src/Columns/ReverseIndex.h @@ -92,7 +92,7 @@ struct ReverseIndexHashTableCell /// Special case when we want to compare with something not in index_column. /// When we compare something inside column default keyEquals checks only that row numbers are equal. 
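As a usage illustration for the new `makeNullableSafe` helper added above (assuming the ClickHouse column headers; this snippet is a sketch, not part of the patch):

```cpp
#include <Columns/ColumnNullable.h>
#include <Columns/ColumnsNumber.h>

using namespace DB;

void makeNullableSafeExample()
{
    ColumnPtr plain = ColumnUInt64::create();      // ordinary non-nullable column
    ColumnPtr wrapped = makeNullableSafe(plain);   // wrapped into ColumnNullable

    ColumnPtr same = makeNullableSafe(wrapped);    // already Nullable: returned unchanged

    // Columns whose canBeInsideNullable() is false are also returned unchanged,
    // and constant columns are unwrapped, converted, and re-wrapped into ColumnConst.
}
```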
- bool keyEquals(const StringRef & object, size_t hash_ [[maybe_unused]], const State & state) const + bool keyEquals(StringRef object, size_t hash_ [[maybe_unused]], const State & state) const { auto index = key; if constexpr (has_base_index) @@ -322,7 +322,7 @@ public: static constexpr bool is_numeric_column = isNumericColumn(static_cast(nullptr)); static constexpr bool use_saved_hash = !is_numeric_column; - UInt64 insert(const StringRef & data); + UInt64 insert(StringRef data); /// Returns the found data's index in the dictionary. If index is not built, builds it. UInt64 getInsertionPoint(StringRef data) @@ -383,7 +383,7 @@ private: void buildIndex(); - UInt64 getHash(const StringRef & ref) const + UInt64 getHash(StringRef ref) const { if constexpr (is_numeric_column) { @@ -478,7 +478,7 @@ ColumnUInt64::MutablePtr ReverseIndex::calcHashes() const } template -UInt64 ReverseIndex::insert(const StringRef & data) +UInt64 ReverseIndex::insert(StringRef data) { if (!index) buildIndex(); diff --git a/src/Common/ArrayCache.h b/src/Common/ArrayCache.h index 6efa5c92b5b..f01ff94e38b 100644 --- a/src/Common/ArrayCache.h +++ b/src/Common/ArrayCache.h @@ -514,8 +514,6 @@ private: return allocateFromFreeRegion(*free_region, size); } -// std::cerr << "Requested size: " << size << "\n"; - /// Evict something from cache and continue. while (true) { diff --git a/src/Common/CaresPTRResolver.cpp b/src/Common/CaresPTRResolver.cpp new file mode 100644 index 00000000000..f6228e97c02 --- /dev/null +++ b/src/Common/CaresPTRResolver.cpp @@ -0,0 +1,109 @@ +#include "CaresPTRResolver.h" +#include +#include +#include +#include "ares.h" +#include "netdb.h" + +namespace DB +{ + + namespace ErrorCodes + { + extern const int DNS_ERROR; + } + + static void callback(void * arg, int status, int, struct hostent * host) + { + auto * ptr_records = reinterpret_cast*>(arg); + if (status == ARES_SUCCESS && host->h_aliases) + { + int i = 0; + while (auto * ptr_record = host->h_aliases[i]) + { + ptr_records->emplace_back(ptr_record); + i++; + } + } + } + + CaresPTRResolver::CaresPTRResolver(CaresPTRResolver::provider_token) : channel(nullptr) + { + /* + * ares_library_init is not thread safe. Currently, the only other usage of c-ares seems to be in grpc. + * In grpc, ares_library_init seems to be called only in Windows. + * See https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc#L1187 + * That means it's safe to init it here, but we should be cautious when introducing new code that depends on c-ares and even updates + * to grpc. 
As discussed in https://github.com/ClickHouse/ClickHouse/pull/37827#discussion_r919189085, c-ares should be adapted to be atomic + * */ + if (ares_library_init(ARES_LIB_INIT_ALL) != ARES_SUCCESS || ares_init(&channel) != ARES_SUCCESS) + { + throw DB::Exception("Failed to initialize c-ares", DB::ErrorCodes::DNS_ERROR); + } + } + + CaresPTRResolver::~CaresPTRResolver() + { + ares_destroy(channel); + ares_library_cleanup(); + } + + std::vector CaresPTRResolver::resolve(const std::string & ip) + { + std::vector ptr_records; + + resolve(ip, ptr_records); + wait(); + + return ptr_records; + } + + std::vector CaresPTRResolver::resolve_v6(const std::string & ip) + { + std::vector ptr_records; + + resolve_v6(ip, ptr_records); + wait(); + + return ptr_records; + } + + void CaresPTRResolver::resolve(const std::string & ip, std::vector & response) + { + in_addr addr; + + inet_pton(AF_INET, ip.c_str(), &addr); + + ares_gethostbyaddr(channel, reinterpret_cast(&addr), sizeof(addr), AF_INET, callback, &response); + } + + void CaresPTRResolver::resolve_v6(const std::string & ip, std::vector & response) + { + in6_addr addr; + inet_pton(AF_INET6, ip.c_str(), &addr); + + ares_gethostbyaddr(channel, reinterpret_cast(&addr), sizeof(addr), AF_INET6, callback, &response); + } + + void CaresPTRResolver::wait() + { + timeval * tvp, tv; + fd_set read_fds; + fd_set write_fds; + int nfds; + + for (;;) + { + FD_ZERO(&read_fds); + FD_ZERO(&write_fds); + nfds = ares_fds(channel, &read_fds,&write_fds); + if (nfds == 0) + { + break; + } + tvp = ares_timeout(channel, nullptr, &tv); + select(nfds, &read_fds, &write_fds, nullptr, tvp); + ares_process(channel, &read_fds, &write_fds); + } + } +} diff --git a/src/Common/CaresPTRResolver.h b/src/Common/CaresPTRResolver.h new file mode 100644 index 00000000000..fd6a1cf7bc5 --- /dev/null +++ b/src/Common/CaresPTRResolver.h @@ -0,0 +1,42 @@ +#pragma once + +#include "DNSPTRResolver.h" + +using ares_channel = struct ares_channeldata *; + +namespace DB +{ + + /* + * Implements reverse DNS resolution using c-ares lib. System reverse DNS resolution via + * gethostbyaddr or getnameinfo does not work reliably because in some systems + * it returns all PTR records for a given IP and in others it returns only one. 
+ * */ + class CaresPTRResolver : public DNSPTRResolver + { + friend class DNSPTRResolverProvider; + + /* + * Allow only DNSPTRProvider to instantiate this class + * */ + struct provider_token {}; + + public: + explicit CaresPTRResolver(provider_token); + ~CaresPTRResolver() override; + + std::vector resolve(const std::string & ip) override; + + std::vector resolve_v6(const std::string & ip) override; + + private: + void wait(); + + void resolve(const std::string & ip, std::vector & response); + + void resolve_v6(const std::string & ip, std::vector & response); + + ares_channel channel; + }; +} + diff --git a/src/Common/CurrentThread.h b/src/Common/CurrentThread.h index 4888adb511a..fa52fafa9e2 100644 --- a/src/Common/CurrentThread.h +++ b/src/Common/CurrentThread.h @@ -2,7 +2,6 @@ #include #include -#include #include #include @@ -76,7 +75,7 @@ public: static void finalizePerformanceCounters(); /// Returns a non-empty string if the thread is attached to a query - static StringRef getQueryId() + static std::string_view getQueryId() { if (unlikely(!current_thread)) return {}; diff --git a/src/Common/DNSPTRResolver.h b/src/Common/DNSPTRResolver.h new file mode 100644 index 00000000000..e6cce83f79d --- /dev/null +++ b/src/Common/DNSPTRResolver.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include + +namespace DB +{ + struct DNSPTRResolver + { + + virtual ~DNSPTRResolver() = default; + + virtual std::vector resolve(const std::string & ip) = 0; + + virtual std::vector resolve_v6(const std::string & ip) = 0; + + }; +} diff --git a/src/Common/DNSPTRResolverProvider.cpp b/src/Common/DNSPTRResolverProvider.cpp new file mode 100644 index 00000000000..41c73f4f36f --- /dev/null +++ b/src/Common/DNSPTRResolverProvider.cpp @@ -0,0 +1,13 @@ +#include "DNSPTRResolverProvider.h" +#include "CaresPTRResolver.h" + +namespace DB +{ + std::shared_ptr DNSPTRResolverProvider::get() + { + static auto cares_resolver = std::make_shared( + CaresPTRResolver::provider_token {} + ); + return cares_resolver; + } +} diff --git a/src/Common/DNSPTRResolverProvider.h b/src/Common/DNSPTRResolverProvider.h new file mode 100644 index 00000000000..a7f534749e3 --- /dev/null +++ b/src/Common/DNSPTRResolverProvider.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include "DNSPTRResolver.h" + +namespace DB +{ + /* + * Provides a ready-to-use DNSPTRResolver instance. + * It hides 3rd party lib dependencies, handles initialization and lifetime. + * Since `get` function is static, it can be called from any context. Including cached static functions. + * */ + class DNSPTRResolverProvider + { + public: + static std::shared_ptr get(); + }; +} diff --git a/src/Common/DNSResolver.cpp b/src/Common/DNSResolver.cpp index 0616e324b73..10797b7a809 100644 --- a/src/Common/DNSResolver.cpp +++ b/src/Common/DNSResolver.cpp @@ -12,6 +12,7 @@ #include #include #include +#include "DNSPTRResolverProvider.h" namespace ProfileEvents { @@ -138,16 +139,17 @@ static DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host) return addresses; } -static String reverseResolveImpl(const Poco::Net::IPAddress & address) +static Strings reverseResolveImpl(const Poco::Net::IPAddress & address) { - Poco::Net::SocketAddress sock_addr(address, 0); + auto ptr_resolver = DB::DNSPTRResolverProvider::get(); - /// Resolve by hand, because Poco::Net::DNS::hostByAddress(...) does getaddrinfo(...) after getnameinfo(...) 
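A hedged sketch of how calling code consumes the new provider, mirroring what the patched `reverseResolveImpl` does (the helper function below is hypothetical and assumes the new headers; the real caller dispatches on `Poco::Net::IPAddress::family()`):

```cpp
#include <Common/DNSPTRResolverProvider.h>

#include <string>
#include <vector>

// Hypothetical helper (not part of the patch): fetch all PTR records for an IP.
std::vector<std::string> lookupPtrRecords(const std::string & ip, bool is_ipv6)
{
    // The provider hands out a shared, lazily constructed c-ares based resolver.
    auto resolver = DB::DNSPTRResolverProvider::get();
    return is_ipv6 ? resolver->resolve_v6(ip) : resolver->resolve(ip);
}
```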
- char host[1024]; - int err = getnameinfo(sock_addr.addr(), sock_addr.length(), host, sizeof(host), nullptr, 0, NI_NAMEREQD); - if (err) - throw Exception("Cannot getnameinfo(" + address.toString() + "): " + gai_strerror(err), ErrorCodes::DNS_ERROR); - return host; + if (address.family() == Poco::Net::IPAddress::Family::IPv4) + { + return ptr_resolver->resolve(address.toString()); + } else + { + return ptr_resolver->resolve_v6(address.toString()); + } } struct DNSResolver::Impl @@ -235,7 +237,7 @@ std::vector DNSResolver::resolveAddressList(const std: return addresses; } -String DNSResolver::reverseResolve(const Poco::Net::IPAddress & address) +Strings DNSResolver::reverseResolve(const Poco::Net::IPAddress & address) { if (impl->disable_cache) return reverseResolveImpl(address); diff --git a/src/Common/DNSResolver.h b/src/Common/DNSResolver.h index fdd9799f96f..84c88586636 100644 --- a/src/Common/DNSResolver.h +++ b/src/Common/DNSResolver.h @@ -36,8 +36,8 @@ public: std::vector resolveAddressList(const std::string & host, UInt16 port); - /// Accepts host IP and resolves its host name - String reverseResolve(const Poco::Net::IPAddress & address); + /// Accepts host IP and resolves its host names + Strings reverseResolve(const Poco::Net::IPAddress & address); /// Get this server host name String getHostName(); diff --git a/src/Common/DateLUTImpl.cpp b/src/Common/DateLUTImpl.cpp index 869954bb2ae..31290c53b49 100644 --- a/src/Common/DateLUTImpl.cpp +++ b/src/Common/DateLUTImpl.cpp @@ -122,9 +122,6 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_) values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor; values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor; -// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n"; -// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n"; - /// We don't support too large changes. if (values.amount_of_offset_change_value > 24 * 4) values.amount_of_offset_change_value = 24 * 4; diff --git a/src/Common/EventRateMeter.h b/src/Common/EventRateMeter.h new file mode 100644 index 00000000000..f70258faa9e --- /dev/null +++ b/src/Common/EventRateMeter.h @@ -0,0 +1,63 @@ +#pragma once + +#include + +#include + +#include + + +namespace DB +{ + +/// Event count measurement with exponential smoothing intended for computing time derivatives +class EventRateMeter +{ +public: + explicit EventRateMeter(double now, double period_) + : period(period_) + , half_decay_time(period * std::numbers::ln2) // for `ExponentiallySmoothedAverage::sumWeights()` to be equal to `1/period` + { + reset(now); + } + + /// Add `count` events happened at `now` instant. + /// Previous events that are older than `period` from `now` will be forgotten + /// in a way to keep average event rate the same, using exponential smoothing. + /// NOTE: Adding events into distant past (further than `period`) must be avoided. + void add(double now, double count) + { + if (now - period <= start) // precise counting mode + events = ExponentiallySmoothedAverage(events.value + count, now); + else // exponential smoothing mode + events.add(count, now, half_decay_time); + } + + /// Compute average event rate throughout `[now - period, now]` period. 
+ /// If measurements are just started (`now - period < start`), then average + /// is computed based on shorter `[start; now]` period to avoid initial linear growth. + double rate(double now) + { + add(now, 0); + if (unlikely(now <= start)) + return 0; + if (now - period <= start) // precise counting mode + return events.value / (now - start); + else // exponential smoothing mode + return events.get(half_decay_time); // equals to `events.value / period` + } + + void reset(double now) + { + start = now; + events = ExponentiallySmoothedAverage(); + } + +private: + const double period; + const double half_decay_time; + double start; // Instant in past without events before it; when measurement started or reset + ExponentiallySmoothedAverage events; // Estimated number of events in the last `period` +}; + +} diff --git a/src/Common/FileSegment.cpp b/src/Common/FileSegment.cpp index 1183abc0e22..c16d4658ae5 100644 --- a/src/Common/FileSegment.cpp +++ b/src/Common/FileSegment.cpp @@ -104,10 +104,10 @@ String FileSegment::getCallerId() { if (!CurrentThread::isInitialized() || !CurrentThread::get().getQueryContext() - || CurrentThread::getQueryId().size == 0) + || CurrentThread::getQueryId().empty()) return "None:" + toString(getThreadId()); - return CurrentThread::getQueryId().toString() + ":" + toString(getThreadId()); + return std::string(CurrentThread::getQueryId()) + ":" + toString(getThreadId()); } String FileSegment::getOrSetDownloader() diff --git a/src/Common/HashTable/SmallTable.h b/src/Common/HashTable/SmallTable.h index ad9537ff94a..b78901b03f6 100644 --- a/src/Common/HashTable/SmallTable.h +++ b/src/Common/HashTable/SmallTable.h @@ -74,7 +74,6 @@ public: using key_type = Key; using mapped_type = typename Cell::mapped_type; using value_type = typename Cell::value_type; - using cell_type = Cell; class Reader final : private Cell::State { @@ -247,39 +246,6 @@ public: } } - - /// Same, but return false if it's full. - bool ALWAYS_INLINE tryEmplace(Key x, iterator & it, bool & inserted) - { - Cell * res = findCell(x); - it = iteratorTo(res); - inserted = res == buf + m_size; - if (inserted) - { - if (res == buf + capacity) - return false; - - new(res) Cell(x, *this); - ++m_size; - } - return true; - } - - - /// Copy the cell from another hash table. It is assumed that there was no such key in the table yet. 
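A short usage sketch for the new `EventRateMeter` (illustrative only; `ProgressIndication` below feeds it CPU-time deltas in nanoseconds, while plain event counts in seconds are used here):

```cpp
#include <Common/EventRateMeter.h>

#include <chrono>
#include <iostream>

// Illustrative only: feed event counts at timestamps (seconds here) and read the smoothed rate.
int main()
{
    auto now = []
    {
        using namespace std::chrono;
        return duration<double>(steady_clock::now().time_since_epoch()).count();
    };

    DB::EventRateMeter meter(now(), 3.0); // average over roughly the last 3 seconds

    meter.add(now(), 128);                // 128 events happened "now"
    meter.add(now(), 64);

    std::cout << "events per second: " << meter.rate(now()) << '\n';
}
```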
- void ALWAYS_INLINE insertUnique(const Cell * cell) - { - memcpy(&buf[m_size], cell, sizeof(*cell)); - ++m_size; - } - - void ALWAYS_INLINE insertUnique(Key x) - { - new(&buf[m_size]) Cell(x, *this); - ++m_size; - } - - iterator ALWAYS_INLINE find(Key x) { return iteratorTo(findCell(x)); } const_iterator ALWAYS_INLINE find(Key x) const { return iteratorTo(findCell(x)); } @@ -381,36 +347,3 @@ template > using SmallSet = SmallTable, capacity>; - -template -< - typename Key, - typename Cell, - size_t capacity -> -class SmallMapTable : public SmallTable -{ -public: - using key_type = Key; - using mapped_type = typename Cell::mapped_type; - using value_type = typename Cell::value_type; - using cell_type = Cell; - - mapped_type & ALWAYS_INLINE operator[](Key x) - { - typename SmallMapTable::iterator it; - bool inserted; - this->emplace(x, it, inserted); - new (&it->getMapped()) mapped_type(); - return it->getMapped(); - } -}; - - -template -< - typename Key, - typename Mapped, - size_t capacity -> -using SmallMap = SmallMapTable, capacity>; diff --git a/src/Common/HashTable/StringHashMap.h b/src/Common/HashTable/StringHashMap.h index a3b5c3e9ed0..ada10180786 100644 --- a/src/Common/HashTable/StringHashMap.h +++ b/src/Common/HashTable/StringHashMap.h @@ -12,7 +12,7 @@ struct StringHashMapCell : public HashMapCellvalue.first); } /// NOLINT + StringRef getKey() const { return toStringRef(this->value.first); } /// NOLINT // internal static const Key & getKey(const value_type & value_) { return value_.first; } }; @@ -32,7 +32,7 @@ struct StringHashMapCell : public HashMapCellvalue.first.items[1] = 0; } // external - const StringRef getKey() const { return toStringRef(this->value.first); } /// NOLINT + StringRef getKey() const { return toStringRef(this->value.first); } /// NOLINT // internal static const StringKey16 & getKey(const value_type & value_) { return value_.first; } }; @@ -53,7 +53,7 @@ struct StringHashMapCell : public HashMapCellvalue.first.c = 0; } // external - const StringRef getKey() const { return toStringRef(this->value.first); } /// NOLINT + StringRef getKey() const { return toStringRef(this->value.first); } /// NOLINT // internal static const StringKey24 & getKey(const value_type & value_) { return value_.first; } }; diff --git a/src/Common/IFileCache.cpp b/src/Common/IFileCache.cpp index fb120ae5902..8fe434dd740 100644 --- a/src/Common/IFileCache.cpp +++ b/src/Common/IFileCache.cpp @@ -58,7 +58,7 @@ static bool isQueryInitialized() { return CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() - && CurrentThread::getQueryId().size != 0; + && !CurrentThread::getQueryId().empty(); } bool IFileCache::isReadOnly() @@ -77,7 +77,7 @@ IFileCache::QueryContextPtr IFileCache::getCurrentQueryContext(std::lock_guard & /* cache_lock */) diff --git a/src/Common/JSONParsers/DummyJSONParser.h b/src/Common/JSONParsers/DummyJSONParser.h index 77b958d1429..3cedd59decd 100644 --- a/src/Common/JSONParsers/DummyJSONParser.h +++ b/src/Common/JSONParsers/DummyJSONParser.h @@ -84,7 +84,7 @@ struct DummyJSONParser static Iterator begin() { return {}; } static Iterator end() { return {}; } static size_t size() { return 0; } - bool find(const std::string_view &, Element &) const { return false; } /// NOLINT + bool find(std::string_view, Element &) const { return false; } /// NOLINT #if 0 /// Optional: Provides access to an object's element by index. @@ -93,7 +93,7 @@ struct DummyJSONParser }; /// Parses a JSON document, returns the reference to its root element if succeeded. 
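Many of the signature changes in this patch (here and in the files that follow) replace `const StringRef &` and `const std::string_view &` parameters with pass-by-value. The reason, illustrated below with plain standard types, is that both are small pointer-plus-length values, so copying them costs no more than passing a reference while removing an indirection; this snippet is general C++ background, not code from the patch.

```cpp
#include <string_view>

// std::string_view (and ClickHouse's StringRef) is just a pointer and a length,
// typically 16 bytes on 64-bit targets, so it is cheapest to pass by value.
bool startsWithDigitByValue(std::string_view s)        // the style this patch moves to
{
    return !s.empty() && s.front() >= '0' && s.front() <= '9';
}

bool startsWithDigitByRef(const std::string_view & s)  // the older style being removed
{
    return !s.empty() && s.front() >= '0' && s.front() <= '9';
}
```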
- bool parse(const std::string_view &, Element &) { throw Exception{"Functions JSON* are not supported", ErrorCodes::NOT_IMPLEMENTED}; } /// NOLINT + bool parse(std::string_view, Element &) { throw Exception{"Functions JSON* are not supported", ErrorCodes::NOT_IMPLEMENTED}; } /// NOLINT #if 0 /// Optional: Allocates memory to parse JSON documents faster. diff --git a/src/Common/JSONParsers/RapidJSONParser.h b/src/Common/JSONParsers/RapidJSONParser.h index 2d8514868e5..77e8f6b2a74 100644 --- a/src/Common/JSONParsers/RapidJSONParser.h +++ b/src/Common/JSONParsers/RapidJSONParser.h @@ -98,7 +98,7 @@ struct RapidJSONParser ALWAYS_INLINE Iterator end() const { return ptr->MemberEnd(); } ALWAYS_INLINE size_t size() const { return ptr->MemberCount(); } - bool find(const std::string_view & key, Element & result) const + bool find(std::string_view key, Element & result) const { auto it = ptr->FindMember(rapidjson::StringRef(key.data(), key.length())); if (it == ptr->MemberEnd()) @@ -122,7 +122,7 @@ struct RapidJSONParser }; /// Parses a JSON document, returns the reference to its root element if succeeded. - bool parse(const std::string_view & json, Element & result) + bool parse(std::string_view json, Element & result) { rapidjson::MemoryStream ms(json.data(), json.size()); rapidjson::EncodedInputStream, rapidjson::MemoryStream> is(ms); diff --git a/src/Common/JSONParsers/SimdJSONParser.h b/src/Common/JSONParsers/SimdJSONParser.h index 3abeb85fb56..f3bbfe4dfde 100644 --- a/src/Common/JSONParsers/SimdJSONParser.h +++ b/src/Common/JSONParsers/SimdJSONParser.h @@ -105,7 +105,7 @@ struct SimdJSONParser ALWAYS_INLINE Iterator end() const { return object.end(); } ALWAYS_INLINE size_t size() const { return object.size(); } - bool find(const std::string_view & key, Element & result) const + bool find(std::string_view key, Element & result) const { auto x = object.at_key(key); if (x.error()) @@ -131,7 +131,7 @@ struct SimdJSONParser }; /// Parses a JSON document, returns the reference to its root element if succeeded. - bool parse(const std::string_view & json, Element & result) + bool parse(std::string_view json, Element & result) { auto document = parser.parse(json.data(), json.size()); if (document.error()) diff --git a/src/Common/LRUFileCache.cpp b/src/Common/LRUFileCache.cpp index 0ce76dbdec6..6306b6de059 100644 --- a/src/Common/LRUFileCache.cpp +++ b/src/Common/LRUFileCache.cpp @@ -45,7 +45,7 @@ void LRUFileCache::initialize() catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); - return; + throw; } } else @@ -841,7 +841,11 @@ void LRUFileCache::loadCacheInfoIntoMemory(std::lock_guard & cache_l /// cache_base_path / key_prefix / key / offset if (!files.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache already initialized"); + throw Exception( + ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, + "Cache initialization is partially made. " + "This can be a result of a failed first attempt to initialize cache. 
" + "Please, check log for error messages"); fs::directory_iterator key_prefix_it{cache_base_path}; for (; key_prefix_it != fs::directory_iterator(); ++key_prefix_it) diff --git a/src/Common/OpenSSLHelpers.cpp b/src/Common/OpenSSLHelpers.cpp index d73e08e79a9..4e7848afc85 100644 --- a/src/Common/OpenSSLHelpers.cpp +++ b/src/Common/OpenSSLHelpers.cpp @@ -10,7 +10,7 @@ namespace DB { #pragma GCC diagnostic warning "-Wold-style-cast" -std::string encodeSHA256(const std::string_view & text) +std::string encodeSHA256(std::string_view text) { return encodeSHA256(text.data(), text.size()); } @@ -21,7 +21,7 @@ std::string encodeSHA256(const void * text, size_t size) encodeSHA256(text, size, reinterpret_cast(out.data())); return out; } -void encodeSHA256(const std::string_view & text, unsigned char * out) +void encodeSHA256(std::string_view text, unsigned char * out) { encodeSHA256(text.data(), text.size(), out); } diff --git a/src/Common/OpenSSLHelpers.h b/src/Common/OpenSSLHelpers.h index f0dbbc10b4c..41f092f0109 100644 --- a/src/Common/OpenSSLHelpers.h +++ b/src/Common/OpenSSLHelpers.h @@ -10,10 +10,10 @@ namespace DB { /// Encodes `text` and returns it. -std::string encodeSHA256(const std::string_view & text); +std::string encodeSHA256(std::string_view text); std::string encodeSHA256(const void * text, size_t size); /// `out` must be at least 32 bytes long. -void encodeSHA256(const std::string_view & text, unsigned char * out); +void encodeSHA256(std::string_view text, unsigned char * out); void encodeSHA256(const void * text, size_t size, unsigned char * out); /// Returns concatenation of error strings for all errors that OpenSSL has recorded, emptying the error queue. diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp index cfc364929a3..60efab69433 100644 --- a/src/Common/OptimizedRegularExpression.cpp +++ b/src/Common/OptimizedRegularExpression.cpp @@ -342,6 +342,23 @@ OptimizedRegularExpressionImpl::OptimizedRegularExpressionImpl(cons } } +template +OptimizedRegularExpressionImpl::OptimizedRegularExpressionImpl(OptimizedRegularExpressionImpl && rhs) noexcept + : is_trivial(rhs.is_trivial) + , required_substring_is_prefix(rhs.required_substring_is_prefix) + , is_case_insensitive(rhs.is_case_insensitive) + , required_substring(std::move(rhs.required_substring)) + , re2(std::move(rhs.re2)) + , number_of_subpatterns(rhs.number_of_subpatterns) +{ + if (!required_substring.empty()) + { + if (is_case_insensitive) + case_insensitive_substring_searcher.emplace(required_substring.data(), required_substring.size()); + else + case_sensitive_substring_searcher.emplace(required_substring.data(), required_substring.size()); + } +} template bool OptimizedRegularExpressionImpl::match(const char * subject, size_t subject_size) const diff --git a/src/Common/OptimizedRegularExpression.h b/src/Common/OptimizedRegularExpression.h index eaa7b06e309..dad8706a50d 100644 --- a/src/Common/OptimizedRegularExpression.h +++ b/src/Common/OptimizedRegularExpression.h @@ -56,6 +56,9 @@ public: using StringPieceType = std::conditional_t; OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); /// NOLINT + /// StringSearcher store pointers to required_substring, it must be updated on move. 
+ OptimizedRegularExpressionImpl(OptimizedRegularExpressionImpl && rhs) noexcept; + OptimizedRegularExpressionImpl(const OptimizedRegularExpressionImpl & rhs) = delete; bool match(const std::string & subject) const { diff --git a/src/Common/ProgressIndication.cpp b/src/Common/ProgressIndication.cpp index 7bea00f5b1e..8ca1612e916 100644 --- a/src/Common/ProgressIndication.cpp +++ b/src/Common/ProgressIndication.cpp @@ -8,6 +8,7 @@ #include "Common/formatReadable.h" #include #include +#include #include "IO/WriteBufferFromString.h" #include @@ -16,16 +17,16 @@ namespace { constexpr UInt64 ALL_THREADS = 0; - double calculateCPUUsage(DB::ThreadIdToTimeMap times, UInt64 elapsed) + UInt64 aggregateCPUUsageNs(DB::ThreadIdToTimeMap times) { - auto accumulated = std::accumulate(times.begin(), times.end(), 0, + constexpr UInt64 us_to_ns = 1000; + return us_to_ns * std::accumulate(times.begin(), times.end(), 0ull, [](UInt64 acc, const auto & elem) { if (elem.first == ALL_THREADS) return acc; return acc + elem.second.time(); }); - return static_cast(accumulated) / elapsed; } } @@ -55,7 +56,7 @@ void ProgressIndication::resetProgress() write_progress_on_update = false; { std::lock_guard lock(profile_events_mutex); - host_cpu_usage.clear(); + cpu_usage_meter.reset(static_cast(clock_gettime_ns())); thread_data.clear(); } } @@ -82,15 +83,17 @@ void ProgressIndication::addThreadIdToList(String const & host, UInt64 thread_id thread_to_times[thread_id] = {}; } -void ProgressIndication::updateThreadEventData(HostToThreadTimesMap & new_thread_data, UInt64 elapsed_time) +void ProgressIndication::updateThreadEventData(HostToThreadTimesMap & new_thread_data) { std::lock_guard lock(profile_events_mutex); + UInt64 total_cpu_ns = 0; for (auto & new_host_map : new_thread_data) { - host_cpu_usage[new_host_map.first] = calculateCPUUsage(new_host_map.second, elapsed_time); + total_cpu_ns += aggregateCPUUsageNs(new_host_map.second); thread_data[new_host_map.first] = std::move(new_host_map.second); } + cpu_usage_meter.add(static_cast(clock_gettime_ns()), total_cpu_ns); } size_t ProgressIndication::getUsedThreadsCount() const @@ -104,14 +107,10 @@ size_t ProgressIndication::getUsedThreadsCount() const }); } -double ProgressIndication::getCPUUsage() const +double ProgressIndication::getCPUUsage() { std::lock_guard lock(profile_events_mutex); - - double res = 0; - for (const auto & elem : host_cpu_usage) - res += elem.second; - return res; + return cpu_usage_meter.rate(clock_gettime_ns()); } ProgressIndication::MemoryUsage ProgressIndication::getMemoryUsage() const diff --git a/src/Common/ProgressIndication.h b/src/Common/ProgressIndication.h index 9ce29ef0d3c..588a31beca7 100644 --- a/src/Common/ProgressIndication.h +++ b/src/Common/ProgressIndication.h @@ -7,7 +7,7 @@ #include #include #include - +#include /// http://en.wikipedia.org/wiki/ANSI_escape_code #define CLEAR_TO_END_OF_LINE "\033[K" @@ -59,12 +59,12 @@ public: void addThreadIdToList(String const & host, UInt64 thread_id); - void updateThreadEventData(HostToThreadTimesMap & new_thread_data, UInt64 elapsed_time); + void updateThreadEventData(HostToThreadTimesMap & new_thread_data); private: size_t getUsedThreadsCount() const; - double getCPUUsage() const; + double getCPUUsage(); struct MemoryUsage { @@ -91,7 +91,7 @@ private: bool write_progress_on_update = false; - std::unordered_map host_cpu_usage; + EventRateMeter cpu_usage_meter{static_cast(clock_gettime_ns()), 3'000'000'000 /*ns*/}; // average cpu utilization last 3 second HostToThreadTimesMap thread_data; /// 
In case of all of the above: /// - clickhouse-local @@ -100,7 +100,7 @@ private: /// /// It is possible concurrent access to the following: /// - writeProgress() (class properties) (guarded with progress_mutex) - /// - thread_data/host_cpu_usage (guarded with profile_events_mutex) + /// - thread_data/cpu_usage_meter (guarded with profile_events_mutex) mutable std::mutex profile_events_mutex; mutable std::mutex progress_mutex; }; diff --git a/src/Common/RadixSort.h b/src/Common/RadixSort.h index 4bf975c4c7a..9ca43bee30c 100644 --- a/src/Common/RadixSort.h +++ b/src/Common/RadixSort.h @@ -355,8 +355,6 @@ private: template static inline void radixSortMSDInternal(Element * arr, size_t size, size_t limit) { -// std::cerr << PASS << ", " << size << ", " << limit << "\n"; - /// The beginning of every i-1-th bucket. 0th element will be equal to 1st. /// Last element will point to array end. std::unique_ptr prev_buckets{new Element*[HISTOGRAM_SIZE + 1]}; diff --git a/src/Common/SettingsChanges.cpp b/src/Common/SettingsChanges.cpp index 370b465eba3..9fb4f361e09 100644 --- a/src/Common/SettingsChanges.cpp +++ b/src/Common/SettingsChanges.cpp @@ -4,7 +4,7 @@ namespace DB { namespace { - SettingChange * find(SettingsChanges & changes, const std::string_view & name) + SettingChange * find(SettingsChanges & changes, std::string_view name) { auto it = std::find_if(changes.begin(), changes.end(), [&name](const SettingChange & change) { return change.name == name; }); if (it == changes.end()) @@ -12,7 +12,7 @@ namespace return &*it; } - const SettingChange * find(const SettingsChanges & changes, const std::string_view & name) + const SettingChange * find(const SettingsChanges & changes, std::string_view name) { auto it = std::find_if(changes.begin(), changes.end(), [&name](const SettingChange & change) { return change.name == name; }); if (it == changes.end()) @@ -21,7 +21,7 @@ namespace } } -bool SettingsChanges::tryGet(const std::string_view & name, Field & out_value) const +bool SettingsChanges::tryGet(std::string_view name, Field & out_value) const { const auto * change = find(*this, name); if (!change) @@ -30,7 +30,7 @@ bool SettingsChanges::tryGet(const std::string_view & name, Field & out_value) c return true; } -const Field * SettingsChanges::tryGet(const std::string_view & name) const +const Field * SettingsChanges::tryGet(std::string_view name) const { const auto * change = find(*this, name); if (!change) @@ -38,7 +38,7 @@ const Field * SettingsChanges::tryGet(const std::string_view & name) const return &change->value; } -Field * SettingsChanges::tryGet(const std::string_view & name) +Field * SettingsChanges::tryGet(std::string_view name) { auto * change = find(*this, name); if (!change) diff --git a/src/Common/SettingsChanges.h b/src/Common/SettingsChanges.h index 5f6a390d0d2..67cb69f77bf 100644 --- a/src/Common/SettingsChanges.h +++ b/src/Common/SettingsChanges.h @@ -14,8 +14,8 @@ struct SettingChange Field value; SettingChange() = default; - SettingChange(const std::string_view & name_, const Field & value_) : name(name_), value(value_) {} - SettingChange(const std::string_view & name_, Field && value_) : name(name_), value(std::move(value_)) {} + SettingChange(std::string_view name_, const Field & value_) : name(name_), value(value_) {} + SettingChange(std::string_view name_, Field && value_) : name(name_), value(std::move(value_)) {} friend bool operator ==(const SettingChange & lhs, const SettingChange & rhs) { return (lhs.name == rhs.name) && (lhs.value == rhs.value); } friend bool operator 
!=(const SettingChange & lhs, const SettingChange & rhs) { return !(lhs == rhs); } @@ -27,9 +27,9 @@ class SettingsChanges : public std::vector public: using std::vector::vector; - bool tryGet(const std::string_view & name, Field & out_value) const; - const Field * tryGet(const std::string_view & name) const; - Field * tryGet(const std::string_view & name); + bool tryGet(std::string_view name, Field & out_value) const; + const Field * tryGet(std::string_view name) const; + Field * tryGet(std::string_view name); }; } diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 86adeeaf7e5..0050288b1cf 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -1,9 +1,7 @@ #include #include -#include #include #include -#include #include #include @@ -13,6 +11,7 @@ #include #include #include +#include namespace @@ -94,53 +93,15 @@ ShellCommand::~ShellCommand() bool ShellCommand::tryWaitProcessWithTimeout(size_t timeout_in_seconds) { - int status = 0; - LOG_TRACE(getLogger(), "Try wait for shell command pid {} with timeout {}", pid, timeout_in_seconds); wait_called = true; - struct timespec interval {.tv_sec = 1, .tv_nsec = 0}; in.close(); out.close(); err.close(); - if (timeout_in_seconds == 0) - { - /// If there is no timeout before signal try to waitpid 1 time without block so we can avoid sending - /// signal if process is already normally terminated. - - int waitpid_res = waitpid(pid, &status, WNOHANG); - bool process_terminated_normally = (waitpid_res == pid); - return process_terminated_normally; - } - - /// If timeout is positive try waitpid without block in loop until - /// process is normally terminated or waitpid return error - - while (timeout_in_seconds != 0) - { - int waitpid_res = waitpid(pid, &status, WNOHANG); - bool process_terminated_normally = (waitpid_res == pid); - - if (process_terminated_normally) - { - return true; - } - else if (waitpid_res == 0) - { - --timeout_in_seconds; - nanosleep(&interval, nullptr); - - continue; - } - else if (waitpid_res == -1 && errno != EINTR) - { - return false; - } - } - - return false; + return waitForPid(pid, timeout_in_seconds); } void ShellCommand::logCommand(const char * filename, char * const argv[]) diff --git a/src/Common/ShellCommand.h b/src/Common/ShellCommand.h index 190b5bc664e..dfc4a826f62 100644 --- a/src/Common/ShellCommand.h +++ b/src/Common/ShellCommand.h @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB diff --git a/src/Common/SpaceSaving.h b/src/Common/SpaceSaving.h index 48817d8c926..0f577349722 100644 --- a/src/Common/SpaceSaving.h +++ b/src/Common/SpaceSaving.h @@ -49,12 +49,12 @@ struct SpaceSavingArena template <> struct SpaceSavingArena { - StringRef emplace(const StringRef & key) + StringRef emplace(StringRef key) { return copyStringInArena(arena, key); } - void free(const StringRef & key) + void free(StringRef key) { if (key.data) arena.free(const_cast(key.data), key.size); diff --git a/src/Common/StringUtils/StringUtils.h b/src/Common/StringUtils/StringUtils.h index 21df0f5ae8b..b5a081ab693 100644 --- a/src/Common/StringUtils/StringUtils.h +++ b/src/Common/StringUtils/StringUtils.h @@ -147,7 +147,7 @@ inline bool isPunctuationASCII(char c) } -inline bool isValidIdentifier(const std::string_view & str) +inline bool isValidIdentifier(std::string_view str) { return !str.empty() && isValidIdentifierBegin(str[0]) diff --git a/src/Common/TLDListsHolder.cpp b/src/Common/TLDListsHolder.cpp index 3e5649a5ac6..a3019ac1c49 100644 --- a/src/Common/TLDListsHolder.cpp +++ 
b/src/Common/TLDListsHolder.cpp @@ -20,13 +20,13 @@ TLDList::TLDList(size_t size) : tld_container(size) , pool(std::make_unique(10 << 20)) {} -bool TLDList::insert(const StringRef & host) +bool TLDList::insert(StringRef host) { bool inserted; tld_container.emplace(DB::ArenaKeyHolder{host, *pool}, inserted); return inserted; } -bool TLDList::has(const StringRef & host) const +bool TLDList::has(StringRef host) const { return tld_container.has(host); } diff --git a/src/Common/TLDListsHolder.h b/src/Common/TLDListsHolder.h index 708d049d5a6..e8acefb1b5e 100644 --- a/src/Common/TLDListsHolder.h +++ b/src/Common/TLDListsHolder.h @@ -23,9 +23,9 @@ public: explicit TLDList(size_t size); /// Return true if the tld_container does not contains such element. - bool insert(const StringRef & host); + bool insert(StringRef host); /// Check is there such TLD - bool has(const StringRef & host) const; + bool has(StringRef host) const; size_t size() const { return tld_container.size(); } private: diff --git a/src/Common/TargetSpecific.cpp b/src/Common/TargetSpecific.cpp index c52c8c2bcf0..70b03833775 100644 --- a/src/Common/TargetSpecific.cpp +++ b/src/Common/TargetSpecific.cpp @@ -18,6 +18,8 @@ UInt32 getSupportedArchs() result |= static_cast(TargetArch::AVX512F); if (Cpu::CpuFlagsCache::have_AVX512BW) result |= static_cast(TargetArch::AVX512BW); + if (Cpu::CpuFlagsCache::have_AVX512VBMI) + result |= static_cast(TargetArch::AVX512VBMI); return result; } @@ -37,6 +39,7 @@ String toString(TargetArch arch) case TargetArch::AVX2: return "avx2"; case TargetArch::AVX512F: return "avx512f"; case TargetArch::AVX512BW: return "avx512bw"; + case TargetArch::AVX512VBMI: return "avx512vbmi"; } __builtin_unreachable(); diff --git a/src/Common/TargetSpecific.h b/src/Common/TargetSpecific.h index b045892d2c1..f078c0e3ffc 100644 --- a/src/Common/TargetSpecific.h +++ b/src/Common/TargetSpecific.h @@ -81,6 +81,7 @@ enum class TargetArch : UInt32 AVX2 = (1 << 2), AVX512F = (1 << 3), AVX512BW = (1 << 4), + AVX512VBMI = (1 << 5), }; /// Runtime detection. 
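To show how a new target level such as AVX512VBMI is typically consumed, here is a hedged sketch of the dispatch pattern this header supports: compile an implementation for several targets with `DECLARE_MULTITARGET_CODE` (defined further down) and select a variant at runtime with `isArchSupported`. The function and the particular set of targets checked are illustrative, not taken from the patch.

```cpp
#include <Common/TargetSpecific.h>

#include <cstddef>

namespace DB
{

DECLARE_MULTITARGET_CODE(
    /// Compiled once per target; the compiler may auto-vectorize each variant differently.
    int sumExample(const int * data, size_t size)
    {
        int res = 0;
        for (size_t i = 0; i < size; ++i)
            res += data[i];
        return res;
    }
)

int sumExample(const int * data, size_t size)
{
#if USE_MULTITARGET_CODE
    if (isArchSupported(TargetArch::AVX512VBMI))
        return TargetSpecific::AVX512VBMI::sumExample(data, size);
    if (isArchSupported(TargetArch::AVX2))
        return TargetSpecific::AVX2::sumExample(data, size);
#endif
    return TargetSpecific::Default::sumExample(data, size);
}

}
```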
@@ -88,6 +89,10 @@ bool isArchSupported(TargetArch arch); String toString(TargetArch arch); +#ifndef ENABLE_MULTITARGET_CODE +# define ENABLE_MULTITARGET_CODE 0 +#endif + #if ENABLE_MULTITARGET_CODE && defined(__GNUC__) && defined(__x86_64__) /// NOLINTNEXTLINE @@ -95,6 +100,7 @@ String toString(TargetArch arch); #if defined(__clang__) +#define AVX512VBMI_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi"))) #define AVX512BW_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw"))) #define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f"))) #define AVX2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2"))) @@ -102,6 +108,8 @@ String toString(TargetArch arch); #define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt"))) #define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE +# define BEGIN_AVX512VBMI_SPECIFIC_CODE \ + _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi\"))),apply_to=function)") # define BEGIN_AVX512BW_SPECIFIC_CODE \ _Pragma("clang attribute push(__attribute__((target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw\"))),apply_to=function)") # define BEGIN_AVX512F_SPECIFIC_CODE \ @@ -121,13 +129,17 @@ String toString(TargetArch arch); # define DUMMY_FUNCTION_DEFINITION [[maybe_unused]] void _dummy_function_definition(); #else +#define AVX512VBMI_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,tune=native"))) #define AVX512BW_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,tune=native"))) #define AVX512_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,tune=native"))) #define AVX2_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,tune=native"))) #define AVX_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt,avx,tune=native"))) -#define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt",tune=native)))) +#define SSE42_FUNCTION_SPECIFIC_ATTRIBUTE __attribute__((target("sse,sse2,sse3,ssse3,sse4,popcnt",tune=native))) #define DEFAULT_FUNCTION_SPECIFIC_ATTRIBUTE +# define BEGIN_AVX512VBMI_SPECIFIC_CODE \ + _Pragma("GCC push_options") \ + _Pragma("GCC target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,avx512vl,avx512vbmi,tune=native\")") # define BEGIN_AVX512BW_SPECIFIC_CODE \ _Pragma("GCC push_options") \ _Pragma("GCC target(\"sse,sse2,sse3,ssse3,sse4,popcnt,avx,avx2,avx512f,avx512bw,tune=native\")") @@ -196,6 +208,15 @@ namespace TargetSpecific::AVX512BW { \ } \ END_TARGET_SPECIFIC_CODE +#define DECLARE_AVX512VBMI_SPECIFIC_CODE(...) \ +BEGIN_AVX512VBMI_SPECIFIC_CODE \ +namespace TargetSpecific::AVX512VBMI { \ + DUMMY_FUNCTION_DEFINITION \ + using namespace DB::TargetSpecific::AVX512VBMI; \ + __VA_ARGS__ \ +} \ +END_TARGET_SPECIFIC_CODE + #else #define USE_MULTITARGET_CODE 0 @@ -207,6 +228,7 @@ END_TARGET_SPECIFIC_CODE #define DECLARE_AVX2_SPECIFIC_CODE(...) #define DECLARE_AVX512F_SPECIFIC_CODE(...) #define DECLARE_AVX512BW_SPECIFIC_CODE(...) +#define DECLARE_AVX512VBMI_SPECIFIC_CODE(...) 
#endif @@ -223,7 +245,8 @@ DECLARE_SSE42_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX2_SPECIFIC_CODE (__VA_ARGS__) \ DECLARE_AVX512F_SPECIFIC_CODE(__VA_ARGS__) \ -DECLARE_AVX512BW_SPECIFIC_CODE(__VA_ARGS__) +DECLARE_AVX512BW_SPECIFIC_CODE(__VA_ARGS__) \ +DECLARE_AVX512VBMI_SPECIFIC_CODE(__VA_ARGS__) DECLARE_DEFAULT_CODE( constexpr auto BuildArch = TargetArch::Default; /// NOLINT @@ -249,6 +272,11 @@ DECLARE_AVX512BW_SPECIFIC_CODE( constexpr auto BuildArch = TargetArch::AVX512BW; /// NOLINT ) // DECLARE_AVX512BW_SPECIFIC_CODE +DECLARE_AVX512VBMI_SPECIFIC_CODE( + constexpr auto BuildArch = TargetArch::AVX512VBMI; /// NOLINT +) // DECLARE_AVX512VBMI_SPECIFIC_CODE + + /** Runtime Dispatch helpers for class members. * * Example of usage: diff --git a/src/Common/TaskStatsInfoGetter.cpp b/src/Common/TaskStatsInfoGetter.cpp index 36e8a0fce00..304ccc84765 100644 --- a/src/Common/TaskStatsInfoGetter.cpp +++ b/src/Common/TaskStatsInfoGetter.cpp @@ -21,6 +21,7 @@ #if defined(__clang__) #pragma clang diagnostic ignored "-Wgnu-anonymous-struct" + #pragma clang diagnostic ignored "-Wnested-anon-types" #endif /// Basic idea is motivated by "iotop" tool. diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index c80150a8fe8..7c22d3b8335 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -210,7 +210,7 @@ public: return thread_state.load(std::memory_order_relaxed); } - StringRef getQueryId() const + std::string_view getQueryId() const { return query_id; } diff --git a/src/Common/Throttler.h b/src/Common/Throttler.h index 89a83bb23be..6d44ad6ca5f 100644 --- a/src/Common/Throttler.h +++ b/src/Common/Throttler.h @@ -1,5 +1,7 @@ #pragma once +#include + #include #include #include @@ -57,7 +59,4 @@ private: std::shared_ptr parent; }; - -using ThrottlerPtr = std::shared_ptr; - } diff --git a/src/Common/Throttler_fwd.h b/src/Common/Throttler_fwd.h new file mode 100644 index 00000000000..1efaf1c85c5 --- /dev/null +++ b/src/Common/Throttler_fwd.h @@ -0,0 +1,11 @@ +#pragma once + +#include + +namespace DB +{ + +class Throttler; +using ThrottlerPtr = std::shared_ptr; + +} diff --git a/src/Common/TraceSender.cpp b/src/Common/TraceSender.cpp index 6f0904bd50f..ad88e508d06 100644 --- a/src/Common/TraceSender.cpp +++ b/src/Common/TraceSender.cpp @@ -42,13 +42,14 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int char buffer[buf_size]; WriteBufferFromFileDescriptorDiscardOnFailure out(pipe.fds_rw[1], buf_size, buffer); - StringRef query_id; + std::string_view query_id; UInt64 thread_id; if (CurrentThread::isInitialized()) { query_id = CurrentThread::getQueryId(); - query_id.size = std::min(query_id.size, QUERY_ID_MAX_LEN); + if (query_id.size() > QUERY_ID_MAX_LEN) + query_id.remove_suffix(query_id.size() - QUERY_ID_MAX_LEN); thread_id = CurrentThread::get().thread_id; } @@ -59,8 +60,8 @@ void TraceSender::send(TraceType trace_type, const StackTrace & stack_trace, Int writeChar(false, out); /// true if requested to stop the collecting thread. 
- writeBinary(static_cast(query_id.size), out); - out.write(query_id.data, query_id.size); + writeBinary(static_cast(query_id.size()), out); + out.write(query_id.data(), query_id.size()); size_t stack_trace_size = stack_trace.getSize(); size_t stack_trace_offset = stack_trace.getOffset(); diff --git a/src/Common/filesystemHelpers.cpp b/src/Common/filesystemHelpers.cpp index 00764269269..1e8e53bf1ea 100644 --- a/src/Common/filesystemHelpers.cpp +++ b/src/Common/filesystemHelpers.cpp @@ -87,7 +87,10 @@ BlockDeviceType getBlockDeviceType([[maybe_unused]] const String & device_id) #if defined(OS_LINUX) try { - ReadBufferFromFile in("/sys/dev/block/" + device_id + "/queue/rotational"); + const auto path{std::filesystem::path("/sys/dev/block/") / device_id / "queue/rotational"}; + if (!std::filesystem::exists(path)) + return BlockDeviceType::UNKNOWN; + ReadBufferFromFile in(path); int rotational; readText(rotational, in); return rotational ? BlockDeviceType::ROT : BlockDeviceType::NONROT; @@ -109,7 +112,8 @@ UInt64 getBlockDeviceReadAheadBytes([[maybe_unused]] const String & device_id) #if defined(OS_LINUX) try { - ReadBufferFromFile in("/sys/dev/block/" + device_id + "/queue/read_ahead_kb"); + const auto path{std::filesystem::path("/sys/dev/block/") / device_id / "queue/read_ahead_kb"}; + ReadBufferFromFile in(path); int read_ahead_kb; readText(read_ahead_kb, in); return read_ahead_kb * 1024; diff --git a/src/Common/noexcept_scope.h b/src/Common/noexcept_scope.h index 56fb44ff0bf..bdd7a98925a 100644 --- a/src/Common/noexcept_scope.h +++ b/src/Common/noexcept_scope.h @@ -1,36 +1,28 @@ #pragma once -#include #include #include - -#define NOEXCEPT_SCOPE_IMPL_CONCAT(n, expected) \ - LockMemoryExceptionInThread lock_memory_tracker##n(VariableContext::Global); \ - SCOPE_EXIT( \ - { \ - const auto uncaught = std::uncaught_exceptions(); \ - assert((expected) == uncaught || (expected) + 1 == uncaught); \ - if ((expected) < uncaught) \ - { \ - tryLogCurrentException("NOEXCEPT_SCOPE"); \ - abort(); \ - } \ - } \ - ) - -#define NOEXCEPT_SCOPE_IMPL(n, expected) NOEXCEPT_SCOPE_IMPL_CONCAT(n, expected) - -#define NOEXCEPT_SCOPE_CONCAT(n) \ - const auto num_curr_exceptions##n = std::uncaught_exceptions(); \ - NOEXCEPT_SCOPE_IMPL(n, num_curr_exceptions##n) - -#define NOEXCEPT_SCOPE_FWD(n) NOEXCEPT_SCOPE_CONCAT(n) - - /// It can be used in critical places to exit on unexpected exceptions. /// SIGABRT is usually better that broken in-memory state with unpredictable consequences. /// It also temporarily disables exception from memory tracker in current thread. /// Strict version does not take into account nested exception (i.e. it aborts even when we're in catch block). -#define NOEXCEPT_SCOPE_STRICT NOEXCEPT_SCOPE_IMPL(__LINE__, 0) -#define NOEXCEPT_SCOPE NOEXCEPT_SCOPE_FWD(__LINE__) +#define NOEXCEPT_SCOPE_IMPL(...) do { \ + LockMemoryExceptionInThread \ + noexcept_lock_memory_tracker(VariableContext::Global); \ + try \ + { \ + __VA_ARGS__; \ + } \ + catch (...) \ + { \ + DB::tryLogCurrentException(__PRETTY_FUNCTION__); \ + std::terminate(); \ + } \ +} while (0) /* to allow leading semi-colon */ + +#define NOEXCEPT_SCOPE_STRICT(...) \ + if (std::uncaught_exceptions()) std::terminate(); \ + NOEXCEPT_SCOPE_IMPL(__VA_ARGS__) + +#define NOEXCEPT_SCOPE(...) 
NOEXCEPT_SCOPE_IMPL(__VA_ARGS__) diff --git a/src/Common/quoteString.cpp b/src/Common/quoteString.cpp index e3e6e0b3249..b464f4837a1 100644 --- a/src/Common/quoteString.cpp +++ b/src/Common/quoteString.cpp @@ -14,7 +14,7 @@ String quoteString(std::string_view x) } -String doubleQuoteString(const StringRef & x) +String doubleQuoteString(StringRef x) { String res(x.size, '\0'); WriteBufferFromString wb(res); @@ -23,7 +23,7 @@ String doubleQuoteString(const StringRef & x) } -String backQuote(const StringRef & x) +String backQuote(StringRef x) { String res(x.size, '\0'); { @@ -34,7 +34,7 @@ String backQuote(const StringRef & x) } -String backQuoteIfNeed(const StringRef & x) +String backQuoteIfNeed(StringRef x) { String res(x.size, '\0'); { diff --git a/src/Common/quoteString.h b/src/Common/quoteString.h index 73c0de03d45..b83988258e2 100644 --- a/src/Common/quoteString.h +++ b/src/Common/quoteString.h @@ -16,12 +16,12 @@ namespace DB } /// Double quote the string. -String doubleQuoteString(const StringRef & x); +String doubleQuoteString(StringRef x); /// Quote the identifier with backquotes. -String backQuote(const StringRef & x); +String backQuote(StringRef x); /// Quote the identifier with backquotes, if required. -String backQuoteIfNeed(const StringRef & x); +String backQuoteIfNeed(StringRef x); } diff --git a/src/Common/waitForPid.cpp b/src/Common/waitForPid.cpp new file mode 100644 index 00000000000..38f43ae2f6a --- /dev/null +++ b/src/Common/waitForPid.cpp @@ -0,0 +1,192 @@ +#include +#include +#include +#include + +#include +#include +#include + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wgnu-statement-expression" +#define HANDLE_EINTR(x) ({ \ + decltype(x) eintr_wrapper_result; \ + do { \ + eintr_wrapper_result = (x); \ + } while (eintr_wrapper_result == -1 && errno == EINTR); \ + eintr_wrapper_result; \ +}) + +#if defined(OS_LINUX) + +#include +#include + +#if !defined(__NR_pidfd_open) + #if defined(__x86_64__) + #define SYS_pidfd_open 434 + #elif defined(__aarch64__) + #define SYS_pidfd_open 434 + #elif defined(__ppc64__) + #define SYS_pidfd_open 434 + #elif defined(__riscv) + #define SYS_pidfd_open 434 + #else + #error "Unsupported architecture" + #endif +#else + #define SYS_pidfd_open __NR_pidfd_open +#endif + +namespace DB +{ + +static int syscall_pidfd_open(pid_t pid) +{ + // pidfd_open cannot be interrupted, no EINTR handling + return syscall(SYS_pidfd_open, pid, 0); +} + +static int dir_pidfd_open(pid_t pid) +{ + std::string path = "/proc/" + std::to_string(pid); + return HANDLE_EINTR(open(path.c_str(), O_DIRECTORY)); +} + +static bool supportsPidFdOpen() +{ + VersionNumber pidfd_open_minimal_version(5, 3, 0); + VersionNumber linux_version(Poco::Environment::osVersion()); + return linux_version >= pidfd_open_minimal_version; +} + +static int pidFdOpen(pid_t pid) +{ + // use pidfd_open or just plain old /proc/[pid] open for Linux + if (supportsPidFdOpen()) + { + return syscall_pidfd_open(pid); + } + else + { + return dir_pidfd_open(pid); + } +} + +static int pollPid(pid_t pid, int timeout_in_ms) +{ + struct pollfd pollfd; + + int pid_fd = pidFdOpen(pid); + if (pid_fd == -1) + { + return false; + } + pollfd.fd = pid_fd; + pollfd.events = POLLIN; + int ready = poll(&pollfd, 1, timeout_in_ms); + int save_errno = errno; + close(pid_fd); + errno = save_errno; + return ready; +} +#elif defined(OS_DARWIN) || defined(OS_FREEBSD) + +#include +#include + +namespace DB +{ + +static int pollPid(pid_t pid, int timeout_in_ms) +{ + int status = 0; + int kq = 
HANDLE_EINTR(kqueue()); + if (kq == -1) + { + return false; + } + struct kevent change = {.ident = NULL}; + EV_SET(&change, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL); + int result = HANDLE_EINTR(kevent(kq, &change, 1, NULL, 0, NULL)); + if (result == -1) + { + if (errno != ESRCH) + { + return false; + } + // check if pid already died while we called kevent() + if (waitpid(pid, &status, WNOHANG) == pid) + { + return true; + } + return false; + } + + struct kevent event = {.ident = NULL}; + struct timespec remaining_timespec = {.tv_sec = timeout_in_ms / 1000, .tv_nsec = (timeout_in_ms % 1000) * 1000000}; + int ready = kevent(kq, nullptr, 0, &event, 1, &remaining_timespec); + int save_errno = errno; + close(kq); + errno = save_errno; + return ready; +} +#else + #error "Unsupported OS type" +#endif + +bool waitForPid(pid_t pid, size_t timeout_in_seconds) +{ + int status = 0; + + Stopwatch watch; + + if (timeout_in_seconds == 0) + { + /// If there is no timeout before signal try to waitpid 1 time without block so we can avoid sending + /// signal if process is already normally terminated. + + int waitpid_res = waitpid(pid, &status, WNOHANG); + bool process_terminated_normally = (waitpid_res == pid); + return process_terminated_normally; + } + + /// If timeout is positive try waitpid without block in loop until + /// process is normally terminated or waitpid return error + + int timeout_in_ms = timeout_in_seconds * 1000; + while (timeout_in_ms > 0) + { + int waitpid_res = waitpid(pid, &status, WNOHANG); + bool process_terminated_normally = (waitpid_res == pid); + if (process_terminated_normally) + { + return true; + } + else if (waitpid_res == 0) + { + watch.restart(); + int ready = pollPid(pid, timeout_in_ms); + if (ready <= 0) + { + if (errno == EINTR || errno == EAGAIN) + { + timeout_in_ms -= watch.elapsedMilliseconds(); + } + else + { + return false; + } + } + continue; + } + else if (waitpid_res == -1 && errno != EINTR) + { + return false; + } + } + return false; +} + +} +#pragma GCC diagnostic pop diff --git a/src/Common/waitForPid.h b/src/Common/waitForPid.h new file mode 100644 index 00000000000..71c1a74712c --- /dev/null +++ b/src/Common/waitForPid.h @@ -0,0 +1,12 @@ +#pragma once +#include + +namespace DB +{ +/* + * Waits for a specific pid with timeout, using modern Linux and OSX facilities + * Returns `true` if process terminated successfully or `false` otherwise + */ +bool waitForPid(pid_t pid, size_t timeout_in_seconds); + +} diff --git a/src/Compression/CachedCompressedReadBuffer.cpp b/src/Compression/CachedCompressedReadBuffer.cpp index 7f6422ae734..8abc16ebb2a 100644 --- a/src/Compression/CachedCompressedReadBuffer.cpp +++ b/src/Compression/CachedCompressedReadBuffer.cpp @@ -89,7 +89,7 @@ void CachedCompressedReadBuffer::seek(size_t offset_in_compressed_file, size_t o { /// Nothing to do if we already at required position if (!owned_cell && file_pos == offset_in_compressed_file - && (offset() == offset_in_decompressed_block || + && ((!buffer().empty() && offset() == offset_in_decompressed_block) || nextimpl_working_buffer_offset == offset_in_decompressed_block)) return; diff --git a/src/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp index 81e49e445a7..2c85dc6d9a9 100644 --- a/src/Compression/CompressedReadBufferBase.cpp +++ b/src/Compression/CompressedReadBufferBase.cpp @@ -106,21 +106,15 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c throw Exception(message.str(), 
ErrorCodes::CHECKSUM_DOESNT_MATCH); } - -/// Read compressed data into compressed_buffer. Get size of decompressed data from block header. Checksum if need. -/// Returns number of compressed bytes read. -size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed, size_t & size_compressed_without_checksum, bool always_copy) +static void readHeaderAndGetCodecAndSize( + const char * compressed_buffer, + UInt8 header_size, + CompressionCodecPtr & codec, + size_t & size_decompressed, + size_t & size_compressed_without_checksum, + bool allow_different_codecs) { - if (compressed_in->eof()) - return 0; - - UInt8 header_size = ICompressionCodec::getHeaderSize(); - own_compressed_buffer.resize(header_size + sizeof(Checksum)); - - compressed_in->readStrict(own_compressed_buffer.data(), sizeof(Checksum) + header_size); - char * compressed_header = own_compressed_buffer.data() + sizeof(Checksum); - - uint8_t method = ICompressionCodec::readMethod(compressed_header); + uint8_t method = ICompressionCodec::readMethod(compressed_buffer); if (!codec) { @@ -142,8 +136,8 @@ size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed, } } - size_compressed_without_checksum = ICompressionCodec::readCompressedBlockSize(compressed_header); - size_decompressed = ICompressionCodec::readDecompressedBlockSize(compressed_header); + size_compressed_without_checksum = ICompressionCodec::readCompressedBlockSize(compressed_buffer); + size_decompressed = ICompressionCodec::readDecompressedBlockSize(compressed_buffer); /// This is for clang static analyzer. assert(size_decompressed > 0); @@ -157,8 +151,27 @@ size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed, if (size_compressed_without_checksum < header_size) throw Exception("Can't decompress data: the compressed data size (" + toString(size_compressed_without_checksum) + ", this should include header size) is less than the header size (" + toString(header_size) + ")", ErrorCodes::CORRUPTED_DATA); +} - ProfileEvents::increment(ProfileEvents::ReadCompressedBytes, size_compressed_without_checksum + sizeof(Checksum)); +/// Read compressed data into compressed_buffer. Get size of decompressed data from block header. Checksum if need. +/// Returns number of compressed bytes read. +size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed, size_t & size_compressed_without_checksum, bool always_copy) +{ + if (compressed_in->eof()) + return 0; + + UInt8 header_size = ICompressionCodec::getHeaderSize(); + own_compressed_buffer.resize(header_size + sizeof(Checksum)); + + compressed_in->readStrict(own_compressed_buffer.data(), sizeof(Checksum) + header_size); + + readHeaderAndGetCodecAndSize( + own_compressed_buffer.data() + sizeof(Checksum), + header_size, + codec, + size_decompressed, + size_compressed_without_checksum, + allow_different_codecs); auto additional_size_at_the_end_of_buffer = codec->getAdditionalSizeAtTheEndOfBuffer(); @@ -184,9 +197,55 @@ size_t CompressedReadBufferBase::readCompressedData(size_t & size_decompressed, validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum); } + ProfileEvents::increment(ProfileEvents::ReadCompressedBytes, size_compressed_without_checksum + sizeof(Checksum)); return size_compressed_without_checksum + sizeof(Checksum); } +/// Read compressed data into compressed_buffer for asynchronous decompression to avoid the situation of "read compressed block across the compressed_in". 
+size_t CompressedReadBufferBase::readCompressedDataBlockForAsynchronous(size_t & size_decompressed, size_t & size_compressed_without_checksum) +{ + UInt8 header_size = ICompressionCodec::getHeaderSize(); + /// Make sure the whole header located in 'compressed_in->' buffer. + if (compressed_in->eof() || (compressed_in->available() < (header_size + sizeof(Checksum)))) + return 0; + + own_compressed_buffer.resize(header_size + sizeof(Checksum)); + compressed_in->readStrict(own_compressed_buffer.data(), sizeof(Checksum) + header_size); + + readHeaderAndGetCodecAndSize( + own_compressed_buffer.data() + sizeof(Checksum), + header_size, + codec, + size_decompressed, + size_compressed_without_checksum, + allow_different_codecs); + + auto additional_size_at_the_end_of_buffer = codec->getAdditionalSizeAtTheEndOfBuffer(); + + /// Make sure the whole compressed block located in 'compressed_in->' buffer. + /// Otherwise, abandon header and restore original offset of compressed_in + if (compressed_in->offset() >= header_size + sizeof(Checksum) && + compressed_in->available() >= (size_compressed_without_checksum - header_size) + additional_size_at_the_end_of_buffer + sizeof(Checksum)) + { + compressed_in->position() -= header_size; + compressed_buffer = compressed_in->position(); + compressed_in->position() += size_compressed_without_checksum; + + if (!disable_checksum) + { + Checksum & checksum = *reinterpret_cast(own_compressed_buffer.data()); + validateChecksum(compressed_buffer, size_compressed_without_checksum, checksum); + } + + ProfileEvents::increment(ProfileEvents::ReadCompressedBytes, size_compressed_without_checksum + sizeof(Checksum)); + return size_compressed_without_checksum + sizeof(Checksum); + } + else + { + compressed_in->position() -= (sizeof(Checksum) + header_size); + return 0; + } +} static void readHeaderAndGetCodec(const char * compressed_buffer, size_t size_decompressed, CompressionCodecPtr & codec, bool allow_different_codecs) { @@ -216,14 +275,12 @@ static void readHeaderAndGetCodec(const char * compressed_buffer, size_t size_de } } - void CompressedReadBufferBase::decompressTo(char * to, size_t size_decompressed, size_t size_compressed_without_checksum) { readHeaderAndGetCodec(compressed_buffer, size_decompressed, codec, allow_different_codecs); codec->decompress(compressed_buffer, size_compressed_without_checksum, to); } - void CompressedReadBufferBase::decompress(BufferBase::Buffer & to, size_t size_decompressed, size_t size_compressed_without_checksum) { readHeaderAndGetCodec(compressed_buffer, size_decompressed, codec, allow_different_codecs); @@ -245,6 +302,17 @@ void CompressedReadBufferBase::decompress(BufferBase::Buffer & to, size_t size_d codec->decompress(compressed_buffer, size_compressed_without_checksum, to.begin()); } +void CompressedReadBufferBase::flushAsynchronousDecompressRequests() const +{ + if (codec) + codec->flushAsynchronousDecompressRequests(); +} + +void CompressedReadBufferBase::setDecompressMode(ICompressionCodec::CodecMode mode) const +{ + if (codec) + codec->setDecompressMode(mode); +} /// 'compressed_in' could be initialized lazily, but before first call of 'readCompressedData'. CompressedReadBufferBase::CompressedReadBufferBase(ReadBuffer * in, bool allow_different_codecs_) @@ -253,7 +321,7 @@ CompressedReadBufferBase::CompressedReadBufferBase(ReadBuffer * in, bool allow_d } -CompressedReadBufferBase::~CompressedReadBufferBase() = default; /// Proper destruction of unique_ptr of forward-declared type. 
+CompressedReadBufferBase::~CompressedReadBufferBase() = default; /// Proper destruction of unique_ptr of forward-declared type. } diff --git a/src/Compression/CompressedReadBufferBase.h b/src/Compression/CompressedReadBufferBase.h index 152447c0b64..baea4d2b855 100644 --- a/src/Compression/CompressedReadBufferBase.h +++ b/src/Compression/CompressedReadBufferBase.h @@ -39,6 +39,17 @@ protected: /// Returns number of compressed bytes read. size_t readCompressedData(size_t & size_decompressed, size_t & size_compressed_without_checksum, bool always_copy); + /// Read compressed data into compressed_buffer for asynchronous decompression to avoid the situation of "read compressed block across the compressed_in". + /// + /// Compressed block may not be completely contained in "compressed_in" buffer which means compressed block may be read across the "compressed_in". + /// For native LZ4/ZSTD, it has no problem in facing situation above because they are synchronous. + /// But for asynchronous decompression, such as QPL deflate, it requires source and target buffer for decompression can not be overwritten until execution complete. + /// + /// Returns number of compressed bytes read. + /// If Returns value > 0, means the address range for current block are maintained in "compressed_in", then asynchronous decompression can be called to boost performance. + /// If Returns value == 0, it means current block cannot be decompressed asynchronously.Meanwhile, asynchronous requests for previous blocks should be flushed if any. + size_t readCompressedDataBlockForAsynchronous(size_t & size_decompressed, size_t & size_compressed_without_checksum); + /// Decompress into memory pointed by `to` void decompressTo(char * to, size_t size_decompressed, size_t size_compressed_without_checksum); @@ -46,6 +57,14 @@ protected: /// It is more efficient for compression codec NONE but not suitable if you want to decompress into specific location. void decompress(BufferBase::Buffer & to, size_t size_decompressed, size_t size_compressed_without_checksum); + /// Flush all asynchronous decompress request. + void flushAsynchronousDecompressRequests() const; + + /// Set decompression mode: Synchronous/Asynchronous/SoftwareFallback. + /// The mode is "Synchronous" by default. + /// flushAsynchronousDecompressRequests must be called subsequently once set "Asynchronous" mode. + void setDecompressMode(ICompressionCodec::CodecMode mode) const; + public: /// 'compressed_in' could be initialized lazily, but before first call of 'readCompressedData'. explicit CompressedReadBufferBase(ReadBuffer * in = nullptr, bool allow_different_codecs_ = false); diff --git a/src/Compression/CompressedReadBufferFromFile.cpp b/src/Compression/CompressedReadBufferFromFile.cpp index 0c347b7ce2c..68f6757e04d 100644 --- a/src/Compression/CompressedReadBufferFromFile.cpp +++ b/src/Compression/CompressedReadBufferFromFile.cpp @@ -91,6 +91,9 @@ void CompressedReadBufferFromFile::seek(size_t offset_in_compressed_file, size_t size_t CompressedReadBufferFromFile::readBig(char * to, size_t n) { size_t bytes_read = 0; + /// The codec mode is only relevant for codecs which support hardware offloading. + ICompressionCodec::CodecMode decompress_mode = ICompressionCodec::CodecMode::Synchronous; + bool read_tail = false; /// If there are unread bytes in the buffer, then we copy needed to `to`. 
if (pos < working_buffer.end()) @@ -102,10 +105,28 @@ size_t CompressedReadBufferFromFile::readBig(char * to, size_t n) size_t size_decompressed = 0; size_t size_compressed_without_checksum = 0; - size_t new_size_compressed = readCompressedData(size_decompressed, size_compressed_without_checksum, false); + ///Try to read block which is entirely located in a single 'compressed_in->' buffer. + size_t new_size_compressed = readCompressedDataBlockForAsynchronous(size_decompressed, size_compressed_without_checksum); + + if (new_size_compressed) + { + /// Current block is entirely located in a single 'compressed_in->' buffer. + /// We can set asynchronous decompression mode if supported to boost performance. + decompress_mode = ICompressionCodec::CodecMode::Asynchronous; + } + else + { + /// Current block cannot be decompressed asynchronously, means it probably span across two compressed_in buffers. + /// Meanwhile, asynchronous requests for previous blocks should be flushed if any. + flushAsynchronousDecompressRequests(); + /// Fallback to generic API + new_size_compressed = readCompressedData(size_decompressed, size_compressed_without_checksum, false); + decompress_mode = ICompressionCodec::CodecMode::Synchronous; + } size_compressed = 0; /// file_in no longer points to the end of the block in working_buffer. + if (!new_size_compressed) - return bytes_read; + break; auto additional_size_at_the_end_of_buffer = codec->getAdditionalSizeAtTheEndOfBuffer(); @@ -113,6 +134,7 @@ size_t CompressedReadBufferFromFile::readBig(char * to, size_t n) /// need to skip some bytes in decompressed data (seek happened before readBig call). if (nextimpl_working_buffer_offset == 0 && size_decompressed + additional_size_at_the_end_of_buffer <= n - bytes_read) { + setDecompressMode(decompress_mode); decompressTo(to + bytes_read, size_decompressed, size_compressed_without_checksum); bytes_read += size_decompressed; bytes += size_decompressed; @@ -127,6 +149,8 @@ size_t CompressedReadBufferFromFile::readBig(char * to, size_t n) assert(size_decompressed + additional_size_at_the_end_of_buffer > 0); memory.resize(size_decompressed + additional_size_at_the_end_of_buffer); working_buffer = Buffer(memory.data(), &memory[size_decompressed]); + /// Synchronous mode must be set since we need read partial data immediately from working buffer to target buffer. + setDecompressMode(ICompressionCodec::CodecMode::Synchronous); decompress(working_buffer, size_decompressed, size_compressed_without_checksum); /// Read partial data from first block. Won't run here at second block. @@ -145,15 +169,25 @@ size_t CompressedReadBufferFromFile::readBig(char * to, size_t n) assert(size_decompressed + additional_size_at_the_end_of_buffer > 0); memory.resize(size_decompressed + additional_size_at_the_end_of_buffer); working_buffer = Buffer(memory.data(), &memory[size_decompressed]); + // Asynchronous mode can be set here because working_buffer wouldn't be overwritten any more since this is the last block. + setDecompressMode(ICompressionCodec::CodecMode::Asynchronous); decompress(working_buffer, size_decompressed, size_compressed_without_checksum); - - ///Read partial data from last block. - pos = working_buffer.begin(); - bytes_read += read(to + bytes_read, n - bytes_read); + read_tail = true; break; } } + /// Here we must make sure all asynchronous requests above are completely done. 
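+    /// Otherwise the tail read below (and the caller) could reuse 'to' / 'working_buffer' while the accelerator
+    /// is still writing into them: asynchronous decompression requires the source and target buffers to stay
+    /// untouched until the requests are flushed.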
+ flushAsynchronousDecompressRequests(); + + if (read_tail) + { + /// Manually take nextimpl_working_buffer_offset into account, because we don't use + /// nextImpl in this method. + pos = working_buffer.begin(); + bytes_read += read(to + bytes_read, n - bytes_read); + } + return bytes_read; } diff --git a/src/Compression/CompressionCodecDeflateQpl.cpp b/src/Compression/CompressionCodecDeflateQpl.cpp new file mode 100644 index 00000000000..81ec7ee5dca --- /dev/null +++ b/src/Compression/CompressionCodecDeflateQpl.cpp @@ -0,0 +1,413 @@ +#ifdef ENABLE_QPL_COMPRESSION +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ +namespace ErrorCodes +{ + extern const int CANNOT_COMPRESS; + extern const int CANNOT_DECOMPRESS; +} + +std::array DeflateQplJobHWPool::hw_job_ptr_pool; +std::array DeflateQplJobHWPool::hw_job_ptr_locks; +bool DeflateQplJobHWPool::job_pool_ready = false; +std::unique_ptr DeflateQplJobHWPool::hw_jobs_buffer; + +DeflateQplJobHWPool & DeflateQplJobHWPool::instance() +{ + static DeflateQplJobHWPool pool; + return pool; +} + +DeflateQplJobHWPool::DeflateQplJobHWPool() + :random_engine(std::random_device()()) + ,distribution(0, MAX_HW_JOB_NUMBER-1) +{ + Poco::Logger * log = &Poco::Logger::get("DeflateQplJobHWPool"); + UInt32 job_size = 0; + const char * qpl_version = qpl_get_library_version(); + + /// Get size required for saving a single qpl job object + qpl_get_job_size(qpl_path_hardware, &job_size); + /// Allocate entire buffer for storing all job objects + hw_jobs_buffer = std::make_unique(job_size * MAX_HW_JOB_NUMBER); + /// Initialize pool for storing all job object pointers + /// Reallocate buffer by shifting address offset for each job object. + for (UInt32 index = 0; index < MAX_HW_JOB_NUMBER; ++index) + { + qpl_job * qpl_job_ptr = reinterpret_cast(hw_jobs_buffer.get() + index * job_size); + if (qpl_init_job(qpl_path_hardware, qpl_job_ptr) != QPL_STS_OK) + { + job_pool_ready = false; + LOG_WARNING(log, "Initialization of hardware-assisted DeflateQpl codec failed, falling back to software DeflateQpl codec. Please check if Intel In-Memory Analytics Accelerator (IAA) is properly set up. QPL Version: {}.",qpl_version); + return; + } + hw_job_ptr_pool[index] = qpl_job_ptr; + unLockJob(index); + } + + job_pool_ready = true; + LOG_DEBUG(log, "Hardware-assisted DeflateQpl codec is ready! 
QPL Version: {}",qpl_version); +} + +DeflateQplJobHWPool::~DeflateQplJobHWPool() +{ + for (UInt32 i = 0; i < MAX_HW_JOB_NUMBER; ++i) + { + if (hw_job_ptr_pool[i]) + { + while (!tryLockJob(i)); + qpl_fini_job(hw_job_ptr_pool[i]); + unLockJob(i); + hw_job_ptr_pool[i] = nullptr; + } + } + job_pool_ready = false; +} + +qpl_job * DeflateQplJobHWPool::acquireJob(UInt32 &job_id) +{ + if (isJobPoolReady()) + { + UInt32 retry = 0; + auto index = distribution(random_engine); + while (!tryLockJob(index)) + { + index = distribution(random_engine); + retry++; + if (retry > MAX_HW_JOB_NUMBER) + { + return nullptr; + } + } + job_id = MAX_HW_JOB_NUMBER - index; + assert(index < MAX_HW_JOB_NUMBER); + return hw_job_ptr_pool[index]; + } + else + return nullptr; +} + +void DeflateQplJobHWPool::releaseJob(UInt32 job_id) +{ + if (isJobPoolReady()) + unLockJob(MAX_HW_JOB_NUMBER - job_id); +} + +bool DeflateQplJobHWPool::tryLockJob(UInt32 index) +{ + bool expected = false; + assert(index < MAX_HW_JOB_NUMBER); + return hw_job_ptr_locks[index].compare_exchange_strong(expected, true); +} + +void DeflateQplJobHWPool::unLockJob(UInt32 index) +{ + assert(index < MAX_HW_JOB_NUMBER); + hw_job_ptr_locks[index].store(false); +} + +//HardwareCodecDeflateQpl +HardwareCodecDeflateQpl::HardwareCodecDeflateQpl() + :log(&Poco::Logger::get("HardwareCodecDeflateQpl")) +{ +} + +HardwareCodecDeflateQpl::~HardwareCodecDeflateQpl() +{ +#ifndef NDEBUG + assert(decomp_async_job_map.empty()); +#else + if (!decomp_async_job_map.empty()) + { + LOG_WARNING(log, "Find un-released job when HardwareCodecDeflateQpl destroy"); + for (auto it : decomp_async_job_map) + { + DeflateQplJobHWPool::instance().releaseJob(it.first); + } + decomp_async_job_map.clear(); + } +#endif +} + +Int32 HardwareCodecDeflateQpl::doCompressData(const char * source, UInt32 source_size, char * dest, UInt32 dest_size) const +{ + UInt32 job_id = 0; + qpl_job* job_ptr = nullptr; + UInt32 compressed_size = 0; + if (!(job_ptr = DeflateQplJobHWPool::instance().acquireJob(job_id))) + { + LOG_INFO(log, "DeflateQpl HW codec failed, falling back to SW codec.(Details: doCompressData->acquireJob fail, probably job pool exhausted)"); + return RET_ERROR; + } + + job_ptr->op = qpl_op_compress; + job_ptr->next_in_ptr = reinterpret_cast(const_cast(source)); + job_ptr->next_out_ptr = reinterpret_cast(dest); + job_ptr->available_in = source_size; + job_ptr->level = qpl_default_level; + job_ptr->available_out = dest_size; + job_ptr->flags = QPL_FLAG_FIRST | QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_LAST | QPL_FLAG_OMIT_VERIFY; + + if (auto status = qpl_execute_job(job_ptr); status == QPL_STS_OK) + { + compressed_size = job_ptr->total_out; + DeflateQplJobHWPool::instance().releaseJob(job_id); + return compressed_size; + } + else + { + LOG_WARNING(log, "DeflateQpl HW codec failed, falling back to SW codec.(Details: doCompressData->qpl_execute_job with error code: {} - please refer to qpl_status in ./contrib/qpl/include/qpl/c_api/status.h)", status); + DeflateQplJobHWPool::instance().releaseJob(job_id); + return RET_ERROR; + } +} + +Int32 HardwareCodecDeflateQpl::doDecompressDataSynchronous(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) +{ + UInt32 job_id = 0; + qpl_job * job_ptr = nullptr; + UInt32 decompressed_size = 0; + if (!(job_ptr = DeflateQplJobHWPool::instance().acquireJob(job_id))) + { + LOG_INFO(log, "DeflateQpl HW codec failed, falling back to SW codec.(Details: doDecompressDataSynchronous->acquireJob fail, probably job pool exhausted)"); + return 
RET_ERROR; + } + + // Performing a decompression operation + job_ptr->op = qpl_op_decompress; + job_ptr->next_in_ptr = reinterpret_cast(const_cast(source)); + job_ptr->next_out_ptr = reinterpret_cast(dest); + job_ptr->available_in = source_size; + job_ptr->available_out = uncompressed_size; + job_ptr->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST; + + if (auto status = qpl_submit_job(job_ptr); status != QPL_STS_OK) + { + DeflateQplJobHWPool::instance().releaseJob(job_id); + LOG_WARNING(log, "DeflateQpl HW codec failed, falling back to SW codec.(Details: doDecompressDataSynchronous->qpl_execute_job with error code: {} - please refer to qpl_status in ./contrib/qpl/include/qpl/c_api/status.h)", status); + return RET_ERROR; + } + /// Busy waiting till job complete. + do + { + _tpause(1, __rdtsc() + 1000); + } while (qpl_check_job(job_ptr) == QPL_STS_BEING_PROCESSED); + + decompressed_size = job_ptr->total_out; + DeflateQplJobHWPool::instance().releaseJob(job_id); + return decompressed_size; +} + +Int32 HardwareCodecDeflateQpl::doDecompressDataAsynchronous(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) +{ + UInt32 job_id = 0; + qpl_job * job_ptr = nullptr; + if (!(job_ptr = DeflateQplJobHWPool::instance().acquireJob(job_id))) + { + LOG_INFO(log, "DeflateQpl HW codec failed, falling back to SW codec.(Details: doDecompressDataAsynchronous->acquireJob fail, probably job pool exhausted)"); + return RET_ERROR; + } + + // Performing a decompression operation + job_ptr->op = qpl_op_decompress; + job_ptr->next_in_ptr = reinterpret_cast(const_cast(source)); + job_ptr->next_out_ptr = reinterpret_cast(dest); + job_ptr->available_in = source_size; + job_ptr->available_out = uncompressed_size; + job_ptr->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST; + + if (auto status = qpl_submit_job(job_ptr); status == QPL_STS_OK) + { + decomp_async_job_map.insert({job_id, job_ptr}); + return job_id; + } + else + { + DeflateQplJobHWPool::instance().releaseJob(job_id); + LOG_WARNING(log, "DeflateQpl HW codec failed, falling back to SW codec.(Details: doDecompressDataAsynchronous->qpl_execute_job with error code: {} - please refer to qpl_status in ./contrib/qpl/include/qpl/c_api/status.h)", status); + return RET_ERROR; + } +} + +void HardwareCodecDeflateQpl::flushAsynchronousDecompressRequests() +{ + UInt32 n_jobs_processing = decomp_async_job_map.size(); + std::map::iterator it = decomp_async_job_map.begin(); + + while (n_jobs_processing) + { + UInt32 job_id = 0; + qpl_job * job_ptr = nullptr; + job_id = it->first; + job_ptr = it->second; + + if (qpl_check_job(job_ptr) == QPL_STS_BEING_PROCESSED) + { + it++; + } + else + { + it = decomp_async_job_map.erase(it); + DeflateQplJobHWPool::instance().releaseJob(job_id); + n_jobs_processing--; + if (n_jobs_processing <= 0) + break; + } + if (it == decomp_async_job_map.end()) + { + it = decomp_async_job_map.begin(); + _tpause(1, __rdtsc() + 1000); + } + } +} + +SoftwareCodecDeflateQpl::~SoftwareCodecDeflateQpl() +{ + if (!sw_job) + qpl_fini_job(sw_job); +} + +qpl_job * SoftwareCodecDeflateQpl::getJobCodecPtr() +{ + if (!sw_job) + { + UInt32 size = 0; + qpl_get_job_size(qpl_path_software, &size); + + sw_buffer = std::make_unique(size); + sw_job = reinterpret_cast(sw_buffer.get()); + + // Job initialization + if (auto status = qpl_init_job(qpl_path_software, sw_job); status != QPL_STS_OK) + throw Exception(ErrorCodes::CANNOT_COMPRESS, + "Initialization of DeflateQpl software fallback codec failed. 
(Details: qpl_init_job with error code: {} - please refer to qpl_status in ./contrib/qpl/include/qpl/c_api/status.h)", status); + } + return sw_job; +} + +UInt32 SoftwareCodecDeflateQpl::doCompressData(const char * source, UInt32 source_size, char * dest, UInt32 dest_size) +{ + qpl_job * job_ptr = getJobCodecPtr(); + // Performing a compression operation + job_ptr->op = qpl_op_compress; + job_ptr->next_in_ptr = reinterpret_cast(const_cast(source)); + job_ptr->next_out_ptr = reinterpret_cast(dest); + job_ptr->available_in = source_size; + job_ptr->available_out = dest_size; + job_ptr->level = qpl_default_level; + job_ptr->flags = QPL_FLAG_FIRST | QPL_FLAG_DYNAMIC_HUFFMAN | QPL_FLAG_LAST | QPL_FLAG_OMIT_VERIFY; + + if (auto status = qpl_execute_job(job_ptr); status != QPL_STS_OK) + throw Exception(ErrorCodes::CANNOT_COMPRESS, + "Execution of DeflateQpl software fallback codec failed. (Details: qpl_execute_job with error code: {} - please refer to qpl_status in ./contrib/qpl/include/qpl/c_api/status.h)", status); + + return job_ptr->total_out; +} + +void SoftwareCodecDeflateQpl::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) +{ + qpl_job * job_ptr = getJobCodecPtr(); + + // Performing a decompression operation + job_ptr->op = qpl_op_decompress; + job_ptr->next_in_ptr = reinterpret_cast(const_cast(source)); + job_ptr->next_out_ptr = reinterpret_cast(dest); + job_ptr->available_in = source_size; + job_ptr->available_out = uncompressed_size; + job_ptr->flags = QPL_FLAG_FIRST | QPL_FLAG_LAST; + + if (auto status = qpl_execute_job(job_ptr); status != QPL_STS_OK) + throw Exception(ErrorCodes::CANNOT_DECOMPRESS, + "Execution of DeflateQpl software fallback codec failed. (Details: qpl_execute_job with error code: {} - please refer to qpl_status in ./contrib/qpl/include/qpl/c_api/status.h)", status); +} + +//CompressionCodecDeflateQpl +CompressionCodecDeflateQpl::CompressionCodecDeflateQpl() + :hw_codec(std::make_unique()) + ,sw_codec(std::make_unique()) +{ + setCodecDescription("DEFLATE_QPL"); +} + +uint8_t CompressionCodecDeflateQpl::getMethodByte() const +{ + return static_cast(CompressionMethodByte::DeflateQpl); +} + +void CompressionCodecDeflateQpl::updateHash(SipHash & hash) const +{ + getCodecDesc()->updateTreeHash(hash); +} + +UInt32 CompressionCodecDeflateQpl::getMaxCompressedDataSize(UInt32 uncompressed_size) const +{ + /// Aligned with ZLIB + return ((uncompressed_size) + ((uncompressed_size) >> 12) + ((uncompressed_size) >> 14) + ((uncompressed_size) >> 25) + 13); +} + +UInt32 CompressionCodecDeflateQpl::doCompressData(const char * source, UInt32 source_size, char * dest) const +{ + Int32 res = HardwareCodecDeflateQpl::RET_ERROR; + if (DeflateQplJobHWPool::instance().isJobPoolReady()) + res = hw_codec->doCompressData(source, source_size, dest, getMaxCompressedDataSize(source_size)); + if (res == HardwareCodecDeflateQpl::RET_ERROR) + res = sw_codec->doCompressData(source, source_size, dest, getMaxCompressedDataSize(source_size)); + return res; +} + +void CompressionCodecDeflateQpl::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const +{ + switch (getDecompressMode()) + { + case CodecMode::Synchronous: + { + Int32 res = HardwareCodecDeflateQpl::RET_ERROR; + if (DeflateQplJobHWPool::instance().isJobPoolReady()) + { + res = hw_codec->doDecompressDataSynchronous(source, source_size, dest, uncompressed_size); + if (res == HardwareCodecDeflateQpl::RET_ERROR) + sw_codec->doDecompressData(source, 
source_size, dest, uncompressed_size); + } + else + sw_codec->doDecompressData(source, source_size, dest, uncompressed_size); + return; + } + case CodecMode::Asynchronous: + { + Int32 res = HardwareCodecDeflateQpl::RET_ERROR; + if (DeflateQplJobHWPool::instance().isJobPoolReady()) + res = hw_codec->doDecompressDataAsynchronous(source, source_size, dest, uncompressed_size); + if (res == HardwareCodecDeflateQpl::RET_ERROR) + sw_codec->doDecompressData(source, source_size, dest, uncompressed_size); + return; + } + case CodecMode::SoftwareFallback: + sw_codec->doDecompressData(source, source_size, dest, uncompressed_size); + return; + } + __builtin_unreachable(); +} + +void CompressionCodecDeflateQpl::flushAsynchronousDecompressRequests() +{ + if (DeflateQplJobHWPool::instance().isJobPoolReady()) + hw_codec->flushAsynchronousDecompressRequests(); + /// After flush previous all async requests, we must restore mode to be synchronous by default. + setDecompressMode(CodecMode::Synchronous); +} +void registerCodecDeflateQpl(CompressionCodecFactory & factory) +{ + factory.registerSimpleCompressionCodec( + "DEFLATE_QPL", static_cast(CompressionMethodByte::DeflateQpl), [&]() { return std::make_shared(); }); +} +} +#endif diff --git a/src/Compression/CompressionCodecDeflateQpl.h b/src/Compression/CompressionCodecDeflateQpl.h new file mode 100644 index 00000000000..c15f537fd3f --- /dev/null +++ b/src/Compression/CompressionCodecDeflateQpl.h @@ -0,0 +1,120 @@ +#pragma once + +#include +#include +#include + +namespace Poco +{ +class Logger; +} + +namespace DB +{ + +/// DeflateQplJobHWPool is resource pool to provide the job objects. +/// Job object is used for storing context information during offloading compression job to HW Accelerator. +class DeflateQplJobHWPool +{ +public: + DeflateQplJobHWPool(); + + ~DeflateQplJobHWPool(); + + qpl_job * acquireJob(UInt32 &job_id); + + static void releaseJob(UInt32 job_id); + + static const bool & isJobPoolReady() { return job_pool_ready; } + + static DeflateQplJobHWPool & instance(); + +private: + static bool tryLockJob(UInt32 index); + + static void unLockJob(UInt32 index); + + /// Maximum jobs running in parallel supported by IAA hardware + static constexpr auto MAX_HW_JOB_NUMBER = 1024; + /// Entire buffer for storing all job objects + static std::unique_ptr hw_jobs_buffer; + /// Job pool for storing all job object pointers + static std::array hw_job_ptr_pool; + /// Locks for accessing each job object pointers + static std::array hw_job_ptr_locks; + static bool job_pool_ready; + std::mt19937 random_engine; + std::uniform_int_distribution distribution; +}; + +class SoftwareCodecDeflateQpl +{ +public: + ~SoftwareCodecDeflateQpl(); + UInt32 doCompressData(const char * source, UInt32 source_size, char * dest, UInt32 dest_size); + void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size); + +private: + qpl_job * sw_job = nullptr; + std::unique_ptr sw_buffer; + qpl_job * getJobCodecPtr(); +}; + +class HardwareCodecDeflateQpl +{ +public: + /// RET_ERROR stands for hardware codec fail,need fallback to software codec. + static constexpr Int32 RET_ERROR = -1; + + HardwareCodecDeflateQpl(); + ~HardwareCodecDeflateQpl(); + Int32 doCompressData(const char * source, UInt32 source_size, char * dest, UInt32 dest_size) const; + + ///Submit job request to the IAA hardware and then busy waiting till it complete. 
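+    /// Returns the number of decompressed bytes, or RET_ERROR when no hardware job could be acquired or the
+    /// submission failed; in that case the caller falls back to SoftwareCodecDeflateQpl.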
+ Int32 doDecompressDataSynchronous(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size); + + ///Submit job request to the IAA hardware and return immediately. IAA hardware will process decompression jobs automatically. + Int32 doDecompressDataAsynchronous(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size); + + /// Flush result for all previous requests which means busy waiting till all the jobs in "decomp_async_job_map" are finished. + /// Must be called subsequently after several calls of doDecompressDataReq. + void flushAsynchronousDecompressRequests(); + +private: + /// Asynchronous job map for decompression: job ID - job object. + /// For each submission, push job ID && job object into this map; + /// For flush, pop out job ID && job object from this map. Use job ID to release job lock and use job object to check job status till complete. + std::map decomp_async_job_map; + Poco::Logger * log; +}; + +class CompressionCodecDeflateQpl : public ICompressionCodec +{ +public: + CompressionCodecDeflateQpl(); + uint8_t getMethodByte() const override; + void updateHash(SipHash & hash) const override; + +protected: + bool isCompression() const override + { + return true; + } + + bool isGenericCompression() const override + { + return true; + } + + UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const override; + void doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const override; + ///Flush result for previous asynchronous decompression requests on asynchronous mode. + void flushAsynchronousDecompressRequests() override; + +private: + UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const override; + std::unique_ptr hw_codec; + std::unique_ptr sw_codec; +}; + +} diff --git a/src/Compression/CompressionCodecEncrypted.cpp b/src/Compression/CompressionCodecEncrypted.cpp index f7e597a0519..bf36fa114fb 100644 --- a/src/Compression/CompressionCodecEncrypted.cpp +++ b/src/Compression/CompressionCodecEncrypted.cpp @@ -131,7 +131,7 @@ std::string lastErrorString() /// This function get key and nonce and encrypt text with their help. /// If something went wrong (can't init context or can't encrypt data) it throws exception. /// It returns length of encrypted text. -size_t encrypt(const std::string_view & plaintext, char * ciphertext_and_tag, EncryptionMethod method, const String & key, const String & nonce) +size_t encrypt(std::string_view plaintext, char * ciphertext_and_tag, EncryptionMethod method, const String & key, const String & nonce) { /// Init context for encryption, using key. EVP_AEAD_CTX encrypt_ctx; @@ -160,7 +160,7 @@ size_t encrypt(const std::string_view & plaintext, char * ciphertext_and_tag, En /// This function get key and nonce and encrypt text with their help. /// If something went wrong (can't init context or can't encrypt data) it throws exception. /// It returns length of encrypted text. -size_t decrypt(const std::string_view & ciphertext, char * plaintext, EncryptionMethod method, const String & key, const String & nonce) +size_t decrypt(std::string_view ciphertext, char * plaintext, EncryptionMethod method, const String & key, const String & nonce) { /// Init context for decryption with given key. 
EVP_AEAD_CTX decrypt_ctx; diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index b8a1c5877a4..7291d42f681 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -166,7 +166,7 @@ void registerCodecLZ4(CompressionCodecFactory & factory); void registerCodecLZ4HC(CompressionCodecFactory & factory); void registerCodecZSTD(CompressionCodecFactory & factory); void registerCodecMultiple(CompressionCodecFactory & factory); - +void registerCodecDeflateQpl(CompressionCodecFactory & factory); /// Keeper use only general-purpose codecs, so we don't need these special codecs /// in standalone build @@ -188,7 +188,6 @@ CompressionCodecFactory::CompressionCodecFactory() registerCodecZSTD(*this); registerCodecLZ4HC(*this); registerCodecMultiple(*this); - #ifndef KEEPER_STANDALONE_BUILD registerCodecDelta(*this); registerCodecT64(*this); @@ -196,6 +195,9 @@ CompressionCodecFactory::CompressionCodecFactory() registerCodecGorilla(*this); registerCodecEncrypted(*this); registerCodecFPC(*this); + #ifdef ENABLE_QPL_COMPRESSION + registerCodecDeflateQpl(*this); + #endif #endif default_codec = get("LZ4", {}); diff --git a/src/Compression/CompressionInfo.h b/src/Compression/CompressionInfo.h index 839fb68e8c3..985d74bbb74 100644 --- a/src/Compression/CompressionInfo.h +++ b/src/Compression/CompressionInfo.h @@ -45,7 +45,8 @@ enum class CompressionMethodByte : uint8_t Gorilla = 0x95, AES_128_GCM_SIV = 0x96, AES_256_GCM_SIV = 0x97, - FPC = 0x98 + FPC = 0x98, + DeflateQpl = 0x99, }; } diff --git a/src/Compression/ICompressionCodec.cpp b/src/Compression/ICompressionCodec.cpp index ba52aee69f8..c48ca99d452 100644 --- a/src/Compression/ICompressionCodec.cpp +++ b/src/Compression/ICompressionCodec.cpp @@ -91,7 +91,6 @@ UInt32 ICompressionCodec::compress(const char * source, UInt32 source_size, char return header_size + compressed_bytes_written; } - UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, char * dest) const { assert(source != nullptr && dest != nullptr); diff --git a/src/Compression/ICompressionCodec.h b/src/Compression/ICompressionCodec.h index a741e65dfdd..f40404a84f3 100644 --- a/src/Compression/ICompressionCodec.h +++ b/src/Compression/ICompressionCodec.h @@ -45,9 +45,37 @@ public: /// Compressed bytes from uncompressed source to dest. Dest should preallocate memory UInt32 compress(const char * source, UInt32 source_size, char * dest) const; - /// Decompress bytes from compressed source to dest. Dest should preallocate memory + /// Decompress bytes from compressed source to dest. Dest should preallocate memory; UInt32 decompress(const char * source, UInt32 source_size, char * dest) const; + /// Three kinds of codec mode: + /// Synchronous mode which is commonly used by default; + /// --- For the codec with HW decompressor, it means submit request to HW and busy wait till complete. + /// Asynchronous mode which required HW decompressor support; + /// --- For the codec with HW decompressor, it means submit request to HW and return immediately. + /// --- Must be used in pair with flushAsynchronousDecompressRequests. + /// SoftwareFallback mode is exclusively defined for the codec with HW decompressor, enable its capability of "fallback to SW codec". 
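+    ///
+    /// A minimal sketch of the asynchronous protocol (mirroring its use in CompressedReadBufferFromFile::readBig;
+    /// the buffer names are placeholders):
+    ///     codec->setDecompressMode(CodecMode::Asynchronous);
+    ///     codec->decompress(src_block_1, src_size_1, dst_1);  /// request submitted, result not yet guaranteed
+    ///     codec->decompress(src_block_2, src_size_2, dst_2);
+    ///     codec->flushAsynchronousDecompressRequests();       /// busy-wait; dst_1/dst_2 are valid only after this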
+ enum class CodecMode + { + Synchronous, + Asynchronous, + SoftwareFallback + }; + + /// Get current decompression mode + CodecMode getDecompressMode() const{ return decompressMode; } + + /// if set mode to CodecMode::Asynchronous, must be followed with flushAsynchronousDecompressRequests + void setDecompressMode(CodecMode mode){ decompressMode = mode; } + + /// Flush result for previous asynchronous decompression requests. + /// This function must be called following several requests offload to HW. + /// To make sure asynchronous results have been flushed into target buffer completely. + /// Meanwhile, source and target buffer for decompression can not be overwritten until this function execute completely. + /// Otherwise it would conflict with HW offloading and cause exception. + /// For QPL deflate, it support the maximum number of requests equal to DeflateQplJobHWPool::jobPoolSize + virtual void flushAsynchronousDecompressRequests(){} + /// Number of bytes, that will be used to compress uncompressed_size bytes with current codec virtual UInt32 getCompressedReserveSize(UInt32 uncompressed_size) const { @@ -103,6 +131,7 @@ protected: private: ASTPtr full_codec_desc; + CodecMode decompressMode{CodecMode::Synchronous}; }; } diff --git a/src/Compression/LZ4_decompress_faster.cpp b/src/Compression/LZ4_decompress_faster.cpp index 32d21a37f18..82a86a80d8d 100644 --- a/src/Compression/LZ4_decompress_faster.cpp +++ b/src/Compression/LZ4_decompress_faster.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -15,10 +16,22 @@ #include #endif +#if USE_MULTITARGET_CODE +#include +#endif + #ifdef __aarch64__ #include #endif +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +static inline UInt16 LZ4_readLE16(const void* mem_ptr) +{ + const UInt8* p = reinterpret_cast(mem_ptr); + return static_cast(p[0]) + (p[1] << 8); +} +#endif + namespace LZ4 { @@ -403,10 +416,65 @@ inline void copyOverlap32(UInt8 * op, const UInt8 *& match, const size_t offset) match += shift4[offset]; } +DECLARE_AVX512VBMI_SPECIFIC_CODE( +inline void copyOverlap32Shuffle(UInt8 * op, const UInt8 *& match, const size_t offset) +{ + static constexpr UInt8 __attribute__((__aligned__(32))) masks[] = + { + 0, 1, 2, 2, 4, 2, 2, 4, 8, 5, 2, 10, 8, 6, 4, 2, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, /* offset=0, shift amount index. 
*/ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* offset=1 */ + 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, + 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, + 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, + 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, + 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, + 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, + 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 1, 2, 3, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 0, 1, 2, 3, 4, 5, 6, 7, 8, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 0, 1, 2, 3, 4, 5, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 0, 1, 2, 3, 4, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 0, 1, 2, 3, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 0, 1, 2, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0, + }; + + _mm256_storeu_si256(reinterpret_cast<__m256i *>(op), + _mm256_permutexvar_epi8( + _mm256_load_si256(reinterpret_cast(masks) + offset), + _mm256_loadu_si256(reinterpret_cast(match)))); + match += masks[offset]; +} +) /// 
DECLARE_AVX512VBMI_SPECIFIC_CODE + template <> void inline copy<32>(UInt8 * dst, const UInt8 * src) { copy32(dst, src); } template <> void inline wildCopy<32>(UInt8 * dst, const UInt8 * src, UInt8 * dst_end) { wildCopy32(dst, src, dst_end); } template <> void inline copyOverlap<32, false>(UInt8 * op, const UInt8 *& match, const size_t offset) { copyOverlap32(op, match, offset); } +template <> void inline copyOverlap<32, true>(UInt8 * op, const UInt8 *& match, const size_t offset) +{ +#if USE_MULTITARGET_CODE + TargetSpecific::AVX512VBMI::copyOverlap32Shuffle(op, match, offset); +#else + copyOverlap32(op, match, offset); +#endif +} /// See also https://stackoverflow.com/a/30669632 @@ -501,7 +569,11 @@ bool NO_INLINE decompressImpl( /// Get match offset. +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + size_t offset = LZ4_readLE16(ip); +#else size_t offset = unalignedLoad(ip); +#endif ip += 2; const UInt8 * match = op - offset; @@ -578,7 +650,13 @@ bool decompress( /// Don't run timer if the block is too small. if (dest_size >= 32768) { - size_t best_variant = statistics.select(); + size_t variant_size = 4; +#if USE_MULTITARGET_CODE && !defined(MEMORY_SANITIZER) + /// best_variant == 4 only valid when AVX512VBMI available + if (isArchSupported(DB::TargetArch::AVX512VBMI)) + variant_size = 5; +#endif + size_t best_variant = statistics.select(variant_size); /// Run the selected method and measure time. @@ -592,6 +670,8 @@ bool decompress( success = decompressImpl<8, true>(source, dest, source_size, dest_size); if (best_variant == 3) success = decompressImpl<32, false>(source, dest, source_size, dest_size); + if (best_variant == 4) + success = decompressImpl<32, true>(source, dest, source_size, dest_size); watch.stop(); diff --git a/src/Compression/LZ4_decompress_faster.h b/src/Compression/LZ4_decompress_faster.h index c596ea6364b..08744755916 100644 --- a/src/Compression/LZ4_decompress_faster.h +++ b/src/Compression/LZ4_decompress_faster.h @@ -88,7 +88,7 @@ struct PerformanceStatistics }; /// Number of different algorithms to select from. - static constexpr size_t NUM_ELEMENTS = 4; + static constexpr size_t NUM_ELEMENTS = 5; /// Cold invocations may be affected by additional memory latencies. Don't take first invocations into account. static constexpr double NUM_INVOCATIONS_TO_THROW_OFF = 2; @@ -106,17 +106,17 @@ struct PerformanceStatistics /// To select from different algorithms we use a kind of "bandits" algorithm. /// Sample random values from estimated normal distributions and choose the minimal. - size_t select() + size_t select(size_t max_method = NUM_ELEMENTS) { if (choose_method < 0) { - double samples[NUM_ELEMENTS]; - for (size_t i = 0; i < NUM_ELEMENTS; ++i) + double samples[max_method]; + for (size_t i = 0; i < max_method; ++i) samples[i] = choose_method == -1 ? 
data[i].sample(rng) : data[i].adjustedCount(); - return std::min_element(samples, samples + NUM_ELEMENTS) - samples; + return std::min_element(samples, samples + max_method) - samples; } else return choose_method; diff --git a/src/Coordination/CoordinationSettings.cpp b/src/Coordination/CoordinationSettings.cpp index 34d69967828..4733adcf67a 100644 --- a/src/Coordination/CoordinationSettings.cpp +++ b/src/Coordination/CoordinationSettings.cpp @@ -1,5 +1,4 @@ #include -#include #include #include #include diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 7c6ed227a06..8261f5d1e26 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -21,6 +21,7 @@ #include #include #include +#include namespace DB { @@ -111,7 +112,7 @@ KeeperServer::KeeperServer( configuration_and_settings_->snapshot_storage_path, coordination_settings, checkAndGetSuperdigest(configuration_and_settings_->super_digest), - config.getBool("keeper_server.digest_enabled", true))) + config.getBool("keeper_server.digest_enabled", false))) , state_manager(nuraft::cs_new( server_id, "keeper_server", configuration_and_settings_->log_storage_path, configuration_and_settings_->state_file_path, config, coordination_settings)) , log(&Poco::Logger::get("KeeperServer")) diff --git a/src/Core/BaseSettings.cpp b/src/Core/BaseSettings.cpp index d4b2d1551b6..f03a59c1342 100644 --- a/src/Core/BaseSettings.cpp +++ b/src/Core/BaseSettings.cpp @@ -11,7 +11,7 @@ namespace ErrorCodes extern const int UNKNOWN_SETTING; } -void BaseSettingsHelpers::writeString(const std::string_view & str, WriteBuffer & out) +void BaseSettingsHelpers::writeString(std::string_view str, WriteBuffer & out) { writeStringBinary(str, out); } @@ -39,13 +39,13 @@ BaseSettingsHelpers::Flags BaseSettingsHelpers::readFlags(ReadBuffer & in) } -void BaseSettingsHelpers::throwSettingNotFound(const std::string_view & name) +void BaseSettingsHelpers::throwSettingNotFound(std::string_view name) { throw Exception("Unknown setting " + String{name}, ErrorCodes::UNKNOWN_SETTING); } -void BaseSettingsHelpers::warningSettingNotFound(const std::string_view & name) +void BaseSettingsHelpers::warningSettingNotFound(std::string_view name) { static auto * log = &Poco::Logger::get("Settings"); LOG_WARNING(log, "Unknown setting {}, skipping", name); diff --git a/src/Core/BaseSettings.h b/src/Core/BaseSettings.h index a4ddc6571ed..7b56367769e 100644 --- a/src/Core/BaseSettings.h +++ b/src/Core/BaseSettings.h @@ -43,18 +43,25 @@ class BaseSettings : public TTraits::Data { using CustomSettingMap = std::unordered_map, SettingFieldCustom>>; public: + BaseSettings() = default; + BaseSettings(const BaseSettings &) = default; + BaseSettings(BaseSettings &&) noexcept = default; + BaseSettings & operator=(const BaseSettings &) = default; + BaseSettings & operator=(BaseSettings &&) noexcept = default; + virtual ~BaseSettings() = default; + using Traits = TTraits; - void set(const std::string_view & name, const Field & value); - Field get(const std::string_view & name) const; + virtual void set(std::string_view name, const Field & value); + Field get(std::string_view name) const; - void setString(const std::string_view & name, const String & value); - String getString(const std::string_view & name) const; + void setString(std::string_view name, const String & value); + String getString(std::string_view name) const; - bool tryGet(const std::string_view & name, Field & value) const; - bool tryGetString(const std::string_view & name, String & 
value) const; + bool tryGet(std::string_view name, Field & value) const; + bool tryGetString(std::string_view name, String & value) const; - bool isChanged(const std::string_view & name) const; + bool isChanged(std::string_view name) const; SettingsChanges changes() const; void applyChange(const SettingChange & change); void applyChanges(const SettingsChanges & changes); @@ -62,23 +69,25 @@ public: /// Resets all the settings to their default values. void resetToDefault(); + /// Resets specified setting to its default value. + void resetToDefault(std::string_view name); - bool has(const std::string_view & name) const { return hasBuiltin(name) || hasCustom(name); } - static bool hasBuiltin(const std::string_view & name); - bool hasCustom(const std::string_view & name) const; + bool has(std::string_view name) const { return hasBuiltin(name) || hasCustom(name); } + static bool hasBuiltin(std::string_view name); + bool hasCustom(std::string_view name) const; - const char * getTypeName(const std::string_view & name) const; - const char * getDescription(const std::string_view & name) const; + const char * getTypeName(std::string_view name) const; + const char * getDescription(std::string_view name) const; /// Checks if it's possible to assign a field to a specified value and throws an exception if not. /// This function doesn't change the fields, it performs check only. - static void checkCanSet(const std::string_view & name, const Field & value); - static void checkCanSetString(const std::string_view & name, const String & str); + static void checkCanSet(std::string_view name, const Field & value); + static void checkCanSetString(std::string_view name, const String & str); /// Conversions without changing the fields. - static Field castValueUtil(const std::string_view & name, const Field & value); - static String valueToStringUtil(const std::string_view & name, const Field & value); - static Field stringToValueUtil(const std::string_view & name, const String & str); + static Field castValueUtil(std::string_view name, const Field & value); + static String valueToStringUtil(std::string_view name, const Field & value); + static Field stringToValueUtil(std::string_view name, const String & str); void write(WriteBuffer & out, SettingsWriteFormat format = SettingsWriteFormat::DEFAULT) const; void read(ReadBuffer & in, SettingsWriteFormat format = SettingsWriteFormat::DEFAULT); @@ -164,19 +173,19 @@ public: Iterator end() const { return allChanged().end(); } private: - SettingFieldCustom & getCustomSetting(const std::string_view & name); - const SettingFieldCustom & getCustomSetting(const std::string_view & name) const; - const SettingFieldCustom * tryGetCustomSetting(const std::string_view & name) const; + SettingFieldCustom & getCustomSetting(std::string_view name); + const SettingFieldCustom & getCustomSetting(std::string_view name) const; + const SettingFieldCustom * tryGetCustomSetting(std::string_view name) const; std::conditional_t custom_settings_map; }; struct BaseSettingsHelpers { - [[noreturn]] static void throwSettingNotFound(const std::string_view & name); - static void warningSettingNotFound(const std::string_view & name); + [[noreturn]] static void throwSettingNotFound(std::string_view name); + static void warningSettingNotFound(std::string_view name); - static void writeString(const std::string_view & str, WriteBuffer & out); + static void writeString(std::string_view str, WriteBuffer & out); static String readString(ReadBuffer & in); enum Flags : UInt64 @@ -190,7 +199,7 @@ struct 
BaseSettingsHelpers }; template -void BaseSettings::set(const std::string_view & name, const Field & value) +void BaseSettings::set(std::string_view name, const Field & value) { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -200,7 +209,7 @@ void BaseSettings::set(const std::string_view & name, const Field & val } template -Field BaseSettings::get(const std::string_view & name) const +Field BaseSettings::get(std::string_view name) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -210,7 +219,7 @@ Field BaseSettings::get(const std::string_view & name) const } template -void BaseSettings::setString(const std::string_view & name, const String & value) +void BaseSettings::setString(std::string_view name, const String & value) { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -220,7 +229,7 @@ void BaseSettings::setString(const std::string_view & name, const Strin } template -String BaseSettings::getString(const std::string_view & name) const +String BaseSettings::getString(std::string_view name) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -230,7 +239,7 @@ String BaseSettings::getString(const std::string_view & name) const } template -bool BaseSettings::tryGet(const std::string_view & name, Field & value) const +bool BaseSettings::tryGet(std::string_view name, Field & value) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -247,7 +256,7 @@ bool BaseSettings::tryGet(const std::string_view & name, Field & value) } template -bool BaseSettings::tryGetString(const std::string_view & name, String & value) const +bool BaseSettings::tryGetString(std::string_view name, String & value) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -264,7 +273,7 @@ bool BaseSettings::tryGetString(const std::string_view & name, String & } template -bool BaseSettings::isChanged(const std::string_view & name) const +bool BaseSettings::isChanged(std::string_view name) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -316,20 +325,28 @@ void BaseSettings::resetToDefault() } template -bool BaseSettings::hasBuiltin(const std::string_view & name) +void BaseSettings::resetToDefault(std::string_view name) +{ + const auto & accessor = Traits::Accessor::instance(); + if (size_t index = accessor.find(name); index != static_cast(-1)) + accessor.resetValueToDefault(*this, index); +} + +template +bool BaseSettings::hasBuiltin(std::string_view name) { const auto & accessor = Traits::Accessor::instance(); return (accessor.find(name) != static_cast(-1)); } template -bool BaseSettings::hasCustom(const std::string_view & name) const +bool BaseSettings::hasCustom(std::string_view name) const { return tryGetCustomSetting(name); } template -const char * BaseSettings::getTypeName(const std::string_view & name) const +const char * BaseSettings::getTypeName(std::string_view name) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -341,7 +358,7 @@ const char * BaseSettings::getTypeName(const std::string_view & name) 
c } template -const char * BaseSettings::getDescription(const std::string_view & name) const +const char * BaseSettings::getDescription(std::string_view name) const { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -353,19 +370,19 @@ const char * BaseSettings::getDescription(const std::string_view & name } template -void BaseSettings::checkCanSet(const std::string_view & name, const Field & value) +void BaseSettings::checkCanSet(std::string_view name, const Field & value) { castValueUtil(name, value); } template -void BaseSettings::checkCanSetString(const std::string_view & name, const String & str) +void BaseSettings::checkCanSetString(std::string_view name, const String & str) { stringToValueUtil(name, str); } template -Field BaseSettings::castValueUtil(const std::string_view & name, const Field & value) +Field BaseSettings::castValueUtil(std::string_view name, const Field & value) { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -377,7 +394,7 @@ Field BaseSettings::castValueUtil(const std::string_view & name, const } template -String BaseSettings::valueToStringUtil(const std::string_view & name, const Field & value) +String BaseSettings::valueToStringUtil(std::string_view name, const Field & value) { const auto & accessor = Traits::Accessor::instance(); if (size_t index = accessor.find(name); index != static_cast(-1)) @@ -389,7 +406,7 @@ String BaseSettings::valueToStringUtil(const std::string_view & name, c } template -Field BaseSettings::stringToValueUtil(const std::string_view & name, const String & str) +Field BaseSettings::stringToValueUtil(std::string_view name, const String & str) { try { @@ -521,7 +538,7 @@ bool operator!=(const BaseSettings & left, const BaseSettings } template -SettingFieldCustom & BaseSettings::getCustomSetting(const std::string_view & name) +SettingFieldCustom & BaseSettings::getCustomSetting(std::string_view name) { if constexpr (Traits::allow_custom_settings) { @@ -537,7 +554,7 @@ SettingFieldCustom & BaseSettings::getCustomSetting(const std::string_v } template -const SettingFieldCustom & BaseSettings::getCustomSetting(const std::string_view & name) const +const SettingFieldCustom & BaseSettings::getCustomSetting(std::string_view name) const { if constexpr (Traits::allow_custom_settings) { @@ -549,7 +566,7 @@ const SettingFieldCustom & BaseSettings::getCustomSetting(const std::st } template -const SettingFieldCustom * BaseSettings::tryGetCustomSetting(const std::string_view & name) const +const SettingFieldCustom * BaseSettings::tryGetCustomSetting(std::string_view name) const { if constexpr (Traits::allow_custom_settings) { @@ -780,7 +797,7 @@ bool BaseSettings::SettingFieldRef::isObsolete() const public: \ static const Accessor & instance(); \ size_t size() const { return field_infos.size(); } \ - size_t find(const std::string_view & name) const; \ + size_t find(std::string_view name) const; \ const String & getName(size_t index) const { return field_infos[index].name; } \ const char * getTypeName(size_t index) const { return field_infos[index].type; } \ const char * getDescription(size_t index) const { return field_infos[index].description; } \ @@ -851,7 +868,7 @@ bool BaseSettings::SettingFieldRef::isObsolete() const \ SETTINGS_TRAITS_NAME::Accessor::Accessor() {} \ \ - size_t SETTINGS_TRAITS_NAME::Accessor::find(const std::string_view & name) const \ + size_t 
SETTINGS_TRAITS_NAME::Accessor::find(std::string_view name) const \ { \ auto it = name_to_index_map.find(name); \ if (it != name_to_index_map.end()) \ diff --git a/src/Core/Field.cpp b/src/Core/Field.cpp index 3a4b66e6266..acdfca7a7b2 100644 --- a/src/Core/Field.cpp +++ b/src/Core/Field.cpp @@ -286,7 +286,7 @@ String Field::dump() const return applyVisitor(FieldVisitorDump(), *this); } -Field Field::restoreFromDump(const std::string_view & dump_) +Field Field::restoreFromDump(std::string_view dump_) { auto show_error = [&dump_] { diff --git a/src/Core/Field.h b/src/Core/Field.h index 08274876914..f60b7e4902e 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -346,7 +346,7 @@ public: } /// Create a string inplace. - Field(const std::string_view & str) { create(str.data(), str.size()); } /// NOLINT + Field(std::string_view str) { create(str.data(), str.size()); } /// NOLINT Field(const String & str) { create(std::string_view{str}); } /// NOLINT Field(String && str) { create(std::move(str)); } /// NOLINT Field(const char * str) { create(std::string_view{str}); } /// NOLINT @@ -403,7 +403,7 @@ public: return *this; } - Field & operator= (const std::string_view & str); + Field & operator= (std::string_view str); Field & operator= (const String & str) { return *this = std::string_view{str}; } Field & operator= (String && str); Field & operator= (const char * str) { return *this = std::string_view{str}; } @@ -631,7 +631,7 @@ public: } String dump() const; - static Field restoreFromDump(const std::string_view & dump_); + static Field restoreFromDump(std::string_view dump_); private: std::aligned_union_t #include #include #include @@ -145,6 +146,53 @@ std::vector Settings::getAllRegisteredNames() const return all_settings; } +void Settings::set(std::string_view name, const Field & value) +{ + BaseSettings::set(name, value); + + if (name == "compatibility") + applyCompatibilitySetting(); + /// If we change setting that was changed by compatibility setting before + /// we should remove it from settings_changed_by_compatibility_setting, + /// otherwise the next time we will change compatibility setting + /// this setting will be changed too (and we don't want it). + else if (settings_changed_by_compatibility_setting.contains(name)) + settings_changed_by_compatibility_setting.erase(name); +} + +void Settings::applyCompatibilitySetting() +{ + /// First, revert all changes applied by previous compatibility setting + for (const auto & setting_name : settings_changed_by_compatibility_setting) + resetToDefault(setting_name); + + settings_changed_by_compatibility_setting.clear(); + String compatibility = getString("compatibility"); + /// If setting value is empty, we don't need to change settings + if (compatibility.empty()) + return; + + ClickHouseVersion version(compatibility); + /// Iterate through ClickHouse version in descending order and apply reversed + /// changes for each version that is higher that version from compatibility setting + for (auto it = settings_changes_history.rbegin(); it != settings_changes_history.rend(); ++it) + { + if (version >= it->first) + break; + + /// Apply reversed changes from this version. 
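+        /// Illustrative example (actual entries live in SettingsChangesHistory.h): given a history entry
+        /// {"22.7", {{"some_setting", previous_value, new_value}}} and compatibility = "22.6", "some_setting"
+        /// is reset back to previous_value here, unless the user has changed it explicitly.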
+ for (const auto & change : it->second) + { + /// If this setting was changed manually, we don't change it + if (isChanged(change.name) && !settings_changed_by_compatibility_setting.contains(change.name)) + continue; + + BaseSettings::set(change.name, change.previous_value); + settings_changed_by_compatibility_setting.insert(change.name); + } + } +} + IMPLEMENT_SETTINGS_TRAITS(FormatFactorySettingsTraits, FORMAT_FACTORY_SETTINGS) } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index dac9b888d44..17e4d27bbcd 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -35,6 +35,10 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) * * `flags` can be either 0 or IMPORTANT. * A setting is "IMPORTANT" if it affects the results of queries and can't be ignored by older versions. + * + * When adding new settings that control some backward incompatible changes or when changing some settings values, + * consider adding them to settings changes history in SettingsChangesHistory.h for special `compatibility` setting + * to work correctly. */ #define COMMON_SETTINGS(M) \ @@ -96,6 +100,8 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Bool, replace_running_query, false, "Whether the running request should be canceled with the same id as the new one.", 0) \ M(UInt64, max_replicated_fetches_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated fetches. Zero means unlimited. Only has meaning at server startup.", 0) \ M(UInt64, max_replicated_sends_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for replicated sends. Zero means unlimited. Only has meaning at server startup.", 0) \ + M(UInt64, max_remote_read_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for read. Zero means unlimited. Only has meaning at server startup.", 0) \ + M(UInt64, max_remote_write_network_bandwidth_for_server, 0, "The maximum speed of data exchange over the network in bytes per second for write. Zero means unlimited. Only has meaning at server startup.", 0) \ M(Bool, stream_like_engine_allow_direct_select, false, "Allow direct SELECT query for Kafka, RabbitMQ, FileLog, Redis Streams and NATS engines. In case there are attached materialized views, SELECT query is not allowed even if this setting is enabled.", 0) \ M(String, stream_like_engine_insert_queue, "", "When stream like engine reads from multiple queues, user will need to select one queue to insert into when writing. Used by Redis Streams and NATS.", 0) \ \ @@ -130,6 +136,8 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(UInt64, aggregation_memory_efficient_merge_threads, 0, "Number of threads to use for merge intermediate aggregation results in memory efficient mode. When bigger, then more memory is consumed. 0 means - same as 'max_threads'.", 0) \ M(Bool, enable_positional_arguments, true, "Enable positional arguments in ORDER BY, GROUP BY and LIMIT BY", 0) \ \ + M(Bool, group_by_use_nulls, false, "Treat columns mentioned in ROLLUP, CUBE or GROUPING SETS as Nullable", 0) \ + \ M(UInt64, max_parallel_replicas, 1, "The maximum number of replicas of each shard used when the query is executed. For consistency (to get different parts of the same partition), this option only works for the specified sampling key. 
The lag of the replicas is not controlled.", 0) \ M(UInt64, parallel_replicas_count, 0, "", 0) \ M(UInt64, parallel_replica_offset, 0, "", 0) \ @@ -597,6 +605,11 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Bool, allow_deprecated_database_ordinary, false, "Allow to create databases with deprecated Ordinary engine", 0) \ M(Bool, allow_deprecated_syntax_for_merge_tree, false, "Allow to create *MergeTree tables with deprecated engine definition syntax", 0) \ \ + M(String, compatibility, "", "Changes other settings according to provided ClickHouse version. If we know that we changed some behaviour in ClickHouse by changing some settings in some version, this compatibility setting will control these settings", 0) \ + \ + M(Map, additional_table_filters, "", "Additional filter expression which would be applied after reading from specified table. Syntax: {'table1': 'expression', 'database.table2': 'expression'}", 0) \ + M(String, additional_result_filter, "", "Additional filter expression which would be applied to query result", 0) \ + \ /** Experimental functions */ \ M(Bool, allow_experimental_funnel_functions, false, "Enable experimental functions for funnel analysis.", 0) \ M(Bool, allow_experimental_nlp_functions, false, "Enable experimental functions for natural language processing.", 0) \ @@ -650,7 +663,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) #define FORMAT_FACTORY_SETTINGS(M) \ M(Char, format_csv_delimiter, ',', "The character to be considered as a delimiter in CSV data. If setting with a string, a string has to have a length of 1.", 0) \ - M(Bool, format_csv_allow_single_quotes, true, "If it is set to true, allow strings in single quotes.", 0) \ + M(Bool, format_csv_allow_single_quotes, false, "If it is set to true, allow strings in single quotes.", 0) \ M(Bool, format_csv_allow_double_quotes, true, "If it is set to true, allow strings in double quotes.", 0) \ M(Bool, output_format_csv_crlf_end_of_line, false, "If it is set true, end of line in CSV format will be \\r\\n instead of \\n.", 0) \ M(Bool, input_format_csv_enum_as_number, false, "Treat inserted enum values in CSV formats as enum indices", 0) \ @@ -756,7 +769,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Bool, output_format_pretty_row_numbers, false, "Add row numbers before each row for pretty output format", 0) \ M(Bool, insert_distributed_one_random_shard, false, "If setting is enabled, inserting into distributed table will choose a random shard to write when there is no sharding key", 0) \ \ - M(UInt64, cross_to_inner_join_rewrite, 1, "Use inner join instead of comma/cross join if possible. Possible values: 0 - no rewrite, 1 - apply if possible, 2 - force rewrite all cross joins", 0) \ + M(UInt64, cross_to_inner_join_rewrite, 1, "Use inner join instead of comma/cross join if there're joining expressions in the WHERE section. 
Values: 0 - no rewrite, 1 - apply if possible for comma/cross, 2 - force rewrite all comma joins, cross - if possible", 0) \ \ M(Bool, output_format_arrow_low_cardinality_as_dictionary, false, "Enable output LowCardinality type as Dictionary Arrow type", 0) \ M(Bool, output_format_arrow_string_as_string, false, "Use Arrow String type instead of Binary for String columns", 0) \ @@ -823,6 +836,13 @@ struct Settings : public BaseSettings, public IHints<2, Settings void addProgramOption(boost::program_options::options_description & options, const SettingFieldRef & field); void addProgramOptionAsMultitoken(boost::program_options::options_description & options, const SettingFieldRef & field); + + void set(std::string_view name, const Field & value) override; + +private: + void applyCompatibilitySetting(); + + std::unordered_set settings_changed_by_compatibility_setting; }; /* diff --git a/src/Core/SettingsChangesHistory.h b/src/Core/SettingsChangesHistory.h new file mode 100644 index 00000000000..ba60fb99308 --- /dev/null +++ b/src/Core/SettingsChangesHistory.h @@ -0,0 +1,114 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + +class ClickHouseVersion +{ +public: + ClickHouseVersion(const String & version) + { + Strings split; + boost::split(split, version, [](char c){ return c == '.'; }); + components.reserve(split.size()); + if (split.empty()) + throw Exception{ErrorCodes::BAD_ARGUMENTS, "Cannot parse ClickHouse version here: {}", version}; + + for (const auto & split_element : split) + { + size_t component; + if (!tryParse(component, split_element)) + throw Exception{ErrorCodes::BAD_ARGUMENTS, "Cannot parse ClickHouse version here: {}", version}; + components.push_back(component); + } + } + + ClickHouseVersion(const char * version) : ClickHouseVersion(String(version)) {} + + String toString() const + { + String version = std::to_string(components[0]); + for (size_t i = 1; i < components.size(); ++i) + version += "." + std::to_string(components[i]); + + return version; + } + + bool operator<(const ClickHouseVersion & other) const + { + return components < other.components; + } + + bool operator>=(const ClickHouseVersion & other) const + { + return components >= other.components; + } + +private: + std::vector components; +}; + +namespace SettingsChangesHistory +{ + struct SettingChange + { + String name; + Field previous_value; + Field new_value; + String reason; + }; + + using SettingsChanges = std::vector; +} + +/// History of settings changes that controls some backward incompatible changes +/// across all ClickHouse versions. It maps ClickHouse version to settings changes that were done +/// in this version. 
Settings changes is a vector of structs {setting_name, previous_value, new_value} +/// It's used to implement `compatibility` setting (see https://github.com/ClickHouse/ClickHouse/issues/35972) +static std::map settings_changes_history = +{ + {"22.7", {{"cross_to_inner_join_rewrite", 1, 2, "Force rewrite comma join to inner"}, + {"enable_positional_arguments", false, true, "Enable positional arguments feature by default"}, + {"format_csv_allow_single_quotes", true, false, "Most tools don't treat single quote in CSV specially, don't do it by default too"}}}, + {"22.6", {{"output_format_json_named_tuples_as_objects", false, true, "Allow to serialize named tuples as JSON objects in JSON formats by default"}}}, + {"22.5", {{"memory_overcommit_ratio_denominator", 0, 1073741824, "Enable memory overcommit feature by default"}, + {"memory_overcommit_ratio_denominator_for_user", 0, 1073741824, "Enable memory overcommit feature by default"}}}, + {"22.4", {{"allow_settings_after_format_in_insert", true, false, "Do not allow SETTINGS after FORMAT for INSERT queries because ClickHouse interprets SETTINGS as some values, which is misleading"}}}, + {"22.3", {{"cast_ipv4_ipv6_default_on_conversion_error", true, false, "Make functions cast(value, 'IPv4') and cast(value, 'IPv6') behave the same as toIPv4 and toIPv6 functions"}}}, + {"21.12", {{"stream_like_engine_allow_direct_select", true, false, "Do not allow direct select for Kafka/RabbitMQ/FileLog by default"}}}, + {"21.9", {{"output_format_decimal_trailing_zeros", true, false, "Do not output trailing zeros in text representation of Decimal types by default for better looking output"}, + {"use_hedged_requests", false, true, "Enable Hedged Requests feature by default"}}}, + {"21.7", {{"legacy_column_name_of_tuple_literal", true, false, "Add this setting only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher"}}}, + {"21.5", {{"async_socket_for_remote", false, true, "Fix all problems and turn on asynchronous reads from socket for remote queries by default again"}}}, + {"21.3", {{"async_socket_for_remote", true, false, "Turn off asynchronous reads from socket for remote queries because of some problems"}, + {"optimize_normalize_count_variants", false, true, "Rewrite aggregate functions that are semantically equal to count() as count() by default"}, + {"normalize_function_names", false, true, "Normalize function names to their canonical names, this was needed for projection query routing"}}}, + {"21.2", {{"enable_global_with_statement", false, true, "Propagate WITH statements to UNION queries and all subqueries by default"}}}, + {"21.1", {{"insert_quorum_parallel", false, true, "Use parallel quorum inserts by default. 
It is significantly more convenient to use than sequential quorum inserts"}, + {"input_format_null_as_default", false, true, "Allow to insert NULL as default for input formats by default"}, + {"optimize_on_insert", false, true, "Enable data optimization on INSERT by default for better user experience"}, + {"use_compact_format_in_distributed_parts_names", false, true, "Use compact format for async INSERT into Distributed tables by default"}}}, + {"20.10", {{"format_regexp_escaping_rule", "Escaped", "Raw", "Use Raw as default escaping rule for Regexp format to make the behaviour more like what users expect"}}}, + {"20.7", {{"show_table_uuid_in_table_create_query_if_not_nil", true, false, "Stop showing UID of the table in its CREATE query for Engine=Atomic"}}}, + {"20.5", {{"input_format_with_names_use_header", false, true, "Enable using header with names for formats with WithNames/WithNamesAndTypes suffixes"}, + {"allow_suspicious_codecs", true, false, "Don't allow to specify meaningless compression codecs"}}}, + {"20.4", {{"validate_polygons", false, true, "Throw exception if polygon is invalid in function pointInPolygon by default instead of returning possibly wrong results"}}}, + {"19.18", {{"enable_scalar_subquery_optimization", false, true, "Prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once"}}}, + {"19.14", {{"any_join_distinct_right_table_keys", true, false, "Disable ANY RIGHT and ANY FULL JOINs by default to avoid inconsistency"}}}, + {"19.12", {{"input_format_defaults_for_omitted_fields", false, true, "Enable calculation of complex default expressions for omitted fields for some input formats, because it should be the expected behaviour"}}}, + {"19.5", {{"max_partitions_per_insert_block", 0, 100, "Add a limit for the number of partitions in one block"}}}, + {"18.12.17", {{"enable_optimize_predicate_expression", 0, 1, "Optimize predicates to subqueries by default"}}}, +}; + +} diff --git a/src/Core/SettingsFields.cpp b/src/Core/SettingsFields.cpp index 51ffdd10190..d77a510d7f9 100644 --- a/src/Core/SettingsFields.cpp +++ b/src/Core/SettingsFields.cpp @@ -4,6 +4,8 @@ #include #include #include +#include +#include #include #include #include @@ -51,6 +53,37 @@ namespace else return applyVisitor(FieldVisitorConvertToNumber(), f); } + +#ifndef KEEPER_STANDALONE_BUILD + Map stringToMap(const String & str) + { + /// Allow empty string as an empty map + if (str.empty()) + return {}; + + auto type_string = std::make_shared(); + DataTypeMap type_map(type_string, type_string); + auto serialization = type_map.getSerialization(ISerialization::Kind::DEFAULT); + auto column = type_map.createColumn(); + + ReadBufferFromString buf(str); + serialization->deserializeTextEscaped(*column, buf, {}); + return (*column)[0].safeGet(); + } + + Map fieldToMap(const Field & f) + { + if (f.getType() == Field::Types::String) + { + /// Allow parsing a Map from a string field, for convenience. 
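+ /// For example, a value like {'table1': 'expression', 'database.table2': 'expression'} (the string form used by the `additional_table_filters` setting) is handed to stringToMap just below and parsed through DataTypeMap's escaped text format instead of requiring a Field that is already a Map.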
+ const auto & str = f.get(); + return stringToMap(str); + } + + return f.safeGet(); + } +#endif + } template @@ -291,6 +324,48 @@ void SettingFieldString::readBinary(ReadBuffer & in) *this = std::move(str); } +#ifndef KEEPER_STANDALONE_BUILD + +SettingFieldMap::SettingFieldMap(const Field & f) : value(fieldToMap(f)) {} + +String SettingFieldMap::toString() const +{ + auto type_string = std::make_shared(); + DataTypeMap type_map(type_string, type_string); + auto serialization = type_map.getSerialization(ISerialization::Kind::DEFAULT); + auto column = type_map.createColumn(); + column->insert(value); + + WriteBufferFromOwnString out; + serialization->serializeTextEscaped(*column, 0, out, {}); + return out.str(); +} + + +SettingFieldMap & SettingFieldMap::operator =(const Field & f) +{ + *this = fieldToMap(f); + return *this; +} + +void SettingFieldMap::parseFromString(const String & str) +{ + *this = stringToMap(str); +} + +void SettingFieldMap::writeBinary(WriteBuffer & out) const +{ + DB::writeBinary(value, out); +} + +void SettingFieldMap::readBinary(ReadBuffer & in) +{ + Map map; + DB::readBinary(map, in); + *this = map; +} + +#endif namespace { @@ -350,7 +425,7 @@ void SettingFieldURI::readBinary(ReadBuffer & in) } -void SettingFieldEnumHelpers::writeBinary(const std::string_view & str, WriteBuffer & out) +void SettingFieldEnumHelpers::writeBinary(std::string_view str, WriteBuffer & out) { writeStringBinary(str, out); } diff --git a/src/Core/SettingsFields.h b/src/Core/SettingsFields.h index dcc99f4a2c0..20f2b34084e 100644 --- a/src/Core/SettingsFields.h +++ b/src/Core/SettingsFields.h @@ -146,13 +146,13 @@ struct SettingFieldString String value; bool changed = false; - explicit SettingFieldString(const std::string_view & str = {}) : value(str) {} + explicit SettingFieldString(std::string_view str = {}) : value(str) {} explicit SettingFieldString(const String & str) : SettingFieldString(std::string_view{str}) {} explicit SettingFieldString(String && str) : value(std::move(str)) {} explicit SettingFieldString(const char * str) : SettingFieldString(std::string_view{str}) {} explicit SettingFieldString(const Field & f) : SettingFieldString(f.safeGet()) {} - SettingFieldString & operator =(const std::string_view & str) { value = str; changed = true; return *this; } + SettingFieldString & operator =(std::string_view str) { value = str; changed = true; return *this; } SettingFieldString & operator =(const String & str) { *this = std::string_view{str}; return *this; } SettingFieldString & operator =(String && str) { value = std::move(str); changed = true; return *this; } SettingFieldString & operator =(const char * str) { *this = std::string_view{str}; return *this; } @@ -168,6 +168,32 @@ struct SettingFieldString void readBinary(ReadBuffer & in); }; +#ifndef KEEPER_STANDALONE_BUILD + +struct SettingFieldMap +{ +public: + Map value; + bool changed = false; + + explicit SettingFieldMap(const Map & map = {}) : value(map) {} + explicit SettingFieldMap(Map && map) : value(std::move(map)) {} + explicit SettingFieldMap(const Field & f); + + SettingFieldMap & operator =(const Map & map) { value = map; changed = true; return *this; } + SettingFieldMap & operator =(const Field & f); + + operator const Map &() const { return value; } /// NOLINT + explicit operator Field() const { return value; } + + String toString() const; + void parseFromString(const String & str); + + void writeBinary(WriteBuffer & out) const; + void readBinary(ReadBuffer & in); +}; + +#endif struct SettingFieldChar { @@ -256,7 
+282,7 @@ struct SettingFieldEnum struct SettingFieldEnumHelpers { - static void writeBinary(const std::string_view & str, WriteBuffer & out); + static void writeBinary(std::string_view str, WriteBuffer & out); static String readBinary(ReadBuffer & in); }; @@ -286,7 +312,7 @@ void SettingFieldEnum::readBinary(ReadBuffer & in) { \ using EnumType = ENUM_TYPE; \ static const String & toString(EnumType value); \ - static EnumType fromString(const std::string_view & str); \ + static EnumType fromString(std::string_view str); \ }; \ \ using SettingField##NEW_NAME = SettingFieldEnum; @@ -310,7 +336,7 @@ void SettingFieldEnum::readBinary(ReadBuffer & in) ERROR_CODE_FOR_UNEXPECTED_NAME); \ } \ \ - typename SettingField##NEW_NAME::EnumType SettingField##NEW_NAME##Traits::fromString(const std::string_view & str) \ + typename SettingField##NEW_NAME::EnumType SettingField##NEW_NAME##Traits::fromString(std::string_view str) \ { \ static const std::unordered_map map = [] { \ std::unordered_map res; \ @@ -430,7 +456,7 @@ void SettingFieldMultiEnum::readBinary(ReadBuffer & in) using EnumType = ENUM_TYPE; \ static size_t getEnumSize(); \ static const String & toString(EnumType value); \ - static EnumType fromString(const std::string_view & str); \ + static EnumType fromString(std::string_view str); \ }; \ \ using SettingField##NEW_NAME = SettingFieldMultiEnum; diff --git a/src/Daemon/BaseDaemon.cpp b/src/Daemon/BaseDaemon.cpp index 23835df87ea..1d6acc7eac3 100644 --- a/src/Daemon/BaseDaemon.cpp +++ b/src/Daemon/BaseDaemon.cpp @@ -298,7 +298,7 @@ private: /// It will allow client to see failure messages directly. if (thread_ptr) { - query_id = thread_ptr->getQueryId().toString(); + query_id = std::string(thread_ptr->getQueryId()); if (auto thread_group = thread_ptr->getThreadGroup()) { diff --git a/src/DataTypes/DataTypeNullable.cpp b/src/DataTypes/DataTypeNullable.cpp index b354b1278be..a14fb785b96 100644 --- a/src/DataTypes/DataTypeNullable.cpp +++ b/src/DataTypes/DataTypeNullable.cpp @@ -85,6 +85,13 @@ DataTypePtr makeNullable(const DataTypePtr & type) return std::make_shared(type); } +DataTypePtr makeNullableSafe(const DataTypePtr & type) +{ + if (type->canBeInsideNullable()) + return makeNullable(type); + return type; +} + DataTypePtr removeNullable(const DataTypePtr & type) { if (type->isNullable()) diff --git a/src/DataTypes/DataTypeNullable.h b/src/DataTypes/DataTypeNullable.h index c87e4f77008..379119b364c 100644 --- a/src/DataTypes/DataTypeNullable.h +++ b/src/DataTypes/DataTypeNullable.h @@ -51,6 +51,7 @@ private: DataTypePtr makeNullable(const DataTypePtr & type); +DataTypePtr makeNullableSafe(const DataTypePtr & type); DataTypePtr removeNullable(const DataTypePtr & type); } diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index 558b13927c1..1ef86a8c12f 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -214,6 +214,19 @@ size_t DataTypeTuple::getPositionByName(const String & name) const throw Exception("Tuple doesn't have element with name '" + name + "'", ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); } +std::optional DataTypeTuple::tryGetPositionByName(const String & name) const +{ + size_t size = elems.size(); + for (size_t i = 0; i < size; ++i) + { + if (names[i] == name) + { + return std::optional(i); + } + } + return std::nullopt; +} + String DataTypeTuple::getNameByPosition(size_t i) const { if (i == 0 || i > names.size()) diff --git a/src/DataTypes/DataTypeTuple.h b/src/DataTypes/DataTypeTuple.h index 009a2284a0a..eed04631528 
100644 --- a/src/DataTypes/DataTypeTuple.h +++ b/src/DataTypes/DataTypeTuple.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB @@ -60,6 +61,7 @@ public: const Strings & getElementNames() const { return names; } size_t getPositionByName(const String & name) const; + std::optional tryGetPositionByName(const String & name) const; String getNameByPosition(size_t i) const; bool haveExplicitNames() const { return have_explicit_names; } diff --git a/src/DataTypes/DataTypesDecimal.h b/src/DataTypes/DataTypesDecimal.h index 00b5e2b9e37..7bcc6593435 100644 --- a/src/DataTypes/DataTypesDecimal.h +++ b/src/DataTypes/DataTypesDecimal.h @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB @@ -13,6 +14,7 @@ namespace DB namespace ErrorCodes { extern const int DECIMAL_OVERFLOW; + extern const int LOGICAL_ERROR; } /// Implements Decimal(P, S), where P is precision, S is scale. @@ -58,7 +60,7 @@ inline const DataTypeDecimal * checkDecimal(const IDataType & data_type) return typeid_cast *>(&data_type); } -inline UInt32 getDecimalScale(const IDataType & data_type, UInt32 default_value = std::numeric_limits::max()) +inline UInt32 getDecimalScale(const IDataType & data_type) { if (const auto * decimal_type = checkDecimal(data_type)) return decimal_type->getScale(); @@ -68,7 +70,10 @@ inline UInt32 getDecimalScale(const IDataType & data_type, UInt32 default_value return decimal_type->getScale(); if (const auto * decimal_type = checkDecimal(data_type)) return decimal_type->getScale(); - return default_value; + if (const auto * date_time_type = typeid_cast(&data_type)) + return date_time_type->getScale(); + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get decimal scale from type {}", data_type.getName()); } inline UInt32 getDecimalPrecision(const IDataType & data_type) @@ -81,7 +86,10 @@ inline UInt32 getDecimalPrecision(const IDataType & data_type) return decimal_type->getPrecision(); if (const auto * decimal_type = checkDecimal(data_type)) return decimal_type->getPrecision(); - return 0; + if (const auto * date_time_type = typeid_cast(&data_type)) + return date_time_type->getPrecision(); + + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get decimal precision from type {}", data_type.getName()); } template diff --git a/src/DataTypes/IDataType.h b/src/DataTypes/IDataType.h index fce8906abe5..a26c703cd8a 100644 --- a/src/DataTypes/IDataType.h +++ b/src/DataTypes/IDataType.h @@ -532,6 +532,12 @@ inline bool isBool(const DataTypePtr & data_type) return data_type->getName() == "Bool"; } +inline bool isAggregateFunction(const DataTypePtr & data_type) +{ + WhichDataType which(data_type); + return which.isAggregateFunction(); +} + template constexpr bool IsDataTypeDecimal = false; template constexpr bool IsDataTypeNumber = false; template constexpr bool IsDataTypeDateOrDateTime = false; diff --git a/src/DataTypes/NestedUtils.cpp b/src/DataTypes/NestedUtils.cpp index 5dae2b7b413..b28b70f676a 100644 --- a/src/DataTypes/NestedUtils.cpp +++ b/src/DataTypes/NestedUtils.cpp @@ -54,7 +54,7 @@ std::pair splitName(const std::string & name, bool rev return {name.substr(0, idx), name.substr(idx + 1)}; } -std::pair splitName(const std::string_view & name, bool reverse) +std::pair splitName(std::string_view name, bool reverse) { auto idx = (reverse ? 
name.find_last_of('.') : name.find_first_of('.')); if (idx == std::string::npos || idx == 0 || idx + 1 == name.size()) diff --git a/src/DataTypes/NestedUtils.h b/src/DataTypes/NestedUtils.h index e7cda541f47..38da382254c 100644 --- a/src/DataTypes/NestedUtils.h +++ b/src/DataTypes/NestedUtils.h @@ -13,7 +13,7 @@ namespace Nested /// Splits name of compound identifier by first/last dot (depending on 'reverse' parameter). std::pair splitName(const std::string & name, bool reverse = false); - std::pair splitName(const std::string_view & name, bool reverse = false); + std::pair splitName(std::string_view name, bool reverse = false); /// Returns the prefix of the name to the first '.'. Or the name is unchanged if there is no dot. std::string extractTableName(const std::string & nested_name); diff --git a/src/DataTypes/Serializations/SerializationEnum.cpp b/src/DataTypes/Serializations/SerializationEnum.cpp index 39e9885fe17..a1b9c8bf95a 100644 --- a/src/DataTypes/Serializations/SerializationEnum.cpp +++ b/src/DataTypes/Serializations/SerializationEnum.cpp @@ -18,7 +18,7 @@ void SerializationEnum::serializeText(const IColumn & column, size_t row_n template void SerializationEnum::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeEscapedString(this->getNameForValue(assert_cast(column).getData()[row_num]), ostr); + writeEscapedString(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); } template @@ -69,13 +69,13 @@ void SerializationEnum::deserializeWholeText(IColumn & column, ReadBuffer template void SerializationEnum::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - writeJSONString(this->getNameForValue(assert_cast(column).getData()[row_num]), ostr, settings); + writeJSONString(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr, settings); } template void SerializationEnum::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeXMLStringForTextElement(this->getNameForValue(assert_cast(column).getData()[row_num]), ostr); + writeXMLStringForTextElement(this->getNameForValue(assert_cast(column).getData()[row_num]).toView(), ostr); } template diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.cpp b/src/DataTypes/Serializations/SerializationLowCardinality.cpp index c79f588e46c..8e19c5a740b 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.cpp +++ b/src/DataTypes/Serializations/SerializationLowCardinality.cpp @@ -511,8 +511,6 @@ void SerializationLowCardinality::serializeBinaryBulkWithMultipleStreams( /// Insert used_keys into global dictionary and update sub_index. 
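/// (Roughly: keys that do not fit under low_cardinality_max_dictionary_size end up in overflowed_keys and are serialized as additional keys for this granule instead of growing the shared dictionary.)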
auto indexes_with_overflow = global_dictionary->uniqueInsertRangeWithOverflow(*keys, 0, keys->size(), settings.low_cardinality_max_dictionary_size); - // size_t max_size = settings.low_cardinality_max_dictionary_size + indexes_with_overflow.overflowed_keys->size(); - // ColumnLowCardinality::Index(indexes_with_overflow.indexes->getPtr()).check(max_size); if (global_dictionary->size() > settings.low_cardinality_max_dictionary_size) throw Exception("Got dictionary with size " + toString(global_dictionary->size()) + @@ -656,11 +654,6 @@ void SerializationLowCardinality::deserializeBinaryBulkWithMultipleStreams( { auto maps = mapIndexWithAdditionalKeys(*indexes_column, global_dictionary->size()); - // ColumnLowCardinality::Index(maps.additional_keys_map->getPtr()).check(additional_keys->size()); - - // ColumnLowCardinality::Index(indexes_column->getPtr()).check( - // maps.dictionary_map->size() + maps.additional_keys_map->size()); - auto used_keys = IColumn::mutate(global_dictionary->getNestedColumn()->index(*maps.dictionary_map, 0)); if (!maps.additional_keys_map->empty()) diff --git a/src/DataTypes/Serializations/SerializationLowCardinality.h b/src/DataTypes/Serializations/SerializationLowCardinality.h index 0a3597e86c7..96e3a297d6a 100644 --- a/src/DataTypes/Serializations/SerializationLowCardinality.h +++ b/src/DataTypes/Serializations/SerializationLowCardinality.h @@ -78,9 +78,6 @@ private: template void deserializeImpl(IColumn & column, DeserializeFunctionPtr func, Args &&... args) const; - - // template - // static MutableColumnUniquePtr createColumnUniqueImpl(const IDataType & keys_type, const Creator & creator); }; } diff --git a/src/DataTypes/Serializations/SerializationSparse.cpp b/src/DataTypes/Serializations/SerializationSparse.cpp index 64db248c5fc..6fa40e460c5 100644 --- a/src/DataTypes/Serializations/SerializationSparse.cpp +++ b/src/DataTypes/Serializations/SerializationSparse.cpp @@ -263,6 +263,12 @@ void SerializationSparse::deserializeBinaryBulkWithMultipleStreams( { auto * state_sparse = checkAndGetState(state); + if (auto cached_column = getFromSubstreamsCache(cache, settings.path)) + { + column = cached_column; + return; + } + if (!settings.continuous_reading) state_sparse->reset(); @@ -281,7 +287,8 @@ void SerializationSparse::deserializeBinaryBulkWithMultipleStreams( size_t values_limit = offsets_data.size() - old_size; settings.path.back() = Substream::SparseElements; - nested->deserializeBinaryBulkWithMultipleStreams(values_column, values_limit, settings, state_sparse->nested, cache); + /// Do not use substream cache while reading values column, because ColumnSparse can be cached only in a whole. + nested->deserializeBinaryBulkWithMultipleStreams(values_column, values_limit, settings, state_sparse->nested, nullptr); settings.path.pop_back(); if (offsets_data.size() + 1 != values_column->size()) @@ -291,6 +298,7 @@ void SerializationSparse::deserializeBinaryBulkWithMultipleStreams( /// 'insertManyDefaults' just increases size of column. column_sparse.insertManyDefaults(read_rows); column = std::move(mutable_column); + addToSubstreamsCache(cache, settings.path, column); } /// All methods below just wrap nested serialization. 
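The SerializationSparse hunk above makes the substreams cache hold only a fully assembled ColumnSparse: the nested values stream is read with a null cache, and the complete column is added to the cache at the end. A minimal sketch of that pattern, assuming the helper signatures implied by the calls in the hunk; the function below is hypothetical and not part of this patch:

#include <Columns/IColumn.h>
#include <DataTypes/Serializations/ISerialization.h>

namespace DB
{

/// Hypothetical helper illustrating the whole-column caching pattern above.
/// Offsets handling, deserialization state setup and error handling are omitted.
static void deserializeWholeColumnWithCache(
    const SerializationPtr & nested,
    ColumnPtr & column,
    size_t limit,
    ISerialization::DeserializeBinaryBulkSettings & settings,
    ISerialization::DeserializeBinaryBulkStatePtr & state,
    ISerialization::SubstreamsCache * cache)
{
    /// Reuse the column if an earlier subcolumn read already assembled it for this path.
    if (auto cached_column = ISerialization::getFromSubstreamsCache(cache, settings.path))
    {
        column = cached_column;
        return;
    }

    /// Inner streams must not be cached on their own: only the complete column
    /// is a safe unit to share, so the nested read gets a null cache.
    nested->deserializeBinaryBulkWithMultipleStreams(column, limit, settings, state, nullptr);

    ISerialization::addToSubstreamsCache(cache, settings.path, column);
}

}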
diff --git a/src/DataTypes/Serializations/SerializationString.cpp b/src/DataTypes/Serializations/SerializationString.cpp index 5614e970315..e07fd4f26cf 100644 --- a/src/DataTypes/Serializations/SerializationString.cpp +++ b/src/DataTypes/Serializations/SerializationString.cpp @@ -213,7 +213,7 @@ void SerializationString::serializeText(const IColumn & column, size_t row_num, void SerializationString::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeEscapedString(assert_cast(column).getDataAt(row_num), ostr); + writeEscapedString(assert_cast(column).getDataAt(row_num).toView(), ostr); } @@ -266,7 +266,7 @@ void SerializationString::deserializeTextQuoted(IColumn & column, ReadBuffer & i void SerializationString::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - writeJSONString(assert_cast(column).getDataAt(row_num), ostr, settings); + writeJSONString(assert_cast(column).getDataAt(row_num).toView(), ostr, settings); } @@ -278,7 +278,7 @@ void SerializationString::deserializeTextJSON(IColumn & column, ReadBuffer & ist void SerializationString::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings &) const { - writeXMLStringForTextElement(assert_cast(column).getDataAt(row_num), ostr); + writeXMLStringForTextElement(assert_cast(column).getDataAt(row_num).toView(), ostr); } diff --git a/src/DataTypes/getLeastSupertype.cpp b/src/DataTypes/getLeastSupertype.cpp index 65ed37f1dcf..fee3cf1553e 100644 --- a/src/DataTypes/getLeastSupertype.cpp +++ b/src/DataTypes/getLeastSupertype.cpp @@ -554,7 +554,11 @@ DataTypePtr getLeastSupertype(const DataTypes & types) UInt32 max_scale = 0; for (const auto & type : types) { - UInt32 scale = getDecimalScale(*type, 0); + auto type_id = type->getTypeId(); + if (type_id != TypeIndex::Decimal32 && type_id != TypeIndex::Decimal64 && type_id != TypeIndex::Decimal128) + continue; + + UInt32 scale = getDecimalScale(*type); if (scale > max_scale) max_scale = scale; } diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index df1e58ca852..6213fa62e3c 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -126,6 +126,20 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String if (!create.attach && !context->getSettingsRef().allow_deprecated_database_ordinary) throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Ordinary database engine is deprecated (see also allow_deprecated_database_ordinary setting)"); + + /// Before 20.7 metadata/db_name.sql file might absent and Ordinary database was attached if there's metadata/db_name/ dir. + /// Between 20.7 and 22.7 metadata/db_name.sql was created in this case as well. + /// Since 20.7 `default` database is created with Atomic engine on the very first server run. + /// The problem is that if server crashed during the very first run and metadata/db_name/ -> store/whatever symlink was created + /// then it's considered as Ordinary database. And it even works somehow + /// until background task tries to remove onused dir from store/... + if (fs::is_symlink(metadata_path)) + throw Exception(ErrorCodes::CANNOT_CREATE_DATABASE, "Metadata directory {} for Ordinary database {} is a symbolic link to {}. " + "It may be a result of manual intervention, crash on very first server start or a bug. 
" + "Database cannot be attached (it's kind of protection from potential data loss). " + "Metadata directory must not be a symlink and must contain tables metadata files itself. " + "You have to resolve this manually.", + metadata_path, database_name, fs::read_symlink(metadata_path).string()); return std::make_shared(database_name, metadata_path, context); } diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 26ea3b81e3a..fe229ba6ee9 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -202,8 +202,9 @@ void DatabaseOnDisk::createTable( if (create.uuid != create_detached.uuid) throw Exception( ErrorCodes::TABLE_ALREADY_EXISTS, - "Table {}.{} already exist (detached permanently). To attach it back " - "you need to use short ATTACH syntax or a full statement with the same UUID", + "Table {}.{} already exist (detached or detached permanently). To attach it back " + "you need to use short ATTACH syntax (ATTACH TABLE {}.{};)", + backQuote(getDatabaseName()), backQuote(table_name), backQuote(getDatabaseName()), backQuote(table_name)); } diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index 8b85d1b9a63..08a0859e6db 100644 --- a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -79,6 +79,7 @@ void DatabaseMaterializedPostgreSQL::startSynchronization() } catch (...) { + tryLogCurrentException(__PRETTY_FUNCTION__); LOG_ERROR(log, "Unable to load replicated tables list"); throw; } @@ -111,7 +112,16 @@ void DatabaseMaterializedPostgreSQL::startSynchronization() } LOG_TRACE(log, "Loaded {} tables. Starting synchronization", materialized_tables.size()); - replication_handler->startup(/* delayed */false); + + try + { + replication_handler->startup(/* delayed */false); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + throw; + } } diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 10cde43e9e1..eeae110cddf 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -39,9 +39,10 @@ std::set fetchPostgreSQLTablesList(T & tx, const String & postgres_schem std::set tables; if (schemas.size() <= 1) { - std::string query = fmt::format("SELECT tablename FROM pg_catalog.pg_tables " - "WHERE schemaname != 'pg_catalog' AND {}", - postgres_schema.empty() ? "schemaname != 'information_schema'" : "schemaname = " + quoteString(postgres_schema)); + std::string query = fmt::format( + "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = {}", + postgres_schema.empty() ? quoteString("public") : quoteString(postgres_schema)); + for (auto table_name : tx.template stream(query)) tables.insert(std::get<0>(table_name)); @@ -53,9 +54,10 @@ std::set fetchPostgreSQLTablesList(T & tx, const String & postgres_schem /// If we add schema to table name then table can be accessed only this way: database_name.`schema_name.table_name` for (const auto & schema : schemas) { - std::string query = fmt::format("SELECT tablename FROM pg_catalog.pg_tables " - "WHERE schemaname != 'pg_catalog' AND {}", - postgres_schema.empty() ? 
"schemaname != 'information_schema'" : "schemaname = " + quoteString(schema)); + std::string query = fmt::format( + "SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = {}", + quoteString(schema)); + for (auto table_name : tx.template stream(query)) tables.insert(schema + '.' + std::get<0>(table_name)); } diff --git a/src/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp index d77f0bf825c..c858618c5ff 100644 --- a/src/Dictionaries/FlatDictionary.cpp +++ b/src/Dictionaries/FlatDictionary.cpp @@ -105,7 +105,7 @@ ColumnPtr FlatDictionary::getColumn( getItemsImpl( attribute, ids, - [&](size_t row, const StringRef value, bool is_null) + [&](size_t row, StringRef value, bool is_null) { (*vec_null_map_to)[row] = is_null; out->insertData(value.data, value.size); @@ -115,7 +115,7 @@ ColumnPtr FlatDictionary::getColumn( getItemsImpl( attribute, ids, - [&](size_t, const StringRef value, bool) { out->insertData(value.data, value.size); }, + [&](size_t, StringRef value, bool) { out->insertData(value.data, value.size); }, default_value_extractor); } else diff --git a/src/Dictionaries/HashedArrayDictionary.cpp b/src/Dictionaries/HashedArrayDictionary.cpp index d702a02bc2e..b8ed664e91a 100644 --- a/src/Dictionaries/HashedArrayDictionary.cpp +++ b/src/Dictionaries/HashedArrayDictionary.cpp @@ -585,7 +585,7 @@ ColumnPtr HashedArrayDictionary::getAttributeColumn( getItemsImpl( attribute, keys_object, - [&](size_t row, const StringRef value, bool is_null) + [&](size_t row, StringRef value, bool is_null) { (*vec_null_map_to)[row] = is_null; out->insertData(value.data, value.size); @@ -595,7 +595,7 @@ ColumnPtr HashedArrayDictionary::getAttributeColumn( getItemsImpl( attribute, keys_object, - [&](size_t, const StringRef value, bool) { out->insertData(value.data, value.size); }, + [&](size_t, StringRef value, bool) { out->insertData(value.data, value.size); }, default_value_extractor); } else diff --git a/src/Dictionaries/HashedDictionary.cpp b/src/Dictionaries/HashedDictionary.cpp index c5160c0dfa8..9beac59f274 100644 --- a/src/Dictionaries/HashedDictionary.cpp +++ b/src/Dictionaries/HashedDictionary.cpp @@ -117,7 +117,7 @@ ColumnPtr HashedDictionary::getColumn( getItemsImpl( attribute, extractor, - [&](size_t row, const StringRef value, bool is_null) + [&](size_t row, StringRef value, bool is_null) { (*vec_null_map_to)[row] = is_null; out->insertData(value.data, value.size); @@ -127,7 +127,7 @@ ColumnPtr HashedDictionary::getColumn( getItemsImpl( attribute, extractor, - [&](size_t, const StringRef value, bool) { out->insertData(value.data, value.size); }, + [&](size_t, StringRef value, bool) { out->insertData(value.data, value.size); }, default_value_extractor); } else diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index 46cba702b5d..efb81849126 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -261,7 +261,7 @@ ColumnPtr IPAddressDictionary::getColumn( getItemsImpl( attribute, key_columns, - [&](const size_t, const StringRef value) { out->insertData(value.data, value.size); }, + [&](const size_t, StringRef value) { out->insertData(value.data, value.size); }, default_value_extractor); } else @@ -387,7 +387,7 @@ void IPAddressDictionary::loadData() setAttributeValue(attribute, attribute_column[row]); } - const auto [addr, prefix] = parseIPFromString(std::string_view{key_column_ptr->getDataAt(row)}); + const auto [addr, prefix] = 
parseIPFromString(key_column_ptr->getDataAt(row).toView()); has_ipv6 = has_ipv6 || (addr.family() == Poco::Net::IPAddress::IPv6); size_t row_number = ip_records.size(); diff --git a/src/Dictionaries/RangeHashedDictionary.cpp b/src/Dictionaries/RangeHashedDictionary.cpp index 261e9166ec8..ad962ca4acc 100644 --- a/src/Dictionaries/RangeHashedDictionary.cpp +++ b/src/Dictionaries/RangeHashedDictionary.cpp @@ -151,7 +151,7 @@ ColumnPtr RangeHashedDictionary::getColumn( getItemsImpl( attribute, modified_key_columns, - [&](size_t row, const StringRef value, bool is_null) + [&](size_t row, StringRef value, bool is_null) { (*vec_null_map_to)[row] = is_null; out->insertData(value.data, value.size); @@ -161,7 +161,7 @@ ColumnPtr RangeHashedDictionary::getColumn( getItemsImpl( attribute, modified_key_columns, - [&](size_t, const StringRef value, bool) + [&](size_t, StringRef value, bool) { out->insertData(value.data, value.size); }, @@ -255,7 +255,7 @@ ColumnPtr RangeHashedDictionary::getColumnInternal( getItemsInternalImpl( attribute, key_to_index, - [&](size_t row, const StringRef value, bool is_null) + [&](size_t row, StringRef value, bool is_null) { (*vec_null_map_to)[row] = is_null; out->insertData(value.data, value.size); @@ -264,7 +264,7 @@ ColumnPtr RangeHashedDictionary::getColumnInternal( getItemsInternalImpl( attribute, key_to_index, - [&](size_t, const StringRef value, bool) + [&](size_t, StringRef value, bool) { out->insertData(value.data, value.size); }); diff --git a/src/Dictionaries/SSDCacheDictionaryStorage.h b/src/Dictionaries/SSDCacheDictionaryStorage.h index d813cf1bcc8..459c4c44668 100644 --- a/src/Dictionaries/SSDCacheDictionaryStorage.h +++ b/src/Dictionaries/SSDCacheDictionaryStorage.h @@ -27,11 +27,6 @@ #include -namespace CurrentMetrics -{ - extern const Metric Write; -} - namespace ProfileEvents { extern const Event FileOpen; @@ -527,8 +522,6 @@ public: throw Exception(ErrorCodes::CANNOT_IO_SUBMIT, "Cannot submit request for asynchronous IO on file {}", file_path); } - // CurrentMetrics::Increment metric_increment_write{CurrentMetrics::Write}; - io_event event; while (io_getevents(aio_context.ctx, 1, 1, &event, nullptr) < 0) diff --git a/src/Dictionaries/XDBCDictionarySource.cpp b/src/Dictionaries/XDBCDictionarySource.cpp index 5e9c2f7ac7a..0a097c4faef 100644 --- a/src/Dictionaries/XDBCDictionarySource.cpp +++ b/src/Dictionaries/XDBCDictionarySource.cpp @@ -275,8 +275,6 @@ void registerDictionarySourceJDBC(DictionarySourceFactory & factory) bool /* created_from_ddl */) -> DictionarySourcePtr { throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Dictionary source of type `jdbc` is disabled until consistent support for nullable fields."); - // BridgeHelperPtr bridge = std::make_shared>(config, context.getSettings().http_receive_timeout, config.getString(config_prefix + ".connection_string")); - // return std::make_unique(dict_struct, config, config_prefix + ".jdbc", sample_block, context, bridge); }; factory.registerSource("jdbc", create_table_source); } diff --git a/src/Disks/DiskEncrypted.cpp b/src/Disks/DiskEncrypted.cpp index 8edb00e5a67..e6479727aad 100644 --- a/src/Disks/DiskEncrypted.cpp +++ b/src/Disks/DiskEncrypted.cpp @@ -8,6 +8,8 @@ #include #include #include +#include +#include namespace DB diff --git a/src/Disks/DiskWebServer.cpp b/src/Disks/DiskWebServer.cpp index 54dce926893..b6cda8288d7 100644 --- a/src/Disks/DiskWebServer.cpp +++ b/src/Disks/DiskWebServer.cpp @@ -74,7 +74,6 @@ void DiskWebServer::initialize(const String & uri_path) const if (file_data.type == 
FileType::Directory) { directories_to_load.push_back(file_path); - // file_path = fs::path(file_path) / ""; } file_path = file_path.substr(url.size()); diff --git a/src/Disks/IDisk.h b/src/Disks/IDisk.h index 941df99298b..2337fa00af5 100644 --- a/src/Disks/IDisk.h +++ b/src/Disks/IDisk.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include @@ -41,6 +40,10 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; } +class IDisk; +using DiskPtr = std::shared_ptr; +using DisksMap = std::map; + class IReservation; using ReservationPtr = std::unique_ptr; using Reservations = std::vector; @@ -363,7 +366,6 @@ private: std::unique_ptr executor; }; -using DiskPtr = std::shared_ptr; using Disks = std::vector; /** diff --git a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp index 774a7ecaaaa..f58e91669c4 100644 --- a/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp +++ b/src/Disks/IO/AsynchronousReadIndirectBufferFromRemoteFS.cpp @@ -168,6 +168,8 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait}; size_t size = 0; + size_t bytes_read = 0; + if (prefetch_future.valid()) { ProfileEvents::increment(ProfileEvents::RemoteFSPrefetchedReads); @@ -181,6 +183,8 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() /// If prefetch_future is valid, size should always be greater than zero. assert(offset <= size); + bytes_read = size - offset; + ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds()); } @@ -200,9 +204,11 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() auto offset = result.offset; LOG_TEST(log, "Current size: {}, offset: {}", size, offset); - assert(offset <= size); - if (size) + assert(offset <= size); + bytes_read = size - offset; + + if (bytes_read) { /// Adjust the working buffer so that it ignores `offset` bytes. 
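/// (For example, if the read returned 1 MiB but the first 4 KiB precede the requested position, the working buffer starts 4 KiB into `memory`, and bytes_read = size - offset is what nextImpl() now reports instead of the raw `size`.)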
internal_buffer = Buffer(memory.data(), memory.data() + memory.size()); @@ -222,7 +228,7 @@ bool AsynchronousReadIndirectBufferFromRemoteFS::nextImpl() assert(file_offset_of_buffer_end <= impl->getFileSize()); prefetch_future = {}; - return size; + return bytes_read; } diff --git a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp index b929cea0236..a3d5cfc408d 100644 --- a/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp +++ b/src/Disks/IO/CachedReadBufferFromRemoteFS.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace ProfileEvents diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp index 03aa0d81fe6..14614871185 100644 --- a/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.cpp @@ -90,7 +90,6 @@ SeekableReadBufferPtr ReadBufferFromAzureBlobStorageGather::createImplementation settings, max_single_read_retries, max_single_download_retries, - settings.remote_fs_buffer_size, /* use_external_buffer */true, read_until_position); } diff --git a/src/Disks/IO/createReadBufferFromFileBase.cpp b/src/Disks/IO/createReadBufferFromFileBase.cpp index 345d0019aa7..d87144dee55 100644 --- a/src/Disks/IO/createReadBufferFromFileBase.cpp +++ b/src/Disks/IO/createReadBufferFromFileBase.cpp @@ -52,7 +52,7 @@ std::unique_ptr createReadBufferFromFileBase( { try { - auto res = std::make_unique(*settings.mmap_cache, filename, 0); + auto res = std::make_unique(*settings.mmap_cache, filename, 0, file_size.value_or(-1)); ProfileEvents::increment(ProfileEvents::CreatedReadBufferMMap); return res; } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index ada2b76920a..37fbc0fb05c 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -67,8 +67,8 @@ std::unique_ptr AzureObjectStorage::readObject( /// NOLI auto settings_ptr = settings.get(); return std::make_unique( - client.get(), object.absolute_path, read_settings, settings_ptr->max_single_read_retries, - settings_ptr->max_single_download_retries, read_settings.remote_fs_buffer_size); + client.get(), object.absolute_path, patchSettings(read_settings), settings_ptr->max_single_read_retries, + settings_ptr->max_single_download_retries); } std::unique_ptr AzureObjectStorage::readObjects( /// NOLINT @@ -77,18 +77,19 @@ std::unique_ptr AzureObjectStorage::readObjects( /// NOL std::optional, std::optional) const { + ReadSettings disk_read_settings = patchSettings(read_settings); auto settings_ptr = settings.get(); auto reader_impl = std::make_unique( client.get(), objects, settings_ptr->max_single_read_retries, settings_ptr->max_single_download_retries, - read_settings); + disk_read_settings); - if (read_settings.remote_fs_method == RemoteFSReadMethod::threadpool) + if (disk_read_settings.remote_fs_method == RemoteFSReadMethod::threadpool) { auto reader = getThreadPoolReader(); - return std::make_unique(reader, read_settings, std::move(reader_impl)); + return std::make_unique(reader, disk_read_settings, std::move(reader_impl)); } else { @@ -104,7 +105,7 @@ std::unique_ptr AzureObjectStorage::writeObject( /// NO std::optional, FinalizeCallback && finalize_callback, size_t buf_size, - const WriteSettings &) + const WriteSettings & write_settings) { if (mode != WriteMode::Rewrite) throw Exception("Azure storage doesn't support append", 
ErrorCodes::UNSUPPORTED_METHOD); @@ -113,7 +114,8 @@ std::unique_ptr AzureObjectStorage::writeObject( /// NO client.get(), object.absolute_path, settings.get()->max_single_part_upload_size, - buf_size); + buf_size, + patchSettings(write_settings)); return std::make_unique(std::move(buffer), std::move(finalize_callback), object.absolute_path); } @@ -207,7 +209,7 @@ void AzureObjectStorage::applyNewSettings(const Poco::Util::AbstractConfiguratio { auto new_settings = getAzureBlobStorageSettings(config, config_prefix, context); settings.set(std::move(new_settings)); - + applyRemoteThrottlingSettings(context); /// We don't update client } diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 6df093ebd43..34b3d86b355 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp index 44976b7cf2d..dc70008649e 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/registerDiskAzureBlobStorage.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index ca414a7ee72..0b7d16bd895 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -18,6 +18,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp b/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp index 99606a18517..b8ab2f49202 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageCommon.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.cpp b/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.cpp index a6b2f6b5f02..d1413bd4e88 100644 --- a/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.cpp +++ b/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.cpp @@ -11,7 +11,6 @@ namespace DB namespace ErrorCodes { extern const int NOT_IMPLEMENTED; - extern const int FS_METADATA_ERROR; } FakeMetadataStorageFromDisk::FakeMetadataStorageFromDisk( @@ -114,85 +113,6 @@ const IMetadataStorage & FakeMetadataStorageFromDiskTransaction::getStorageForNo return metadata_storage; } -void FakeMetadataStorageFromDiskTransaction::addOperation(MetadataOperationPtr && operation) -{ - if (state != MetadataFromDiskTransactionState::PREPARING) - throw Exception( - ErrorCodes::FS_METADATA_ERROR, - "Cannot add operations to transaction in {} state, it should be in {} state", - toString(state), toString(MetadataFromDiskTransactionState::PREPARING)); - - operations.emplace_back(std::move(operation)); -} - -void FakeMetadataStorageFromDiskTransaction::commit() -{ - if (state != MetadataFromDiskTransactionState::PREPARING) - throw Exception( - ErrorCodes::FS_METADATA_ERROR, - "Cannot commit transaction in {} state, it should be in {} state", - toString(state), toString(MetadataFromDiskTransactionState::PREPARING)); - - { - std::unique_lock lock(metadata_storage.metadata_mutex); - for (size_t i = 
0; i < operations.size(); ++i) - { - try - { - operations[i]->execute(); - } - catch (Exception & ex) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - ex.addMessage(fmt::format("While committing metadata operation #{}", i)); - state = MetadataFromDiskTransactionState::FAILED; - rollback(i); - throw; - } - } - } - - /// Do it in "best effort" mode - for (size_t i = 0; i < operations.size(); ++i) - { - try - { - operations[i]->finalize(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__, fmt::format("Failed to finalize operation #{}", i)); - } - } - - state = MetadataFromDiskTransactionState::COMMITTED; -} - -void FakeMetadataStorageFromDiskTransaction::rollback(size_t until_pos) -{ - /// Otherwise everything is alright - if (state == MetadataFromDiskTransactionState::FAILED) - { - for (int64_t i = until_pos; i >= 0; --i) - { - try - { - operations[i]->undo(); - } - catch (Exception & ex) - { - state = MetadataFromDiskTransactionState::PARTIALLY_ROLLED_BACK; - ex.addMessage(fmt::format("While rolling back operation #{}", i)); - throw; - } - } - } - else - { - /// Nothing to do, transaction committed or not even started to commit - } -} - void FakeMetadataStorageFromDiskTransaction::writeStringToFile(const std::string & path, const std::string & data) { auto wb = disk->writeFile(path); diff --git a/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.h b/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.h index 3fc223ea75b..6d5ae12a157 100644 --- a/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.h +++ b/src/Disks/ObjectStorages/FakeMetadataStorageFromDisk.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -65,12 +66,6 @@ private: const FakeMetadataStorageFromDisk & metadata_storage; std::vector operations; - MetadataFromDiskTransactionState state{MetadataFromDiskTransactionState::PREPARING}; - - void addOperation(MetadataOperationPtr && operation); - - void rollback(size_t until_pos); - public: FakeMetadataStorageFromDiskTransaction( const FakeMetadataStorageFromDisk & metadata_storage_, DiskPtr disk_) @@ -82,7 +77,7 @@ public: const IMetadataStorage & getStorageForNonTransactionalReads() const final; - void commit() final; + void commit() final {} void writeStringToFile(const std::string & path, const std::string & data) override; diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp index 91c265a6716..4ffbf5b2ceb 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.cpp @@ -52,7 +52,7 @@ std::unique_ptr HDFSObjectStorage::readObject( /// NOLIN std::optional, std::optional) const { - return std::make_unique(object.absolute_path, object.absolute_path, config, read_settings); + return std::make_unique(object.absolute_path, object.absolute_path, config, patchSettings(read_settings)); } std::unique_ptr HDFSObjectStorage::readObjects( /// NOLINT @@ -61,7 +61,7 @@ std::unique_ptr HDFSObjectStorage::readObjects( /// NOLI std::optional, std::optional) const { - auto hdfs_impl = std::make_unique(config, objects, read_settings); + auto hdfs_impl = std::make_unique(config, objects, patchSettings(read_settings)); auto buf = std::make_unique(std::move(hdfs_impl)); return std::make_unique(std::move(buf), settings->min_bytes_for_seek); } @@ -72,7 +72,7 @@ std::unique_ptr HDFSObjectStorage::writeObject( /// NOL std::optional attributes, FinalizeCallback && finalize_callback, size_t buf_size, - const WriteSettings &) + const 
WriteSettings & write_settings) { if (attributes.has_value()) throw Exception( @@ -81,7 +81,7 @@ std::unique_ptr HDFSObjectStorage::writeObject( /// NOL /// Single O_WRONLY in libhdfs adds O_TRUNC auto hdfs_buffer = std::make_unique( - object.absolute_path, config, settings->replication, buf_size, + object.absolute_path, config, settings->replication, patchSettings(write_settings), buf_size, mode == WriteMode::Rewrite ? O_WRONLY : O_WRONLY | O_APPEND); return std::make_unique(std::move(hdfs_buffer), std::move(finalize_callback), object.absolute_path); @@ -155,8 +155,9 @@ void HDFSObjectStorage::copyObject( /// NOLINT } -void HDFSObjectStorage::applyNewSettings(const Poco::Util::AbstractConfiguration &, const std::string &, ContextPtr) +void HDFSObjectStorage::applyNewSettings(const Poco::Util::AbstractConfiguration &, const std::string &, ContextPtr context) { + applyRemoteThrottlingSettings(context); } std::unique_ptr HDFSObjectStorage::cloneObjectStorage(const std::string &, const Poco::Util::AbstractConfiguration &, const std::string &, ContextPtr) diff --git a/src/Disks/ObjectStorages/IObjectStorage.cpp b/src/Disks/ObjectStorages/IObjectStorage.cpp index fc934b829fd..f3ac94768d8 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.cpp +++ b/src/Disks/ObjectStorages/IObjectStorage.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace DB { @@ -47,4 +48,27 @@ std::string IObjectStorage::getCacheBasePath() const throw Exception(ErrorCodes::NOT_IMPLEMENTED, "getCacheBasePath() is not implemented for {}", getName()); } +void IObjectStorage::applyRemoteThrottlingSettings(ContextPtr context) +{ + std::unique_lock lock{throttlers_mutex}; + remote_read_throttler = context->getRemoteReadThrottler(); + remote_write_throttler = context->getRemoteWriteThrottler(); +} + +ReadSettings IObjectStorage::patchSettings(const ReadSettings & read_settings) const +{ + std::unique_lock lock{throttlers_mutex}; + ReadSettings settings{read_settings}; + settings.remote_throttler = remote_read_throttler; + return settings; +} + +WriteSettings IObjectStorage::patchSettings(const WriteSettings & write_settings) const +{ + std::unique_lock lock{throttlers_mutex}; + WriteSettings settings{write_settings}; + settings.remote_throttler = remote_write_throttler; + return settings; +} + } diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index 2526163f4cb..1ab2d75ff86 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -3,9 +3,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -166,6 +168,19 @@ public: virtual void removeCacheIfExists(const std::string & /* path */) {} virtual bool supportsCache() const { return false; } + +protected: + /// Should be called from implementation of applyNewSettings() + void applyRemoteThrottlingSettings(ContextPtr context); + + /// Should be used by implementation of read* and write* methods + ReadSettings patchSettings(const ReadSettings & read_settings) const; + WriteSettings patchSettings(const WriteSettings & write_settings) const; + +private: + mutable std::mutex throttlers_mutex; + ThrottlerPtr remote_read_throttler; + ThrottlerPtr remote_write_throttler; }; using ObjectStoragePtr = std::shared_ptr; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp b/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp index a664433a3d0..489772647d1 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp +++ 
b/src/Disks/ObjectStorages/MetadataStorageFromDisk.cpp @@ -87,6 +87,14 @@ DiskObjectStorageMetadataPtr MetadataStorageFromDisk::readMetadataUnlocked(const return metadata; } +DiskObjectStorageMetadataPtr MetadataStorageFromDisk::readMetadataUnlocked(const std::string & path, std::unique_lock &) const +{ + auto metadata = std::make_unique(disk->getPath(), object_storage_root_path, path); + auto str = readFileToString(path); + metadata->deserializeFromString(str); + return metadata; +} + DiskObjectStorageMetadataPtr MetadataStorageFromDisk::readMetadata(const std::string & path) const { std::shared_lock lock(metadata_mutex); @@ -112,13 +120,7 @@ std::unordered_map MetadataStorageFromDisk::getSerializedMetadat void MetadataStorageFromDiskTransaction::createHardLink(const std::string & path_from, const std::string & path_to) { - auto metadata = metadata_storage.readMetadata(path_from); - - metadata->incrementRefCount(); - - writeStringToFile(path_from, metadata->serializeToString()); - - addOperation(std::make_unique(path_from, path_to, *metadata_storage.getDisk())); + addOperation(std::make_unique(path_from, path_to, *metadata_storage.disk, metadata_storage)); } MetadataTransactionPtr MetadataStorageFromDisk::createTransaction() const @@ -177,12 +179,12 @@ void MetadataStorageFromDiskTransaction::commit() toString(state), toString(MetadataFromDiskTransactionState::PREPARING)); { - std::lock_guard lock(metadata_storage.metadata_mutex); + std::unique_lock lock(metadata_storage.metadata_mutex); for (size_t i = 0; i < operations.size(); ++i) { try { - operations[i]->execute(); + operations[i]->execute(lock); } catch (Exception & ex) { @@ -316,29 +318,12 @@ void MetadataStorageFromDiskTransaction::createMetadataFile(const std::string & void MetadataStorageFromDiskTransaction::addBlobToMetadata(const std::string & path, const std::string & blob_name, uint64_t size_in_bytes) { - DiskObjectStorageMetadataPtr metadata; - if (metadata_storage.exists(path)) - { - metadata = metadata_storage.readMetadata(path); - metadata->addObject(blob_name, size_in_bytes); - writeStringToFile(path, metadata->serializeToString()); - } - else - { - createMetadataFile(path, blob_name, size_in_bytes); - } + addOperation(std::make_unique(path, blob_name, metadata_storage.object_storage_root_path, size_in_bytes, *metadata_storage.disk, metadata_storage)); } void MetadataStorageFromDiskTransaction::unlinkMetadata(const std::string & path) { - auto metadata = metadata_storage.readMetadata(path); - uint32_t ref_count = metadata->getRefCount(); - if (ref_count != 0) - { - metadata->decrementRefCount(); - writeStringToFile(path, metadata->serializeToString()); - } - unlinkFile(path); + addOperation(std::make_unique(path, *metadata_storage.disk, metadata_storage)); } } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromDisk.h b/src/Disks/ObjectStorages/MetadataStorageFromDisk.h index e84b8fe36eb..104e9d54bff 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromDisk.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromDisk.h @@ -55,9 +55,9 @@ public: std::string getObjectStorageRootPath() const override { return object_storage_root_path; } -private: DiskObjectStorageMetadataPtr readMetadata(const std::string & path) const; + DiskObjectStorageMetadataPtr readMetadataUnlocked(const std::string & path, std::unique_lock & lock) const; DiskObjectStorageMetadataPtr readMetadataUnlocked(const std::string & path, std::shared_lock & lock) const; }; diff --git 
a/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp index dce4ae2f1f7..72da240cf8a 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -24,7 +25,7 @@ SetLastModifiedOperation::SetLastModifiedOperation(const std::string & path_, Po { } -void SetLastModifiedOperation::execute() +void SetLastModifiedOperation::execute(std::unique_lock &) { old_timestamp = disk.getLastModified(path); disk.setLastModified(path, new_timestamp); @@ -41,7 +42,7 @@ UnlinkFileOperation::UnlinkFileOperation(const std::string & path_, IDisk & disk { } -void UnlinkFileOperation::execute() +void UnlinkFileOperation::execute(std::unique_lock &) { auto buf = disk.readFile(path); readStringUntilEOF(prev_data, *buf); @@ -61,7 +62,7 @@ CreateDirectoryOperation::CreateDirectoryOperation(const std::string & path_, ID { } -void CreateDirectoryOperation::execute() +void CreateDirectoryOperation::execute(std::unique_lock &) { disk.createDirectory(path); } @@ -77,7 +78,7 @@ CreateDirectoryRecursiveOperation::CreateDirectoryRecursiveOperation(const std:: { } -void CreateDirectoryRecursiveOperation::execute() +void CreateDirectoryRecursiveOperation::execute(std::unique_lock &) { namespace fs = std::filesystem; fs::path p(path); @@ -104,7 +105,7 @@ RemoveDirectoryOperation::RemoveDirectoryOperation(const std::string & path_, ID { } -void RemoveDirectoryOperation::execute() +void RemoveDirectoryOperation::execute(std::unique_lock &) { disk.removeDirectory(path); } @@ -121,7 +122,7 @@ RemoveRecursiveOperation::RemoveRecursiveOperation(const std::string & path_, ID { } -void RemoveRecursiveOperation:: execute() +void RemoveRecursiveOperation::execute(std::unique_lock &) { if (disk.isFile(path)) disk.moveFile(path, temp_path); @@ -146,20 +147,31 @@ void RemoveRecursiveOperation::finalize() disk.removeRecursive(path); } -CreateHardlinkOperation::CreateHardlinkOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_) +CreateHardlinkOperation::CreateHardlinkOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_, const MetadataStorageFromDisk & metadata_storage_) : path_from(path_from_) , path_to(path_to_) , disk(disk_) + , metadata_storage(metadata_storage_) { } -void CreateHardlinkOperation::execute() +void CreateHardlinkOperation::execute(std::unique_lock & lock) { + auto metadata = metadata_storage.readMetadataUnlocked(path_from, lock); + + metadata->incrementRefCount(); + + write_operation = std::make_unique(path_from, disk, metadata->serializeToString()); + + write_operation->execute(lock); + disk.createHardLink(path_from, path_to); } void CreateHardlinkOperation::undo() { + if (write_operation) + write_operation->undo(); disk.removeFile(path_to); } @@ -170,7 +182,7 @@ MoveFileOperation::MoveFileOperation(const std::string & path_from_, const std:: { } -void MoveFileOperation::execute() +void MoveFileOperation::execute(std::unique_lock &) { disk.moveFile(path_from, path_to); } @@ -187,7 +199,7 @@ MoveDirectoryOperation::MoveDirectoryOperation(const std::string & path_from_, c { } -void MoveDirectoryOperation::execute() +void MoveDirectoryOperation::execute(std::unique_lock &) { disk.moveDirectory(path_from, path_to); } @@ -197,7 +209,6 @@ void MoveDirectoryOperation::undo() disk.moveDirectory(path_to, 
path_from); } - ReplaceFileOperation::ReplaceFileOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_) : path_from(path_from_) , path_to(path_to_) @@ -206,7 +217,7 @@ ReplaceFileOperation::ReplaceFileOperation(const std::string & path_from_, const { } -void ReplaceFileOperation::execute() +void ReplaceFileOperation::execute(std::unique_lock &) { if (disk.exists(path_to)) disk.moveFile(path_to, temp_path_to); @@ -232,7 +243,7 @@ WriteFileOperation::WriteFileOperation(const std::string & path_, IDisk & disk_, { } -void WriteFileOperation::execute() +void WriteFileOperation::execute(std::unique_lock &) { if (disk.exists(path)) { @@ -258,4 +269,62 @@ void WriteFileOperation::undo() } } +void AddBlobOperation::execute(std::unique_lock & metadata_lock) +{ + DiskObjectStorageMetadataPtr metadata; + if (metadata_storage.exists(path)) + metadata = metadata_storage.readMetadataUnlocked(path, metadata_lock); + else + metadata = std::make_unique(disk.getPath(), root_path, path); + + metadata->addObject(blob_name, size_in_bytes); + + write_operation = std::make_unique(path, disk, metadata->serializeToString()); + + write_operation->execute(metadata_lock); +} + +void AddBlobOperation::undo() +{ + if (write_operation) + write_operation->undo(); +} + +void UnlinkMetadataFileOperation::execute(std::unique_lock & metadata_lock) +{ + auto metadata = metadata_storage.readMetadataUnlocked(path, metadata_lock); + uint32_t ref_count = metadata->getRefCount(); + if (ref_count != 0) + { + metadata->decrementRefCount(); + write_operation = std::make_unique(path, disk, metadata->serializeToString()); + write_operation->execute(metadata_lock); + } + unlink_operation = std::make_unique(path, disk); + unlink_operation->execute(metadata_lock); +} + +void UnlinkMetadataFileOperation::undo() +{ + if (write_operation) + write_operation->undo(); + + if (unlink_operation) + unlink_operation->undo(); +} + +void SetReadonlyFileOperation::execute(std::unique_lock & metadata_lock) +{ + auto metadata = metadata_storage.readMetadataUnlocked(path, metadata_lock); + metadata->setReadOnly(); + write_operation = std::make_unique(path, disk, metadata->serializeToString()); + write_operation->execute(metadata_lock); +} + +void SetReadonlyFileOperation::undo() +{ + if (write_operation) + write_operation->undo(); +} + } diff --git a/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.h b/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.h index f998771a68f..5f8e772ebc7 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.h +++ b/src/Disks/ObjectStorages/MetadataStorageFromDiskTransactionOperations.h @@ -4,6 +4,7 @@ namespace DB { +class MetadataStorageFromDisk; class IDisk; /** @@ -12,7 +13,7 @@ class IDisk; struct IMetadataOperation { - virtual void execute() = 0; + virtual void execute(std::unique_lock & metadata_lock) = 0; virtual void undo() = 0; virtual void finalize() {} virtual ~IMetadataOperation() = default; @@ -25,7 +26,7 @@ struct SetLastModifiedOperation final : public IMetadataOperation { SetLastModifiedOperation(const std::string & path_, Poco::Timestamp new_timestamp_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -41,7 +42,7 @@ struct UnlinkFileOperation final : public IMetadataOperation { UnlinkFileOperation(const std::string & path_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void 
undo() override; @@ -56,7 +57,7 @@ struct CreateDirectoryOperation final : public IMetadataOperation { CreateDirectoryOperation(const std::string & path_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -70,7 +71,7 @@ struct CreateDirectoryRecursiveOperation final : public IMetadataOperation { CreateDirectoryRecursiveOperation(const std::string & path_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -85,7 +86,7 @@ struct RemoveDirectoryOperation final : public IMetadataOperation { RemoveDirectoryOperation(const std::string & path_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -98,7 +99,7 @@ struct RemoveRecursiveOperation final : public IMetadataOperation { RemoveRecursiveOperation(const std::string & path_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -110,12 +111,30 @@ private: std::string temp_path; }; +struct WriteFileOperation final : public IMetadataOperation +{ + WriteFileOperation(const std::string & path_, IDisk & disk_, const std::string & data_); + + void execute(std::unique_lock & metadata_lock) override; + + void undo() override; +private: + std::string path; + IDisk & disk; + std::string data; + bool existed = false; + std::string prev_data; +}; struct CreateHardlinkOperation final : public IMetadataOperation { - CreateHardlinkOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_); + CreateHardlinkOperation( + const std::string & path_from_, + const std::string & path_to_, + IDisk & disk_, + const MetadataStorageFromDisk & metadata_storage_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -123,6 +142,8 @@ private: std::string path_from; std::string path_to; IDisk & disk; + std::unique_ptr write_operation; + const MetadataStorageFromDisk & metadata_storage; }; @@ -130,7 +151,7 @@ struct MoveFileOperation final : public IMetadataOperation { MoveFileOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -145,7 +166,7 @@ struct MoveDirectoryOperation final : public IMetadataOperation { MoveDirectoryOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -160,7 +181,7 @@ struct ReplaceFileOperation final : public IMetadataOperation { ReplaceFileOperation(const std::string & path_from_, const std::string & path_to_, IDisk & disk_); - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; @@ -173,20 +194,86 @@ private: std::string temp_path_to; }; - -struct WriteFileOperation final : public IMetadataOperation +struct AddBlobOperation final : public IMetadataOperation { - WriteFileOperation(const std::string & path_, IDisk & disk_, const std::string & data_); + AddBlobOperation( + const std::string & path_, + const std::string & blob_name_, + const std::string & root_path_, + uint64_t size_in_bytes_, + IDisk & disk_, + const MetadataStorageFromDisk & metadata_storage_) + : path(path_) + , blob_name(blob_name_) + , root_path(root_path_) + , 
size_in_bytes(size_in_bytes_) + , disk(disk_) + , metadata_storage(metadata_storage_) + {} - void execute() override; + void execute(std::unique_lock & metadata_lock) override; void undo() override; + +private: + std::string path; + std::string blob_name; + std::string root_path; + uint64_t size_in_bytes; + IDisk & disk; + const MetadataStorageFromDisk & metadata_storage; + + std::unique_ptr write_operation; +}; + + +struct UnlinkMetadataFileOperation final : public IMetadataOperation +{ + UnlinkMetadataFileOperation( + const std::string & path_, + IDisk & disk_, + const MetadataStorageFromDisk & metadata_storage_) + : path(path_) + , disk(disk_) + , metadata_storage(metadata_storage_) + { + } + + void execute(std::unique_lock & metadata_lock) override; + + void undo() override; + private: std::string path; IDisk & disk; - std::string data; - bool existed = false; - std::string prev_data; + const MetadataStorageFromDisk & metadata_storage; + + std::unique_ptr write_operation; + std::unique_ptr unlink_operation; +}; + +struct SetReadonlyFileOperation final : public IMetadataOperation +{ + SetReadonlyFileOperation( + const std::string & path_, + IDisk & disk_, + const MetadataStorageFromDisk & metadata_storage_) + : path(path_) + , disk(disk_) + , metadata_storage(metadata_storage_) + { + } + + void execute(std::unique_lock & metadata_lock) override; + + void undo() override; + +private: + std::string path; + IDisk & disk; + const MetadataStorageFromDisk & metadata_storage; + + std::unique_ptr write_operation; }; } diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index ee9111b7a43..d36bf655c02 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -28,6 +28,7 @@ #include #include #include +#include namespace DB { @@ -139,14 +140,7 @@ std::unique_ptr S3ObjectStorage::readObjects( /// NOLINT { assert(!objects[0].getPathKeyForCache().empty()); - ReadSettings disk_read_settings{read_settings}; - if (cache) - { - if (IFileCache::isReadOnly()) - disk_read_settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true; - - disk_read_settings.remote_fs_cache = cache; - } + ReadSettings disk_read_settings = patchSettings(read_settings); auto settings_ptr = s3_settings.get(); @@ -183,10 +177,9 @@ std::unique_ptr S3ObjectStorage::readObject( /// NOLINT object.absolute_path, version_id, settings_ptr->s3_settings.max_single_read_retries, - read_settings); + patchSettings(read_settings)); } - std::unique_ptr S3ObjectStorage::writeObject( /// NOLINT const StoredObject & object, WriteMode mode, // S3 doesn't support append, only rewrite @@ -195,6 +188,8 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN size_t buf_size, const WriteSettings & write_settings) { + WriteSettings disk_write_settings = IObjectStorage::patchSettings(write_settings); + if (mode != WriteMode::Rewrite) throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 doesn't support append to files"); @@ -211,6 +206,7 @@ std::unique_ptr S3ObjectStorage::writeObject( /// NOLIN attributes, buf_size, threadPoolCallbackRunner(getThreadPoolWriter()), + disk_write_settings, cache_on_write ? 
cache : nullptr); @@ -485,6 +481,19 @@ void S3ObjectStorage::copyObject( // NOLINT } } +ReadSettings S3ObjectStorage::patchSettings(const ReadSettings & read_settings) const +{ + ReadSettings settings{read_settings}; + if (cache) + { + if (IFileCache::isReadOnly()) + settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true; + + settings.remote_fs_cache = cache; + } + return IObjectStorage::patchSettings(settings); +} + void S3ObjectStorage::setNewSettings(std::unique_ptr && s3_settings_) { s3_settings.set(std::move(s3_settings_)); @@ -517,6 +526,7 @@ void S3ObjectStorage::applyNewSettings(const Poco::Util::AbstractConfiguration & { s3_settings.set(getSettings(config, config_prefix, context)); client.set(getClient(config, config_prefix, context)); + applyRemoteThrottlingSettings(context); } std::unique_ptr S3ObjectStorage::cloneObjectStorage( diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index ee34f8df8f5..8f20671d841 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -11,6 +11,7 @@ #include #include #include +#include namespace DB @@ -142,6 +143,8 @@ public: String getCacheBasePath() const override; private: + ReadSettings patchSettings(const ReadSettings & read_settings) const; + void setNewSettings(std::unique_ptr && s3_settings_); void setNewClient(std::unique_ptr && client_); diff --git a/src/Formats/CapnProtoUtils.cpp b/src/Formats/CapnProtoUtils.cpp index add5220414f..f0d7ddf6fc3 100644 --- a/src/Formats/CapnProtoUtils.cpp +++ b/src/Formats/CapnProtoUtils.cpp @@ -52,7 +52,7 @@ capnp::StructSchema CapnProtoSchemaParser::getMessageSchema(const FormatSchemaIn if (description.find("Parse error") != String::npos) throw Exception(ErrorCodes::CANNOT_PARSE_CAPN_PROTO_SCHEMA, "Cannot parse CapnProto schema {}:{}", schema_info.schemaPath(), e.getLine()); - throw Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception while parsing CapnProro schema: {}, schema dir and file: {}, {}", description, schema_info.schemaDirectory(), schema_info.schemaPath()); + throw Exception(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception while parsing CapnProto schema: {}, schema dir and file: {}, {}", description, schema_info.schemaDirectory(), schema_info.schemaPath()); } auto message_maybe = schema.findNested(schema_info.messageName()); diff --git a/src/Formats/ProtobufSerializer.cpp b/src/Formats/ProtobufSerializer.cpp index 9006c9276d4..b9af9d61da0 100644 --- a/src/Formats/ProtobufSerializer.cpp +++ b/src/Formats/ProtobufSerializer.cpp @@ -77,18 +77,18 @@ namespace return convertChar(c1) == convertChar(c2); } - static bool equals(const std::string_view & s1, const std::string_view & s2) + static bool equals(std::string_view s1, std::string_view s2) { return (s1.length() == s2.length()) && std::equal(s1.begin(), s1.end(), s2.begin(), [](char c1, char c2) { return convertChar(c1) == convertChar(c2); }); } - static bool less(const std::string_view & s1, const std::string_view & s2) + static bool less(std::string_view s1, std::string_view s2) { return std::lexicographical_compare(s1.begin(), s1.end(), s2.begin(), s2.end(), [](char c1, char c2) { return convertChar(c1) < convertChar(c2); }); } - static bool startsWith(const std::string_view & s1, const std::string_view & s2) + static bool startsWith(std::string_view s1, std::string_view s2) { return (s1.length() >= s2.length()) && equals(s1.substr(0, s2.length()), s2); } @@ -195,7 +195,7 @@ namespace { protected: 
ProtobufSerializerSingleValue( - const std::string_view & column_name_, + std::string_view column_name_, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) : column_name(column_name_) @@ -264,7 +264,7 @@ namespace return reader->readFixed(); } - void writeStr(const std::string_view & str) + void writeStr(std::string_view str) { if (!str.empty() || !skip_zero_or_empty) writer->writeString(field_tag, str); @@ -274,7 +274,7 @@ namespace void readStrAndAppend(PaddedPODArray & str) { reader->readStringAndAppend(str); } template - DestType parseFromStr(const std::string_view & str) const + DestType parseFromStr(std::string_view str) const { try { @@ -307,7 +307,7 @@ namespace return result; } - [[noreturn]] void incompatibleColumnType(const std::string_view & column_type) const + [[noreturn]] void incompatibleColumnType(std::string_view column_type) const { throw Exception( ErrorCodes::DATA_TYPE_INCOMPATIBLE_WITH_PROTOBUF_FIELD, @@ -318,7 +318,7 @@ namespace field_descriptor.type_name()); } - [[noreturn]] void cannotConvertValue(const std::string_view & src_value, const std::string_view & src_type_name, const std::string_view & dest_type_name) const + [[noreturn]] void cannotConvertValue(std::string_view src_value, std::string_view src_type_name, std::string_view dest_type_name) const { throw Exception( "Could not convert value '" + String{src_value} + "' from type " + String{src_type_name} + " to type " @@ -351,7 +351,7 @@ namespace public: using ColumnType = ColumnVector; - ProtobufSerializerNumber(const std::string_view & column_name_, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) + ProtobufSerializerNumber(std::string_view column_name_, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) : ProtobufSerializerSingleValue(column_name_, field_descriptor_, reader_or_writer_) { setFunctions(); @@ -590,7 +590,7 @@ namespace using ColumnType = std::conditional_t; ProtobufSerializerString( - const std::string_view & column_name_, + std::string_view column_name_, const std::shared_ptr & fixed_string_data_type_, const google::protobuf::FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) @@ -604,7 +604,7 @@ namespace } ProtobufSerializerString( - const std::string_view & column_name_, + std::string_view column_name_, const google::protobuf::FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) : ProtobufSerializerSingleValue(column_name_, field_descriptor_, reader_or_writer_) @@ -727,7 +727,7 @@ namespace { case FieldTypeId::TYPE_INT32: { - write_function = [this](const std::string_view & str) { writeInt(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeInt(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readInt(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_int32()); }; break; @@ -735,7 +735,7 @@ namespace case FieldTypeId::TYPE_SINT32: { - write_function = [this](const std::string_view & str) { writeSInt(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeSInt(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readSInt(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_int32()); }; break; @@ -743,7 +743,7 @@ namespace case FieldTypeId::TYPE_UINT32: { - write_function = [this](const 
std::string_view & str) { writeUInt(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeUInt(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readUInt(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_uint32()); }; break; @@ -751,7 +751,7 @@ namespace case FieldTypeId::TYPE_INT64: { - write_function = [this](const std::string_view & str) { writeInt(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeInt(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readInt(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_int64()); }; break; @@ -759,7 +759,7 @@ namespace case FieldTypeId::TYPE_SINT64: { - write_function = [this](const std::string_view & str) { writeSInt(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeSInt(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readSInt(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_int64()); }; break; @@ -767,7 +767,7 @@ namespace case FieldTypeId::TYPE_UINT64: { - write_function = [this](const std::string_view & str) { writeUInt(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeUInt(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readUInt(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_uint64()); }; break; @@ -775,7 +775,7 @@ namespace case FieldTypeId::TYPE_FIXED32: { - write_function = [this](const std::string_view & str) { writeFixed(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeFixed(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readFixed(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_uint32()); }; break; @@ -783,7 +783,7 @@ namespace case FieldTypeId::TYPE_SFIXED32: { - write_function = [this](const std::string_view & str) { writeFixed(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeFixed(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readFixed(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_int32()); }; break; @@ -791,7 +791,7 @@ namespace case FieldTypeId::TYPE_FIXED64: { - write_function = [this](const std::string_view & str) { writeFixed(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeFixed(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readFixed(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_uint64()); }; break; @@ -799,7 +799,7 @@ namespace case FieldTypeId::TYPE_SFIXED64: { - write_function = [this](const std::string_view & str) { writeFixed(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeFixed(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readFixed(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_int64()); }; break; @@ -807,7 +807,7 @@ namespace case FieldTypeId::TYPE_FLOAT: { - write_function = [this](const std::string_view & str) { writeFixed(parseFromStr(str)); }; + write_function = [this](std::string_view str) { 
writeFixed(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readFixed(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_float()); }; break; @@ -815,7 +815,7 @@ namespace case FieldTypeId::TYPE_DOUBLE: { - write_function = [this](const std::string_view & str) { writeFixed(parseFromStr(str)); }; + write_function = [this](std::string_view str) { writeFixed(parseFromStr(str)); }; read_function = [this](PaddedPODArray & str) { toStringAppend(readFixed(), str); }; default_function = [this]() -> String { return toString(field_descriptor.default_value_double()); }; break; @@ -823,7 +823,7 @@ namespace case FieldTypeId::TYPE_BOOL: { - write_function = [this](const std::string_view & str) + write_function = [this](std::string_view str) { if (str == "true") writeUInt(1); @@ -855,7 +855,7 @@ namespace case FieldTypeId::TYPE_STRING: case FieldTypeId::TYPE_BYTES: { - write_function = [this](const std::string_view & str) { writeStr(str); }; + write_function = [this](std::string_view str) { writeStr(str); }; read_function = [this](PaddedPODArray & str) { readStrAndAppend(str); }; default_function = [this]() -> String { return field_descriptor.default_value_string(); }; break; @@ -863,7 +863,7 @@ namespace case FieldTypeId::TYPE_ENUM: { - write_function = [this](const std::string_view & str) { writeInt(stringToProtobufEnumValue(str)); }; + write_function = [this](std::string_view str) { writeInt(stringToProtobufEnumValue(str)); }; read_function = [this](PaddedPODArray & str) { protobufEnumValueToStringAppend(readInt(), str); }; default_function = [this]() -> String { return field_descriptor.default_value_enum()->name(); }; break; @@ -908,7 +908,7 @@ namespace } } - int stringToProtobufEnumValue(const std::string_view & str) const + int stringToProtobufEnumValue(std::string_view str) const { auto it = string_to_protobuf_enum_value_map.find(str); if (it == string_to_protobuf_enum_value_map.end()) @@ -932,7 +932,7 @@ namespace const std::shared_ptr fixed_string_data_type; const size_t n = 0; - std::function write_function; + std::function write_function; std::function &)> read_function; std::function default_function; std::unordered_map string_to_protobuf_enum_value_map; @@ -953,7 +953,7 @@ namespace using BaseClass = ProtobufSerializerNumber; ProtobufSerializerEnum( - const std::string_view & column_name_, + std::string_view column_name_, const std::shared_ptr & enum_data_type_, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) @@ -1067,7 +1067,7 @@ namespace protobuf_enum_value_to_enum_data_type_value_map.emplace(protobuf_enum_value, enum_data_type_value); }; - auto iless = [](const std::string_view & s1, const std::string_view & s2) { return ColumnNameWithProtobufFieldNameComparator::less(s1, s2); }; + auto iless = [](std::string_view s1, std::string_view s2) { return ColumnNameWithProtobufFieldNameComparator::less(s1, s2); }; boost::container::flat_map string_to_protobuf_enum_value_map; typename decltype(string_to_protobuf_enum_value_map)::sequence_type string_to_protobuf_enum_value_seq; for (int i : collections::range(enum_descriptor.value_count())) @@ -1133,9 +1133,9 @@ namespace Int64 readInt() { return ProtobufSerializerSingleValue::readInt(); } void writeInt(Int64 value) { ProtobufSerializerSingleValue::writeInt(value); } - void writeStr(const std::string_view & str) { ProtobufSerializerSingleValue::writeStr(str); } + void writeStr(std::string_view str) { 
ProtobufSerializerSingleValue::writeStr(str); } void readStr(String & str) { ProtobufSerializerSingleValue::readStr(str); } - [[noreturn]] void cannotConvertValue(const std::string_view & src_value, const std::string_view & src_type_name, const std::string_view & dest_type_name) const { ProtobufSerializerSingleValue::cannotConvertValue(src_value, src_type_name, dest_type_name); } + [[noreturn]] void cannotConvertValue(std::string_view src_value, std::string_view src_type_name, std::string_view dest_type_name) const { ProtobufSerializerSingleValue::cannotConvertValue(src_value, src_type_name, dest_type_name); } const std::shared_ptr enum_data_type; std::unordered_map enum_data_type_value_to_protobuf_enum_value_map; @@ -1152,7 +1152,7 @@ namespace using ColumnType = ColumnDecimal; ProtobufSerializerDecimal( - const std::string_view & column_name_, + std::string_view column_name_, const DataTypeDecimalBase & decimal_data_type_, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) @@ -1412,7 +1412,7 @@ namespace { public: ProtobufSerializerDate( - const std::string_view & column_name_, + std::string_view column_name_, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) : ProtobufSerializerNumber(column_name_, field_descriptor_, reader_or_writer_) @@ -1490,7 +1490,7 @@ namespace { public: ProtobufSerializerDateTime( - const std::string_view & column_name_, + std::string_view column_name_, const DataTypeDateTime & type, const FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) @@ -1574,7 +1574,7 @@ namespace { public: ProtobufSerializerUUID( - const std::string_view & column_name_, + std::string_view column_name_, const google::protobuf::FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) : ProtobufSerializerSingleValue(column_name_, field_descriptor_, reader_or_writer_) @@ -1654,7 +1654,7 @@ namespace { public: ProtobufSerializerAggregateFunction( - const std::string_view & column_name_, + std::string_view column_name_, const std::shared_ptr & aggregate_function_data_type_, const google::protobuf::FieldDescriptor & field_descriptor_, const ProtobufReaderOrWriter & reader_or_writer_) @@ -2061,7 +2061,7 @@ namespace { public: ProtobufSerializerTupleAsArray( - const std::string_view & column_name_, + std::string_view column_name_, const std::shared_ptr & tuple_data_type_, const FieldDescriptor & field_descriptor_, std::vector> element_serializers_) @@ -2833,7 +2833,7 @@ namespace return field_names; } - static bool columnNameEqualsToFieldName(const std::string_view & column_name, const FieldDescriptor & field_descriptor) + static bool columnNameEqualsToFieldName(std::string_view column_name, const FieldDescriptor & field_descriptor) { std::string_view suffix; return columnNameStartsWithFieldName(column_name, field_descriptor, suffix) && suffix.empty(); @@ -2844,7 +2844,7 @@ namespace /// which doesn't match to the field's name. /// The function requires that rest part of the column's name to be started with a dot '.' or underline '_', /// but doesn't include those '.' or '_' characters into `suffix`. 
- static bool columnNameStartsWithFieldName(const std::string_view & column_name, const FieldDescriptor & field_descriptor, std::string_view & suffix) + static bool columnNameStartsWithFieldName(std::string_view column_name, const FieldDescriptor & field_descriptor, std::string_view & suffix) { size_t matching_length = 0; const MessageDescriptor & containing_type = *field_descriptor.containing_type(); @@ -2887,7 +2887,7 @@ namespace /// for that case suffixes are also returned. /// This is only the first filter, buildMessageSerializerImpl() does other checks after calling this function. static bool findFieldsByColumnName( - const std::string_view & column_name, + std::string_view column_name, const MessageDescriptor & message_descriptor, std::vector> & out_field_descriptors_with_suffixes, bool google_wrappers_special_treatment) @@ -3030,7 +3030,7 @@ namespace used_column_indices_sorted.reserve(num_columns); size_t sequential_column_index = 0; - auto add_field_serializer = [&](const std::string_view & column_name_, + auto add_field_serializer = [&](std::string_view column_name_, std::vector && column_indices_, const FieldDescriptor & field_descriptor_, std::unique_ptr field_serializer_) @@ -3243,7 +3243,7 @@ namespace /// Builds a serializer for one-to-one match: /// one column is serialized as one field in the protobuf message. std::unique_ptr buildFieldSerializer( - const std::string_view & column_name, + std::string_view column_name, const DataTypePtr & data_type, const FieldDescriptor & field_descriptor, bool allow_repeat, @@ -3395,7 +3395,7 @@ namespace } } - [[noreturn]] static void throwFieldNotRepeated(const FieldDescriptor & field_descriptor, const std::string_view & column_name) + [[noreturn]] static void throwFieldNotRepeated(const FieldDescriptor & field_descriptor, std::string_view column_name) { if (!field_descriptor.is_repeated()) throw Exception( diff --git a/src/Formats/ProtobufWriter.cpp b/src/Formats/ProtobufWriter.cpp index ece4f78b1c8..da680fae601 100644 --- a/src/Formats/ProtobufWriter.cpp +++ b/src/Formats/ProtobufWriter.cpp @@ -196,7 +196,7 @@ template void ProtobufWriter::writeFixed(int field_number, UInt64 value) template void ProtobufWriter::writeFixed(int field_number, Float32 value); template void ProtobufWriter::writeFixed(int field_number, Float64 value); -void ProtobufWriter::writeString(int field_number, const std::string_view & str) +void ProtobufWriter::writeString(int field_number, std::string_view str) { size_t length = str.length(); size_t old_size = buffer.size(); diff --git a/src/Formats/ProtobufWriter.h b/src/Formats/ProtobufWriter.h index 1dcc8f4ef7c..3ede956e910 100644 --- a/src/Formats/ProtobufWriter.h +++ b/src/Formats/ProtobufWriter.h @@ -30,7 +30,7 @@ public: void writeSInt(int field_number, Int64 value); template void writeFixed(int field_number, T value); - void writeString(int field_number, const std::string_view & str); + void writeString(int field_number, std::string_view str); void startRepeatedPack(); void endRepeatedPack(int field_number, bool skip_if_empty); diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index 11a91bd50dc..058f9b7059b 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -66,7 +66,7 @@ ColumnsDescription readSchemaFromFormat( } catch (const DB::Exception & e) { - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file. 
Error: {}", format_name, e.message()); + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file. Error: {}. You can specify the structure manually", format_name, e.message()); } } else if (FormatFactory::instance().checkIfFormatHasSchemaReader(format_name)) @@ -75,16 +75,29 @@ ColumnsDescription readSchemaFromFormat( SchemaReaderPtr schema_reader; size_t max_rows_to_read = format_settings ? format_settings->max_rows_to_read_for_schema_inference : context->getSettingsRef().input_format_max_rows_to_read_for_schema_inference; size_t iterations = 0; - while ((buf = read_buffer_iterator())) + while (true) { + bool is_eof = false; + try + { + buf = read_buffer_iterator(); + if (!buf) + break; + is_eof = buf->eof(); + } + catch (...) + { + auto exception_message = getCurrentExceptionMessage(false); + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file: {}. You can specify the structure manually", format_name, exception_message); + } ++iterations; - if (buf->eof()) + if (is_eof) { auto exception_message = fmt::format("Cannot extract table structure from {} format file, file is empty", format_name); if (!retry) - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, exception_message); + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "{}. You can specify the structure manually", exception_message); exception_messages += "\n" + exception_message; continue; @@ -118,14 +131,14 @@ ColumnsDescription readSchemaFromFormat( } if (!retry || !isRetryableSchemaInferenceError(getCurrentExceptionCode())) - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file. Error: {}", format_name, exception_message); + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "Cannot extract table structure from {} format file. Error: {}. You can specify the structure manually", format_name, exception_message); exception_messages += "\n" + exception_message; } } if (names_and_types.empty()) - throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "All attempts to extract table structure from files failed. Errors:{}", exception_messages); + throw Exception(ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE, "All attempts to extract table structure from files failed. 
Errors:{}\nYou can specify the structure manually", exception_messages); /// If we have "INSERT SELECT" query then try to order /// columns as they are ordered in table schema for formats diff --git a/src/Functions/CustomWeekTransforms.h b/src/Functions/CustomWeekTransforms.h index 8656f9da927..c296c8228b1 100644 --- a/src/Functions/CustomWeekTransforms.h +++ b/src/Functions/CustomWeekTransforms.h @@ -63,12 +63,10 @@ struct ToStartOfWeekImpl static inline UInt16 execute(Int64 t, UInt8 week_mode, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); -// return time_zone.toFirstDayNumOfWeek(t, week_mode); } static inline UInt16 execute(UInt32 t, UInt8 week_mode, const DateLUTImpl & time_zone) { return time_zone.toFirstDayNumOfWeek(time_zone.toDayNum(t), week_mode); -// return time_zone.toFirstDayNumOfWeek(t, week_mode); } static inline UInt16 execute(Int32 d, UInt8 week_mode, const DateLUTImpl & time_zone) { diff --git a/src/Functions/FunctionFile.cpp b/src/Functions/FunctionFile.cpp index 01314b52119..c85051e61c0 100644 --- a/src/Functions/FunctionFile.cpp +++ b/src/Functions/FunctionFile.cpp @@ -67,8 +67,8 @@ public: for (size_t row = 0; row < input_rows_count; ++row) { - StringRef filename = column_src->getDataAt(row); - fs::path file_path(filename.data, filename.data + filename.size); + std::string_view filename = column_src->getDataAt(row).toView(); + fs::path file_path(filename.data(), filename.data() + filename.size()); if (file_path.is_relative()) file_path = user_files_absolute_path / file_path; diff --git a/src/Functions/FunctionsAES.cpp b/src/Functions/FunctionsAES.cpp index a2dc7e40489..9ef07e2747d 100644 --- a/src/Functions/FunctionsAES.cpp +++ b/src/Functions/FunctionsAES.cpp @@ -25,7 +25,7 @@ void onError(std::string error_message) throw DB::Exception(error_message, DB::ErrorCodes::OPENSSL_ERROR); } -StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, const StringRef & key, std::array & folded_key) +StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, StringRef key, std::array & folded_key) { assert(cipher_key_size <= EVP_MAX_KEY_LENGTH); memcpy(folded_key.data(), key.data, cipher_key_size); @@ -38,7 +38,7 @@ StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, const return StringRef(folded_key.data(), cipher_key_size); } -const EVP_CIPHER * getCipherByName(const StringRef & cipher_name) +const EVP_CIPHER * getCipherByName(StringRef cipher_name) { // NOTE: cipher obtained not via EVP_CIPHER_fetch() would cause extra work on each context reset // with EVP_CIPHER_CTX_reset() or EVP_EncryptInit_ex(), but using EVP_CIPHER_fetch() diff --git a/src/Functions/FunctionsAES.h b/src/Functions/FunctionsAES.h index d3796081f18..b12fcc00014 100644 --- a/src/Functions/FunctionsAES.h +++ b/src/Functions/FunctionsAES.h @@ -32,9 +32,9 @@ namespace ErrorCodes namespace OpenSSLDetails { [[noreturn]] void onError(std::string error_message); -StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, const StringRef & key, std::array & folded_key); +StringRef foldEncryptionKeyInMySQLCompatitableMode(size_t cipher_key_size, StringRef key, std::array & folded_key); -const EVP_CIPHER * getCipherByName(const StringRef & name); +const EVP_CIPHER * getCipherByName(StringRef name); enum class CompatibilityMode { @@ -53,7 +53,7 @@ enum class CipherMode template struct KeyHolder { - inline StringRef setKey(size_t cipher_key_size, const StringRef & key) const + inline 
StringRef setKey(size_t cipher_key_size, StringRef key) const { if (key.size != cipher_key_size) throw DB::Exception(fmt::format("Invalid key size: {} expected {}", key.size, cipher_key_size), @@ -66,7 +66,7 @@ struct KeyHolder template <> struct KeyHolder { - inline StringRef setKey(size_t cipher_key_size, const StringRef & key) + inline StringRef setKey(size_t cipher_key_size, StringRef key) { if (key.size < cipher_key_size) throw DB::Exception(fmt::format("Invalid key size: {} expected {}", key.size, cipher_key_size), @@ -120,7 +120,7 @@ inline void validateCipherMode(const EVP_CIPHER * evp_cipher) } template -inline void validateIV(const StringRef & iv_value, const size_t cipher_iv_size) +inline void validateIV(StringRef iv_value, const size_t cipher_iv_size) { // In MySQL mode we don't care if IV is longer than expected, only if shorter. if ((mode == CipherMode::MySQLCompatibility && iv_value.size != 0 && iv_value.size < cipher_iv_size) @@ -182,7 +182,7 @@ private: const auto mode = arguments[0].column->getDataAt(0); - if (mode.size == 0 || !std::string_view(mode).starts_with("aes-")) + if (mode.size == 0 || !mode.toView().starts_with("aes-")) throw Exception("Invalid mode: " + mode.toString(), ErrorCodes::BAD_ARGUMENTS); const auto * evp_cipher = getCipherByName(mode); @@ -453,7 +453,7 @@ private: using namespace OpenSSLDetails; const auto mode = arguments[0].column->getDataAt(0); - if (mode.size == 0 || !std::string_view(mode).starts_with("aes-")) + if (mode.size == 0 || !mode.toView().starts_with("aes-")) throw Exception("Invalid mode: " + mode.toString(), ErrorCodes::BAD_ARGUMENTS); const auto * evp_cipher = getCipherByName(mode); diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index e0c42401207..b666602e366 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -1091,8 +1091,6 @@ struct ConvertThroughParsing static constexpr bool to_datetime64 = std::is_same_v; - // using ToFieldType = typename ToDataType::FieldType; - static bool isAllRead(ReadBuffer & in) { /// In case of FixedString, skip zero bytes at end. diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index fa573ac829a..9ab27c2976a 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -251,7 +251,7 @@ private: } case MoveType::Key: { - key = std::string_view{(*arguments[j + 1].column).getDataAt(row)}; + key = (*arguments[j + 1].column).getDataAt(row).toView(); if (!moveToElementByKey(res_element, key)) return false; break; @@ -304,7 +304,7 @@ private: /// Performs moves of types MoveType::Key and MoveType::ConstKey. 
template - static bool moveToElementByKey(typename JSONParser::Element & element, const std::string_view & key) + static bool moveToElementByKey(typename JSONParser::Element & element, std::string_view key) { if (!element.isObject()) return false; @@ -504,7 +504,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element &, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element &, std::string_view) { ColumnVector & col_vec = assert_cast &>(dest); col_vec.insertValue(1); @@ -532,7 +532,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName &) { return 0; } - static bool insertResultToColumn(IColumn & dest, const Element &, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element &, std::string_view) { /// This function is called only if JSON is valid. /// If JSON isn't valid then `FunctionJSON::Executor::run()` adds default value (=zero) to `dest` without calling this function. @@ -556,7 +556,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { size_t size; if (element.isArray()) @@ -586,7 +586,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element &, const std::string_view & last_key) + static bool insertResultToColumn(IColumn & dest, const Element &, std::string_view last_key) { if (last_key.empty()) return false; @@ -620,7 +620,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { UInt8 type; if (element.isInt64()) @@ -662,7 +662,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { NumberType value; @@ -737,7 +737,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { if (!element.isBool()) return false; @@ -764,7 +764,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { if (element.isNull()) return false; @@ -1164,7 +1164,7 @@ public: extract_tree = JSONExtractTree::build(function_name, result_type); } - bool insertResultToColumn(IColumn & dest, const Element & element, const 
std::string_view &) + bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { return extract_tree->insertResultToColumn(dest, element); } @@ -1207,7 +1207,7 @@ public: extract_tree = JSONExtractTree::build(function_name, value_type); } - bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { if (!element.isObject()) return false; @@ -1251,7 +1251,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { ColumnString & col_str = assert_cast(dest); auto & chars = col_str.getChars(); @@ -1355,7 +1355,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - static bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + static bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { if (!element.isArray()) return false; @@ -1387,7 +1387,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { if (!element.isObject()) return false; @@ -1423,7 +1423,7 @@ public: static size_t getNumberOfIndexArguments(const ColumnsWithTypeAndName & arguments) { return arguments.size() - 1; } - bool insertResultToColumn(IColumn & dest, const Element & element, const std::string_view &) + bool insertResultToColumn(IColumn & dest, const Element & element, std::string_view) { if (!element.isObject()) return false; diff --git a/src/Functions/FunctionsMultiStringFuzzySearch.h b/src/Functions/FunctionsMultiStringFuzzySearch.h index 26cafa7d410..f6ec642fd9b 100644 --- a/src/Functions/FunctionsMultiStringFuzzySearch.h +++ b/src/Functions/FunctionsMultiStringFuzzySearch.h @@ -23,6 +23,12 @@ namespace ErrorCodes extern const int ILLEGAL_COLUMN; } +/** + * multiFuzzyMatchAny(haystack, [pattern_1, pattern_2, ..., pattern_n]) + * multiFuzzyMatchAnyIndex(haystack, [pattern_1, pattern_2, ..., pattern_n]) + * multiFuzzyMatchAllIndices(haystack, [pattern_1, pattern_2, ..., pattern_n]) + * + */ template class FunctionsMultiStringFuzzySearch : public IFunction @@ -99,23 +105,19 @@ public: /// the implementations are responsible for resizing the output column if (col_needles_const) - { Impl::vectorConstant( col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_const->getValue(), vec_res, offsets_res, edit_distance, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); - } else - { Impl::vectorVector( col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_vector->getData(), col_needles_vector->getOffsets(), vec_res, offsets_res, edit_distance, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); - } // the combination of const haystack + const needle is not implemented because // useDefaultImplementationForConstants() == true makes upper layers convert both to diff --git a/src/Functions/FunctionsMultiStringPosition.h 
b/src/Functions/FunctionsMultiStringPosition.h index 855b5448b87..3cbbe21f96e 100644 --- a/src/Functions/FunctionsMultiStringPosition.h +++ b/src/Functions/FunctionsMultiStringPosition.h @@ -12,117 +12,90 @@ #include #include #include -#include namespace DB { -/** multiSearchAllPositions(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find first occurrences (positions) of all the const patterns inside haystack +/** + * multiSearchAllPositions(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find first occurrences (positions) of all the const patterns inside haystack * multiSearchAllPositionsUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) * multiSearchAllPositionsCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n]) * multiSearchAllPositionsCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * - * multiSearchFirstPosition(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- returns the first position of the haystack matched by strings or zero if nothing was found - * multiSearchFirstPositionUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * multiSearchFirstPositionCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * multiSearchFirstPositionCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * - * multiSearchAny(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- find any of the const patterns inside haystack and return 0 or 1 - * multiSearchAnyUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * multiSearchAnyCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * multiSearchAnyCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) - - * multiSearchFirstIndex(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- returns the first index of the matched string or zero if nothing was found - * multiSearchFirstIndexUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * multiSearchFirstIndexCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n]) - * multiSearchFirstIndexCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) */ namespace ErrorCodes { - extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_COLUMN; - extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -template +template class FunctionsMultiStringPosition : public IFunction { public: - static constexpr auto name = Name::name; + static constexpr auto name = Impl::name; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } - String getName() const override { return name; } - size_t getNumberOfArguments() const override { return 2; } bool useDefaultImplementationForConstants() const override { return true; } - ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override { if (!isString(arguments[0])) throw Exception( - "Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of function {}", arguments[0]->getName(), getName()); const DataTypeArray * array_type = checkAndGetDataType(arguments[1].get()); if (!array_type || !checkAndGetDataType(array_type->getNestedType().get())) throw Exception( - "Illegal type " + arguments[1]->getName() + " of argument 
of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "Illegal type {} of argument of function {}", arguments[1]->getName(), getName()); return std::make_shared(std::make_shared()); } ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override { + const ColumnPtr & haystack_ptr = arguments[0].column; + const ColumnPtr & needles_ptr = arguments[1].column; + + const ColumnString * col_haystack_vector = checkAndGetColumn(&*haystack_ptr); + const ColumnConst * col_haystack_const = checkAndGetColumnConst(&*haystack_ptr); + assert(static_cast(col_haystack_vector) ^ static_cast(col_haystack_const)); + + const ColumnArray * col_needles_vector = checkAndGetColumn(needles_ptr.get()); + const ColumnConst * col_needles_const = checkAndGetColumnConst(needles_ptr.get()); + assert(static_cast(col_needles_vector) ^ static_cast(col_needles_const)); + + if (col_haystack_const && col_needles_vector) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Function '{}' doesn't support search with non-constant needles in constant haystack", name); + using ResultType = typename Impl::ResultType; - - const ColumnPtr & column_haystack = arguments[0].column; - - const ColumnString * col_haystack_vector = checkAndGetColumn(&*column_haystack); - - const ColumnPtr & arr_ptr = arguments[1].column; - const ColumnConst * col_const_arr = checkAndGetColumnConst(arr_ptr.get()); - - if (!col_const_arr) - throw Exception( - "Illegal column " + arguments[1].column->getName() + ". The array is not const", - ErrorCodes::ILLEGAL_COLUMN); - - Array src_arr = col_const_arr->getValue(); - - if (src_arr.size() > std::numeric_limits::max()) - throw Exception( - "Number of arguments for function " + getName() + " doesn't match: passed " + std::to_string(src_arr.size()) - + ", should be at most 255", - ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); - - std::vector refs; - refs.reserve(src_arr.size()); - for (const auto & el : src_arr) - refs.emplace_back(el.get()); - - const size_t column_haystack_size = column_haystack->size(); - auto col_res = ColumnVector::create(); - auto col_offsets = ColumnArray::ColumnOffsets::create(column_haystack_size); + auto col_offsets = ColumnArray::ColumnOffsets::create(); auto & vec_res = col_res->getData(); auto & offsets_res = col_offsets->getData(); + /// the implementations are responsible for resizing the output column - vec_res.resize(column_haystack_size * refs.size()); - - if (col_haystack_vector) - Impl::vectorConstant(col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), refs, vec_res); + if (col_needles_const) + Impl::vectorConstant( + col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), + col_needles_const->getValue(), + vec_res, offsets_res); else - throw Exception("Illegal column " + arguments[0].column->getName(), ErrorCodes::ILLEGAL_COLUMN); + Impl::vectorVector( + col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), + col_needles_vector->getData(), col_needles_vector->getOffsets(), + vec_res, offsets_res); - size_t refs_size = refs.size(); - size_t accum = refs_size; - - for (size_t i = 0; i < column_haystack_size; ++i, accum += refs_size) - offsets_res[i] = accum; + // the combination of const haystack + const needle is not implemented because + // useDefaultImplementationForConstants() == true makes upper layers convert both to + // non-const columns return ColumnArray::create(std::move(col_res), std::move(col_offsets)); } diff 
--git a/src/Functions/FunctionsMultiStringSearch.h b/src/Functions/FunctionsMultiStringSearch.h index fb800448bfa..2465567b883 100644 --- a/src/Functions/FunctionsMultiStringSearch.h +++ b/src/Functions/FunctionsMultiStringSearch.h @@ -30,6 +30,11 @@ namespace DB * multiSearchFirstIndexUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) * multiSearchFirstIndexCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n]) * multiSearchFirstIndexCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) + * + * multiSearchFirstPosition(haystack, [pattern_1, pattern_2, ..., pattern_n]) -- returns the leftmost offset of the matched string or zero if nothing was found + * multiSearchFirstPositionUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) + * multiSearchFirstPositionCaseInsensitive(haystack, [pattern_1, pattern_2, ..., pattern_n]) + * multiSearchFirstPositionCaseInsensitiveUTF8(haystack, [pattern_1, pattern_2, ..., pattern_n]) */ namespace ErrorCodes @@ -99,21 +104,17 @@ public: /// the implementations are responsible for resizing the output column if (col_needles_const) - { Impl::vectorConstant( col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_const->getValue(), vec_res, offsets_res, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); - } else - { Impl::vectorVector( col_haystack_vector->getChars(), col_haystack_vector->getOffsets(), col_needles_vector->getData(), col_needles_vector->getOffsets(), vec_res, offsets_res, allow_hyperscan, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); - } // the combination of const haystack + const needle is not implemented because // useDefaultImplementationForConstants() == true makes upper layers convert both to diff --git a/src/Functions/MultiSearchAllPositionsImpl.h b/src/Functions/MultiSearchAllPositionsImpl.h index 4356d6110f1..6e1f13d87b6 100644 --- a/src/Functions/MultiSearchAllPositionsImpl.h +++ b/src/Functions/MultiSearchAllPositionsImpl.h @@ -7,17 +7,34 @@ namespace DB { -template +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; +} + +template struct MultiSearchAllPositionsImpl { using ResultType = UInt64; + static constexpr auto name = Name::name; static void vectorConstant( const ColumnString::Chars & haystack_data, const ColumnString::Offsets & haystack_offsets, - const std::vector & needles, - PaddedPODArray & res) + const Array & needles_arr, + PaddedPODArray & vec_res, + PaddedPODArray & offsets_res) { + if (needles_arr.size() > std::numeric_limits::max()) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be at most 255", name, needles_arr.size()); + + std::vector needles; + needles.reserve(needles_arr.size()); + for (const auto & needle : needles_arr) + needles.emplace_back(needle.get()); + auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 { return 1 + Impl::countChars(reinterpret_cast(start), reinterpret_cast(end)); @@ -25,23 +42,96 @@ struct MultiSearchAllPositionsImpl auto searcher = Impl::createMultiSearcherInBigHaystack(needles); - const size_t haystack_string_size = haystack_offsets.size(); + const size_t haystack_size = haystack_offsets.size(); const size_t needles_size = needles.size(); + vec_res.resize(haystack_size * needles.size()); + offsets_res.resize(haystack_size); + /// Something can be uninitialized after the search itself - std::fill(res.begin(), res.end(), 0); + 
std::fill(vec_res.begin(), vec_res.end(), 0); while (searcher.hasMoreToSearch()) { - size_t prev_offset = 0; - for (size_t j = 0, from = 0; j < haystack_string_size; ++j, from += needles_size) + size_t prev_haystack_offset = 0; + for (size_t j = 0, from = 0; j < haystack_size; ++j, from += needles_size) { - const auto * haystack = &haystack_data[prev_offset]; - const auto * haystack_end = haystack + haystack_offsets[j] - prev_offset - 1; - searcher.searchOneAll(haystack, haystack_end, res.data() + from, res_callback); - prev_offset = haystack_offsets[j]; + const auto * haystack = &haystack_data[prev_haystack_offset]; + const auto * haystack_end = haystack + haystack_offsets[j] - prev_haystack_offset - 1; + searcher.searchOneAll(haystack, haystack_end, vec_res.begin() + from, res_callback); + prev_haystack_offset = haystack_offsets[j]; } } + + size_t accum = needles_size; + for (size_t i = 0; i < haystack_size; ++i) + { + offsets_res[i] = accum; + accum += needles_size; + } + } + + static void vectorVector( + const ColumnString::Chars & haystack_data, + const ColumnString::Offsets & haystack_offsets, + const IColumn & needles_data, + const ColumnArray::Offsets & needles_offsets, + PaddedPODArray & vec_res, + PaddedPODArray & offsets_res) + { + size_t prev_haystack_offset = 0; + size_t prev_needles_offset = 0; + + auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 + { + return 1 + Impl::countChars(reinterpret_cast(start), reinterpret_cast(end)); + }; + + offsets_res.reserve(haystack_offsets.size()); + + const ColumnString * needles_data_string = checkAndGetColumn(&needles_data); + + std::vector needles; + + for (size_t i = 0; i < haystack_offsets.size(); ++i) + { + needles.reserve(needles_offsets[i] - prev_needles_offset); + + for (size_t j = prev_needles_offset; j < needles_offsets[i]; ++j) + { + needles.emplace_back(needles_data_string->getDataAt(j).toView()); + } + + const size_t needles_size = needles.size(); + + if (needles_size > std::numeric_limits::max()) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be at most 255", name, needles_size); + + vec_res.resize(vec_res.size() + needles_size); + + auto searcher = Impl::createMultiSearcherInBigHaystack(needles); /// sub-optimal + + /// Something can be uninitialized after the search itself + std::fill(vec_res.begin() + vec_res.size() - needles_size, vec_res.end(), 0); + + while (searcher.hasMoreToSearch()) + { + const auto * haystack = &haystack_data[prev_haystack_offset]; + const auto * haystack_end = haystack + haystack_offsets[i] - prev_haystack_offset - 1; + searcher.searchOneAll(haystack, haystack_end, vec_res.begin() + vec_res.size() - needles_size, res_callback); + } + + if (offsets_res.empty()) + offsets_res.push_back(needles_size); + else + offsets_res.push_back(offsets_res.back() + needles_size); + + prev_haystack_offset = haystack_offsets[i]; + prev_needles_offset = needles_offsets[i]; + needles.clear(); + } } }; diff --git a/src/Functions/MultiSearchImpl.h b/src/Functions/MultiSearchImpl.h index b9ce0293234..d42c2ca43e4 100644 --- a/src/Functions/MultiSearchImpl.h +++ b/src/Functions/MultiSearchImpl.h @@ -38,7 +38,7 @@ struct MultiSearchImpl if (needles_arr.size() > std::numeric_limits::max()) throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Number of arguments for function {} doesn't match: passed {}, should be at most {}", - name, std::to_string(needles_arr.size()), 
std::to_string(std::numeric_limits::max())); + name, needles_arr.size(), std::to_string(std::numeric_limits::max())); std::vector needles; needles.reserve(needles_arr.size()); diff --git a/src/Functions/URL/domain.h b/src/Functions/URL/domain.h index 18efe969216..1245bb20182 100644 --- a/src/Functions/URL/domain.h +++ b/src/Functions/URL/domain.h @@ -8,22 +8,22 @@ namespace DB { -inline StringRef checkAndReturnHost(const Pos & pos, const Pos & dot_pos, const Pos & start_of_host) +inline std::string_view checkAndReturnHost(const Pos & pos, const Pos & dot_pos, const Pos & start_of_host) { if (!dot_pos || start_of_host >= pos || pos - dot_pos == 1) - return StringRef{}; + return std::string_view{}; auto after_dot = *(dot_pos + 1); if (after_dot == ':' || after_dot == '/' || after_dot == '?' || after_dot == '#') - return StringRef{}; + return std::string_view{}; - return StringRef(start_of_host, pos - start_of_host); + return std::string_view(start_of_host, pos - start_of_host); } /// Extracts host from given url. /// -/// @return empty StringRef if the host is not valid (i.e. it does not have dot, or there no symbol after dot). -inline StringRef getURLHost(const char * data, size_t size) +/// @return empty string view if the host is not valid (i.e. it does not have dot, or there no symbol after dot). +inline std::string_view getURLHost(const char * data, size_t size) { Pos pos = data; Pos end = data + size; @@ -61,7 +61,7 @@ inline StringRef getURLHost(const char * data, size_t size) case ';': case '=': case '&': - return StringRef{}; + return std::string_view{}; default: goto exloop; } @@ -106,7 +106,7 @@ exloop: if ((scheme_end - pos) > 2 && *pos == ':' && *(pos + 1) == '/' && *(pos case ';': case '=': case '&': - return StringRef{}; + return std::string_view{}; } } @@ -120,20 +120,20 @@ struct ExtractDomain static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) { - StringRef host = getURLHost(data, size); + std::string_view host = getURLHost(data, size); - if (host.size == 0) + if (host.empty()) { res_data = data; res_size = 0; } else { - if (without_www && host.size > 4 && !strncmp(host.data, "www.", 4)) - host = { host.data + 4, host.size - 4 }; + if (without_www && host.size() > 4 && !strncmp(host.data(), "www.", 4)) + host = { host.data() + 4, host.size() - 4 }; - res_data = host.data; - res_size = host.size; + res_data = host.data(); + res_size = host.size(); } } }; diff --git a/src/Functions/URL/netloc.cpp b/src/Functions/URL/netloc.cpp index cb28083f4c7..723eea138c3 100644 --- a/src/Functions/URL/netloc.cpp +++ b/src/Functions/URL/netloc.cpp @@ -12,7 +12,7 @@ struct ExtractNetloc /// We use the same as domain function static size_t getReserveLengthForElement() { return 15; } - static inline StringRef getNetworkLocation(const char * data, size_t size) + static std::string_view getNetworkLocation(const char * data, size_t size) { Pos pos = data; Pos end = data + size; @@ -51,7 +51,7 @@ struct ExtractNetloc case ';': case '=': case '&': - return StringRef{}; + return std::string_view(); default: goto exloop; } @@ -76,18 +76,18 @@ struct ExtractNetloc { case '/': if (has_identification) - return StringRef(start_of_host, pos - start_of_host); + return std::string_view(start_of_host, pos - start_of_host); else slash_pos = pos; break; case '?': if (has_identification) - return StringRef(start_of_host, pos - start_of_host); + return std::string_view(start_of_host, pos - start_of_host); else question_mark_pos = pos; break; case '#': - return StringRef(start_of_host, 
pos - start_of_host); + return std::string_view(start_of_host, pos - start_of_host); case '@': /// foo:bar@example.ru has_identification = true; break; @@ -108,23 +108,23 @@ struct ExtractNetloc case '=': case '&': return pos > start_of_host - ? StringRef(start_of_host, std::min(std::min(pos - 1, question_mark_pos), slash_pos) - start_of_host) - : StringRef{}; + ? std::string_view(start_of_host, std::min(std::min(pos - 1, question_mark_pos), slash_pos) - start_of_host) + : std::string_view(); } } if (has_identification) - return StringRef(start_of_host, pos - start_of_host); + return std::string_view(start_of_host, pos - start_of_host); else - return StringRef(start_of_host, std::min(std::min(pos, question_mark_pos), slash_pos) - start_of_host); + return std::string_view(start_of_host, std::min(std::min(pos, question_mark_pos), slash_pos) - start_of_host); } static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) { - StringRef host = getNetworkLocation(data, size); + std::string_view host = getNetworkLocation(data, size); - res_data = host.data; - res_size = host.size; + res_data = host.data(); + res_size = host.size(); } }; diff --git a/src/Functions/URL/port.cpp b/src/Functions/URL/port.cpp index e2dbc75ab3f..3bbcdcea560 100644 --- a/src/Functions/URL/port.cpp +++ b/src/Functions/URL/port.cpp @@ -94,13 +94,13 @@ private: const char * p = reinterpret_cast(buf.data()) + offset; const char * end = p + size; - StringRef host = getURLHost(p, size); - if (!host.size) + std::string_view host = getURLHost(p, size); + if (host.empty()) return default_port; - if (host.size == size) + if (host.size() == size) return default_port; - p = host.data + host.size; + p = host.data() + host.size(); if (*p++ != ':') return default_port; diff --git a/src/Functions/URL/protocol.h b/src/Functions/URL/protocol.h index b83eaae7e90..74c0bb820b4 100644 --- a/src/Functions/URL/protocol.h +++ b/src/Functions/URL/protocol.h @@ -8,7 +8,7 @@ namespace DB { /// Extracts scheme from given url. -inline StringRef getURLScheme(const char * data, size_t size) +inline std::string_view getURLScheme(const char * data, size_t size) { // scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." 
) const char * pos = data; @@ -24,7 +24,7 @@ inline StringRef getURLScheme(const char * data, size_t size) } } - return StringRef(data, pos - data); + return std::string_view(data, pos - data); } return {}; @@ -42,10 +42,10 @@ struct ExtractProtocol res_data = data; res_size = 0; - StringRef scheme = getURLScheme(data, size); - Pos pos = data + scheme.size; + std::string_view scheme = getURLScheme(data, size); + Pos pos = data + scheme.size(); - if (scheme.size == 0 || (data + size) - pos < 4) + if (scheme.empty() || (data + size) - pos < 4) return; if (pos[0] == ':') diff --git a/src/Functions/URL/topLevelDomain.cpp b/src/Functions/URL/topLevelDomain.cpp index 6aa6e689357..ade9439d8ec 100644 --- a/src/Functions/URL/topLevelDomain.cpp +++ b/src/Functions/URL/topLevelDomain.cpp @@ -11,7 +11,7 @@ struct ExtractTopLevelDomain static void execute(Pos data, size_t size, Pos & res_data, size_t & res_size) { - StringRef host = getURLHost(data, size); + StringRef host = StringRef(getURLHost(data, size)); res_data = data; res_size = 0; diff --git a/src/Functions/countMatches.h b/src/Functions/countMatches.h index 397515c8bba..2834b8c764b 100644 --- a/src/Functions/countMatches.h +++ b/src/Functions/countMatches.h @@ -79,7 +79,7 @@ public: current_src_offset = src_offsets[i]; Pos end = reinterpret_cast(&src_chars[current_src_offset]) - 1; - StringRef str(pos, end - pos); + std::string_view str(pos, end - pos); vec_res[i] = countMatches(str, re, matches); } @@ -87,7 +87,7 @@ public: } else if (const ColumnConst * col_const_str = checkAndGetColumnConstStringOrFixedString(column_haystack)) { - StringRef str = col_const_str->getDataColumn().getDataAt(0); + std::string_view str = col_const_str->getDataColumn().getDataAt(0).toView(); uint64_t matches_count = countMatches(str, re, matches); return result_type->createColumnConst(input_rows_count, matches_count); } @@ -95,13 +95,13 @@ public: throw Exception(ErrorCodes::LOGICAL_ERROR, "Error in FunctionCountMatches::getReturnTypeImpl()"); } - static uint64_t countMatches(StringRef src, const Regexps::Regexp & re, OptimizedRegularExpression::MatchVec & matches) + static uint64_t countMatches(std::string_view src, const Regexps::Regexp & re, OptimizedRegularExpression::MatchVec & matches) { /// Only one match is required, no need to copy more. static const unsigned matches_limit = 1; - Pos pos = reinterpret_cast(src.data); - Pos end = reinterpret_cast(src.data + src.size); + Pos pos = reinterpret_cast(src.data()); + Pos end = reinterpret_cast(src.data() + src.size()); uint64_t match_count = 0; while (true) diff --git a/src/Functions/extractGroups.cpp b/src/Functions/extractGroups.cpp index 940e76df1c0..c6633732aaa 100644 --- a/src/Functions/extractGroups.cpp +++ b/src/Functions/extractGroups.cpp @@ -87,10 +87,10 @@ public: for (size_t i = 0; i < input_rows_count; ++i) { - StringRef current_row = column_haystack->getDataAt(i); + std::string_view current_row = column_haystack->getDataAt(i).toView(); - if (re2->Match(re2_st::StringPiece(current_row.data, current_row.size), - 0, current_row.size, re2_st::RE2::UNANCHORED, matched_groups.data(), matched_groups.size())) + if (re2->Match(re2_st::StringPiece(current_row.data(), current_row.size()), + 0, current_row.size(), re2_st::RE2::UNANCHORED, matched_groups.data(), matched_groups.size())) { // 1 is to exclude group #0 which is whole re match. 
for (size_t group = 1; group <= groups_count; ++group) diff --git a/src/Functions/formatReadableTimeDelta.cpp b/src/Functions/formatReadableTimeDelta.cpp index d781d227c64..219c2d95353 100644 --- a/src/Functions/formatReadableTimeDelta.cpp +++ b/src/Functions/formatReadableTimeDelta.cpp @@ -94,19 +94,19 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { - StringRef maximum_unit_str; + std::string_view maximum_unit_str; if (arguments.size() == 2) { const ColumnPtr & maximum_unit_column = arguments[1].column; const ColumnConst * maximum_unit_const_col = checkAndGetColumnConstStringOrFixedString(maximum_unit_column.get()); if (maximum_unit_const_col) - maximum_unit_str = maximum_unit_const_col->getDataColumn().getDataAt(0); + maximum_unit_str = maximum_unit_const_col->getDataColumn().getDataAt(0).toView(); } Unit max_unit; /// Default means "use all available units". - if (maximum_unit_str.size == 0 || maximum_unit_str == "years") + if (maximum_unit_str.empty() || maximum_unit_str == "years") max_unit = Years; else if (maximum_unit_str == "months") max_unit = Months; @@ -122,7 +122,7 @@ public: throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected value of maximum unit argument ({}) for function {}, the only allowed values are:" " 'seconds', 'minutes', 'hours', 'days', 'months', 'years'.", - maximum_unit_str.toString(), getName()); + maximum_unit_str, getName()); auto col_to = ColumnString::create(); diff --git a/src/Functions/geohashDecode.cpp b/src/Functions/geohashDecode.cpp index 199d1a62f1d..b4e5d8e46e9 100644 --- a/src/Functions/geohashDecode.cpp +++ b/src/Functions/geohashDecode.cpp @@ -64,8 +64,8 @@ public: for (size_t i = 0; i < count; ++i) { - StringRef encoded_string = encoded->getDataAt(i); - geohashDecode(encoded_string.data, encoded_string.size, &lon_data[i], &lat_data[i]); + std::string_view encoded_string = encoded->getDataAt(i).toView(); + geohashDecode(encoded_string.data(), encoded_string.size(), &lon_data[i], &lat_data[i]); } MutableColumns result; diff --git a/src/Functions/getSetting.cpp b/src/Functions/getSetting.cpp index 7e146448dd6..a27f698d54c 100644 --- a/src/Functions/getSetting.cpp +++ b/src/Functions/getSetting.cpp @@ -56,7 +56,7 @@ private: throw Exception{"The argument of function " + String{name} + " should be a constant string with the name of a setting", ErrorCodes::ILLEGAL_COLUMN}; - std::string_view setting_name{column->getDataAt(0)}; + std::string_view setting_name{column->getDataAt(0).toView()}; return getContext()->getSettingsRef().get(setting_name); } }; diff --git a/src/Functions/isConstant.cpp b/src/Functions/isConstant.cpp index 6d76cfc1dcc..09b29aaf260 100644 --- a/src/Functions/isConstant.cpp +++ b/src/Functions/isConstant.cpp @@ -29,6 +29,8 @@ public: bool useDefaultImplementationForNothing() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } size_t getNumberOfArguments() const override diff --git a/src/Functions/isIPAddressContainedIn.cpp b/src/Functions/isIPAddressContainedIn.cpp index 5ef247f7346..a6f94c77ad1 100644 --- a/src/Functions/isIPAddressContainedIn.cpp +++ b/src/Functions/isIPAddressContainedIn.cpp @@ -27,7 +27,7 @@ class IPAddressVariant { public: - explicit IPAddressVariant(const StringRef & address_str) + explicit 
IPAddressVariant(std::string_view address_str) { /// IP address parser functions require that the input is /// NULL-terminated so we need to copy it. @@ -75,21 +75,20 @@ struct IPAddressCIDR UInt8 prefix; }; -IPAddressCIDR parseIPWithCIDR(const StringRef cidr_str) +IPAddressCIDR parseIPWithCIDR(std::string_view cidr_str) { - std::string_view cidr_str_view(cidr_str); - size_t pos_slash = cidr_str_view.find('/'); + size_t pos_slash = cidr_str.find('/'); if (pos_slash == 0) throw DB::Exception("Error parsing IP address with prefix: " + std::string(cidr_str), DB::ErrorCodes::CANNOT_PARSE_TEXT); if (pos_slash == std::string_view::npos) throw DB::Exception("The text does not contain '/': " + std::string(cidr_str), DB::ErrorCodes::CANNOT_PARSE_TEXT); - std::string_view addr_str = cidr_str_view.substr(0, pos_slash); - IPAddressVariant addr(StringRef{addr_str.data(), addr_str.size()}); + std::string_view addr_str = cidr_str.substr(0, pos_slash); + IPAddressVariant addr(addr_str); uint8_t prefix = 0; - auto prefix_str = cidr_str_view.substr(pos_slash+1); + auto prefix_str = cidr_str.substr(pos_slash+1); const auto * prefix_str_end = prefix_str.data() + prefix_str.size(); auto [parse_end, parse_error] = std::from_chars(prefix_str.data(), prefix_str_end, prefix); @@ -189,8 +188,8 @@ namespace DB const auto & col_addr = col_addr_const.getDataColumn(); const auto & col_cidr = col_cidr_const.getDataColumn(); - const auto addr = IPAddressVariant(col_addr.getDataAt(0)); - const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(0)); + const auto addr = IPAddressVariant(col_addr.getDataAt(0).toView()); + const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(0).toView()); ColumnUInt8::MutablePtr col_res = ColumnUInt8::create(1); ColumnUInt8::Container & vec_res = col_res->getData(); @@ -205,14 +204,14 @@ namespace DB { const auto & col_addr = col_addr_const.getDataColumn(); - const auto addr = IPAddressVariant(col_addr.getDataAt (0)); + const auto addr = IPAddressVariant(col_addr.getDataAt(0).toView()); ColumnUInt8::MutablePtr col_res = ColumnUInt8::create(input_rows_count); ColumnUInt8::Container & vec_res = col_res->getData(); for (size_t i = 0; i < input_rows_count; ++i) { - const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(i)); + const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(i).toView()); vec_res[i] = isAddressInRange(addr, cidr) ? 1 : 0; } return col_res; @@ -223,13 +222,13 @@ namespace DB { const auto & col_cidr = col_cidr_const.getDataColumn(); - const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(0)); + const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(0).toView()); ColumnUInt8::MutablePtr col_res = ColumnUInt8::create(input_rows_count); ColumnUInt8::Container & vec_res = col_res->getData(); for (size_t i = 0; i < input_rows_count; ++i) { - const auto addr = IPAddressVariant(col_addr.getDataAt(i)); + const auto addr = IPAddressVariant(col_addr.getDataAt(i).toView()); vec_res[i] = isAddressInRange(addr, cidr) ? 1 : 0; } return col_res; @@ -243,8 +242,8 @@ namespace DB for (size_t i = 0; i < input_rows_count; ++i) { - const auto addr = IPAddressVariant(col_addr.getDataAt(i)); - const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(i)); + const auto addr = IPAddressVariant(col_addr.getDataAt(i).toView()); + const auto cidr = parseIPWithCIDR(col_cidr.getDataAt(i).toView()); vec_res[i] = isAddressInRange(addr, cidr) ? 
1 : 0; } diff --git a/src/Functions/isNotNull.cpp b/src/Functions/isNotNull.cpp index 49c5964012a..44ea8aeaeb0 100644 --- a/src/Functions/isNotNull.cpp +++ b/src/Functions/isNotNull.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -32,6 +33,7 @@ public: size_t getNumberOfArguments() const override { return 1; } bool useDefaultImplementationForNulls() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; } @@ -43,6 +45,18 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const ColumnWithTypeAndName & elem = arguments[0]; + if (elem.type->isLowCardinalityNullable()) + { + const auto * low_cardinality_column = checkAndGetColumn(*elem.column); + const size_t null_index = low_cardinality_column->getDictionary().getNullValueIndex(); + auto res = DataTypeUInt8().createColumn(); + auto & data = typeid_cast(*res).getData(); + data.reserve(low_cardinality_column->size()); + for (size_t i = 0; i != low_cardinality_column->size(); ++i) + data.push_back(low_cardinality_column->getIndexAt(i) != null_index); + return res; + } + if (const auto * nullable = checkAndGetColumn(*elem.column)) { /// Return the negated null map. diff --git a/src/Functions/isNull.cpp b/src/Functions/isNull.cpp index f9111b2dbbb..e22b1cf469c 100644 --- a/src/Functions/isNull.cpp +++ b/src/Functions/isNull.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -30,6 +31,7 @@ public: size_t getNumberOfArguments() const override { return 1; } bool useDefaultImplementationForNulls() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } bool useDefaultImplementationForConstants() const override { return true; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; } @@ -42,6 +44,18 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t) const override { const ColumnWithTypeAndName & elem = arguments[0]; + if (elem.type->isLowCardinalityNullable()) + { + const auto * low_cardinality_column = checkAndGetColumn(*elem.column); + size_t null_index = low_cardinality_column->getDictionary().getNullValueIndex(); + auto res = DataTypeUInt8().createColumn(); + auto & data = typeid_cast(*res).getData(); + data.reserve(low_cardinality_column->size()); + for (size_t i = 0; i != low_cardinality_column->size(); ++i) + data.push_back(low_cardinality_column->getIndexAt(i) == null_index); + return res; + } + if (const auto * nullable = checkAndGetColumn(*elem.column)) { /// Merely return the embedded null map. 
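The `isNull`/`isNotNull` hunks above add a fast path for `LowCardinality(Nullable(...))` columns: instead of first converting the column to a full `Nullable` column, they compare each row's dictionary index against the dictionary's null index (`getNullValueIndex()`). Below is a minimal standalone sketch of that idea; it uses plain `std::vector`s as a stand-in for `ColumnLowCardinality`, and the names `ToyLowCardinalityColumn`/`isNullPerRow` are illustrative only, not part of the codebase.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

/// Toy model of a LowCardinality(Nullable(String)) column:
/// a dictionary of distinct values plus one dictionary index per row.
struct ToyLowCardinalityColumn
{
    std::vector<std::optional<std::string>> dictionary; /// dictionary[null_index] holds NULL
    std::vector<uint64_t> indexes;                      /// one entry per row
    uint64_t null_index = 0;                            /// analogous to getNullValueIndex()
};

/// Mirrors the new isNull fast path: compare each row's dictionary index with
/// the null index, without materializing a full Nullable column first.
std::vector<uint8_t> isNullPerRow(const ToyLowCardinalityColumn & col)
{
    std::vector<uint8_t> res;
    res.reserve(col.indexes.size());
    for (uint64_t idx : col.indexes)
        res.push_back(idx == col.null_index ? 1 : 0);
    return res;
}

int main()
{
    ToyLowCardinalityColumn col;
    col.dictionary = {std::nullopt, "a", "b"}; /// index 0 reserved for NULL here
    col.indexes = {1, 0, 2, 0, 1};             /// rows: 'a', NULL, 'b', NULL, 'a'

    for (uint8_t v : isNullPerRow(col))
        std::cout << int(v) << ' ';            /// prints: 0 1 0 1 0
    std::cout << '\n';
}
```

`isNotNull` is the same loop with the comparison inverted, which is exactly how the two hunks above differ.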
diff --git a/src/Functions/isNullable.cpp b/src/Functions/isNullable.cpp index 35cefdbfe63..3680ac7ccb0 100644 --- a/src/Functions/isNullable.cpp +++ b/src/Functions/isNullable.cpp @@ -29,6 +29,8 @@ public: bool useDefaultImplementationForConstants() const override { return true; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + ColumnNumbers getArgumentsThatDontImplyNullableReturnType(size_t /*number_of_arguments*/) const override { return {0}; } bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; } @@ -46,7 +48,7 @@ public: ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { const auto & elem = arguments[0]; - return ColumnUInt8::create(input_rows_count, isColumnNullable(*elem.column)); + return ColumnUInt8::create(input_rows_count, isColumnNullable(*elem.column) || elem.type->isLowCardinalityNullable()); } }; diff --git a/src/Functions/multiSearchAllPositions.cpp b/src/Functions/multiSearchAllPositions.cpp index 5d9b3f5e2fd..53f3da9cde6 100644 --- a/src/Functions/multiSearchAllPositions.cpp +++ b/src/Functions/multiSearchAllPositions.cpp @@ -15,7 +15,7 @@ struct NameMultiSearchAllPositions }; using FunctionMultiSearchAllPositions - = FunctionsMultiStringPosition, NameMultiSearchAllPositions>; + = FunctionsMultiStringPosition>; } diff --git a/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp b/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp index 9f93284a769..55c112eb093 100644 --- a/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp +++ b/src/Functions/multiSearchAllPositionsCaseInsensitive.cpp @@ -15,7 +15,7 @@ struct NameMultiSearchAllPositionsCaseInsensitive }; using FunctionMultiSearchAllPositionsCaseInsensitive - = FunctionsMultiStringPosition, NameMultiSearchAllPositionsCaseInsensitive>; + = FunctionsMultiStringPosition>; } diff --git a/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp b/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp index 8864a00a8d3..df9de8a17ec 100644 --- a/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp +++ b/src/Functions/multiSearchAllPositionsCaseInsensitiveUTF8.cpp @@ -14,9 +14,8 @@ struct NameMultiSearchAllPositionsCaseInsensitiveUTF8 static constexpr auto name = "multiSearchAllPositionsCaseInsensitiveUTF8"; }; -using FunctionMultiSearchAllPositionsCaseInsensitiveUTF8 = FunctionsMultiStringPosition< - MultiSearchAllPositionsImpl, - NameMultiSearchAllPositionsCaseInsensitiveUTF8>; +using FunctionMultiSearchAllPositionsCaseInsensitiveUTF8 + = FunctionsMultiStringPosition>; } diff --git a/src/Functions/multiSearchAllPositionsUTF8.cpp b/src/Functions/multiSearchAllPositionsUTF8.cpp index 3922a859c3a..e5f9a02afcc 100644 --- a/src/Functions/multiSearchAllPositionsUTF8.cpp +++ b/src/Functions/multiSearchAllPositionsUTF8.cpp @@ -15,7 +15,7 @@ struct NameMultiSearchAllPositionsUTF8 }; using FunctionMultiSearchAllPositionsUTF8 - = FunctionsMultiStringPosition, NameMultiSearchAllPositionsUTF8>; + = FunctionsMultiStringPosition>; } diff --git a/src/Functions/parseTimeDelta.cpp b/src/Functions/parseTimeDelta.cpp new file mode 100644 index 00000000000..8cb7c229ae8 --- /dev/null +++ b/src/Functions/parseTimeDelta.cpp @@ -0,0 +1,312 @@ +#include +#include + +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int 
ILLEGAL_TYPE_OF_ARGUMENT; + extern const int BAD_ARGUMENTS; +} + +namespace +{ + const std::unordered_map time_unit_to_float = + { + {"years", 365 * 24 * 3600}, + {"year", 365 * 24 * 3600}, + {"yr", 365 * 24 * 3600}, + {"y", 365 * 24 * 3600}, + + {"months", 30.5 * 24 * 3600}, + {"month", 30.5 * 24 * 3600}, + {"mo", 30.5 * 24 * 3600}, + + {"weeks", 7 * 24 * 3600}, + {"week", 7 * 24 * 3600}, + {"w", 7 * 24 * 3600}, + + {"days", 24 * 3600}, + {"day", 24 * 3600}, + {"d", 24 * 3600}, + + {"hours", 3600}, + {"hour", 3600}, + {"hr", 3600}, + {"h", 3600}, + + {"minutes", 60}, + {"minute", 60}, + {"min", 60}, + {"m", 60}, + + {"seconds", 1}, + {"second", 1}, + {"sec", 1}, + {"s", 1}, + + {"milliseconds", 1e-3}, + {"millisecond", 1e-3}, + {"millisec", 1e-3}, + {"ms", 1e-3}, + + {"microseconds", 1e-6}, + {"microsecond", 1e-6}, + {"microsec", 1e-6}, + {"μs", 1e-6}, + {"us", 1e-6}, + + {"nanoseconds", 1e-9}, + {"nanosecond", 1e-9}, + {"nanosec", 1e-9}, + {"ns", 1e-9}, + }; + + /** Parses a time delta given as a string of the form + * "1 year 2 months 4 weeks 12 days 3 hours 1 minute 33 seconds" and returns the total number of seconds. + * ' ', ';', '-', '+', ',', ':' can be used as separators, e.g. "1yr-2mo", "2m:6s" + * + * valid expressions: + * SELECT parseTimeDelta('1 min 35 sec'); + * SELECT parseTimeDelta('0m;11.23s.'); + * SELECT parseTimeDelta('11hr 25min 3.1s'); + * SELECT parseTimeDelta('0.00123 seconds'); + * SELECT parseTimeDelta('1yr2mo'); + * SELECT parseTimeDelta('11s+22min'); + * SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds'); + * + * invalid expressions: + * SELECT parseTimeDelta(); + * SELECT parseTimeDelta('1yr', 1); + * SELECT parseTimeDelta(1); + * SELECT parseTimeDelta(' '); + * SELECT parseTimeDelta('-1yr'); + * SELECT parseTimeDelta('1yr-'); + * SELECT parseTimeDelta('yr2mo'); + * SELECT parseTimeDelta('1.yr2mo'); + * SELECT parseTimeDelta('1-yr'); + * SELECT parseTimeDelta('1 1yr'); + * SELECT parseTimeDelta('1yyr'); + * SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ;. 33 seconds'); + * + * The lengths of years and months (and even of days, in the presence of time adjustments) are rough: + * a year is just 365 days, a month is 30.5 days, a day is 86400 seconds, similarly to what formatReadableTimeDelta is doing. 
+ */ + class FunctionParseTimeDelta : public IFunction + { + public: + static constexpr auto name = "parseTimeDelta"; + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + + String getName() const override { return name; } + + bool isVariadic() const override { return true; } + + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + + size_t getNumberOfArguments() const override { return 0; } + + DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override + { + if (arguments.empty()) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1.", + getName(), + toString(arguments.size())); + + if (arguments.size() > 1) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1.", + getName(), + toString(arguments.size())); + + const IDataType & type = *arguments[0]; + + if (!isString(type)) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot format {} as time string.", type.getName()); + + return std::make_shared(); + } + + bool useDefaultImplementationForConstants() const override { return true; } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override + { + auto col_to = ColumnFloat64::create(); + auto & res_data = col_to->getData(); + + for (size_t i = 0; i < input_rows_count; ++i) + { + std::string_view str{arguments[0].column->getDataAt(i)}; + Int64 token_tail = 0; + Int64 token_front = 0; + Int64 last_pos = str.length() - 1; + Float64 result = 0; + + /// ignore '.' and ' ' at the end of string + while (last_pos >= 0 && (str[last_pos] == ' ' || str[last_pos] == '.')) + --last_pos; + + /// no valid characters + if (last_pos < 0) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid expression for function {}, don't find valid characters, str: \"{}\".", + getName(), + String(str)); + } + + /// last pos character must be character and not be separator or number after ignoring '.' and ' ' + if (!isalpha(str[last_pos])) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid expression for function {}, str: \"{}\".", getName(), String(str)); + } + + /// scan spaces at the beginning + scanSpaces(str, token_tail, last_pos); + token_front = token_tail; + + while (token_tail <= last_pos) + { + /// scan unsigned integer + if (!scanUnsignedInteger(str, token_tail, last_pos)) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid expression for function {}, find number failed, str: \"{}\".", + getName(), + String(str)); + } + + /// if there is a '.', then scan another integer to get a float number + if (token_tail <= last_pos && str[token_tail] == '.') + { + token_tail++; + if (!scanUnsignedInteger(str, token_tail, last_pos)) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid expression for function {}, find number after '.' 
failed, str: \"{}\".", + getName(), + String(str)); + } + } + + /// convert float/integer string to float + Float64 base = 0; + std::string_view base_str = str.substr(token_front, token_tail - token_front); + auto value = boost::convert(base_str, boost::cnv::strtol()); + if (!value.has_value()) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid expression for function {}, convert string to float64 failed: \"{}\".", + getName(), + String(base_str)); + } + base = value.get(); + + scanSpaces(str, token_tail, last_pos); + token_front = token_tail; + + /// scan a unit + if (!scanUnit(str, token_tail, last_pos)) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid expression for function {}, find unit failed, str: \"{}\".", + getName(), + String(str)); + } + + /// get unit number + std::string_view unit = str.substr(token_front, token_tail - token_front); + auto iter = time_unit_to_float.find(unit); + if (iter == time_unit_to_float.end()) /// not find unit + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, "Invalid expression for function {}, parse unit failed: \"{}\".", getName(), unit); + } + result += base * iter->second; + + /// scan separator between two tokens + scanSeparator(str, token_tail, last_pos); + token_front = token_tail; + } + + res_data.emplace_back(result); + } + + return col_to; + } + + /// scan an unsigned integer number + static bool scanUnsignedInteger(std::string_view & str, Int64 & index, Int64 last_pos) + { + int64_t begin_index = index; + while (index <= last_pos && isdigit(str[index])) + { + index++; + } + return index != begin_index; + } + + /// scan a unit + static bool scanUnit(std::string_view & str, Int64 & index, Int64 last_pos) + { + int64_t begin_index = index; + while (index <= last_pos && !isdigit(str[index]) && !isSeparator(str[index])) + { + index++; + } + return index != begin_index; + } + + /// scan spaces + static void scanSpaces(std::string_view & str, Int64 & index, Int64 last_pos) + { + while (index <= last_pos && (str[index] == ' ')) + { + index++; + } + } + + /// scan for characters to ignore + static void scanSeparator(std::string_view & str, Int64 & index, Int64 last_pos) + { + /// ignore spaces + scanSpaces(str, index, last_pos); + + /// ignore separator + if (index <= last_pos && isSeparator(str[index])) + { + index++; + } + + scanSpaces(str, index, last_pos); + } + + static bool isSeparator(char symbol) + { + return symbol == ';' || symbol == '-' || symbol == '+' || symbol == ',' || symbol == ':' || symbol == ' '; + } + }; + +} + +void registerFunctionParseTimeDelta(FunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/src/Functions/registerFunctions.cpp b/src/Functions/registerFunctions.cpp index bafaf61c2f5..f578bfc9d68 100644 --- a/src/Functions/registerFunctions.cpp +++ b/src/Functions/registerFunctions.cpp @@ -60,7 +60,7 @@ void registerFunctionsTimeWindow(FunctionFactory &); void registerFunctionToBool(FunctionFactory &); void registerFunctionMinSampleSize(FunctionFactory &); -// meilisearch +/// For Meilisearch void registerFunctionMeiliMatch(FunctionFactory & factory); #if USE_SSL @@ -126,8 +126,11 @@ void registerFunctions() registerFunctionsTimeWindow(factory); registerFunctionToBool(factory); registerFunctionMinSampleSize(factory); + registerFunctionTid(factory); + registerFunctionLogTrace(factory); + registerFunctionHashID(factory); - //meilisearch + /// For Meilisearch registerFunctionMeiliMatch(factory); #if USE_SSL @@ -137,9 +140,6 @@ void registerFunctions() 
registerFunctionAESDecryptMysql(factory); registerFunctionShowCertificate(factory); #endif - registerFunctionTid(factory); - registerFunctionLogTrace(factory); - registerFunctionHashID(factory); } } diff --git a/src/Functions/registerFunctionsFormatting.cpp b/src/Functions/registerFunctionsFormatting.cpp index e434b0e49f0..02353fac812 100644 --- a/src/Functions/registerFunctionsFormatting.cpp +++ b/src/Functions/registerFunctionsFormatting.cpp @@ -7,6 +7,7 @@ void registerFunctionsBitToArray(FunctionFactory &); void registerFunctionFormatReadableSize(FunctionFactory &); void registerFunctionFormatReadableQuantity(FunctionFactory &); void registerFunctionFormatReadableTimeDelta(FunctionFactory &); +void registerFunctionParseTimeDelta(FunctionFactory &); void registerFunctionsFormatting(FunctionFactory & factory) { @@ -14,6 +15,7 @@ void registerFunctionsFormatting(FunctionFactory & factory) registerFunctionFormatReadableSize(factory); registerFunctionFormatReadableQuantity(factory); registerFunctionFormatReadableTimeDelta(factory); + registerFunctionParseTimeDelta(factory); } } diff --git a/src/Functions/reinterpretAs.cpp b/src/Functions/reinterpretAs.cpp index ad357c74402..a31b41b55f2 100644 --- a/src/Functions/reinterpretAs.cpp +++ b/src/Functions/reinterpretAs.cpp @@ -288,9 +288,9 @@ private: ColumnFixedString::Offset offset = 0; for (size_t i = 0; i < rows; ++i) { - StringRef data = src.getDataAt(i); + std::string_view data = src.getDataAt(i).toView(); - memcpy(&data_to[offset], data.data, std::min(n, data.size)); + memcpy(&data_to[offset], data.data(), std::min(n, data.size())); offset += n; } } diff --git a/src/Functions/stringToH3.cpp b/src/Functions/stringToH3.cpp index 1cafd1661f1..db13534b3d2 100644 --- a/src/Functions/stringToH3.cpp +++ b/src/Functions/stringToH3.cpp @@ -84,7 +84,7 @@ private: auto h3index = h3index_source.getWhole(); // convert to std::string and get the c_str to have the delimiting \0 at the end. 
- auto h3index_str = StringRef(h3index.data, h3index.size).toString(); + auto h3index_str = std::string(reinterpret_cast(h3index.data), h3index.size); res_data[row_num] = stringToH3(h3index_str.c_str()); if (res_data[row_num] == 0) diff --git a/src/Functions/tupleElement.cpp b/src/Functions/tupleElement.cpp index 023dc266b43..92ca6b85714 100644 --- a/src/Functions/tupleElement.cpp +++ b/src/Functions/tupleElement.cpp @@ -18,6 +18,10 @@ namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; extern const int ILLEGAL_INDEX; + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int NOT_FOUND_COLUMN_IN_BLOCK; + extern const int NUMBER_OF_DIMENSIONS_MISMATCHED; + extern const int SIZES_OF_ARRAYS_DOESNT_MATCH; } namespace @@ -40,9 +44,11 @@ public: return name; } + bool isVariadic() const override { return true; } + size_t getNumberOfArguments() const override { - return 2; + return 0; } bool useDefaultImplementationForConstants() const override @@ -59,8 +65,14 @@ public: DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override { - size_t count_arrays = 0; + const size_t number_of_arguments = arguments.size(); + if (number_of_arguments < 2 || number_of_arguments > 3) + throw Exception("Number of arguments for function " + getName() + " doesn't match: passed " + + toString(number_of_arguments) + ", should be 2 or 3", + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH); + + size_t count_arrays = 0; const IDataType * tuple_col = arguments[0].type.get(); while (const DataTypeArray * array = checkAndGetDataType(tuple_col)) { @@ -72,16 +84,34 @@ public: if (!tuple) throw Exception("First argument for function " + getName() + " must be tuple or array of tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - size_t index = getElementNum(arguments[1].column, *tuple); - DataTypePtr out_return_type = tuple->getElements()[index]; + auto index = getElementNum(arguments[1].column, *tuple, number_of_arguments); + if (index.has_value()) + { + DataTypePtr out_return_type = tuple->getElements()[index.value()]; - for (; count_arrays; --count_arrays) - out_return_type = std::make_shared(out_return_type); + for (; count_arrays; --count_arrays) + out_return_type = std::make_shared(out_return_type); - return out_return_type; + return out_return_type; + } + else + { + const IDataType * default_col = arguments[2].type.get(); + size_t default_argument_count_arrays = 0; + if (const DataTypeArray * array = checkAndGetDataType(default_col)) + { + default_argument_count_arrays = array->getNumberOfDimensions(); + } + + if (count_arrays != default_argument_count_arrays) + { + throw Exception(ErrorCodes::NUMBER_OF_DIMENSIONS_MISMATCHED, "Dimension of types mismatched between first argument and third argument. Dimension of 1st argument: {}. 
Dimension of 3rd argument: {}.",count_arrays, default_argument_count_arrays); + } + return arguments[2].type; + } } - ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override { Columns array_offsets; @@ -89,6 +119,12 @@ public: const IDataType * tuple_type = first_arg.type.get(); const IColumn * tuple_col = first_arg.column.get(); + bool first_arg_is_const = false; + if (typeid_cast(tuple_col)) + { + tuple_col = assert_cast(tuple_col)->getDataColumnPtr().get(); + first_arg_is_const = true; + } while (const DataTypeArray * array_type = checkAndGetDataType(tuple_type)) { const ColumnArray * array_col = assert_cast(tuple_col); @@ -103,18 +139,87 @@ public: if (!tuple_type_concrete || !tuple_col_concrete) throw Exception("First argument for function " + getName() + " must be tuple or array of tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - size_t index = getElementNum(arguments[1].column, *tuple_type_concrete); - ColumnPtr res = tuple_col_concrete->getColumns()[index]; + auto index = getElementNum(arguments[1].column, *tuple_type_concrete, arguments.size()); + + if (!index.has_value()) + { + if (!array_offsets.empty()) + { + recursiveCheckArrayOffsets(arguments[0].column, arguments[2].column, array_offsets.size()); + } + return arguments[2].column; + } + + ColumnPtr res = tuple_col_concrete->getColumns()[index.value()]; /// Wrap into Arrays for (auto it = array_offsets.rbegin(); it != array_offsets.rend(); ++it) res = ColumnArray::create(res, *it); + if (first_arg_is_const) + { + res = ColumnConst::create(res, input_rows_count); + } return res; } private: - size_t getElementNum(const ColumnPtr & index_column, const DataTypeTuple & tuple) const + + void recursiveCheckArrayOffsets(ColumnPtr col_x, ColumnPtr col_y, size_t depth) const + { + for (size_t i = 1; i < depth; ++i) + { + checkArrayOffsets(col_x, col_y); + col_x = assert_cast(col_x.get())->getDataPtr(); + col_y = assert_cast(col_y.get())->getDataPtr(); + } + checkArrayOffsets(col_x, col_y); + } + + void checkArrayOffsets(ColumnPtr col_x, ColumnPtr col_y) const + { + if (isColumnConst(*col_x)) + { + checkArrayOffsetsWithFirstArgConst(col_x, col_y); + } + else if (isColumnConst(*col_y)) + { + checkArrayOffsetsWithFirstArgConst(col_y, col_x); + } + else + { + const auto & array_x = *assert_cast(col_x.get()); + const auto & array_y = *assert_cast(col_y.get()); + if (!array_x.hasEqualOffsets(array_y)) + { + throw Exception("The argument 1 and argument 3 of function " + getName() + " have different array sizes", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH); + } + } + } + + void checkArrayOffsetsWithFirstArgConst(ColumnPtr col_x, ColumnPtr col_y) const + { + col_x = assert_cast(col_x.get())->getDataColumnPtr(); + col_y = col_y->convertToFullColumnIfConst(); + const auto & array_x = *assert_cast(col_x.get()); + const auto & array_y = *assert_cast(col_y.get()); + + const auto & offsets_x = array_x.getOffsets(); + const auto & offsets_y = array_y.getOffsets(); + + ColumnArray::Offset prev_offset = 0; + size_t row_size = offsets_y.size(); + for (size_t row = 0; row < row_size; ++row) + { + if (unlikely(offsets_x[0] != offsets_y[row] - prev_offset)) + { + throw Exception("The argument 1 and argument 3 of function " + getName() + " have different array sizes", ErrorCodes::SIZES_OF_ARRAYS_DOESNT_MATCH); + } + prev_offset = offsets_y[row]; + } + } + + 
std::optional getElementNum(const ColumnPtr & index_column, const DataTypeTuple & tuple, const size_t argument_size) const { if ( checkAndGetColumnConst(index_column.get()) @@ -131,11 +236,21 @@ private: if (index > tuple.getElements().size()) throw Exception("Index for tuple element is out of range.", ErrorCodes::ILLEGAL_INDEX); - return index - 1; + return std::optional(index - 1); } else if (const auto * name_col = checkAndGetColumnConst(index_column.get())) { - return tuple.getPositionByName(name_col->getValue()); + auto index = tuple.tryGetPositionByName(name_col->getValue()); + if (index.has_value()) + { + return index; + } + + if (argument_size == 2) + { + throw Exception("Tuple doesn't have element with name '" + name_col->getValue() + "'", ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK); + } + return std::nullopt; } else throw Exception("Second argument to " + getName() + " must be a constant UInt or String", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp index add18d8d12e..1bf889540eb 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp @@ -90,7 +90,10 @@ bool AsynchronousReadBufferFromFileDescriptor::nextImpl() prefetch_future = {}; file_offset_of_buffer_end += size; - if (size) + assert(offset <= size); + size_t bytes_read = size - offset; + + if (bytes_read) { prefetch_buffer.swap(memory); /// Adjust the working buffer so that it ignores `offset` bytes. @@ -109,7 +112,10 @@ bool AsynchronousReadBufferFromFileDescriptor::nextImpl() auto [size, offset] = asyncReadInto(memory.data(), memory.size()).get(); file_offset_of_buffer_end += size; - if (size) + assert(offset <= size); + size_t bytes_read = size - offset; + + if (bytes_read) { /// Adjust the working buffer so that it ignores `offset` bytes. 
internal_buffer = Buffer(memory.data(), memory.data() + memory.size()); diff --git a/src/IO/CascadeWriteBuffer.cpp b/src/IO/CascadeWriteBuffer.cpp index 616fbe9b789..ca11290c71b 100644 --- a/src/IO/CascadeWriteBuffer.cpp +++ b/src/IO/CascadeWriteBuffer.cpp @@ -50,8 +50,6 @@ void CascadeWriteBuffer::nextImpl() } set(curr_buffer->position(), curr_buffer->buffer().end() - curr_buffer->position()); -// std::cerr << "CascadeWriteBuffer a count=" << count() << " bytes=" << bytes << " offset=" << offset() -// << " bytes+size=" << bytes + buffer().size() << "\n"; } diff --git a/src/IO/Operators.h b/src/IO/Operators.h index 114ab692dc3..06ff20c43e8 100644 --- a/src/IO/Operators.h +++ b/src/IO/Operators.h @@ -56,7 +56,7 @@ template WriteBuffer & operator<< (BinaryManipWriteBuffer buf, inline WriteBuffer & operator<< (EscapeManipWriteBuffer buf, const String & x) { writeEscapedString(x, buf); return buf; } inline WriteBuffer & operator<< (EscapeManipWriteBuffer buf, std::string_view x) { writeEscapedString(x, buf); return buf; } -inline WriteBuffer & operator<< (EscapeManipWriteBuffer buf, StringRef x) { writeEscapedString(x, buf); return buf; } +inline WriteBuffer & operator<< (EscapeManipWriteBuffer buf, StringRef x) { writeEscapedString(x.toView(), buf); return buf; } inline WriteBuffer & operator<< (EscapeManipWriteBuffer buf, const char * x) { writeEscapedString(x, strlen(x), buf); return buf; } inline WriteBuffer & operator<< (QuoteManipWriteBuffer buf, const char * x) { writeAnyQuotedString<'\''>(x, x + strlen(x), buf.get()); return buf; } diff --git a/src/IO/ReadBufferFromAzureBlobStorage.cpp b/src/IO/ReadBufferFromAzureBlobStorage.cpp index 4b594700979..3e6581cd786 100644 --- a/src/IO/ReadBufferFromAzureBlobStorage.cpp +++ b/src/IO/ReadBufferFromAzureBlobStorage.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -26,7 +27,6 @@ ReadBufferFromAzureBlobStorage::ReadBufferFromAzureBlobStorage( const ReadSettings & read_settings_, size_t max_single_read_retries_, size_t max_single_download_retries_, - size_t tmp_buffer_size_, bool use_external_buffer_, size_t read_until_position_) : ReadBufferFromFileBase(read_settings_.remote_fs_buffer_size, nullptr, 0) @@ -34,7 +34,8 @@ ReadBufferFromAzureBlobStorage::ReadBufferFromAzureBlobStorage( , path(path_) , max_single_read_retries(max_single_read_retries_) , max_single_download_retries(max_single_download_retries_) - , tmp_buffer_size(tmp_buffer_size_) + , read_settings(read_settings_) + , tmp_buffer_size(read_settings.remote_fs_buffer_size) , use_external_buffer(use_external_buffer_) , read_until_position(read_until_position_) { @@ -76,6 +77,8 @@ bool ReadBufferFromAzureBlobStorage::nextImpl() try { bytes_read = data_stream->ReadToCount(reinterpret_cast(data_ptr), to_read_bytes); + if (read_settings.remote_throttler) + read_settings.remote_throttler->add(bytes_read); break; } catch (const Azure::Storage::StorageException & e) diff --git a/src/IO/ReadBufferFromAzureBlobStorage.h b/src/IO/ReadBufferFromAzureBlobStorage.h index 94cac10a2df..5396fcf9719 100644 --- a/src/IO/ReadBufferFromAzureBlobStorage.h +++ b/src/IO/ReadBufferFromAzureBlobStorage.h @@ -23,7 +23,6 @@ public: const ReadSettings & read_settings_, size_t max_single_read_retries_, size_t max_single_download_retries_, - size_t tmp_buffer_size_, bool use_external_buffer_ = false, size_t read_until_position_ = 0); @@ -48,6 +47,7 @@ private: const String path; size_t max_single_read_retries; size_t max_single_download_retries; + ReadSettings read_settings; std::vector 
tmp_buffer; size_t tmp_buffer_size; bool use_external_buffer; diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 406b519df79..920e76cd7d0 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #ifdef HAS_RESERVED_IDENTIFIER diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index 40b0717c8b1..73c651189cd 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index 83c05a392a1..7fb432eab22 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -5,12 +5,13 @@ #include #include -#include #include #include #include +#include +#include #include #include @@ -164,6 +165,8 @@ bool ReadBufferFromS3::nextImpl() ProfileEvents::increment(ProfileEvents::ReadBufferFromS3Bytes, working_buffer.size()); offset += working_buffer.size(); + if (read_settings.remote_throttler) + read_settings.remote_throttler->add(working_buffer.size()); return true; } @@ -299,7 +302,6 @@ std::unique_ptr ReadBufferFromS3::initialize() if (outcome.IsSuccess()) { read_result = outcome.GetResultWithOwnership(); - size_t buffer_size = use_external_buffer ? 0 : read_settings.remote_fs_buffer_size; return std::make_unique(read_result.GetBody(), buffer_size); } diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp index f09292cd349..c2b0a0f65d7 100644 --- a/src/IO/ReadHelpers.cpp +++ b/src/IO/ReadHelpers.cpp @@ -1053,7 +1053,7 @@ template void readDateTimeTextFallback(time_t &, ReadBuffer &, const DateL template bool readDateTimeTextFallback(time_t &, ReadBuffer &, const DateLUTImpl &); -void skipJSONField(ReadBuffer & buf, const StringRef & name_of_field) +void skipJSONField(ReadBuffer & buf, StringRef name_of_field) { if (buf.eof()) throw Exception("Unexpected EOF for key '" + name_of_field.toString() + "'", ErrorCodes::INCORRECT_DATA); diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index 4cd07dddf25..7a5df1ed5ac 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -836,7 +836,7 @@ template inline T parse(const char * data, size_t size); template -inline T parseFromString(const std::string_view & str) +inline T parseFromString(std::string_view str) { return parse(str.data(), str.size()); } @@ -1238,7 +1238,7 @@ inline void skipWhitespaceIfAny(ReadBuffer & buf, bool one_line = false) } /// Skips json value. -void skipJSONField(ReadBuffer & buf, const StringRef & name_of_field); +void skipJSONField(ReadBuffer & buf, StringRef name_of_field); /** Read serialized exception. 
@@ -1338,7 +1338,7 @@ inline T parseWithSizeSuffix(const char * data, size_t size) } template -inline T parseWithSizeSuffix(const std::string_view & s) +inline T parseWithSizeSuffix(std::string_view s) { return parseWithSizeSuffix(s.data(), s.size()); } diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 78d5d6f3d65..2a2691e3c06 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB { @@ -89,6 +90,9 @@ struct ReadSettings FileCachePtr remote_fs_cache; + /// Bandwidth throttler to use during reading + ThrottlerPtr remote_throttler; + size_t http_max_tries = 1; size_t http_retry_initial_backoff_ms = 100; size_t http_retry_max_backoff_ms = 1600; diff --git a/src/IO/WriteBufferFromAzureBlobStorage.cpp b/src/IO/WriteBufferFromAzureBlobStorage.cpp index 51d8bf6aba2..bc7b505cd91 100644 --- a/src/IO/WriteBufferFromAzureBlobStorage.cpp +++ b/src/IO/WriteBufferFromAzureBlobStorage.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB @@ -15,11 +16,13 @@ WriteBufferFromAzureBlobStorage::WriteBufferFromAzureBlobStorage( const String & blob_path_, size_t max_single_part_upload_size_, size_t buf_size_, + const WriteSettings & write_settings_, std::optional> attributes_) : BufferWithOwnMemory(buf_size_, nullptr, 0) , blob_container_client(blob_container_client_) , max_single_part_upload_size(max_single_part_upload_size_) , blob_path(blob_path_) + , write_settings(write_settings_) , attributes(attributes_) { } @@ -84,6 +87,9 @@ void WriteBufferFromAzureBlobStorage::nextImpl() } block_blob_client.CommitBlockList(block_ids); + + if (write_settings.remote_throttler) + write_settings.remote_throttler->add(read); } } diff --git a/src/IO/WriteBufferFromAzureBlobStorage.h b/src/IO/WriteBufferFromAzureBlobStorage.h index ef13a24abd8..0005705e68c 100644 --- a/src/IO/WriteBufferFromAzureBlobStorage.h +++ b/src/IO/WriteBufferFromAzureBlobStorage.h @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -24,6 +25,7 @@ public: const String & blob_path_, size_t max_single_part_upload_size_, size_t buf_size_, + const WriteSettings & write_settings_, std::optional> attributes_ = {}); ~WriteBufferFromAzureBlobStorage() override; @@ -36,6 +38,7 @@ private: std::shared_ptr blob_container_client; size_t max_single_part_upload_size; const String blob_path; + WriteSettings write_settings; std::optional> attributes; }; diff --git a/src/IO/WriteBufferFromS3.cpp b/src/IO/WriteBufferFromS3.cpp index 432304d6d5d..51f0c0d0743 100644 --- a/src/IO/WriteBufferFromS3.cpp +++ b/src/IO/WriteBufferFromS3.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include @@ -61,6 +62,7 @@ WriteBufferFromS3::WriteBufferFromS3( std::optional> object_metadata_, size_t buffer_size_, ScheduleFunc schedule_, + const WriteSettings & write_settings_, FileCachePtr cache_) : BufferWithOwnMemory(buffer_size_, nullptr, 0) , bucket(bucket_) @@ -70,6 +72,7 @@ WriteBufferFromS3::WriteBufferFromS3( , s3_settings(s3_settings_) , object_metadata(std::move(object_metadata_)) , schedule(std::move(schedule_)) + , write_settings(write_settings_) , cache(cache_) { allocateBuffer(); @@ -121,8 +124,9 @@ void WriteBufferFromS3::nextImpl() } ProfileEvents::increment(ProfileEvents::WriteBufferFromS3Bytes, offset()); - last_part_size += offset(); + if (write_settings.remote_throttler) + write_settings.remote_throttler->add(offset()); /// Data size exceeds singlepart upload threshold, need to use multipart upload. 
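
A minimal sketch, assuming a simplified stand-in Throttler, of the pattern the ReadSettings/WriteSettings hunks above introduce: the settings carry an optional server-wide ThrottlerPtr, and every completed remote transfer is reported to it via add(). The real throttler class (its include path is elided above) enforces the bytes-per-second limit inside add().

/// Illustrative sketch, not part of the patch; Throttler and RemoteIOSettings here are simplified stand-ins.
#include <memory>
#include <cstddef>

class Throttler
{
public:
    explicit Throttler(size_t max_speed_) : max_speed(max_speed_) {}
    void add(size_t bytes) { (void)bytes; /* the real class blocks here if max_speed would be exceeded */ }
private:
    size_t max_speed;
};
using ThrottlerPtr = std::shared_ptr<Throttler>;

struct RemoteIOSettings            /// stands in for ReadSettings / WriteSettings with the new field
{
    ThrottlerPtr remote_throttler;
};

void accountTransfer(const RemoteIOSettings & settings, size_t transferred_bytes)
{
    if (settings.remote_throttler)                          /// same pattern as in ReadBufferFromS3,
        settings.remote_throttler->add(transferred_bytes);  /// ReadBufferFromAzureBlobStorage and WriteBufferFrom*
}
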
if (multipart_upload_id.empty() && last_part_size > s3_settings.max_single_part_upload_size) @@ -462,7 +466,6 @@ void WriteBufferFromS3::processPutRequest(PutObjectTask & task) { auto outcome = client_ptr->PutObject(task.req); bool with_pool = static_cast(schedule); - if (outcome.IsSuccess()) LOG_TRACE(log, "Single part upload has completed. Bucket: {}, Key: {}, Object size: {}, WithPool: {}", bucket, key, task.req.GetContentLength(), with_pool); else diff --git a/src/IO/WriteBufferFromS3.h b/src/IO/WriteBufferFromS3.h index 4cdc39b80a0..e79051823c4 100644 --- a/src/IO/WriteBufferFromS3.h +++ b/src/IO/WriteBufferFromS3.h @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -55,6 +56,7 @@ public: std::optional> object_metadata_ = std::nullopt, size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE, ScheduleFunc schedule_ = {}, + const WriteSettings & write_settings_ = {}, FileCachePtr cache_ = nullptr); ~WriteBufferFromS3() override; @@ -119,6 +121,8 @@ private: Poco::Logger * log = &Poco::Logger::get("WriteBufferFromS3"); + WriteSettings write_settings; + FileCachePtr cache; size_t current_download_offset = 0; std::optional file_segments_holder; diff --git a/src/IO/WriteHelpers.cpp b/src/IO/WriteHelpers.cpp index a6d492b85b0..cb341e60a8b 100644 --- a/src/IO/WriteHelpers.cpp +++ b/src/IO/WriteHelpers.cpp @@ -66,9 +66,9 @@ void writeException(const Exception & e, WriteBuffer & buf, bool with_stack_trac /// The same, but quotes apply only if there are characters that do not match the identifier without quotes template -static inline void writeProbablyQuotedStringImpl(const StringRef & s, WriteBuffer & buf, F && write_quoted_string) +static inline void writeProbablyQuotedStringImpl(StringRef s, WriteBuffer & buf, F && write_quoted_string) { - if (isValidIdentifier(std::string_view{s}) + if (isValidIdentifier(s.toView()) /// This are valid identifiers but are problematic if present unquoted in SQL query. 
&& !(s.size == strlen("distinct") && 0 == strncasecmp(s.data, "distinct", strlen("distinct"))) && !(s.size == strlen("all") && 0 == strncasecmp(s.data, "all", strlen("all")))) @@ -79,19 +79,19 @@ static inline void writeProbablyQuotedStringImpl(const StringRef & s, WriteBuffe write_quoted_string(s, buf); } -void writeProbablyBackQuotedString(const StringRef & s, WriteBuffer & buf) +void writeProbablyBackQuotedString(StringRef s, WriteBuffer & buf) { - writeProbablyQuotedStringImpl(s, buf, [](const StringRef & s_, WriteBuffer & buf_) { return writeBackQuotedString(s_, buf_); }); + writeProbablyQuotedStringImpl(s, buf, [](StringRef s_, WriteBuffer & buf_) { return writeBackQuotedString(s_, buf_); }); } -void writeProbablyDoubleQuotedString(const StringRef & s, WriteBuffer & buf) +void writeProbablyDoubleQuotedString(StringRef s, WriteBuffer & buf) { - writeProbablyQuotedStringImpl(s, buf, [](const StringRef & s_, WriteBuffer & buf_) { return writeDoubleQuotedString(s_, buf_); }); + writeProbablyQuotedStringImpl(s, buf, [](StringRef s_, WriteBuffer & buf_) { return writeDoubleQuotedString(s_, buf_); }); } -void writeProbablyBackQuotedStringMySQL(const StringRef & s, WriteBuffer & buf) +void writeProbablyBackQuotedStringMySQL(StringRef s, WriteBuffer & buf) { - writeProbablyQuotedStringImpl(s, buf, [](const StringRef & s_, WriteBuffer & buf_) { return writeBackQuotedStringMySQL(s_, buf_); }); + writeProbablyQuotedStringImpl(s, buf, [](StringRef s_, WriteBuffer & buf_) { return writeBackQuotedStringMySQL(s_, buf_); }); } void writePointerHex(const void * ptr, WriteBuffer & buf) diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 5eab75f14b1..2903a70b61a 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -102,7 +102,7 @@ inline void writeStringBinary(const std::string & s, WriteBuffer & buf) buf.write(s.data(), s.size()); } -inline void writeStringBinary(const StringRef & s, WriteBuffer & buf) +inline void writeStringBinary(StringRef s, WriteBuffer & buf) { writeVarUInt(s.size, buf); buf.write(s.data, s.size); @@ -113,7 +113,7 @@ inline void writeStringBinary(const char * s, WriteBuffer & buf) writeStringBinary(StringRef{s}, buf); } -inline void writeStringBinary(const std::string_view & s, WriteBuffer & buf) +inline void writeStringBinary(std::string_view s, WriteBuffer & buf) { writeStringBinary(StringRef{s}, buf); } @@ -360,19 +360,9 @@ void writeAnyEscapedString(const char * begin, const char * end, WriteBuffer & b } -inline void writeJSONString(const StringRef & s, WriteBuffer & buf, const FormatSettings & settings) +inline void writeJSONString(std::string_view s, WriteBuffer & buf, const FormatSettings & settings) { - writeJSONString(s.data, s.data + s.size, buf, settings); -} - -inline void writeJSONString(const std::string_view & s, WriteBuffer & buf, const FormatSettings & settings) -{ - writeJSONString(StringRef{s}, buf, settings); -} - -inline void writeJSONString(const String & s, WriteBuffer & buf, const FormatSettings & settings) -{ - writeJSONString(StringRef{s}, buf, settings); + writeJSONString(s.data(), s.data() + s.size(), buf, settings); } template @@ -417,7 +407,7 @@ void writeJSONNumber(T x, WriteBuffer & ostr, const FormatSettings & settings) template -void writeAnyEscapedString(const String & s, WriteBuffer & buf) +void writeAnyEscapedString(std::string_view s, WriteBuffer & buf) { writeAnyEscapedString(s.data(), s.data() + s.size(), buf); } @@ -428,19 +418,7 @@ inline void writeEscapedString(const char * str, size_t size, WriteBuffer & buf) 
writeAnyEscapedString<'\''>(str, str + size, buf); } - -inline void writeEscapedString(const String & s, WriteBuffer & buf) -{ - writeEscapedString(s.data(), s.size(), buf); -} - - -inline void writeEscapedString(const StringRef & ref, WriteBuffer & buf) -{ - writeEscapedString(ref.data, ref.size, buf); -} - -inline void writeEscapedString(const std::string_view & ref, WriteBuffer & buf) +inline void writeEscapedString(std::string_view ref, WriteBuffer & buf) { writeEscapedString(ref.data(), ref.size(), buf); } @@ -455,16 +433,9 @@ void writeAnyQuotedString(const char * begin, const char * end, WriteBuffer & bu template -void writeAnyQuotedString(const String & s, WriteBuffer & buf) +void writeAnyQuotedString(std::string_view ref, WriteBuffer & buf) { - writeAnyQuotedString(s.data(), s.data() + s.size(), buf); -} - - -template -void writeAnyQuotedString(const StringRef & ref, WriteBuffer & buf) -{ - writeAnyQuotedString(ref.data, ref.data + ref.size, buf); + writeAnyQuotedString(ref.data(), ref.data() + ref.size(), buf); } @@ -473,12 +444,12 @@ inline void writeQuotedString(const String & s, WriteBuffer & buf) writeAnyQuotedString<'\''>(s, buf); } -inline void writeQuotedString(const StringRef & ref, WriteBuffer & buf) +inline void writeQuotedString(StringRef ref, WriteBuffer & buf) { - writeAnyQuotedString<'\''>(ref, buf); + writeAnyQuotedString<'\''>(ref.toView(), buf); } -inline void writeQuotedString(const std::string_view & ref, WriteBuffer & buf) +inline void writeQuotedString(std::string_view ref, WriteBuffer & buf) { writeAnyQuotedString<'\''>(ref.data(), ref.data() + ref.size(), buf); } @@ -488,24 +459,24 @@ inline void writeDoubleQuotedString(const String & s, WriteBuffer & buf) writeAnyQuotedString<'"'>(s, buf); } -inline void writeDoubleQuotedString(const StringRef & s, WriteBuffer & buf) +inline void writeDoubleQuotedString(StringRef s, WriteBuffer & buf) { - writeAnyQuotedString<'"'>(s, buf); + writeAnyQuotedString<'"'>(s.toView(), buf); } -inline void writeDoubleQuotedString(const std::string_view & s, WriteBuffer & buf) +inline void writeDoubleQuotedString(std::string_view s, WriteBuffer & buf) { writeAnyQuotedString<'"'>(s.data(), s.data() + s.size(), buf); } /// Outputs a string in backquotes. -inline void writeBackQuotedString(const StringRef & s, WriteBuffer & buf) +inline void writeBackQuotedString(StringRef s, WriteBuffer & buf) { - writeAnyQuotedString<'`'>(s, buf); + writeAnyQuotedString<'`'>(s.toView(), buf); } /// Outputs a string in backquotes for MySQL. -inline void writeBackQuotedStringMySQL(const StringRef & s, WriteBuffer & buf) +inline void writeBackQuotedStringMySQL(StringRef s, WriteBuffer & buf) { writeChar('`', buf); writeAnyEscapedString<'`', true>(s.data, s.data + s.size, buf); @@ -514,9 +485,9 @@ inline void writeBackQuotedStringMySQL(const StringRef & s, WriteBuffer & buf) /// Write quoted if the string doesn't look like and identifier. -void writeProbablyBackQuotedString(const StringRef & s, WriteBuffer & buf); -void writeProbablyDoubleQuotedString(const StringRef & s, WriteBuffer & buf); -void writeProbablyBackQuotedStringMySQL(const StringRef & s, WriteBuffer & buf); +void writeProbablyBackQuotedString(StringRef s, WriteBuffer & buf); +void writeProbablyDoubleQuotedString(StringRef s, WriteBuffer & buf); +void writeProbablyBackQuotedStringMySQL(StringRef s, WriteBuffer & buf); /** Outputs the string in for the CSV format. 
@@ -559,7 +530,7 @@ void writeCSVString(const String & s, WriteBuffer & buf) } template -void writeCSVString(const StringRef & s, WriteBuffer & buf) +void writeCSVString(StringRef s, WriteBuffer & buf) { writeCSVString(s.data, s.data + s.size, buf); } @@ -611,16 +582,11 @@ inline void writeXMLStringForTextElementOrAttributeValue(const char * begin, con } } -inline void writeXMLStringForTextElementOrAttributeValue(const String & s, WriteBuffer & buf) +inline void writeXMLStringForTextElementOrAttributeValue(std::string_view s, WriteBuffer & buf) { writeXMLStringForTextElementOrAttributeValue(s.data(), s.data() + s.size(), buf); } -inline void writeXMLStringForTextElementOrAttributeValue(const StringRef & s, WriteBuffer & buf) -{ - writeXMLStringForTextElementOrAttributeValue(s.data, s.data + s.size, buf); -} - /// Writing a string to a text node in XML (not into an attribute - otherwise you need more escaping). inline void writeXMLStringForTextElement(const char * begin, const char * end, WriteBuffer & buf) { @@ -652,16 +618,11 @@ inline void writeXMLStringForTextElement(const char * begin, const char * end, W } } -inline void writeXMLStringForTextElement(const String & s, WriteBuffer & buf) +inline void writeXMLStringForTextElement(std::string_view s, WriteBuffer & buf) { writeXMLStringForTextElement(s.data(), s.data() + s.size(), buf); } -inline void writeXMLStringForTextElement(const StringRef & s, WriteBuffer & buf) -{ - writeXMLStringForTextElement(s.data, s.data + s.size, buf); -} - template void formatHex(IteratorSrc src, IteratorDst dst, size_t num_bytes); void formatUUID(const UInt8 * src16, UInt8 * dst36); @@ -890,8 +851,8 @@ requires is_arithmetic_v inline void writeBinary(const T & x, WriteBuffer & buf) { writePODBinary(x, buf); } inline void writeBinary(const String & x, WriteBuffer & buf) { writeStringBinary(x, buf); } -inline void writeBinary(const StringRef & x, WriteBuffer & buf) { writeStringBinary(x, buf); } -inline void writeBinary(const std::string_view & x, WriteBuffer & buf) { writeStringBinary(x, buf); } +inline void writeBinary(StringRef x, WriteBuffer & buf) { writeStringBinary(x, buf); } +inline void writeBinary(std::string_view x, WriteBuffer & buf) { writeStringBinary(x, buf); } inline void writeBinary(const Decimal32 & x, WriteBuffer & buf) { writePODBinary(x, buf); } inline void writeBinary(const Decimal64 & x, WriteBuffer & buf) { writePODBinary(x, buf); } inline void writeBinary(const Decimal128 & x, WriteBuffer & buf) { writePODBinary(x, buf); } @@ -1015,9 +976,9 @@ inline void writeQuoted(const T & x, WriteBuffer & buf) { writeText(x, buf); } inline void writeQuoted(const String & x, WriteBuffer & buf) { writeQuotedString(x, buf); } -inline void writeQuoted(const std::string_view & x, WriteBuffer & buf) { writeQuotedString(x, buf); } +inline void writeQuoted(std::string_view x, WriteBuffer & buf) { writeQuotedString(x, buf); } -inline void writeQuoted(const StringRef & x, WriteBuffer & buf) { writeQuotedString(x, buf); } +inline void writeQuoted(StringRef x, WriteBuffer & buf) { writeQuotedString(x, buf); } inline void writeQuoted(const LocalDate & x, WriteBuffer & buf) { @@ -1048,9 +1009,9 @@ inline void writeDoubleQuoted(const T & x, WriteBuffer & buf) { writeText(x, buf inline void writeDoubleQuoted(const String & x, WriteBuffer & buf) { writeDoubleQuotedString(x, buf); } -inline void writeDoubleQuoted(const std::string_view & x, WriteBuffer & buf) { writeDoubleQuotedString(x, buf); } +inline void writeDoubleQuoted(std::string_view x, WriteBuffer & buf) 
{ writeDoubleQuotedString(x, buf); } -inline void writeDoubleQuoted(const StringRef & x, WriteBuffer & buf) { writeDoubleQuotedString(x, buf); } +inline void writeDoubleQuoted(StringRef x, WriteBuffer & buf) { writeDoubleQuotedString(x, buf); } inline void writeDoubleQuoted(const LocalDate & x, WriteBuffer & buf) { diff --git a/src/IO/WriteSettings.h b/src/IO/WriteSettings.h index 3464bb31664..7530b27794a 100644 --- a/src/IO/WriteSettings.h +++ b/src/IO/WriteSettings.h @@ -1,5 +1,7 @@ #pragma once +#include + namespace DB { @@ -7,6 +9,9 @@ namespace DB struct WriteSettings { bool enable_filesystem_cache_on_write_operations = false; + + /// Bandwidth throttler to use during writing + ThrottlerPtr remote_throttler; }; } diff --git a/src/Interpreters/ActionLocksManager.cpp b/src/Interpreters/ActionLocksManager.cpp index 8f081f3d470..7b57b8803cd 100644 --- a/src/Interpreters/ActionLocksManager.cpp +++ b/src/Interpreters/ActionLocksManager.cpp @@ -23,20 +23,6 @@ ActionLocksManager::ActionLocksManager(ContextPtr context_) : WithContext(contex { } -template -inline void forEachTable(F && f, ContextPtr context) -{ - for (auto & elem : DatabaseCatalog::instance().getDatabases()) - for (auto iterator = elem.second->getTablesIterator(context); iterator->isValid(); iterator->next()) - if (auto table = iterator->table()) - f(table); -} - -void ActionLocksManager::add(StorageActionBlockType action_type, ContextPtr context_) -{ - forEachTable([&](const StoragePtr & table) { add(table, action_type); }, context_); -} - void ActionLocksManager::add(const StorageID & table_id, StorageActionBlockType action_type) { if (auto table = DatabaseCatalog::instance().tryGetTable(table_id, getContext())) @@ -54,14 +40,6 @@ void ActionLocksManager::add(const StoragePtr & table, StorageActionBlockType ac } } -void ActionLocksManager::remove(StorageActionBlockType action_type) -{ - std::lock_guard lock(mutex); - - for (auto & storage_elem : storage_locks) - storage_elem.second.erase(action_type); -} - void ActionLocksManager::remove(const StorageID & table_id, StorageActionBlockType action_type) { if (auto table = DatabaseCatalog::instance().tryGetTable(table_id, getContext())) diff --git a/src/Interpreters/ActionLocksManager.h b/src/Interpreters/ActionLocksManager.h index be112e71950..d1da81a8dd4 100644 --- a/src/Interpreters/ActionLocksManager.h +++ b/src/Interpreters/ActionLocksManager.h @@ -20,14 +20,10 @@ class ActionLocksManager : WithContext public: explicit ActionLocksManager(ContextPtr context); - /// Adds new locks for each table - void add(StorageActionBlockType action_type, ContextPtr context); /// Add new lock for a table if it has not been already added void add(const StorageID & table_id, StorageActionBlockType action_type); void add(const StoragePtr & table, StorageActionBlockType action_type); - /// Remove locks for all tables - void remove(StorageActionBlockType action_type); /// Removes a lock for a table if it exists void remove(const StorageID & table_id, StorageActionBlockType action_type); void remove(const StoragePtr & table, StorageActionBlockType action_type); diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 2703773f464..b91fd7ac5cf 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -448,17 +448,7 @@ static ColumnWithTypeAndName executeActionForHeader(const ActionsDAG::Node * nod { case ActionsDAG::ActionType::FUNCTION: { - // bool all_args_are_const = true; - - // for (const auto & argument : arguments) - // if 
(typeid_cast(argument.column.get()) == nullptr) - // all_args_are_const = false; - res_column.column = node->function->execute(arguments, res_column.type, 0, true); - - // if (!all_args_are_const) - // res_column.column = res_column.column->convertToFullColumnIfConst(); - break; } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index feb07727725..716849465de 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -238,7 +238,7 @@ struct AggregationMethodString std::optional shuffleKeyColumns(std::vector &, const Sizes &) { return {}; } - static void insertKeyIntoColumns(const StringRef & key, std::vector & key_columns, const Sizes &) + static void insertKeyIntoColumns(StringRef key, std::vector & key_columns, const Sizes &) { static_cast(key_columns[0])->insertData(key.data, key.size); } @@ -270,7 +270,7 @@ struct AggregationMethodStringNoCache std::optional shuffleKeyColumns(std::vector &, const Sizes &) { return {}; } - static void insertKeyIntoColumns(const StringRef & key, std::vector & key_columns, const Sizes &) + static void insertKeyIntoColumns(StringRef key, std::vector & key_columns, const Sizes &) { static_cast(key_columns[0])->insertData(key.data, key.size); } @@ -302,7 +302,7 @@ struct AggregationMethodFixedString std::optional shuffleKeyColumns(std::vector &, const Sizes &) { return {}; } - static void insertKeyIntoColumns(const StringRef & key, std::vector & key_columns, const Sizes &) + static void insertKeyIntoColumns(StringRef key, std::vector & key_columns, const Sizes &) { static_cast(key_columns[0])->insertData(key.data, key.size); } @@ -333,7 +333,7 @@ struct AggregationMethodFixedStringNoCache std::optional shuffleKeyColumns(std::vector &, const Sizes &) { return {}; } - static void insertKeyIntoColumns(const StringRef & key, std::vector & key_columns, const Sizes &) + static void insertKeyIntoColumns(StringRef key, std::vector & key_columns, const Sizes &) { static_cast(key_columns[0])->insertData(key.data, key.size); } @@ -501,7 +501,7 @@ struct AggregationMethodSerialized std::optional shuffleKeyColumns(std::vector &, const Sizes &) { return {}; } - static void insertKeyIntoColumns(const StringRef & key, std::vector & key_columns, const Sizes &) + static void insertKeyIntoColumns(StringRef key, std::vector & key_columns, const Sizes &) { const auto * pos = key.data; for (auto & column : key_columns) diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index b057b6ee641..9fd27fc28b6 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -989,9 +989,15 @@ void AsynchronousMetrics::update(std::chrono::system_clock::time_point update_ti if (s.rfind("processor", 0) == 0) { + /// s390x example: processor 0: version = FF, identification = 039C88, machine = 3906 + /// non s390x example: processor : 0 if (auto colon = s.find_first_of(':')) { +#ifdef __s390x__ + core_id = std::stoi(s.substr(10)); /// 10: length of "processor" plus 1 +#else core_id = std::stoi(s.substr(colon + 2)); +#endif } } else if (s.rfind("cpu MHz", 0) == 0) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index b83b6420548..a5629b33d22 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -228,8 +228,10 @@ struct ContextSharedPart mutable std::unique_ptr distributed_schedule_pool; /// A thread pool that can run different jobs in background (used for distributed sends) mutable std::unique_ptr 
message_broker_schedule_pool; /// A thread pool that can run different jobs in background (used for message brokers, like RabbitMQ and Kafka) - mutable ThrottlerPtr replicated_fetches_throttler; /// A server-wide throttler for replicated fetches - mutable ThrottlerPtr replicated_sends_throttler; /// A server-wide throttler for replicated sends + mutable ThrottlerPtr replicated_fetches_throttler; /// A server-wide throttler for replicated fetches + mutable ThrottlerPtr replicated_sends_throttler; /// A server-wide throttler for replicated sends + mutable ThrottlerPtr remote_read_throttler; /// A server-wide throttler for remote IO reads + mutable ThrottlerPtr remote_write_throttler; /// A server-wide throttler for remote IO writes MultiVersion macros; /// Substitutions extracted from config. std::unique_ptr ddl_worker; /// Process ddl commands from zk. @@ -854,13 +856,13 @@ void Context::checkAccessImpl(const Args &... args) const } void Context::checkAccess(const AccessFlags & flags) const { return checkAccessImpl(flags); } -void Context::checkAccess(const AccessFlags & flags, const std::string_view & database) const { return checkAccessImpl(flags, database); } -void Context::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const { return checkAccessImpl(flags, database, table); } -void Context::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const { return checkAccessImpl(flags, database, table, column); } -void Context::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); } -void Context::checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); } +void Context::checkAccess(const AccessFlags & flags, std::string_view database) const { return checkAccessImpl(flags, database); } +void Context::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(flags, database, table); } +void Context::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(flags, database, table, column); } +void Context::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); } +void Context::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); } void Context::checkAccess(const AccessFlags & flags, const StorageID & table_id) const { checkAccessImpl(flags, table_id.getDatabaseName(), table_id.getTableName()); } -void Context::checkAccess(const AccessFlags & flags, const StorageID & table_id, const std::string_view & column) const { checkAccessImpl(flags, table_id.getDatabaseName(), table_id.getTableName(), column); } +void Context::checkAccess(const AccessFlags & flags, const StorageID & table_id, std::string_view column) const { checkAccessImpl(flags, table_id.getDatabaseName(), table_id.getTableName(), column); } void Context::checkAccess(const AccessFlags & flags, const StorageID & 
table_id, const std::vector & columns) const { checkAccessImpl(flags, table_id.getDatabaseName(), table_id.getTableName(), columns); } void Context::checkAccess(const AccessFlags & flags, const StorageID & table_id, const Strings & columns) const { checkAccessImpl(flags, table_id.getDatabaseName(), table_id.getTableName(), columns); } void Context::checkAccess(const AccessRightsElement & element) const { return checkAccessImpl(element); } @@ -1189,7 +1191,7 @@ void Context::setSettings(const Settings & settings_) } -void Context::setSetting(const StringRef & name, const String & value) +void Context::setSetting(std::string_view name, const String & value) { auto lock = getLock(); if (name == "profile") @@ -1197,14 +1199,14 @@ void Context::setSetting(const StringRef & name, const String & value) setCurrentProfile(value); return; } - settings.set(std::string_view{name}, value); + settings.set(name, value); if (name == "readonly" || name == "allow_ddl" || name == "allow_introspection_functions") calculateAccessRights(); } -void Context::setSetting(const StringRef & name, const Field & value) +void Context::setSetting(std::string_view name, const Field & value) { auto lock = getLock(); if (name == "profile") @@ -1212,7 +1214,7 @@ void Context::setSetting(const StringRef & name, const Field & value) setCurrentProfile(value.safeGet()); return; } - settings.set(std::string_view{name}, value); + settings.set(name, value); if (name == "readonly" || name == "allow_ddl" || name == "allow_introspection_functions") calculateAccessRights(); @@ -1930,6 +1932,26 @@ ThrottlerPtr Context::getReplicatedSendsThrottler() const return shared->replicated_sends_throttler; } +ThrottlerPtr Context::getRemoteReadThrottler() const +{ + auto lock = getLock(); + if (!shared->remote_read_throttler) + shared->remote_read_throttler = std::make_shared( + settings.max_remote_read_network_bandwidth_for_server); + + return shared->remote_read_throttler; +} + +ThrottlerPtr Context::getRemoteWriteThrottler() const +{ + auto lock = getLock(); + if (!shared->remote_write_throttler) + shared->remote_write_throttler = std::make_shared( + settings.max_remote_write_network_bandwidth_for_server); + + return shared->remote_write_throttler; +} + bool Context::hasDistributedDDL() const { return getConfigRef().has("distributed_ddl"); @@ -3436,6 +3458,8 @@ ReadSettings Context::getReadSettings() const res.mmap_threshold = settings.min_bytes_to_use_mmap_io; res.priority = settings.read_priority; + res.remote_throttler = getRemoteReadThrottler(); + res.http_max_tries = settings.http_max_tries; res.http_retry_initial_backoff_ms = settings.http_retry_initial_backoff_ms; res.http_retry_max_backoff_ms = settings.http_retry_max_backoff_ms; @@ -3452,6 +3476,8 @@ WriteSettings Context::getWriteSettings() const res.enable_filesystem_cache_on_write_operations = settings.enable_filesystem_cache_on_write_operations; + res.remote_throttler = getRemoteWriteThrottler(); + return res; } diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 37c6b4c9caa..2d498d81326 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -474,13 +474,13 @@ public: /// Checks access rights. /// Empty database means the current database. 
void checkAccess(const AccessFlags & flags) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::string_view & column) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const std::vector & columns) const; - void checkAccess(const AccessFlags & flags, const std::string_view & database, const std::string_view & table, const Strings & columns) const; + void checkAccess(const AccessFlags & flags, std::string_view database) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const; + void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const; void checkAccess(const AccessFlags & flags, const StorageID & table_id) const; - void checkAccess(const AccessFlags & flags, const StorageID & table_id, const std::string_view & column) const; + void checkAccess(const AccessFlags & flags, const StorageID & table_id, std::string_view column) const; void checkAccess(const AccessFlags & flags, const StorageID & table_id, const std::vector & columns) const; void checkAccess(const AccessFlags & flags, const StorageID & table_id, const Strings & columns) const; void checkAccess(const AccessRightsElement & element) const; @@ -607,8 +607,8 @@ public: void setSettings(const Settings & settings_); /// Set settings by name. - void setSetting(const StringRef & name, const String & value); - void setSetting(const StringRef & name, const Field & value); + void setSetting(std::string_view name, const String & value); + void setSetting(std::string_view name, const Field & value); void applySettingChange(const SettingChange & change); void applySettingsChanges(const SettingsChanges & changes); @@ -820,6 +820,8 @@ public: ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; + ThrottlerPtr getRemoteReadThrottler() const; + ThrottlerPtr getRemoteWriteThrottler() const; /// Has distributed_ddl configuration or not. 
bool hasDistributedDDL() const; diff --git a/src/Interpreters/CrossToInnerJoinVisitor.cpp b/src/Interpreters/CrossToInnerJoinVisitor.cpp index d438ea9394e..cfa979f4036 100644 --- a/src/Interpreters/CrossToInnerJoinVisitor.cpp +++ b/src/Interpreters/CrossToInnerJoinVisitor.cpp @@ -39,7 +39,10 @@ struct JoinedElement : element(table_element) { if (element.table_join) + { join = element.table_join->as(); + original_kind = join->kind; + } } void checkTableName(const DatabaseAndTableWithAlias & table, const String & current_database) const @@ -61,6 +64,8 @@ struct JoinedElement join->kind = ASTTableJoin::Kind::Cross; } + ASTTableJoin::Kind getOriginalKind() const { return original_kind; } + bool rewriteCrossToInner(ASTPtr on_expression) { if (join->kind != ASTTableJoin::Kind::Cross) @@ -83,6 +88,8 @@ struct JoinedElement private: const ASTTablesInSelectQueryElement & element; ASTTableJoin * join = nullptr; + + ASTTableJoin::Kind original_kind; }; bool isAllowedToRewriteCrossJoin(const ASTPtr & node, const Aliases & aliases) @@ -251,10 +258,17 @@ void CrossToInnerJoinMatcher::visit(ASTSelectQuery & select, ASTPtr &, Data & da } } - if (data.cross_to_inner_join_rewrite > 1 && !rewritten) + if (joined.getOriginalKind() == ASTTableJoin::Kind::Comma && + data.cross_to_inner_join_rewrite > 1 && + !rewritten) { - throw Exception(ErrorCodes::INCORRECT_QUERY, "Failed to rewrite '{} WHERE {}' to INNER JOIN", - query_before, queryToString(select.where())); + throw Exception( + ErrorCodes::INCORRECT_QUERY, + "Failed to rewrite comma join to INNER. " + "Please, try to simplify WHERE section " + "or set the setting `cross_to_inner_join_rewrite` to 1 to allow slow CROSS JOIN for this case " + "(cannot rewrite '{} WHERE {}' to INNER JOIN)", + query_before, queryToString(select.where())); } } } diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index bae2aed2cd5..02d3e5eac32 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -234,7 +234,7 @@ void DatabaseCatalog::shutdownImpl() view_dependencies.clear(); } -bool DatabaseCatalog::isPredefinedDatabase(const std::string_view & database_name) +bool DatabaseCatalog::isPredefinedDatabase(std::string_view database_name) { return database_name == TEMPORARY_DATABASE || database_name == SYSTEM_DATABASE || database_name == INFORMATION_SCHEMA || database_name == INFORMATION_SCHEMA_UPPERCASE; @@ -403,10 +403,11 @@ void DatabaseCatalog::attachDatabase(const String & database_name, const Databas std::lock_guard lock{databases_mutex}; assertDatabaseDoesntExistUnlocked(database_name); databases.emplace(database_name, database); - NOEXCEPT_SCOPE; - UUID db_uuid = database->getUUID(); - if (db_uuid != UUIDHelpers::Nil) - addUUIDMapping(db_uuid, database, nullptr); + NOEXCEPT_SCOPE({ + UUID db_uuid = database->getUUID(); + if (db_uuid != UUIDHelpers::Nil) + addUUIDMapping(db_uuid, database, nullptr); + }); } diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h index 133cf0c5126..d82ad56eadd 100644 --- a/src/Interpreters/DatabaseCatalog.h +++ b/src/Interpreters/DatabaseCatalog.h @@ -131,7 +131,7 @@ public: static constexpr const char * INFORMATION_SCHEMA_UPPERCASE = "INFORMATION_SCHEMA"; /// Returns true if a passed name is one of the predefined databases' names. 
- static bool isPredefinedDatabase(const std::string_view & database_name); + static bool isPredefinedDatabase(std::string_view database_name); static DatabaseCatalog & init(ContextMutablePtr global_context_); static DatabaseCatalog & instance(); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 483ffad67b7..8a14c09819a 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -45,6 +45,9 @@ #include #include +#include +#include +#include #include #include #include @@ -345,6 +348,7 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) group_by_kind = GroupByKind::GROUPING_SETS; else group_by_kind = GroupByKind::ORDINARY; + bool use_nulls = group_by_kind != GroupByKind::ORDINARY && getContext()->getSettingsRef().group_by_use_nulls; /// For GROUPING SETS with multiple groups we always add virtual __grouping_set column /// With set number, which is used as an additional key at the stage of merging aggregating data. @@ -399,7 +403,7 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) } } - NameAndTypePair key{column_name, node->result_type}; + NameAndTypePair key{column_name, use_nulls ? makeNullableSafe(node->result_type) : node->result_type }; grouping_set_list.push_back(key); @@ -453,7 +457,7 @@ void ExpressionAnalyzer::analyzeAggregation(ActionsDAGPtr & temp_actions) } } - NameAndTypePair key{column_name, node->result_type}; + NameAndTypePair key = NameAndTypePair{ column_name, use_nulls ? makeNullableSafe(node->result_type) : node->result_type }; /// Aggregation keys are uniqued. if (!unique_keys.contains(key.name)) @@ -1253,7 +1257,7 @@ JoinPtr SelectQueryExpressionAnalyzer::makeJoin( } ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( - ExpressionActionsChain & chain, bool only_types, const Names & additional_required_columns) + ExpressionActionsChain & chain, bool only_types) { const auto * select_query = getSelectQuery(); if (!select_query->prewhere()) @@ -1290,14 +1294,6 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendPrewhere( NameSet required_source_columns(required_columns.begin(), required_columns.end()); required_source_columns.insert(first_action_names.begin(), first_action_names.end()); - /// Add required columns to required output in order not to remove them after prewhere execution. - /// TODO: add sampling and final execution to common chain. 
- for (const auto & column : additional_required_columns) - { - if (required_source_columns.contains(column)) - step.addRequiredOutput(column); - } - auto names = step.actions()->getNames(); NameSet name_set(names.begin(), names.end()); @@ -1497,6 +1493,28 @@ void SelectQueryExpressionAnalyzer::appendExpressionsAfterWindowFunctions(Expres } } +void SelectQueryExpressionAnalyzer::appendGroupByModifiers(ActionsDAGPtr & before_aggregation, ExpressionActionsChain & chain, bool /* only_types */) +{ + const auto * select_query = getAggregatingQuery(); + + if (!select_query->groupBy() || !(select_query->group_by_with_rollup || select_query->group_by_with_cube)) + return; + + auto source_columns = before_aggregation->getResultColumns(); + ColumnsWithTypeAndName result_columns; + + for (const auto & source_column : source_columns) + { + if (source_column.type->canBeInsideNullable()) + result_columns.emplace_back(makeNullableSafe(source_column.type), source_column.name); + else + result_columns.push_back(source_column); + } + ExpressionActionsChain::Step & step = chain.lastStep(before_aggregation->getNamesAndTypesList()); + + step.actions() = ActionsDAG::makeConvertingActions(source_columns, result_columns, ActionsDAG::MatchColumnsMode::Position); +} + void SelectQueryExpressionAnalyzer::appendSelectSkipWindowExpressions(ExpressionActionsChain::Step & step, ASTPtr const & node) { if (auto * function = node->as()) @@ -1824,6 +1842,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( bool second_stage_, bool only_types, const FilterDAGInfoPtr & filter_info_, + const FilterDAGInfoPtr & additional_filter, const Block & source_header) : first_stage(first_stage_) , second_stage(second_stage_) @@ -1844,12 +1863,28 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( const Settings & settings = context->getSettingsRef(); const ConstStoragePtr & storage = query_analyzer.storage(); + Names additional_required_columns_after_prewhere; ssize_t prewhere_step_num = -1; ssize_t where_step_num = -1; ssize_t having_step_num = -1; auto finalize_chain = [&](ExpressionActionsChain & chain) { + if (prewhere_step_num >= 0) + { + ExpressionActionsChain::Step & step = *chain.steps.at(prewhere_step_num); + + auto required_columns = prewhere_info->prewhere_actions->getRequiredColumnsNames(); + NameSet required_source_columns(required_columns.begin(), required_columns.end()); + /// Add required columns to required output in order not to remove them after prewhere execution. + /// TODO: add sampling and final execution to common chain. 
+ for (const auto & column : additional_required_columns_after_prewhere) + { + if (required_source_columns.contains(column)) + step.addRequiredOutput(column); + } + } + chain.finalize(); finalize(chain, prewhere_step_num, where_step_num, having_step_num, query); @@ -1859,7 +1894,6 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( { ExpressionActionsChain chain(context); - Names additional_required_columns_after_prewhere; if (storage && (query.sampleSize() || settings.parallel_replicas_count > 1)) { @@ -1875,13 +1909,20 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( columns_for_final.begin(), columns_for_final.end()); } + if (storage && additional_filter) + { + Names columns_for_additional_filter = additional_filter->actions->getRequiredColumnsNames(); + additional_required_columns_after_prewhere.insert(additional_required_columns_after_prewhere.end(), + columns_for_additional_filter.begin(), columns_for_additional_filter.end()); + } + if (storage && filter_info_) { filter_info = filter_info_; filter_info->do_remove_column = true; } - if (auto actions = query_analyzer.appendPrewhere(chain, !first_stage, additional_required_columns_after_prewhere)) + if (auto actions = query_analyzer.appendPrewhere(chain, !first_stage)) { /// Prewhere is always the first one. prewhere_step_num = 0; @@ -1949,6 +1990,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( query_analyzer.appendAggregateFunctionsArguments(chain, only_types || !first_stage); before_aggregation = chain.getLastActions(); + if (settings.group_by_use_nulls) + query_analyzer.appendGroupByModifiers(before_aggregation, chain, only_types); + finalize_chain(chain); if (query_analyzer.appendHaving(chain, only_types || !second_stage)) @@ -1976,6 +2020,13 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( && !query.final() && join_allow_read_in_order; + if (storage && optimize_read_in_order) + { + Names columns_for_sorting_key = metadata_snapshot->getColumnsRequiredForSortingKey(); + additional_required_columns_after_prewhere.insert(additional_required_columns_after_prewhere.end(), + columns_for_sorting_key.begin(), columns_for_sorting_key.end()); + } + /// If there is aggregation, we execute expressions in SELECT and ORDER BY on the initiating server, otherwise on the source servers. query_analyzer.appendSelect(chain, only_types || (need_aggregate ? !second_stage : !first_stage)); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index aae45482a97..da92bc10832 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -281,6 +281,7 @@ struct ExpressionAnalysisResult bool second_stage, bool only_types, const FilterDAGInfoPtr & filter_info, + const FilterDAGInfoPtr & additional_filter, /// for setting additional_filters const Block & source_header); /// Filter for row-level security. @@ -403,7 +404,7 @@ private: /// remove_filter is set in ExpressionActionsChain::finalize(); /// Columns in `additional_required_columns` will not be removed (they can be used for e.g. sampling or FINAL modifier). 
- ActionsDAGPtr appendPrewhere(ExpressionActionsChain & chain, bool only_types, const Names & additional_required_columns); + ActionsDAGPtr appendPrewhere(ExpressionActionsChain & chain, bool only_types); bool appendWhere(ExpressionActionsChain & chain, bool only_types); bool appendGroupBy(ExpressionActionsChain & chain, bool only_types, bool optimize_aggregation_in_order, ManyExpressionActions &); void appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types); @@ -412,6 +413,8 @@ private: void appendExpressionsAfterWindowFunctions(ExpressionActionsChain & chain, bool only_types); void appendSelectSkipWindowExpressions(ExpressionActionsChain::Step & step, ASTPtr const & node); + void appendGroupByModifiers(ActionsDAGPtr & before_aggregation, ExpressionActionsChain & chain, bool only_types); + /// After aggregation: bool appendHaving(ExpressionActionsChain & chain, bool only_types); /// appendSelect diff --git a/src/Interpreters/HashJoin.cpp b/src/Interpreters/HashJoin.cpp index b54c77b385f..722ba81451a 100644 --- a/src/Interpreters/HashJoin.cpp +++ b/src/Interpreters/HashJoin.cpp @@ -2100,7 +2100,6 @@ std::shared_ptr HashJoin::getNonJoinedBlocks(const Block & left if (multiple_disjuncts) { /// ... calculate `left_columns_count` ... - // throw DB::Exception(ErrorCodes::NOT_IMPLEMENTED, "TODO"); size_t left_columns_count = left_sample_block.columns(); auto non_joined = std::make_unique>(*this, max_block_size); return std::make_shared(std::move(non_joined), result_sample_block, left_columns_count, table_join->leftToRightKeyRemap()); diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp index 4ac1d33468f..05486f65da5 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.cpp @@ -4,6 +4,13 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include namespace DB { @@ -81,6 +88,53 @@ void IInterpreterUnionOrSelectQuery::setQuota(QueryPipeline & pipeline) const pipeline.setQuota(quota); } +static ASTPtr parseAdditionalPostFilter(const Context & context) +{ + const auto & settings = context.getSettingsRef(); + const String & filter = settings.additional_result_filter; + if (filter.empty()) + return nullptr; + + ParserExpression parser; + return parseQuery( + parser, filter.data(), filter.data() + filter.size(), + "additional filter", settings.max_query_size, settings.max_parser_depth); +} + +static ActionsDAGPtr makeAdditionalPostFilter(ASTPtr & ast, ContextPtr context, const Block & header) +{ + auto syntax_result = TreeRewriter(context).analyze(ast, header.getNamesAndTypesList()); + String result_column_name = ast->getColumnName(); + auto dag = ExpressionAnalyzer(ast, syntax_result, context).getActionsDAG(false, false); + const ActionsDAG::Node * result_node = &dag->findInIndex(result_column_name); + auto & index = dag->getIndex(); + index.clear(); + index.reserve(dag->getInputs().size() + 1); + for (const auto * node : dag->getInputs()) + index.push_back(node); + + index.push_back(result_node); + + return dag; +} + +void IInterpreterUnionOrSelectQuery::addAdditionalPostFilter(QueryPlan & plan) const +{ + if (options.subquery_depth != 0) + return; + + auto ast = parseAdditionalPostFilter(*context); + if (!ast) + return; + + auto dag = makeAdditionalPostFilter(ast, context, plan.getCurrentDataStream().header); + std::string filter_name = dag->getIndex().back()->result_name; + auto filter_step = 
std::make_unique( + plan.getCurrentDataStream(), std::move(dag), std::move(filter_name), true); + filter_step->setStepDescription("Additional result filter"); + plan.addStep(std::move(filter_step)); +} + void IInterpreterUnionOrSelectQuery::addStorageLimits(const StorageLimitsList & limits) { for (const auto & val : limits) diff --git a/src/Interpreters/IInterpreterUnionOrSelectQuery.h b/src/Interpreters/IInterpreterUnionOrSelectQuery.h index 98e0432f3d5..a1c86f9de85 100644 --- a/src/Interpreters/IInterpreterUnionOrSelectQuery.h +++ b/src/Interpreters/IInterpreterUnionOrSelectQuery.h @@ -72,6 +72,8 @@ protected: /// Set quotas to query pipeline. void setQuota(QueryPipeline & pipeline) const; + /// Add filter from additional_post_filter setting. + void addAdditionalPostFilter(QueryPlan & plan) const; static StorageLimits getStorageLimits(const Context & context, const SelectQueryOptions & options); }; diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 3fad4374abe..6715947da43 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -146,14 +146,14 @@ namespace struct QueryASTSettings { bool graph = false; - bool rewrite = false; + bool optimize = false; constexpr static char name[] = "AST"; std::unordered_map> boolean_settings = { {"graph", graph}, - {"rewrite", rewrite} + {"optimize", optimize} }; }; @@ -280,7 +280,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl() case ASTExplainQuery::ParsedAST: { auto settings = checkAndGetSettings(ast.getSettings()); - if (settings.rewrite) + if (settings.optimize) { ExplainAnalyzedSyntaxVisitor::Data data(getContext()); ExplainAnalyzedSyntaxVisitor(data).visit(query); diff --git a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp index 4d0c82d3345..d6add3f77a9 100644 --- a/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp +++ b/src/Interpreters/InterpreterSelectIntersectExceptQuery.cpp @@ -138,6 +138,7 @@ void InterpreterSelectIntersectExceptQuery::buildQueryPlan(QueryPlan & query_pla auto step = std::make_unique(std::move(data_streams), final_operator, max_threads); query_plan.unitePlans(std::move(step), std::move(plans)); + addAdditionalPostFilter(query_plan); query_plan.addInterpreterContext(context); } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index ac31588d210..24bbaea7dcf 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -109,8 +109,17 @@ namespace ErrorCodes } /// Assumes `storage` is set and the table filter (row-level security) is not empty. 
-String InterpreterSelectQuery::generateFilterActions(ActionsDAGPtr & actions, const Names & prerequisite_columns) const +FilterDAGInfoPtr generateFilterActions( + const StorageID & table_id, + const ASTPtr & row_policy_filter, + const ContextPtr & context, + const StoragePtr & storage, + const StorageSnapshotPtr & storage_snapshot, + const StorageMetadataPtr & metadata_snapshot, + Names & prerequisite_columns) { + auto filter_info = std::make_shared(); + const auto & db_name = table_id.getDatabaseName(); const auto & table_name = table_id.getTableName(); @@ -146,16 +155,24 @@ String InterpreterSelectQuery::generateFilterActions(ActionsDAGPtr & actions, co /// Using separate expression analyzer to prevent any possible alias injection auto syntax_result = TreeRewriter(context).analyzeSelect(query_ast, TreeRewriterResult({}, storage, storage_snapshot)); SelectQueryExpressionAnalyzer analyzer(query_ast, syntax_result, context, metadata_snapshot); - actions = analyzer.simpleSelectActions(); + filter_info->actions = analyzer.simpleSelectActions(); - auto column_name = expr_list->children.at(0)->getColumnName(); - actions->removeUnusedActions(NameSet{column_name}); - actions->projectInput(false); + filter_info->column_name = expr_list->children.at(0)->getColumnName(); + filter_info->actions->removeUnusedActions(NameSet{filter_info->column_name}); + filter_info->actions->projectInput(false); - for (const auto * node : actions->getInputs()) - actions->getIndex().push_back(node); + for (const auto * node : filter_info->actions->getInputs()) + filter_info->actions->getIndex().push_back(node); - return column_name; + auto required_columns_from_filter = filter_info->actions->getRequiredColumns(); + + for (const auto & column : required_columns_from_filter) + { + if (prerequisite_columns.end() == std::find(prerequisite_columns.begin(), prerequisite_columns.end(), column.name)) + prerequisite_columns.push_back(column.name); + } + + return filter_info; } InterpreterSelectQuery::InterpreterSelectQuery( @@ -269,6 +286,33 @@ static void checkAccessRightsForSelect( context->checkAccess(AccessType::SELECT, table_id, syntax_analyzer_result.requiredSourceColumnsForAccessCheck()); } +static ASTPtr parseAdditionalFilterConditionForTable( + const Map & setting, + const DatabaseAndTableWithAlias & target, + const Context & context) +{ + for (size_t i = 0; i < setting.size(); ++i) + { + const auto & tuple = setting[i].safeGet(); + auto & table = tuple.at(0).safeGet(); + auto & filter = tuple.at(1).safeGet(); + + if (table == target.alias || + (table == target.table && context.getCurrentDatabase() == target.database) || + (table == target.database + '.' + target.table)) + { + /// Try to parse expression + ParserExpression parser; + const auto & settings = context.getSettingsRef(); + return parseQuery( + parser, filter.data(), filter.data() + filter.size(), + "additional filter", settings.max_query_size, settings.max_parser_depth); + } + } + + return nullptr; +} + /// Returns true if we should ignore quotas and limits for a specified table in the system database. 
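
A minimal sketch, with simplified stand-in types, of the matching rule used by parseAdditionalFilterConditionForTable above: each additional_table_filters entry is a (table, filter) pair, and an entry applies to a table when it names the table's alias, its bare name within the current database, or its fully qualified database.table name.

/// Illustrative sketch, not part of the patch; TargetTable stands in for DatabaseAndTableWithAlias.
#include <string>

struct TargetTable
{
    std::string database;
    std::string table;
    std::string alias;
};

bool filterEntryMatches(const std::string & entry_table, const TargetTable & target, const std::string & current_database)
{
    return entry_table == target.alias
        || (entry_table == target.table && current_database == target.database)
        || entry_table == target.database + '.' + target.table;
}
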
static bool shouldIgnoreQuotaAndLimits(const StorageID & table_id) { @@ -448,6 +492,10 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (storage) view = dynamic_cast(storage.get()); + if (!settings.additional_table_filters.value.empty() && storage && !joined_tables.tablesWithColumns().empty()) + query_info.additional_filter_ast = parseAdditionalFilterConditionForTable( + settings.additional_table_filters, joined_tables.tablesWithColumns().front().table, *context); + auto analyze = [&] (bool try_move_to_prewhere) { /// Allow push down and other optimizations for VIEW: replace with subquery and rewrite it. @@ -566,16 +614,16 @@ InterpreterSelectQuery::InterpreterSelectQuery( /// Fix source_header for filter actions. if (row_policy_filter) { - filter_info = std::make_shared(); - filter_info->column_name = generateFilterActions(filter_info->actions, required_columns); + filter_info = generateFilterActions( + table_id, row_policy_filter, context, storage, storage_snapshot, metadata_snapshot, required_columns); + } - auto required_columns_from_filter = filter_info->actions->getRequiredColumns(); + if (query_info.additional_filter_ast) + { + additional_filter_info = generateFilterActions( + table_id, query_info.additional_filter_ast, context, storage, storage_snapshot, metadata_snapshot, required_columns); - for (const auto & column : required_columns_from_filter) - { - if (required_columns.end() == std::find(required_columns.begin(), required_columns.end(), column.name)) - required_columns.push_back(column.name); - } + additional_filter_info->do_remove_column = true; } source_header = storage_snapshot->getSampleBlockForColumns(required_columns); @@ -735,7 +783,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl() && options.to_stage > QueryProcessingStage::WithMergeableState; analysis_result = ExpressionAnalysisResult( - *query_analyzer, metadata_snapshot, first_stage, second_stage, options.only_analyze, filter_info, source_header); + *query_analyzer, metadata_snapshot, first_stage, second_stage, options.only_analyze, filter_info, additional_filter_info, source_header); if (options.to_stage == QueryProcessingStage::Enum::FetchColumns) { @@ -786,8 +834,16 @@ Block InterpreterSelectQuery::getSampleBlockImpl() if (analysis_result.use_grouping_set_key) res.insert({ nullptr, std::make_shared(), "__grouping_set" }); - for (const auto & key : query_analyzer->aggregationKeys()) - res.insert({nullptr, header.getByName(key.name).type, key.name}); + if (context->getSettingsRef().group_by_use_nulls && analysis_result.use_grouping_set_key) + { + for (const auto & key : query_analyzer->aggregationKeys()) + res.insert({nullptr, makeNullableSafe(header.getByName(key.name).type), key.name}); + } + else + { + for (const auto & key : query_analyzer->aggregationKeys()) + res.insert({nullptr, header.getByName(key.name).type, key.name}); + } for (const auto & aggregate : query_analyzer->aggregates()) { @@ -1295,6 +1351,18 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

( + query_plan.getCurrentDataStream(), + additional_filter_info->actions, + additional_filter_info->column_name, + additional_filter_info->do_remove_column); + + additional_filter_step->setStepDescription("Additional filter"); + query_plan.addStep(std::move(additional_filter_step)); + } + if (expressions.before_array_join) { QueryPlanStepPtr before_array_join_step @@ -1937,6 +2005,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc && storage && storage->getName() != "MaterializedMySQL" && !row_policy_filter + && !query_info.additional_filter_ast && processing_stage == QueryProcessingStage::FetchColumns && query_analyzer->hasAggregation() && (query_analyzer->aggregates().size() == 1) @@ -2036,6 +2105,7 @@ void InterpreterSelectQuery::executeFetchColumns(QueryProcessingStage::Enum proc && !query.limit_with_ties && !query.prewhere() && !query.where() + && !query_info.additional_filter_ast && !query.groupBy() && !query.having() && !query.orderBy() @@ -2326,6 +2396,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac merge_threads, temporary_data_merge_threads, storage_has_evenly_distributed_read, + settings.group_by_use_nulls, std::move(group_by_info), std::move(group_by_sort_description), should_produce_results_in_order_of_bucket_number); @@ -2402,9 +2473,9 @@ void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modific QueryPlanStepPtr step; if (modificator == Modificator::ROLLUP) - step = std::make_unique(query_plan.getCurrentDataStream(), std::move(params), final); + step = std::make_unique(query_plan.getCurrentDataStream(), std::move(params), final, settings.group_by_use_nulls); else if (modificator == Modificator::CUBE) - step = std::make_unique(query_plan.getCurrentDataStream(), std::move(params), final); + step = std::make_unique(query_plan.getCurrentDataStream(), std::move(params), final, settings.group_by_use_nulls); query_plan.addStep(std::move(step)); } diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index a95ff00bc0d..e70490f13ac 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -189,8 +189,6 @@ private: void executeMergeSorted(QueryPlan & query_plan, const SortDescription & sort_description, UInt64 limit, const std::string & description); - String generateFilterActions(ActionsDAGPtr & actions, const Names & prerequisite_columns = {}) const; - enum class Modificator { ROLLUP = 0, @@ -217,6 +215,9 @@ private: ASTPtr row_policy_filter; FilterDAGInfoPtr filter_info; + /// For additional_filter setting. + FilterDAGInfoPtr additional_filter_info; + QueryProcessingStage::Enum from_stage = QueryProcessingStage::FetchColumns; /// List of columns to read to execute the query. 
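
The interpreter changes above plumb two user-visible settings through query planning: an extra filter expression applied to the rows read for the query (or, via `additional_result_filter`, to the final result), and `group_by_use_nulls`, which makes aggregation keys produced by ROLLUP / CUBE / GROUPING SETS Nullable so that subtotal rows carry NULL instead of the key's default value. The sketches below are minimal usage examples, not code from this patch; the query text and the `numbers` tables are illustrative placeholders, and the assumption that `additional_result_filter` takes the predicate as a plain string is inferred from the settings parser change.

```sql
-- Sketch: apply an extra predicate to the final result of a query.
-- Assumes additional_result_filter accepts the filter expression as a plain string.
SELECT number
FROM system.numbers
LIMIT 10
SETTINGS additional_result_filter = 'number % 2 = 0';
```

```sql
-- Sketch: with group_by_use_nulls = 1 the ROLLUP subtotal row reports k = NULL
-- (the key becomes Nullable) instead of the key's default value 0.
SELECT number % 2 AS k, count() AS c
FROM numbers(10)
GROUP BY ROLLUP(k)
SETTINGS group_by_use_nulls = 1;
```
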
diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 9f87a47fced..b2622607760 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -280,7 +280,6 @@ Block InterpreterSelectWithUnionQuery::getSampleBlock(const ASTPtr & query_ptr_, void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan) { - // auto num_distinct_union = optimizeUnionList(); size_t num_plans = nested_interpreters.size(); const Settings & settings = context->getSettingsRef(); @@ -357,6 +356,7 @@ void InterpreterSelectWithUnionQuery::buildQueryPlan(QueryPlan & query_plan) } } + addAdditionalPostFilter(query_plan); query_plan.addInterpreterContext(context); } diff --git a/src/Interpreters/JIT/CHJIT.cpp b/src/Interpreters/JIT/CHJIT.cpp index 9eec82b4179..c2f3fc7c27d 100644 --- a/src/Interpreters/JIT/CHJIT.cpp +++ b/src/Interpreters/JIT/CHJIT.cpp @@ -244,28 +244,6 @@ private: } }; -// class AssemblyPrinter -// { -// public: - -// explicit AssemblyPrinter(llvm::TargetMachine &target_machine_) -// : target_machine(target_machine_) -// { -// } - -// void print(llvm::Module & module) -// { -// llvm::legacy::PassManager pass_manager; -// target_machine.Options.MCOptions.AsmVerbose = true; -// if (target_machine.addPassesToEmitFile(pass_manager, llvm::errs(), nullptr, llvm::CodeGenFileType::CGFT_AssemblyFile)) -// throw Exception(ErrorCodes::CANNOT_COMPILE_CODE, "MachineCode cannot be printed"); - -// pass_manager.run(module); -// } -// private: -// llvm::TargetMachine & target_machine; -// }; - /** MemoryManager for module. * Keep total allocated size during RuntimeDyld linker execution. */ diff --git a/src/Interpreters/MergeTreeTransaction.cpp b/src/Interpreters/MergeTreeTransaction.cpp index 432116feaf5..f438194b87b 100644 --- a/src/Interpreters/MergeTreeTransaction.cpp +++ b/src/Interpreters/MergeTreeTransaction.cpp @@ -12,16 +12,16 @@ namespace ErrorCodes { extern const int INVALID_TRANSACTION; extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; } -static TableLockHolder getLockForOrdinary(const StoragePtr & storage) +static void checkNotOrdinaryDatabase(const StoragePtr & storage) { if (storage->getStorageID().uuid != UUIDHelpers::Nil) - return {}; + return; - /// Maybe we should just throw an exception and do not support Ordinary database? - auto default_timeout = std::chrono::milliseconds(10 * 1000); - return storage->lockForShare(RWLockImpl::NO_QUERY, default_timeout); + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Table {} belongs to database with Ordinary engine. 
" + "This engine is deprecated and is not supported in transactions.", storage->getStorageID().getNameForLogs()); } MergeTreeTransaction::MergeTreeTransaction(CSN snapshot_, LocalTID local_tid_, UUID host_id, std::list::iterator snapshot_it_) @@ -131,29 +131,26 @@ void MergeTreeTransaction::addNewPartAndRemoveCovered(const StoragePtr & storage void MergeTreeTransaction::addNewPart(const StoragePtr & storage, const DataPartPtr & new_part) { - auto maybe_lock = getLockForOrdinary(storage); + checkNotOrdinaryDatabase(storage); std::lock_guard lock{mutex}; checkIsNotCancelled(); storages.insert(storage); - if (maybe_lock) - table_read_locks_for_ordinary_db.emplace_back(std::move(maybe_lock)); creating_parts.push_back(new_part); } void MergeTreeTransaction::removeOldPart(const StoragePtr & storage, const DataPartPtr & part_to_remove, const TransactionInfoContext & context) { - auto maybe_lock = getLockForOrdinary(storage); + checkNotOrdinaryDatabase(storage); { std::lock_guard lock{mutex}; checkIsNotCancelled(); part_to_remove->version.lockRemovalTID(tid, context); - NOEXCEPT_SCOPE; - storages.insert(storage); - if (maybe_lock) - table_read_locks_for_ordinary_db.emplace_back(std::move(maybe_lock)); - removing_parts.push_back(part_to_remove); + NOEXCEPT_SCOPE({ + storages.insert(storage); + removing_parts.push_back(part_to_remove); + }); } part_to_remove->appendRemovalTIDToVersionMetadata(); @@ -161,12 +158,10 @@ void MergeTreeTransaction::removeOldPart(const StoragePtr & storage, const DataP void MergeTreeTransaction::addMutation(const StoragePtr & table, const String & mutation_id) { - auto maybe_lock = getLockForOrdinary(table); + checkNotOrdinaryDatabase(table); std::lock_guard lock{mutex}; checkIsNotCancelled(); storages.insert(table); - if (maybe_lock) - table_read_locks_for_ordinary_db.emplace_back(std::move(maybe_lock)); mutations.emplace_back(table, mutation_id); } @@ -215,6 +210,7 @@ scope_guard MergeTreeTransaction::beforeCommit() void MergeTreeTransaction::afterCommit(CSN assigned_csn) noexcept { + LockMemoryExceptionInThread memory_tracker_lock(VariableContext::Global); /// Write allocated CSN into version metadata, so we will know CSN without reading it from transaction log /// and we will be able to remove old entries from transaction log in ZK. /// It's not a problem if server crash before CSN is written, because we already have TID in data part and entry in the log. 
@@ -250,6 +246,7 @@ void MergeTreeTransaction::afterCommit(CSN assigned_csn) noexcept bool MergeTreeTransaction::rollback() noexcept { + LockMemoryExceptionInThread memory_tracker_lock(VariableContext::Global); CSN expected = Tx::UnknownCSN; bool need_rollback = csn.compare_exchange_strong(expected, Tx::RolledBackCSN); diff --git a/src/Interpreters/MergeTreeTransaction.h b/src/Interpreters/MergeTreeTransaction.h index 7397ea12c12..f2d8d29d244 100644 --- a/src/Interpreters/MergeTreeTransaction.h +++ b/src/Interpreters/MergeTreeTransaction.h @@ -76,7 +76,6 @@ private: /// Lists of changes made by transaction std::unordered_set storages TSA_GUARDED_BY(mutex); - std::vector table_read_locks_for_ordinary_db TSA_GUARDED_BY(mutex); DataPartsVector creating_parts TSA_GUARDED_BY(mutex); DataPartsVector removing_parts TSA_GUARDED_BY(mutex); using RunningMutationsList = std::vector>; diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index c3152f31808..d12eca407d9 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -201,8 +201,8 @@ bool PartLog::addNewParts( { PartLogElement elem; - if (query_id.data && query_id.size) - elem.query_id.insert(0, query_id.data, query_id.size); + if (!query_id.empty()) + elem.query_id.insert(0, query_id.data(), query_id.size()); elem.event_type = PartLogElement::NEW_PART; //-V1048 diff --git a/src/Interpreters/ThreadStatusExt.cpp b/src/Interpreters/ThreadStatusExt.cpp index 53d7fd0457a..9ad3dc7c4a9 100644 --- a/src/Interpreters/ThreadStatusExt.cpp +++ b/src/Interpreters/ThreadStatusExt.cpp @@ -343,7 +343,7 @@ void ThreadStatus::finalizeQueryProfiler() void ThreadStatus::detachQuery(bool exit_if_already_detached, bool thread_exits) { - NOEXCEPT_SCOPE; + LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global); if (exit_if_already_detached && thread_state == ThreadState::DetachedFromQuery) { diff --git a/src/Interpreters/TransactionLog.cpp b/src/Interpreters/TransactionLog.cpp index a08f940a748..b349fdc1b23 100644 --- a/src/Interpreters/TransactionLog.cpp +++ b/src/Interpreters/TransactionLog.cpp @@ -140,8 +140,7 @@ void TransactionLog::loadEntries(Strings::const_iterator beg, Strings::const_ite } futures.clear(); - NOEXCEPT_SCOPE_STRICT; - { + NOEXCEPT_SCOPE_STRICT({ std::lock_guard lock{mutex}; for (const auto & entry : loaded) { @@ -151,7 +150,8 @@ void TransactionLog::loadEntries(Strings::const_iterator beg, Strings::const_ite tid_to_csn.emplace(entry.first, entry.second); } last_loaded_entry = last_entry; - } + }); + { std::lock_guard lock{running_list_mutex}; latest_snapshot = loaded.back().second.csn; @@ -445,10 +445,11 @@ CSN TransactionLog::commitTransaction(const MergeTreeTransactionPtr & txn, bool /// Do not allow exceptions between commit point and the and of transaction finalization /// (otherwise it may stuck in COMMITTING state holding snapshot). 
- NOEXCEPT_SCOPE_STRICT; - /// FIXME Transactions: Sequential node numbers in ZooKeeper are Int32, but 31 bit is not enough for production use - /// (overflow is possible in a several weeks/months of active usage) - allocated_csn = deserializeCSN(csn_path_created.substr(zookeeper_path_log.size() + 1)); + NOEXCEPT_SCOPE_STRICT({ + /// FIXME Transactions: Sequential node numbers in ZooKeeper are Int32, but 31 bit is not enough for production use + /// (overflow is possible in a several weeks/months of active usage) + allocated_csn = deserializeCSN(csn_path_created.substr(zookeeper_path_log.size() + 1)); + }); } return finalizeCommittedTransaction(txn.get(), allocated_csn, state_guard); @@ -456,6 +457,7 @@ CSN TransactionLog::commitTransaction(const MergeTreeTransactionPtr & txn, bool CSN TransactionLog::finalizeCommittedTransaction(MergeTreeTransaction * txn, CSN allocated_csn, scope_guard & state_guard) noexcept { + LockMemoryExceptionInThread memory_tracker_lock(VariableContext::Global); chassert(!allocated_csn == txn->isReadOnly()); if (allocated_csn) { @@ -501,6 +503,7 @@ bool TransactionLog::waitForCSNLoaded(CSN csn) const void TransactionLog::rollbackTransaction(const MergeTreeTransactionPtr & txn) noexcept { + LockMemoryExceptionInThread memory_tracker_lock(VariableContext::Global); LOG_TRACE(log, "Rolling back transaction {}{}", txn->tid, std::uncaught_exceptions() ? fmt::format(" due to uncaught exception (code: {})", getCurrentExceptionCode()) : ""); diff --git a/src/Interpreters/TransactionsInfoLog.cpp b/src/Interpreters/TransactionsInfoLog.cpp index 0498ee00e9e..b62cd4672d8 100644 --- a/src/Interpreters/TransactionsInfoLog.cpp +++ b/src/Interpreters/TransactionsInfoLog.cpp @@ -55,7 +55,7 @@ void TransactionsInfoLogElement::fillCommonFields(const TransactionInfoContext * event_time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); thread_id = getThreadId(); - query_id = CurrentThread::getQueryId().toString(); + query_id = std::string(CurrentThread::getQueryId()); if (!context) return; diff --git a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp index b58b90b6d47..070fac7ccbd 100644 --- a/src/Interpreters/TranslateQualifiedNamesVisitor.cpp +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.cpp @@ -31,7 +31,7 @@ namespace ErrorCodes extern const int UNSUPPORTED_JOIN_KEYS; extern const int LOGICAL_ERROR; } -bool TranslateQualifiedNamesMatcher::Data::matchColumnName(const std::string_view & name, const String & column_name, DataTypePtr column_type) +bool TranslateQualifiedNamesMatcher::Data::matchColumnName(std::string_view name, const String & column_name, DataTypePtr column_type) { if (name.size() < column_name.size()) return false; diff --git a/src/Interpreters/TranslateQualifiedNamesVisitor.h b/src/Interpreters/TranslateQualifiedNamesVisitor.h index e0c2f6b6bc0..73e45fc7ea0 100644 --- a/src/Interpreters/TranslateQualifiedNamesVisitor.h +++ b/src/Interpreters/TranslateQualifiedNamesVisitor.h @@ -39,7 +39,7 @@ public: bool hasTable() const { return !tables.empty(); } bool processAsterisks() const { return hasTable() && has_columns; } bool unknownColumn(size_t table_pos, const ASTIdentifier & identifier) const; - static bool matchColumnName(const std::string_view & name, const String & column_name, DataTypePtr column_type); + static bool matchColumnName(std::string_view name, const String & column_name, DataTypePtr column_type); }; static void visit(ASTPtr & ast, Data & data); diff 
--git a/src/Interpreters/UserDefinedSQLObjectsLoader.cpp b/src/Interpreters/UserDefinedSQLObjectsLoader.cpp index 75b91f3a817..c6f50fc4a0a 100644 --- a/src/Interpreters/UserDefinedSQLObjectsLoader.cpp +++ b/src/Interpreters/UserDefinedSQLObjectsLoader.cpp @@ -43,7 +43,7 @@ UserDefinedSQLObjectsLoader::UserDefinedSQLObjectsLoader() : log(&Poco::Logger::get("UserDefinedSQLObjectsLoader")) {} -void UserDefinedSQLObjectsLoader::loadUserDefinedObject(ContextPtr context, UserDefinedSQLObjectType object_type, const std::string_view & name, const String & path) +void UserDefinedSQLObjectsLoader::loadUserDefinedObject(ContextPtr context, UserDefinedSQLObjectType object_type, std::string_view name, const String & path) { auto name_ref = StringRef(name.data(), name.size()); LOG_DEBUG(log, "Loading user defined object {} from file {}", backQuote(name_ref), path); diff --git a/src/Interpreters/UserDefinedSQLObjectsLoader.h b/src/Interpreters/UserDefinedSQLObjectsLoader.h index 2e747f67a8d..9dfba1181c1 100644 --- a/src/Interpreters/UserDefinedSQLObjectsLoader.h +++ b/src/Interpreters/UserDefinedSQLObjectsLoader.h @@ -29,7 +29,7 @@ public: private: - void loadUserDefinedObject(ContextPtr context, UserDefinedSQLObjectType object_type, const std::string_view & object_name, const String & file_path); + void loadUserDefinedObject(ContextPtr context, UserDefinedSQLObjectType object_type, std::string_view object_name, const String & file_path); Poco::Logger * log; bool enable_persistence = true; }; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 85c4ea261a0..ae915aab867 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -876,7 +876,7 @@ static std::tuple executeQueryImpl( { auto progress_out = process_list_elem->getProgressOut(); elem.result_rows = progress_out.written_rows; - elem.result_bytes = progress_out.written_rows; + elem.result_bytes = progress_out.written_bytes; } if (elem.read_rows != 0) diff --git a/src/Loggers/ExtendedLogChannel.cpp b/src/Loggers/ExtendedLogChannel.cpp index fa414216aa5..116892b9030 100644 --- a/src/Loggers/ExtendedLogChannel.cpp +++ b/src/Loggers/ExtendedLogChannel.cpp @@ -28,8 +28,8 @@ ExtendedLogMessage ExtendedLogMessage::getFrom(const Poco::Message & base) if (current_thread) { auto query_id_ref = CurrentThread::getQueryId(); - if (query_id_ref.size) - msg_ext.query_id.assign(query_id_ref.data, query_id_ref.size); + if (!query_id_ref.empty()) + msg_ext.query_id.assign(query_id_ref.data(), query_id_ref.size()); } msg_ext.thread_id = getThreadId(); diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index 69927c430dc..39d89f56e91 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -509,6 +509,25 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format settings.ostr << ')'; written = true; } + + if (!written && name == "viewIfPermitted"sv) + { + /// viewIfPermitted() needs special formatting: ELSE instead of comma between arguments, and better indents too. + const auto * nl_or_nothing = settings.one_line ? "" : "\n"; + auto indent0 = settings.one_line ? "" : String(4u * frame.indent, ' '); + auto indent1 = settings.one_line ? "" : String(4u * (frame.indent + 1), ' '); + auto indent2 = settings.one_line ? "" : String(4u * (frame.indent + 2), ' '); + settings.ostr << (settings.hilite ? hilite_function : "") << name << "(" << (settings.hilite ? 
hilite_none : "") << nl_or_nothing; + FormatStateStacked frame_nested = frame; + frame_nested.need_parens = false; + frame_nested.indent += 2; + arguments->children[0]->formatImpl(settings, state, frame_nested); + settings.ostr << nl_or_nothing << indent1 << (settings.hilite ? hilite_keyword : "") << (settings.one_line ? " " : "") + << "ELSE " << (settings.hilite ? hilite_none : "") << nl_or_nothing << indent2; + arguments->children[1]->formatImpl(settings, state, frame_nested); + settings.ostr << nl_or_nothing << indent0 << ")"; + return; + } } if (!written && arguments->children.size() >= 2) diff --git a/src/Parsers/ASTPartition.h b/src/Parsers/ASTPartition.h index 1bd16d55795..fbe05ce3a8e 100644 --- a/src/Parsers/ASTPartition.h +++ b/src/Parsers/ASTPartition.h @@ -1,7 +1,6 @@ #pragma once #include -#include namespace DB diff --git a/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp b/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp index 83156c6a8e1..2c25fc14e7d 100644 --- a/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp +++ b/src/Parsers/Access/ParserCreateRowPolicyQuery.cpp @@ -75,7 +75,7 @@ namespace { for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const std::string_view & command = RowPolicyFilterTypeInfo::get(filter_type).command; + std::string_view command = RowPolicyFilterTypeInfo::get(filter_type).command; commands.emplace(command); } } @@ -96,7 +96,7 @@ namespace for (auto filter_type : collections::range(RowPolicyFilterType::MAX)) { - const std::string_view & command = RowPolicyFilterTypeInfo::get(filter_type).command; + std::string_view command = RowPolicyFilterTypeInfo::get(filter_type).command; if (ParserKeyword{command.data()}.ignore(pos, expected)) { res_commands.emplace(command); diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 045f6aad2b5..bd65305cc52 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -1068,13 +1068,16 @@ bool ParserFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) bool ParserTableFunctionView::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserIdentifier id_parser; - ParserKeyword view("VIEW"); ParserSelectWithUnionQuery select; ASTPtr identifier; ASTPtr query; - if (!view.ignore(pos, expected)) + bool if_permitted = false; + + if (ParserKeyword{"VIEWIFPERMITTED"}.ignore(pos, expected)) + if_permitted = true; + else if (!ParserKeyword{"VIEW"}.ignore(pos, expected)) return false; if (pos->type != TokenType::OpeningRoundBracket) @@ -1094,15 +1097,30 @@ bool ParserTableFunctionView::parseImpl(Pos & pos, ASTPtr & node, Expected & exp return false; } + ASTPtr else_ast; + if (if_permitted) + { + if (!ParserKeyword{"ELSE"}.ignore(pos, expected)) + return false; + + if (!ParserWithOptionalAlias{std::make_unique(true, true), true}.parse(pos, else_ast, expected)) + return false; + } + if (pos->type != TokenType::ClosingRoundBracket) return false; + ++pos; + + auto expr_list = std::make_shared(); + expr_list->children.push_back(query); + if (if_permitted) + expr_list->children.push_back(else_ast); + auto function_node = std::make_shared(); tryGetIdentifierNameInto(identifier, function_node->name); - auto expr_list_with_single_query = std::make_shared(); - expr_list_with_single_query->children.push_back(query); - function_node->name = "view"; - function_node->arguments = expr_list_with_single_query; + function_node->name = if_permitted ? 
"viewIfPermitted" : "view"; + function_node->arguments = expr_list; function_node->children.push_back(function_node->arguments); node = function_node; return true; @@ -1971,6 +1989,7 @@ const char * ParserAlias::restricted_keywords[] = "WITH", "INTERSECT", "EXCEPT", + "ELSE", nullptr }; diff --git a/src/Parsers/ExpressionElementParsers.h b/src/Parsers/ExpressionElementParsers.h index f4dfe80f43e..3883631b61c 100644 --- a/src/Parsers/ExpressionElementParsers.h +++ b/src/Parsers/ExpressionElementParsers.h @@ -162,7 +162,7 @@ protected: bool is_table_function; }; -// A special function parser for view table function. +// A special function parser for view and viewIfPermitted table functions. // It parses an SELECT query as its argument and doesn't support getColumnName(). class ParserTableFunctionView : public IParserBase { diff --git a/src/Parsers/ExpressionListParsers.h b/src/Parsers/ExpressionListParsers.h index 2b127dc2607..05c7ec946ee 100644 --- a/src/Parsers/ExpressionListParsers.h +++ b/src/Parsers/ExpressionListParsers.h @@ -9,6 +9,11 @@ #include #include +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc99-extensions" +#endif + namespace DB { @@ -564,3 +569,7 @@ protected: }; } + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index b73919f4f36..1999eff37a8 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include @@ -26,7 +25,7 @@ namespace ErrorCodes using IdentifierNameSet = std::set; class WriteBuffer; - +using Strings = std::vector; /** Element of the syntax tree (hereinafter - directed acyclic graph with elements of semantics) */ diff --git a/src/Parsers/MySQL/ASTDeclareOption.h b/src/Parsers/MySQL/ASTDeclareOption.h index a9529924567..c493c49c61b 100644 --- a/src/Parsers/MySQL/ASTDeclareOption.h +++ b/src/Parsers/MySQL/ASTDeclareOption.h @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index ce79ccf708a..275f3bc75cc 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -584,7 +584,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe auto storage_parse_result = storage_p.parse(pos, storage, expected); - if (storage_parse_result && need_parse_as_select()) + if ((storage_parse_result || is_temporary) && need_parse_as_select()) { if (!select_p.parse(pos, select, expected)) return false; diff --git a/src/Parsers/ParserSetQuery.cpp b/src/Parsers/ParserSetQuery.cpp index 5f69db633ac..0ff437bcfb1 100644 --- a/src/Parsers/ParserSetQuery.cpp +++ b/src/Parsers/ParserSetQuery.cpp @@ -12,12 +12,63 @@ namespace DB { +class ParserLiteralOrMap : public IParserBase +{ +public: +protected: + const char * getName() const override { return "literal or map"; } + bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override + { + { + ParserLiteral literal; + if (literal.parse(pos, node, expected)) + return true; + } + + ParserToken l_br(TokenType::OpeningCurlyBrace); + ParserToken r_br(TokenType::ClosingCurlyBrace); + ParserToken comma(TokenType::Comma); + ParserToken colon(TokenType::Colon); + ParserStringLiteral literal; + + if (!l_br.ignore(pos, expected)) + return false; + + Map map; + + while (!r_br.ignore(pos, expected)) + { + if (!map.empty() && !comma.ignore(pos, expected)) + return false; + + ASTPtr key; + ASTPtr val; + + if (!literal.parse(pos, 
key, expected)) + return false; + + if (!colon.ignore(pos, expected)) + return false; + + if (!literal.parse(pos, val, expected)) + return false; + + Tuple tuple; + tuple.push_back(std::move(key->as()->value)); + tuple.push_back(std::move(val->as()->value)); + map.push_back(std::move(tuple)); + } + + node = std::make_shared(std::move(map)); + return true; + } +}; /// Parse `name = value`. bool ParserSetQuery::parseNameValuePair(SettingChange & change, IParser::Pos & pos, Expected & expected) { ParserCompoundIdentifier name_p; - ParserLiteral value_p; + ParserLiteralOrMap value_p; ParserToken s_eq(TokenType::Equals); ASTPtr name; diff --git a/src/Processors/Executors/PipelineExecutor.cpp b/src/Processors/Executors/PipelineExecutor.cpp index 68225d73ff1..29c57e08573 100644 --- a/src/Processors/Executors/PipelineExecutor.cpp +++ b/src/Processors/Executors/PipelineExecutor.cpp @@ -205,7 +205,6 @@ void PipelineExecutor::executeStepImpl(size_t thread_num, std::atomic_bool * yie Stopwatch total_time_watch; #endif - // auto & node = tasks.getNode(thread_num); auto & context = tasks.getThreadContext(thread_num); bool yield = false; diff --git a/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp b/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp index 5232d9166af..ebd9783b4fd 100644 --- a/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp +++ b/src/Processors/Formats/Impl/ArrowBufferedStreams.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include diff --git a/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp index ddee20c187b..b63b1e7b9b1 100644 --- a/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/AvroRowOutputFormat.cpp @@ -179,15 +179,15 @@ AvroSerializer::SchemaWithSerializeFn AvroSerializer::createSchemaWithSerializeF if (traits->isStringAsString(column_name)) return {avro::StringSchema(), [](const IColumn & column, size_t row_num, avro::Encoder & encoder) { - const StringRef & s = assert_cast(column).getDataAt(row_num); - encoder.encodeString(s.toString()); + const std::string_view & s = assert_cast(column).getDataAt(row_num).toView(); + encoder.encodeString(std::string(s)); } }; else return {avro::BytesSchema(), [](const IColumn & column, size_t row_num, avro::Encoder & encoder) { - const StringRef & s = assert_cast(column).getDataAt(row_num); - encoder.encodeBytes(reinterpret_cast(s.data), s.size); + const std::string_view & s = assert_cast(column).getDataAt(row_num).toView(); + encoder.encodeBytes(reinterpret_cast(s.data()), s.size()); } }; case TypeIndex::FixedString: @@ -196,8 +196,8 @@ AvroSerializer::SchemaWithSerializeFn AvroSerializer::createSchemaWithSerializeF auto schema = avro::FixedSchema(size, "fixed_" + toString(type_name_increment)); return {schema, [](const IColumn & column, size_t row_num, avro::Encoder & encoder) { - const StringRef & s = assert_cast(column).getDataAt(row_num); - encoder.encodeFixed(reinterpret_cast(s.data), s.size); + const std::string_view & s = assert_cast(column).getDataAt(row_num).toView(); + encoder.encodeFixed(reinterpret_cast(s.data()), s.size()); }}; } case TypeIndex::Enum8: @@ -343,8 +343,8 @@ AvroSerializer::SchemaWithSerializeFn AvroSerializer::createSchemaWithSerializeF auto keys_serializer = [](const IColumn & column, size_t row_num, avro::Encoder & encoder) { - const StringRef & s = column.getDataAt(row_num); - encoder.encodeString(s.toString()); + const std::string_view & s = column.getDataAt(row_num).toView(); + 
encoder.encodeString(std::string(s)); }; const auto & values_type = map_type.getValueType(); diff --git a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp index e3cc896466b..34c120c3f52 100644 --- a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp +++ b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp @@ -365,8 +365,8 @@ namespace DB } else { - StringRef string_ref = internal_column.getDataAt(string_i); - status = builder.Append(string_ref.data, string_ref.size); + std::string_view string_ref = internal_column.getDataAt(string_i).toView(); + status = builder.Append(string_ref.data(), string_ref.size()); } checkStatus(status, write_column->getName(), format_name); } diff --git a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp index ad173e449d6..50145fd5bc0 100644 --- a/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp @@ -30,6 +30,7 @@ namespace DB namespace ErrorCodes { extern const int LOGICAL_ERROR; + extern const int INCORRECT_DATA; } CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header, Params params_, const FormatSchemaInfo & info, const FormatSettings & format_settings_) @@ -264,20 +265,20 @@ bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension if (in->eof()) return false; - auto array = readMessage(); - -#if CAPNP_VERSION >= 7000 && CAPNP_VERSION < 8000 - capnp::UnalignedFlatArrayMessageReader msg(array); -#else - capnp::FlatArrayMessageReader msg(array); -#endif - - auto root_reader = msg.getRoot(root); - - for (size_t i = 0; i != columns.size(); ++i) + try { - auto value = getReaderByColumnName(root_reader, column_names[i]); - insertValue(*columns[i], column_types[i], value, format_settings.capn_proto.enum_comparing_mode); + auto array = readMessage(); + capnp::FlatArrayMessageReader msg(array); + auto root_reader = msg.getRoot(root); + for (size_t i = 0; i != columns.size(); ++i) + { + auto value = getReaderByColumnName(root_reader, column_names[i]); + insertValue(*columns[i], column_types[i], value, format_settings.capn_proto.enum_comparing_mode); + } + } + catch (const kj::Exception & e) + { + throw Exception(ErrorCodes::INCORRECT_DATA, "Cannot read row: {}", e.getDescription().cStr()); } return true; diff --git a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp index 9eef72f95da..3bcea8a8843 100644 --- a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.cpp @@ -62,7 +62,7 @@ const String & JSONEachRowRowInputFormat::columnName(size_t i) const return getPort().getHeader().getByPosition(i).name; } -inline size_t JSONEachRowRowInputFormat::columnIndex(const StringRef & name, size_t key_index) +inline size_t JSONEachRowRowInputFormat::columnIndex(StringRef name, size_t key_index) { /// Optimization by caching the order of fields (which is almost always the same) /// and a quick check to match the next expected field, instead of searching the hash table. 
@@ -124,7 +124,7 @@ static inline void skipColonDelimeter(ReadBuffer & istr) skipWhitespaceIfAny(istr); } -void JSONEachRowRowInputFormat::skipUnknownField(const StringRef & name_ref) +void JSONEachRowRowInputFormat::skipUnknownField(StringRef name_ref) { if (!format_settings.skip_unknown_fields) throw Exception("Unknown field found while parsing JSONEachRow format: " + name_ref.toString(), ErrorCodes::INCORRECT_DATA); diff --git a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h index 1da14a532de..1673d55b9fd 100644 --- a/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h +++ b/src/Processors/Formats/Impl/JSONEachRowRowInputFormat.h @@ -40,9 +40,9 @@ private: void syncAfterError() override; const String & columnName(size_t i) const; - size_t columnIndex(const StringRef & name, size_t key_index); + size_t columnIndex(StringRef name, size_t key_index); bool advanceToNextKey(size_t key_index); - void skipUnknownField(const StringRef & name_ref); + void skipUnknownField(StringRef name_ref); StringRef readColumnName(ReadBuffer & buf); void readField(size_t index, MutableColumns & columns); void readJSONObject(MutableColumns & columns); diff --git a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp index e53aafb4e56..91183ebf633 100644 --- a/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowOutputFormat.cpp @@ -98,16 +98,16 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr } case TypeIndex::String: { - const StringRef & string = assert_cast(column).getDataAt(row_num); - packer.pack_bin(string.size); - packer.pack_bin_body(string.data, string.size); + const std::string_view & string = assert_cast(column).getDataAt(row_num).toView(); + packer.pack_bin(string.size()); + packer.pack_bin_body(string.data(), string.size()); return; } case TypeIndex::FixedString: { - const StringRef & string = assert_cast(column).getDataAt(row_num); - packer.pack_bin(string.size); - packer.pack_bin_body(string.data, string.size); + const std::string_view & string = assert_cast(column).getDataAt(row_num).toView(); + packer.pack_bin(string.size()); + packer.pack_bin_body(string.data(), string.size()); return; } case TypeIndex::Array: @@ -178,18 +178,18 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr { WriteBufferFromOwnString buf; writeBinary(uuid_column.getElement(row_num), buf); - StringRef uuid_bin = buf.stringRef(); - packer.pack_bin(uuid_bin.size); - packer.pack_bin_body(uuid_bin.data, uuid_bin.size); + std::string_view uuid_bin = buf.stringRef().toView(); + packer.pack_bin(uuid_bin.size()); + packer.pack_bin_body(uuid_bin.data(), uuid_bin.size()); return; } case FormatSettings::MsgPackUUIDRepresentation::STR: { WriteBufferFromOwnString buf; writeText(uuid_column.getElement(row_num), buf); - StringRef uuid_text = buf.stringRef(); - packer.pack_str(uuid_text.size); - packer.pack_bin_body(uuid_text.data, uuid_text.size); + std::string_view uuid_text = buf.stringRef().toView(); + packer.pack_str(uuid_text.size()); + packer.pack_bin_body(uuid_text.data(), uuid_text.size()); return; } case FormatSettings::MsgPackUUIDRepresentation::EXT: @@ -198,9 +198,9 @@ void MsgPackRowOutputFormat::serializeField(const IColumn & column, DataTypePtr UUID value = uuid_column.getElement(row_num); writeBinaryBigEndian(value.toUnderType().items[0], buf); 
writeBinaryBigEndian(value.toUnderType().items[1], buf); - StringRef uuid_ext = buf.stringRef(); + std::string_view uuid_ext = buf.stringRef().toView(); packer.pack_ext(sizeof(UUID), int8_t(MsgPackExtensionTypes::UUIDType)); - packer.pack_ext_body(uuid_ext.data, uuid_ext.size); + packer.pack_ext_body(uuid_ext.data(), uuid_ext.size()); return; } } diff --git a/src/Processors/Formats/Impl/NativeFormat.cpp b/src/Processors/Formats/Impl/NativeFormat.cpp index 423fd483712..a8e2ddf95e4 100644 --- a/src/Processors/Formats/Impl/NativeFormat.cpp +++ b/src/Processors/Formats/Impl/NativeFormat.cpp @@ -74,15 +74,6 @@ protected: if (chunk) { auto block = getPort(PortKind::Main).getHeader(); - - // const auto & info = chunk.getChunkInfo(); - // const auto * agg_info = typeid_cast(info.get()); - // if (agg_info) - // { - // block.info.bucket_num = agg_info->bucket_num; - // block.info.is_overflows = agg_info->is_overflows; - // } - block.setColumns(chunk.detachColumns()); writer.write(block); } diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp index 5e979c3d35a..1de2acbb3b9 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp @@ -225,9 +225,9 @@ void ORCBlockOutputFormat::writeStrings( } string_orc_column.notNull[i] = 1; - const StringRef & string = string_column.getDataAt(i); - string_orc_column.data[i] = const_cast(string.data); - string_orc_column.length[i] = string.size; + const std::string_view & string = string_column.getDataAt(i).toView(); + string_orc_column.data[i] = const_cast(string.data()); + string_orc_column.length[i] = string.size(); } string_orc_column.numElements = string_column.size(); } diff --git a/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.cpp b/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.cpp index 9a38a11f6f0..1d0e987f0c4 100644 --- a/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.cpp +++ b/src/Processors/Formats/Impl/RawBLOBRowOutputFormat.cpp @@ -17,8 +17,8 @@ RawBLOBRowOutputFormat::RawBLOBRowOutputFormat( void RawBLOBRowOutputFormat::writeField(const IColumn & column, const ISerialization &, size_t row_num) { - StringRef value = column.getDataAt(row_num); - out.write(value.data, value.size); + std::string_view value = column.getDataAt(row_num).toView(); + out.write(value.data(), value.size()); } diff --git a/src/Processors/Merges/Algorithms/Graphite.cpp b/src/Processors/Merges/Algorithms/Graphite.cpp index f77bb790332..2448a1e2a94 100644 --- a/src/Processors/Merges/Algorithms/Graphite.cpp +++ b/src/Processors/Merges/Algorithms/Graphite.cpp @@ -71,11 +71,11 @@ static const Graphite::Pattern undef_pattern = .type = undef_pattern.TypeUndef, }; -inline static const Patterns & selectPatternsForMetricType(const Graphite::Params & params, const StringRef path) +inline static const Patterns & selectPatternsForMetricType(const Graphite::Params & params, std::string_view path) { if (params.patterns_typed) { - std::string_view path_view = path.toView(); + std::string_view path_view = path; if (path_view.find("?"sv) == path_view.npos) return params.patterns_plain; else @@ -89,7 +89,7 @@ inline static const Patterns & selectPatternsForMetricType(const Graphite::Param Graphite::RollupRule selectPatternForPath( const Graphite::Params & params, - StringRef path) + std::string_view path) { const Graphite::Pattern * first_match = &undef_pattern; @@ -119,7 +119,7 @@ Graphite::RollupRule selectPatternForPath( } else { - if 
(pattern.regexp->match(path.data, path.size)) + if (pattern.regexp->match(path.data(), path.size())) { /// General pattern with matched path if (pattern.type == pattern.TypeAll) diff --git a/src/Processors/Merges/Algorithms/Graphite.h b/src/Processors/Merges/Algorithms/Graphite.h index 05306ebe30f..46b1bbbfcad 100644 --- a/src/Processors/Merges/Algorithms/Graphite.h +++ b/src/Processors/Merges/Algorithms/Graphite.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include @@ -147,7 +146,7 @@ struct Params using RollupRule = std::pair; -Graphite::RollupRule selectPatternForPath(const Graphite::Params & params, StringRef path); +Graphite::RollupRule selectPatternForPath(const Graphite::Params & params, std::string_view path); void setGraphitePatternsFromConfig(ContextPtr context, const String & config_element, Graphite::Params & params); diff --git a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.cpp index eff62d73f50..467ded19f4d 100644 --- a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.cpp @@ -120,7 +120,7 @@ IMergingAlgorithm::Status GraphiteRollupSortedAlgorithm::merge() return Status(current.impl->order); } - StringRef next_path = current->all_columns[columns_definition.path_column_num]->getDataAt(current->getRow()); + std::string_view next_path = current->all_columns[columns_definition.path_column_num]->getDataAt(current->getRow()).toView(); bool new_path = is_first || next_path != current_group_path; is_first = false; @@ -190,7 +190,7 @@ IMergingAlgorithm::Status GraphiteRollupSortedAlgorithm::merge() current_subgroup_newest_row.set(current, sources[current.impl->order].chunk); /// Small hack: group and subgroups have the same path, so we can set current_group_path here instead of startNextGroup - /// But since we keep in memory current_subgroup_newest_row's block, we could use StringRef for current_group_path and don't + /// But since we keep in memory current_subgroup_newest_row's block, we could use string_view for current_group_path and don't /// make deep copy of the path. current_group_path = next_path; } diff --git a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h index 4968cbfc470..d6d2f66fb82 100644 --- a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h @@ -92,7 +92,7 @@ private: */ /// Path name of current bucket - StringRef current_group_path; + std::string_view current_group_path; static constexpr size_t max_row_refs = 2; /// current_subgroup_newest_row, current_row. /// Last row with maximum version for current primary key (time bucket). 
diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 25719166acd..77db1e06d06 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -10,7 +10,7 @@ namespace DB MergingSortedAlgorithm::MergingSortedAlgorithm( Block header_, size_t num_inputs, - SortDescription description_, + const SortDescription & description_, size_t max_block_size, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, @@ -18,7 +18,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( bool use_average_block_sizes) : header(std::move(header_)) , merged_data(header.cloneEmptyColumns(), use_average_block_sizes, max_block_size) - , description(std::move(description_)) + , description(description_) , limit(limit_) , out_row_sources_buf(out_row_sources_buf_) , current_inputs(num_inputs) @@ -29,7 +29,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( sort_description_types.reserve(description.size()); /// Replace column names in description to positions. - for (auto & column_description : description) + for (const auto & column_description : description) { has_collation |= column_description.collator != nullptr; sort_description_types.emplace_back(header.getByName(column_description.column_name).type); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index cf32e5fd4dd..2537c48b128 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -16,7 +16,7 @@ public: MergingSortedAlgorithm( Block header_, size_t num_inputs, - SortDescription description_, + const SortDescription & description_, size_t max_block_size, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, @@ -38,7 +38,7 @@ private: /// Settings SortDescription description; - UInt64 limit; + const UInt64 limit; bool has_collation = false; /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 4cb74ffc71e..9ee91ef6db7 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -10,7 +10,7 @@ namespace DB MergingSortedTransform::MergingSortedTransform( const Block & header, size_t num_inputs, - SortDescription description_, + const SortDescription & description_, size_t max_block_size, SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, @@ -19,10 +19,14 @@ MergingSortedTransform::MergingSortedTransform( bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( - num_inputs, header, header, have_all_inputs_, limit_, + num_inputs, + header, + header, + have_all_inputs_, + limit_, header, num_inputs, - std::move(description_), + description_, max_block_size, sorting_queue_strategy, limit_, diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 16e3e2791ee..50586177c6d 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -14,7 +14,7 @@ public: MergingSortedTransform( const Block & header, size_t num_inputs, - SortDescription description, + const SortDescription & description, size_t max_block_size, SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, diff --git 
a/src/Processors/QueryPlan/AggregatingStep.cpp b/src/Processors/QueryPlan/AggregatingStep.cpp index 0a4b12084eb..f4e3749bd70 100644 --- a/src/Processors/QueryPlan/AggregatingStep.cpp +++ b/src/Processors/QueryPlan/AggregatingStep.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -46,22 +47,32 @@ Block appendGroupingSetColumn(Block header) return res; } -static Block appendGroupingColumn(Block block, const GroupingSetsParamsList & params) +static inline void convertToNullable(Block & header, const Names & keys) +{ + for (const auto & key : keys) + { + auto & column = header.getByName(key); + + column.type = makeNullableSafe(column.type); + column.column = makeNullableSafe(column.column); + } +} + +Block generateOutputHeader(const Block & input_header, const Names & keys, bool use_nulls) +{ + auto header = appendGroupingSetColumn(input_header); + if (use_nulls) + convertToNullable(header, keys); + return header; +} + + +static Block appendGroupingColumn(Block block, const Names & keys, const GroupingSetsParamsList & params, bool use_nulls) { if (params.empty()) return block; - Block res; - - size_t rows = block.rows(); - auto column = ColumnUInt64::create(rows); - - res.insert({ColumnPtr(std::move(column)), std::make_shared(), "__grouping_set"}); - - for (auto & col : block) - res.insert(std::move(col)); - - return res; + return generateOutputHeader(block, keys, use_nulls); } AggregatingStep::AggregatingStep( @@ -74,11 +85,12 @@ AggregatingStep::AggregatingStep( size_t merge_threads_, size_t temporary_data_merge_threads_, bool storage_has_evenly_distributed_read_, + bool group_by_use_nulls_, InputOrderInfoPtr group_by_info_, SortDescription group_by_sort_description_, bool should_produce_results_in_order_of_bucket_number_) : ITransformingStep( - input_stream_, appendGroupingColumn(params_.getHeader(input_stream_.header, final_), grouping_sets_params_), getTraits(should_produce_results_in_order_of_bucket_number_), false) + input_stream_, appendGroupingColumn(params_.getHeader(input_stream_.header, final_), params_.keys, grouping_sets_params_, group_by_use_nulls_), getTraits(should_produce_results_in_order_of_bucket_number_), false) , params(std::move(params_)) , grouping_sets_params(std::move(grouping_sets_params_)) , final(final_) @@ -87,6 +99,7 @@ AggregatingStep::AggregatingStep( , merge_threads(merge_threads_) , temporary_data_merge_threads(temporary_data_merge_threads_) , storage_has_evenly_distributed_read(storage_has_evenly_distributed_read_) + , group_by_use_nulls(group_by_use_nulls_) , group_by_info(std::move(group_by_info_)) , group_by_sort_description(std::move(group_by_sort_description_)) , should_produce_results_in_order_of_bucket_number(should_produce_results_in_order_of_bucket_number_) @@ -217,6 +230,8 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B assert(ports.size() == grouping_sets_size); auto output_header = transform_params->getHeader(); + if (group_by_use_nulls) + convertToNullable(output_header, params.keys); for (size_t set_counter = 0; set_counter < grouping_sets_size; ++set_counter) { @@ -236,6 +251,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B const auto & missing_columns = grouping_sets_params[set_counter].missing_keys; + auto to_nullable_function = FunctionFactory::instance().get("toNullable", nullptr); for (size_t i = 0; i < output_header.columns(); ++i) { auto & col = output_header.getByPosition(i); @@ -251,7 +267,13 @@ void 
AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B index.push_back(node); } else - index.push_back(dag->getIndex()[header.getPositionByName(col.name)]); + { + const auto * column_node = dag->getIndex()[header.getPositionByName(col.name)]; + if (group_by_use_nulls && column_node->result_type->canBeInsideNullable()) + index.push_back(&dag->addFunction(to_nullable_function, { column_node }, col.name)); + else + index.push_back(column_node); + } } dag->getIndex().swap(index); @@ -396,7 +418,7 @@ void AggregatingStep::updateOutputStream() { output_stream = createOutputStream( input_streams.front(), - appendGroupingColumn(params.getHeader(input_streams.front().header, final), grouping_sets_params), + appendGroupingColumn(params.getHeader(input_streams.front().header, final), params.keys, grouping_sets_params, group_by_use_nulls), getDataStreamTraits()); } diff --git a/src/Processors/QueryPlan/AggregatingStep.h b/src/Processors/QueryPlan/AggregatingStep.h index 0e982d76940..71130b65adb 100644 --- a/src/Processors/QueryPlan/AggregatingStep.h +++ b/src/Processors/QueryPlan/AggregatingStep.h @@ -20,6 +20,7 @@ struct GroupingSetsParams using GroupingSetsParamsList = std::vector; Block appendGroupingSetColumn(Block header); +Block generateOutputHeader(const Block & input_header, const Names & keys, bool use_nulls); /// Aggregation. See AggregatingTransform. class AggregatingStep : public ITransformingStep @@ -35,6 +36,7 @@ public: size_t merge_threads_, size_t temporary_data_merge_threads_, bool storage_has_evenly_distributed_read_, + bool group_by_use_nulls_, InputOrderInfoPtr group_by_info_, SortDescription group_by_sort_description_, bool should_produce_results_in_order_of_bucket_number_); @@ -62,6 +64,7 @@ private: size_t temporary_data_merge_threads; bool storage_has_evenly_distributed_read; + bool group_by_use_nulls; InputOrderInfoPtr group_by_info; SortDescription group_by_sort_description; diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index b0c57491085..52539dec75f 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB { @@ -24,27 +25,41 @@ static ITransformingStep::Traits getTraits() }; } -CubeStep::CubeStep(const DataStream & input_stream_, Aggregator::Params params_, bool final_) - : ITransformingStep(input_stream_, appendGroupingSetColumn(params_.getHeader(input_stream_.header, final_)), getTraits()) +CubeStep::CubeStep(const DataStream & input_stream_, Aggregator::Params params_, bool final_, bool use_nulls_) + : ITransformingStep(input_stream_, generateOutputHeader(params_.getHeader(input_stream_.header, final_), params_.keys, use_nulls_), getTraits()) , keys_size(params_.keys_size) , params(std::move(params_)) , final(final_) + , use_nulls(use_nulls_) { /// Aggregation keys are distinct for (const auto & key : params.keys) output_stream->distinct_columns.insert(key); } -ProcessorPtr addGroupingSetForTotals(const Block & header, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number) +ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number) { auto dag = std::make_shared(header.getColumnsWithTypeAndName()); + auto & index = dag->getIndex(); + + if (use_nulls) + { + auto to_nullable = FunctionFactory::instance().get("toNullable", nullptr); + for (const auto & key : keys) + { + const auto * 
node = dag->getIndex()[header.getPositionByName(key)]; + if (node->result_type->canBeInsideNullable()) + { + dag->addOrReplaceInIndex(dag->addFunction(to_nullable, { node }, node->result_name)); + } + } + } auto grouping_col = ColumnUInt64::create(1, grouping_set_number); const auto * grouping_node = &dag->addColumn( {ColumnPtr(std::move(grouping_col)), std::make_shared(), "__grouping_set"}); grouping_node = &dag->materializeNode(*grouping_node); - auto & index = dag->getIndex(); index.insert(index.begin(), grouping_node); auto expression = std::make_shared(dag, settings.getActionsSettings()); @@ -58,10 +73,10 @@ void CubeStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQue pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) -> ProcessorPtr { if (stream_type == QueryPipelineBuilder::StreamType::Totals) - return addGroupingSetForTotals(header, settings, (UInt64(1) << keys_size) - 1); + return addGroupingSetForTotals(header, params.keys, use_nulls, settings, (UInt64(1) << keys_size) - 1); auto transform_params = std::make_shared(header, std::move(params), final); - return std::make_shared(header, std::move(transform_params)); + return std::make_shared(header, std::move(transform_params), use_nulls); }); } @@ -73,7 +88,7 @@ const Aggregator::Params & CubeStep::getParams() const void CubeStep::updateOutputStream() { output_stream = createOutputStream( - input_streams.front(), appendGroupingSetColumn(params.getHeader(input_streams.front().header, final)), getDataStreamTraits()); + input_streams.front(), generateOutputHeader(params.getHeader(input_streams.front().header, final), params.keys, use_nulls), getDataStreamTraits()); /// Aggregation keys are distinct for (const auto & key : params.keys) diff --git a/src/Processors/QueryPlan/CubeStep.h b/src/Processors/QueryPlan/CubeStep.h index 87f22de7fc6..8a03a33a088 100644 --- a/src/Processors/QueryPlan/CubeStep.h +++ b/src/Processors/QueryPlan/CubeStep.h @@ -13,7 +13,7 @@ using AggregatingTransformParamsPtr = std::shared_ptr #include #include +#include #include #include #include diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 5d5c7e9cb2c..46be5ea1d7d 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include namespace DB { @@ -9,6 +10,8 @@ using PartitionIdToMaxBlock = std::unordered_map; class Pipe; +using MergeTreeReadTaskCallback = std::function(PartitionReadRequest)>; + struct MergeTreeDataSelectSamplingData { bool use_sampling = false; diff --git a/src/Processors/QueryPlan/RollupStep.cpp b/src/Processors/QueryPlan/RollupStep.cpp index 169976195ea..3305f24602f 100644 --- a/src/Processors/QueryPlan/RollupStep.cpp +++ b/src/Processors/QueryPlan/RollupStep.cpp @@ -22,18 +22,19 @@ static ITransformingStep::Traits getTraits() }; } -RollupStep::RollupStep(const DataStream & input_stream_, Aggregator::Params params_, bool final_) - : ITransformingStep(input_stream_, appendGroupingSetColumn(params_.getHeader(input_stream_.header, final_)), getTraits()) +RollupStep::RollupStep(const DataStream & input_stream_, Aggregator::Params params_, bool final_, bool use_nulls_) + : ITransformingStep(input_stream_, generateOutputHeader(params_.getHeader(input_stream_.header, final_), params_.keys, use_nulls_), getTraits()) , params(std::move(params_)) , keys_size(params.keys_size) , final(final_) + , use_nulls(use_nulls_) { /// 
Aggregation keys are distinct for (const auto & key : params.keys) output_stream->distinct_columns.insert(key); } -ProcessorPtr addGroupingSetForTotals(const Block & header, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number); +ProcessorPtr addGroupingSetForTotals(const Block & header, const Names & keys, bool use_nulls, const BuildQueryPipelineSettings & settings, UInt64 grouping_set_number); void RollupStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { @@ -42,10 +43,10 @@ void RollupStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) -> ProcessorPtr { if (stream_type == QueryPipelineBuilder::StreamType::Totals) - return addGroupingSetForTotals(header, settings, keys_size); + return addGroupingSetForTotals(header, params.keys, use_nulls, settings, keys_size); auto transform_params = std::make_shared(header, std::move(params), true); - return std::make_shared(header, std::move(transform_params)); + return std::make_shared(header, std::move(transform_params), use_nulls); }); } diff --git a/src/Processors/QueryPlan/RollupStep.h b/src/Processors/QueryPlan/RollupStep.h index c59bf9f3ee9..866de7178fa 100644 --- a/src/Processors/QueryPlan/RollupStep.h +++ b/src/Processors/QueryPlan/RollupStep.h @@ -13,7 +13,7 @@ using AggregatingTransformParamsPtr = std::shared_ptrcancel(&read_context); - // is_async_state = false; } void RemoteSource::onUpdatePorts() @@ -135,7 +134,6 @@ void RemoteSource::onUpdatePorts() { was_query_canceled = true; query_executor->finish(&read_context); - // is_async_state = false; } } diff --git a/src/Processors/TTL/TTLAggregationAlgorithm.cpp b/src/Processors/TTL/TTLAggregationAlgorithm.cpp index d8b022f0acb..0d160b8d32d 100644 --- a/src/Processors/TTL/TTLAggregationAlgorithm.cpp +++ b/src/Processors/TTL/TTLAggregationAlgorithm.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { diff --git a/src/Processors/Transforms/CubeTransform.cpp b/src/Processors/Transforms/CubeTransform.cpp index b80ca29327f..669aaddd1df 100644 --- a/src/Processors/Transforms/CubeTransform.cpp +++ b/src/Processors/Transforms/CubeTransform.cpp @@ -1,6 +1,7 @@ #include #include #include +#include "Processors/Transforms/RollupTransform.h" namespace DB { @@ -9,61 +10,32 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -CubeTransform::CubeTransform(Block header, AggregatingTransformParamsPtr params_) - : IAccumulatingTransform(std::move(header), appendGroupingSetColumn(params_->getHeader())) - , params(std::move(params_)) +CubeTransform::CubeTransform(Block header, AggregatingTransformParamsPtr params_, bool use_nulls_) + : GroupByModifierTransform(std::move(header), params_, use_nulls_) , aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates)) { - keys.reserve(params->params.keys_size); - for (const auto & key : params->params.keys) - keys.emplace_back(input.getHeader().getPositionByName(key)); - if (keys.size() >= 8 * sizeof(mask)) throw Exception("Too many keys are used for CubeTransform.", ErrorCodes::LOGICAL_ERROR); } -Chunk CubeTransform::merge(Chunks && chunks, bool final) -{ - BlocksList rollup_blocks; - for (auto & chunk : chunks) - rollup_blocks.emplace_back(getInputPort().getHeader().cloneWithColumns(chunk.detachColumns())); - - auto rollup_block = params->aggregator.mergeBlocks(rollup_blocks, final); - auto num_rows = rollup_block.rows(); - return 
Chunk(rollup_block.getColumns(), num_rows); -} - -void CubeTransform::consume(Chunk chunk) -{ - consumed_chunks.emplace_back(std::move(chunk)); -} - -MutableColumnPtr getColumnWithDefaults(Block const & header, size_t key, size_t n); - Chunk CubeTransform::generate() { if (!consumed_chunks.empty()) { - if (consumed_chunks.size() > 1) - cube_chunk = merge(std::move(consumed_chunks), false); - else - cube_chunk = std::move(consumed_chunks.front()); + mergeConsumed(); - consumed_chunks.clear(); - - auto num_rows = cube_chunk.getNumRows(); + auto num_rows = current_chunk.getNumRows(); mask = (static_cast(1) << keys.size()) - 1; - current_columns = cube_chunk.getColumns(); + current_columns = current_chunk.getColumns(); current_zero_columns.clear(); current_zero_columns.reserve(keys.size()); - auto const & input_header = getInputPort().getHeader(); for (auto key : keys) - current_zero_columns.emplace_back(getColumnWithDefaults(input_header, key, num_rows)); + current_zero_columns.emplace_back(getColumnWithDefaults(key, num_rows)); } - auto gen_chunk = std::move(cube_chunk); + auto gen_chunk = std::move(current_chunk); if (mask) { @@ -78,7 +50,7 @@ Chunk CubeTransform::generate() Chunks chunks; chunks.emplace_back(std::move(columns), current_columns.front()->size()); - cube_chunk = merge(std::move(chunks), false); + current_chunk = merge(std::move(chunks), !use_nulls, false); } finalizeChunk(gen_chunk, aggregates_mask); diff --git a/src/Processors/Transforms/CubeTransform.h b/src/Processors/Transforms/CubeTransform.h index bd33eabd750..54a41e8f44e 100644 --- a/src/Processors/Transforms/CubeTransform.h +++ b/src/Processors/Transforms/CubeTransform.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include #include @@ -9,30 +10,23 @@ namespace DB /// Takes blocks after grouping, with non-finalized aggregate functions. /// Calculates all subsets of columns and aggregates over them. -class CubeTransform : public IAccumulatingTransform +class CubeTransform : public GroupByModifierTransform { public: - CubeTransform(Block header, AggregatingTransformParamsPtr params); + CubeTransform(Block header, AggregatingTransformParamsPtr params, bool use_nulls_); String getName() const override { return "CubeTransform"; } protected: - void consume(Chunk chunk) override; Chunk generate() override; private: - AggregatingTransformParamsPtr params; - ColumnNumbers keys; const ColumnsMask aggregates_mask; - Chunks consumed_chunks; - Chunk cube_chunk; Columns current_columns; Columns current_zero_columns; UInt64 mask = 0; UInt64 grouping_set = 0; - - Chunk merge(Chunks && chunks, bool final); }; } diff --git a/src/Processors/Transforms/FinishSortingTransform.cpp b/src/Processors/Transforms/FinishSortingTransform.cpp index 3842e034de7..86b5c4c9a00 100644 --- a/src/Processors/Transforms/FinishSortingTransform.cpp +++ b/src/Processors/Transforms/FinishSortingTransform.cpp @@ -36,9 +36,8 @@ FinishSortingTransform::FinishSortingTransform( /// The target description is modified in SortingTransform constructor. /// To avoid doing the same actions with description_sorted just copy it from prefix of target description. 
- size_t prefix_size = description_sorted_.size(); - for (size_t i = 0; i < prefix_size; ++i) - description_with_positions.emplace_back(description[i], header_without_constants.getPositionByName(description[i].column_name)); + for (const auto & column_sort_desc : description_sorted_) + description_with_positions.emplace_back(column_sort_desc, header_without_constants.getPositionByName(column_sort_desc.column_name)); } void FinishSortingTransform::consume(Chunk chunk) diff --git a/src/Processors/Transforms/PartialSortingTransform.cpp b/src/Processors/Transforms/PartialSortingTransform.cpp index b0f866cb3fd..3a2cecd3b26 100644 --- a/src/Processors/Transforms/PartialSortingTransform.cpp +++ b/src/Processors/Transforms/PartialSortingTransform.cpp @@ -85,7 +85,7 @@ bool compareWithThreshold(const ColumnRawPtrs & raw_block_columns, size_t min_bl } PartialSortingTransform::PartialSortingTransform( - const Block & header_, SortDescription & description_, UInt64 limit_) + const Block & header_, const SortDescription & description_, UInt64 limit_) : ISimpleTransform(header_, header_, false) , description(description_) , limit(limit_) @@ -93,9 +93,8 @@ PartialSortingTransform::PartialSortingTransform( // Sorting by no columns doesn't make sense. assert(!description_.empty()); - size_t description_size = description.size(); - for (size_t i = 0; i < description_size; ++i) - description_with_positions.emplace_back(description[i], header_.getPositionByName(description[i].column_name)); + for (const auto & column_sort_desc : description) + description_with_positions.emplace_back(column_sort_desc, header_.getPositionByName(column_sort_desc.column_name)); } void PartialSortingTransform::transform(Chunk & chunk) diff --git a/src/Processors/Transforms/PartialSortingTransform.h b/src/Processors/Transforms/PartialSortingTransform.h index 78ce80bdeeb..6dab4497fc7 100644 --- a/src/Processors/Transforms/PartialSortingTransform.h +++ b/src/Processors/Transforms/PartialSortingTransform.h @@ -15,7 +15,7 @@ public: /// limit - if not 0, then you can sort each block not completely, but only `limit` first rows by order. 
PartialSortingTransform( const Block & header_, - SortDescription & description_, + const SortDescription & description_, UInt64 limit_ = 0); String getName() const override { return "PartialSortingTransform"; } @@ -26,9 +26,9 @@ protected: void transform(Chunk & chunk) override; private: - SortDescription description; + const SortDescription description; SortDescriptionWithPositions description_with_positions; - UInt64 limit; + const UInt64 limit; RowsBeforeLimitCounterPtr read_rows; Columns sort_description_threshold_columns; diff --git a/src/Processors/Transforms/RollupTransform.cpp b/src/Processors/Transforms/RollupTransform.cpp index e5351d1d5e2..a5d67fb2f15 100644 --- a/src/Processors/Transforms/RollupTransform.cpp +++ b/src/Processors/Transforms/RollupTransform.cpp @@ -1,36 +1,80 @@ #include #include #include +#include namespace DB { -RollupTransform::RollupTransform(Block header, AggregatingTransformParamsPtr params_) - : IAccumulatingTransform(std::move(header), appendGroupingSetColumn(params_->getHeader())) +GroupByModifierTransform::GroupByModifierTransform(Block header, AggregatingTransformParamsPtr params_, bool use_nulls_) + : IAccumulatingTransform(std::move(header), generateOutputHeader(params_->getHeader(), params_->params.keys, use_nulls_)) , params(std::move(params_)) - , aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates)) + , use_nulls(use_nulls_) { keys.reserve(params->params.keys_size); for (const auto & key : params->params.keys) keys.emplace_back(input.getHeader().getPositionByName(key)); + + intermediate_header = getOutputPort().getHeader(); + intermediate_header.erase(0); + + if (use_nulls) + { + auto output_aggregator_params = params->params; + output_aggregator = std::make_unique(intermediate_header, output_aggregator_params); + } } -void RollupTransform::consume(Chunk chunk) +void GroupByModifierTransform::consume(Chunk chunk) { consumed_chunks.emplace_back(std::move(chunk)); } -Chunk RollupTransform::merge(Chunks && chunks, bool final) +void GroupByModifierTransform::mergeConsumed() { - BlocksList rollup_blocks; - for (auto & chunk : chunks) - rollup_blocks.emplace_back(getInputPort().getHeader().cloneWithColumns(chunk.detachColumns())); + if (consumed_chunks.size() > 1) + current_chunk = merge(std::move(consumed_chunks), true, false); + else + current_chunk = std::move(consumed_chunks.front()); - auto rollup_block = params->aggregator.mergeBlocks(rollup_blocks, final); - auto num_rows = rollup_block.rows(); - return Chunk(rollup_block.getColumns(), num_rows); + size_t rows = current_chunk.getNumRows(); + auto columns = current_chunk.getColumns(); + if (use_nulls) + { + for (auto key : keys) + columns[key] = makeNullableSafe(columns[key]); + } + current_chunk = Chunk{ columns, rows }; + + consumed_chunks.clear(); } +Chunk GroupByModifierTransform::merge(Chunks && chunks, bool is_input, bool final) +{ + auto header = is_input ? getInputPort().getHeader() : intermediate_header; + + BlocksList blocks; + for (auto & chunk : chunks) + blocks.emplace_back(header.cloneWithColumns(chunk.detachColumns())); + + auto current_block = is_input ? 
params->aggregator.mergeBlocks(blocks, final) : output_aggregator->mergeBlocks(blocks, final); + auto num_rows = current_block.rows(); + return Chunk(current_block.getColumns(), num_rows); +} + +MutableColumnPtr GroupByModifierTransform::getColumnWithDefaults(size_t key, size_t n) const +{ + auto const & col = intermediate_header.getByPosition(key); + auto result_column = col.column->cloneEmpty(); + col.type->insertManyDefaultsInto(*result_column, n); + return result_column; +} + +RollupTransform::RollupTransform(Block header, AggregatingTransformParamsPtr params_, bool use_nulls_) + : GroupByModifierTransform(std::move(header), params_, use_nulls_) + , aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates)) +{} + MutableColumnPtr getColumnWithDefaults(Block const & header, size_t key, size_t n) { auto const & col = header.getByPosition(key); @@ -43,16 +87,11 @@ Chunk RollupTransform::generate() { if (!consumed_chunks.empty()) { - if (consumed_chunks.size() > 1) - rollup_chunk = merge(std::move(consumed_chunks), false); - else - rollup_chunk = std::move(consumed_chunks.front()); - - consumed_chunks.clear(); + mergeConsumed(); last_removed_key = keys.size(); } - auto gen_chunk = std::move(rollup_chunk); + auto gen_chunk = std::move(current_chunk); if (last_removed_key) { @@ -61,11 +100,11 @@ Chunk RollupTransform::generate() auto num_rows = gen_chunk.getNumRows(); auto columns = gen_chunk.getColumns(); - columns[key] = getColumnWithDefaults(getInputPort().getHeader(), key, num_rows); + columns[key] = getColumnWithDefaults(key, num_rows); Chunks chunks; chunks.emplace_back(std::move(columns), num_rows); - rollup_chunk = merge(std::move(chunks), false); + current_chunk = merge(std::move(chunks), !use_nulls, false); } finalizeChunk(gen_chunk, aggregates_mask); diff --git a/src/Processors/Transforms/RollupTransform.h b/src/Processors/Transforms/RollupTransform.h index 1630df23579..e9fa0818779 100644 --- a/src/Processors/Transforms/RollupTransform.h +++ b/src/Processors/Transforms/RollupTransform.h @@ -1,4 +1,6 @@ #pragma once +#include +#include #include #include #include @@ -6,29 +8,49 @@ namespace DB { -/// Takes blocks after grouping, with non-finalized aggregate functions. -/// Calculates subtotals and grand totals values for a set of columns. -class RollupTransform : public IAccumulatingTransform +struct GroupByModifierTransform : public IAccumulatingTransform { -public: - RollupTransform(Block header, AggregatingTransformParamsPtr params); - String getName() const override { return "RollupTransform"; } + GroupByModifierTransform(Block header, AggregatingTransformParamsPtr params_, bool use_nulls_); protected: void consume(Chunk chunk) override; + + void mergeConsumed(); + + Chunk merge(Chunks && chunks, bool is_input, bool final); + + MutableColumnPtr getColumnWithDefaults(size_t key, size_t n) const; + + AggregatingTransformParamsPtr params; + + bool use_nulls; + + ColumnNumbers keys; + + std::unique_ptr output_aggregator; + + Block intermediate_header; + + Chunks consumed_chunks; + Chunk current_chunk; +}; + +/// Takes blocks after grouping, with non-finalized aggregate functions. +/// Calculates subtotals and grand totals values for a set of columns. 
+class RollupTransform : public GroupByModifierTransform +{ +public: + RollupTransform(Block header, AggregatingTransformParamsPtr params, bool use_nulls_); + String getName() const override { return "RollupTransform"; } + +protected: Chunk generate() override; private: - AggregatingTransformParamsPtr params; - ColumnNumbers keys; const ColumnsMask aggregates_mask; - Chunks consumed_chunks; - Chunk rollup_chunk; size_t last_removed_key = 0; size_t set_counter = 0; - - Chunk merge(Chunks && chunks, bool final); }; } diff --git a/src/Processors/Transforms/SortingTransform.h b/src/Processors/Transforms/SortingTransform.h index a607e52550d..d9a30699f92 100644 --- a/src/Processors/Transforms/SortingTransform.h +++ b/src/Processors/Transforms/SortingTransform.h @@ -73,8 +73,8 @@ public: ~SortingTransform() override; protected: - Status prepare() override final; - void work() override final; + Status prepare() final; + void work() final; virtual void consume(Chunk chunk) = 0; virtual void generate() = 0; @@ -82,7 +82,7 @@ protected: SortDescription description; size_t max_merged_block_size; - UInt64 limit; + const UInt64 limit; /// Before operation, will remove constant columns from blocks. And after, place constant columns back. /// (to avoid excessive virtual function calls and because constants cannot be serialized in Native format for temporary files) diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 88a52defa1e..340b85efae9 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -323,7 +323,6 @@ QueryPipelineBuilderPtr QueryPipelineBuilder::mergePipelines( left->pipe.processors.emplace_back(transform); left->pipe.processors.insert(left->pipe.processors.end(), right->pipe.processors.begin(), right->pipe.processors.end()); - // left->pipe.holder = std::move(right->pipe.holder); left->pipe.header = left->pipe.output_ports.front()->getHeader(); left->pipe.max_parallel_streams = std::max(left->pipe.max_parallel_streams, right->pipe.max_parallel_streams); return left; diff --git a/src/QueryPipeline/RemoteInserter.cpp b/src/QueryPipeline/RemoteInserter.cpp index ce2ba23576d..58fed6e5466 100644 --- a/src/QueryPipeline/RemoteInserter.cpp +++ b/src/QueryPipeline/RemoteInserter.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace DB diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 4178d0d62da..573e43e9d7a 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -1573,14 +1573,14 @@ namespace auto & log_entry = *result.add_logs(); log_entry.set_time(column_time.getElement(row)); log_entry.set_time_microseconds(column_time_microseconds.getElement(row)); - StringRef query_id = column_query_id.getDataAt(row); - log_entry.set_query_id(query_id.data, query_id.size); + std::string_view query_id = column_query_id.getDataAt(row).toView(); + log_entry.set_query_id(query_id.data(), query_id.size()); log_entry.set_thread_id(column_thread_id.getElement(row)); log_entry.set_level(static_cast<::clickhouse::grpc::LogsLevel>(column_level.getElement(row))); - StringRef source = column_source.getDataAt(row); - log_entry.set_source(source.data, source.size); - StringRef text = column_text.getDataAt(row); - log_entry.set_text(text.data, text.size); + std::string_view source = column_source.getDataAt(row).toView(); + log_entry.set_source(source.data(), source.size()); + std::string_view text = column_text.getDataAt(row).toView(); + log_entry.set_text(text.data(), 
text.size()); } } } diff --git a/src/Server/HTTPHandlerRequestFilter.h b/src/Server/HTTPHandlerRequestFilter.h index 3236b35d5ae..b04472fbea5 100644 --- a/src/Server/HTTPHandlerRequestFilter.h +++ b/src/Server/HTTPHandlerRequestFilter.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include @@ -23,16 +22,16 @@ namespace ErrorCodes using CompiledRegexPtr = std::shared_ptr; -static inline bool checkRegexExpression(const StringRef & match_str, const CompiledRegexPtr & compiled_regex) +static inline bool checkRegexExpression(std::string_view match_str, const CompiledRegexPtr & compiled_regex) { int num_captures = compiled_regex->NumberOfCapturingGroups() + 1; re2::StringPiece matches[num_captures]; - re2::StringPiece match_input(match_str.data, match_str.size); - return compiled_regex->Match(match_input, 0, match_str.size, re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures); + re2::StringPiece match_input(match_str.data(), match_str.size()); + return compiled_regex->Match(match_input, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures); } -static inline bool checkExpression(const StringRef & match_str, const std::pair & expression) +static inline bool checkExpression(std::string_view match_str, const std::pair & expression) { if (expression.second) return checkRegexExpression(match_str, expression.second); @@ -71,7 +70,7 @@ static inline auto urlFilter(Poco::Util::AbstractConfiguration & config, const s const auto & uri = request.getURI(); const auto & end = find_first_symbols<'?'>(uri.data(), uri.data() + uri.size()); - return checkExpression(StringRef(uri.data(), end - uri.data()), expression); + return checkExpression(std::string_view(uri.data(), end - uri.data()), expression); }; } @@ -93,7 +92,7 @@ static inline auto headersFilter(Poco::Util::AbstractConfiguration & config, con for (const auto & [header_name, header_expression] : headers_expression) { const auto & header_value = request.get(header_name, ""); - if (!checkExpression(StringRef(header_value.data(), header_value.size()), header_expression)) + if (!checkExpression(std::string_view(header_value.data(), header_value.size()), header_expression)) return false; } diff --git a/src/Server/KeeperTCPHandler.h b/src/Server/KeeperTCPHandler.h index 9895c335c96..ee83c4fa21b 100644 --- a/src/Server/KeeperTCPHandler.h +++ b/src/Server/KeeperTCPHandler.h @@ -9,7 +9,6 @@ #include #include "IServer.h" #include -#include #include #include #include diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index 5e9da48fc68..bf2638f7bc3 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp index 121a22e764c..eab5c2ab134 100644 --- a/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp +++ b/src/Storages/HDFS/AsynchronousReadBufferFromHDFS.cpp @@ -112,6 +112,8 @@ bool AsynchronousReadBufferFromHDFS::nextImpl() Stopwatch next_watch; Int64 wait = -1; size_t size = 0; + size_t bytes_read = 0; + if (prefetch_future.valid()) { ProfileEvents::increment(ProfileEvents::RemoteFSPrefetchedReads); @@ -126,7 +128,9 @@ bool AsynchronousReadBufferFromHDFS::nextImpl() LOG_TEST(log, "Current size: {}, offset: {}", size, offset); /// If prefetch_future is valid, size should always be greater than zero. 
- assert(offset < size); + assert(offset <= size); + bytes_read = size - offset; + wait = watch.elapsedMicroseconds(); ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, wait); } @@ -147,9 +151,10 @@ bool AsynchronousReadBufferFromHDFS::nextImpl() auto offset = result.offset; LOG_TEST(log, "Current size: {}, offset: {}", size, offset); - assert(offset < size); + assert(offset <= size); + bytes_read = size - offset; - if (size) + if (bytes_read) { /// Adjust the working buffer so that it ignores `offset` bytes. internal_buffer = Buffer(memory.data(), memory.data() + memory.size()); @@ -166,7 +171,7 @@ bool AsynchronousReadBufferFromHDFS::nextImpl() sum_duration += next_watch.elapsedMicroseconds(); sum_wait += wait; - return size; + return bytes_read; } off_t AsynchronousReadBufferFromHDFS::seek(off_t offset, int whence) diff --git a/src/Storages/HDFS/ReadBufferFromHDFS.cpp b/src/Storages/HDFS/ReadBufferFromHDFS.cpp index 291479b61f2..fab810a1e49 100644 --- a/src/Storages/HDFS/ReadBufferFromHDFS.cpp +++ b/src/Storages/HDFS/ReadBufferFromHDFS.cpp @@ -2,6 +2,7 @@ #if USE_HDFS #include +#include #include #include @@ -24,13 +25,13 @@ ReadBufferFromHDFS::~ReadBufferFromHDFS() = default; struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory { - String hdfs_uri; String hdfs_file_path; hdfsFile fin; HDFSBuilderWrapper builder; HDFSFSPtr fs; + ReadSettings read_settings; off_t file_offset = 0; off_t read_until_position = 0; @@ -39,11 +40,13 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory(buf_size_) + const ReadSettings & read_settings_, + size_t read_until_position_) + : BufferWithOwnMemory(read_settings_.remote_fs_buffer_size) , hdfs_uri(hdfs_uri_) , hdfs_file_path(hdfs_file_path_) , builder(createHDFSBuilder(hdfs_uri_, config_)) + , read_settings(read_settings_) , read_until_position(read_until_position_) { fs = createHDFSFS(builder.get()); @@ -97,6 +100,8 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemoryadd(bytes_read); return true; } @@ -130,7 +135,7 @@ ReadBufferFromHDFS::ReadBufferFromHDFS( size_t read_until_position_) : ReadBufferFromFileBase(read_settings_.remote_fs_buffer_size, nullptr, 0) , impl(std::make_unique( - hdfs_uri_, hdfs_file_path_, config_, read_settings_.remote_fs_buffer_size, read_until_position_)) + hdfs_uri_, hdfs_file_path_, config_, read_settings_, read_until_position_)) { } diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index 91ad8b42d1d..57e893e9683 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -412,7 +413,13 @@ public: const CompressionMethod compression_method) : SinkToStorage(sample_block) { - write_buf = wrapWriteBufferWithCompressionMethod(std::make_unique(uri, context->getGlobalContext()->getConfigRef(), context->getSettingsRef().hdfs_replication), compression_method, 3); + write_buf = wrapWriteBufferWithCompressionMethod( + std::make_unique( + uri, + context->getGlobalContext()->getConfigRef(), + context->getSettingsRef().hdfs_replication, + context->getWriteSettings()), + compression_method, 3); writer = FormatFactory::instance().getOutputFormatParallelIfPossible(format, *write_buf, sample_block, context); } diff --git a/src/Storages/HDFS/WriteBufferFromHDFS.cpp b/src/Storages/HDFS/WriteBufferFromHDFS.cpp index 42ec3962beb..f8079d95f3c 100644 --- a/src/Storages/HDFS/WriteBufferFromHDFS.cpp +++ 
b/src/Storages/HDFS/WriteBufferFromHDFS.cpp @@ -4,9 +4,9 @@ #include #include +#include #include - namespace DB { @@ -24,15 +24,18 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl hdfsFile fout; HDFSBuilderWrapper builder; HDFSFSPtr fs; + WriteSettings write_settings; WriteBufferFromHDFSImpl( const std::string & hdfs_uri_, const Poco::Util::AbstractConfiguration & config_, int replication_, + const WriteSettings & write_settings_, int flags) : hdfs_uri(hdfs_uri_) , builder(createHDFSBuilder(hdfs_uri, config_)) , fs(createHDFSFS(builder.get())) + , write_settings(write_settings_) { const size_t begin_of_path = hdfs_uri.find('/', hdfs_uri.find("//") + 2); const String path = hdfs_uri.substr(begin_of_path); @@ -44,7 +47,6 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl throw Exception("Unable to open HDFS file: " + path + " error: " + std::string(hdfsGetLastError()), ErrorCodes::CANNOT_OPEN_FILE); } - } ~WriteBufferFromHDFSImpl() @@ -56,6 +58,8 @@ struct WriteBufferFromHDFS::WriteBufferFromHDFSImpl int write(const char * start, size_t size) const { int bytes_written = hdfsWrite(fs.get(), fout, start, size); + if (write_settings.remote_throttler) + write_settings.remote_throttler->add(bytes_written); if (bytes_written < 0) throw Exception("Fail to write HDFS file: " + hdfs_uri + " " + std::string(hdfsGetLastError()), @@ -77,10 +81,11 @@ WriteBufferFromHDFS::WriteBufferFromHDFS( const std::string & hdfs_name_, const Poco::Util::AbstractConfiguration & config_, int replication_, + const WriteSettings & write_settings_, size_t buf_size_, int flags_) : BufferWithOwnMemory(buf_size_) - , impl(std::make_unique(hdfs_name_, config_, replication_, flags_)) + , impl(std::make_unique(hdfs_name_, config_, replication_, write_settings_, flags_)) { } diff --git a/src/Storages/HDFS/WriteBufferFromHDFS.h b/src/Storages/HDFS/WriteBufferFromHDFS.h index fe9af7dfba4..3cc11a35186 100644 --- a/src/Storages/HDFS/WriteBufferFromHDFS.h +++ b/src/Storages/HDFS/WriteBufferFromHDFS.h @@ -4,6 +4,7 @@ #if USE_HDFS #include +#include #include #include #include @@ -24,6 +25,7 @@ public: const String & hdfs_name_, const Poco::Util::AbstractConfiguration & config_, int replication_, + const WriteSettings & write_settings_ = {}, size_t buf_size_ = DBMS_DEFAULT_BUFFER_SIZE, int flags = O_WRONLY); diff --git a/src/Storages/Hive/HiveCommon.cpp b/src/Storages/Hive/HiveCommon.cpp index 7b2f04f7073..609adcf65c9 100644 --- a/src/Storages/Hive/HiveCommon.cpp +++ b/src/Storages/Hive/HiveCommon.cpp @@ -66,7 +66,6 @@ HiveMetastoreClient::HiveTableMetadataPtr HiveMetastoreClient::getTableMetadata( }; tryCallHiveClient(client_call); - // bool update_cache = shouldUpdateTableMetadata(db_name, table_name, partitions); String cache_key = getCacheKey(db_name, table_name); HiveTableMetadataPtr metadata = table_metadata_cache.get(cache_key); diff --git a/src/Storages/Hive/HiveFile.cpp b/src/Storages/Hive/HiveFile.cpp index 09c3aff4455..fc08c046f93 100644 --- a/src/Storages/Hive/HiveFile.cpp +++ b/src/Storages/Hive/HiveFile.cpp @@ -79,13 +79,23 @@ Range createRangeFromParquetStatistics(std::shared_ptr IHiveFile::getRows() { - if (!rows) - rows = getRowsImpl(); + if (!has_init_rows) + { + std::lock_guard lock(mutex); + if (!has_init_rows) + { + rows = getRowsImpl(); + has_init_rows = true; + } + } return rows; } void IHiveFile::loadFileMinMaxIndex() { + if (file_minmax_idx_loaded) + return; + std::lock_guard lock(mutex); if (file_minmax_idx_loaded) return; loadFileMinMaxIndexImpl(); @@ -94,6 +104,9 @@ void 
IHiveFile::loadFileMinMaxIndex() void IHiveFile::loadSplitMinMaxIndexes() { + if (split_minmax_idxes_loaded) + return; + std::lock_guard lock(mutex); if (split_minmax_idxes_loaded) return; loadSplitMinMaxIndexesImpl(); diff --git a/src/Storages/Hive/HiveFile.h b/src/Storages/Hive/HiveFile.h index cbdf17bd5b5..a4bd345aa48 100644 --- a/src/Storages/Hive/HiveFile.h +++ b/src/Storages/Hive/HiveFile.h @@ -149,6 +149,7 @@ protected: String path; UInt64 last_modify_time; size_t size; + std::atomic has_init_rows = false; std::optional rows; NamesAndTypesList index_names_and_types; @@ -162,6 +163,9 @@ protected: /// Skip splits for this file after applying minmax index (if any) std::unordered_set skip_splits; std::shared_ptr storage_settings; + + /// IHiveFile may be shared among multiple threads, so a lock is needed to protect updates to the min/max indexes. + std::mutex mutex; }; using HiveFilePtr = std::shared_ptr; diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index b268066c054..ddd9f526091 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -218,7 +218,10 @@ public: auto get_raw_read_buf = [&]() -> std::unique_ptr { auto buf = std::make_unique( - hdfs_namenode_url, current_path, getContext()->getGlobalContext()->getConfigRef(), getContext()->getReadSettings()); + hdfs_namenode_url, + current_path, + getContext()->getGlobalContext()->getConfigRef(), + getContext()->getReadSettings()); bool thread_pool_read = read_settings.remote_fs_method == RemoteFSReadMethod::threadpool; if (thread_pool_read) diff --git a/src/Storages/MergeTree/BackgroundJobsAssignee.cpp b/src/Storages/MergeTree/BackgroundJobsAssignee.cpp index 81445f40ed6..9617d16f6f1 100644 --- a/src/Storages/MergeTree/BackgroundJobsAssignee.cpp +++ b/src/Storages/MergeTree/BackgroundJobsAssignee.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index 0e655166abf..3609a65bc71 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -127,12 +127,13 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write { if (part && part->isProjectionPart()) { - data.reportBrokenPart(part->getParentPart()->name); + auto parent_part = part->getParentPart()->shared_from_this(); + data.reportBrokenPart(parent_part); } + else if (part) + data.reportBrokenPart(part); else - { - data.reportBrokenPart(part_name); - } + LOG_TRACE(log, "Part {} was not found, do not report it as broken", part_name); }; try @@ -174,6 +175,8 @@ void Service::processQuery(const HTMLForm & params, ReadBuffer & /*body*/, Write std::sregex_token_iterator()); if (data_settings->allow_remote_fs_zero_copy_replication && + /// In memory data part does not have metadata yet. 
+ !isInMemoryPart(part) && client_protocol_version >= REPLICATION_PROTOCOL_VERSION_WITH_PARTS_ZERO_COPY) { auto disk_type = part->data_part_storage->getDiskType(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 60941108f00..95f25aa1955 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -603,22 +603,6 @@ String IMergeTreeDataPart::getColumnNameWithMinimumCompressedSize( return *minimum_size_column; } -// String IMergeTreeDataPart::getFullPath() const -// { -// if (relative_path.empty()) -// throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR); - -// return fs::path(storage.getFullPathOnDisk(volume->getDisk())) / (parent_part ? parent_part->relative_path : "") / relative_path / ""; -// } - -// String IMergeTreeDataPart::getRelativePath() const -// { -// if (relative_path.empty()) -// throw Exception("Part relative_path cannot be empty. It's bug.", ErrorCodes::LOGICAL_ERROR); - -// return fs::path(storage.relative_data_path) / (parent_part ? parent_part->relative_path : "") / relative_path / ""; -// } - void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checksums, bool check_consistency) { assertOnDisk(); diff --git a/src/Storages/MergeTree/IMergeTreeReader.cpp b/src/Storages/MergeTree/IMergeTreeReader.cpp index b8aeb8e6a5a..3acb4910e28 100644 --- a/src/Storages/MergeTree/IMergeTreeReader.cpp +++ b/src/Storages/MergeTree/IMergeTreeReader.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index 3b94b85607a..dbcca1443b5 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -30,9 +30,6 @@ public: } protected: - // using SerializationState = ISerialization::SerializeBinaryBulkStatePtr; - - // ISerialization::OutputStreamGetter createStreamGetter(const String & name, WrittenOffsetColumns & offset_columns); /// Remove all columns marked expired in data_part. Also, clears checksums /// and columns array. Return set of removed files names. 
diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 40f23fe5294..daf31698aad 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -2050,7 +2050,7 @@ bool KeyCondition::mayBeTrueInRange( } String KeyCondition::RPNElement::toString() const { return toString("column " + std::to_string(key_column), false); } -String KeyCondition::RPNElement::toString(const std::string_view & column_name, bool print_constants) const +String KeyCondition::RPNElement::toString(std::string_view column_name, bool print_constants) const { auto print_wrapped_column = [this, &column_name, print_constants](WriteBuffer & buf) { diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 9a8719afa19..af85a90dd62 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -320,7 +320,7 @@ private: : function(function_), range(range_), key_column(key_column_) {} String toString() const; - String toString(const std::string_view & column_name, bool print_constants) const; + String toString(std::string_view column_name, bool print_constants) const; Function function = FUNCTION_UNKNOWN; diff --git a/src/Storages/MergeTree/MergeList.cpp b/src/Storages/MergeTree/MergeList.cpp index d866345defe..13237371e9a 100644 --- a/src/Storages/MergeTree/MergeList.cpp +++ b/src/Storages/MergeTree/MergeList.cpp @@ -27,7 +27,7 @@ MemoryTrackerThreadSwitcher::MemoryTrackerThreadSwitcher(MergeListEntry & merge_ prev_untracked_memory = current_thread->untracked_memory; current_thread->untracked_memory = merge_list_entry->untracked_memory; - prev_query_id = current_thread->getQueryId().toString(); + prev_query_id = std::string(current_thread->getQueryId()); current_thread->setQueryId(merge_list_entry->query_id); } diff --git a/src/Storages/MergeTree/MergePlainMergeTreeTask.h b/src/Storages/MergeTree/MergePlainMergeTreeTask.h index 0f6d38d2cbf..7488b9655fe 100644 --- a/src/Storages/MergeTree/MergePlainMergeTreeTask.h +++ b/src/Storages/MergeTree/MergePlainMergeTreeTask.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 7426b384394..dc468174dfa 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -596,7 +596,6 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c const auto & projections = global_ctx->metadata_snapshot->getProjections(); - // tasks_for_projections.reserve(projections.size()); for (const auto & projection : projections) { diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp index c3f83771338..234487763d7 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.cpp @@ -124,7 +124,7 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) /// All operations with queues are considered no to do any allocations - auto erase_from_active = [this, item] + auto erase_from_active = [this, &item]() TSA_REQUIRES(mutex) { active.erase(std::remove(active.begin(), active.end(), item), active.end()); }; @@ -138,33 +138,35 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) } catch (const Exception & e) { - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - if (e.code() == ErrorCodes::ABORTED) /// Cancelled merging parts is not an 
error - log as info. - LOG_INFO(log, fmt::runtime(getCurrentExceptionMessage(false))); - else - tryLogCurrentException(__PRETTY_FUNCTION__); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + if (e.code() == ErrorCodes::ABORTED) /// Cancelled merging parts is not an error - log as info. + LOG_INFO(log, fmt::runtime(getCurrentExceptionMessage(false))); + else + tryLogCurrentException(__PRETTY_FUNCTION__); + }); } catch (...) { - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - tryLogCurrentException(__PRETTY_FUNCTION__); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + tryLogCurrentException(__PRETTY_FUNCTION__); + }); } if (need_execute_again) { std::lock_guard guard(mutex); + erase_from_active(); if (item->is_currently_deleting) { - erase_from_active(); - /// This is significant to order the destructors. { - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - item->task.reset(); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + item->task.reset(); + }); } item->is_done.set(); item = nullptr; @@ -176,7 +178,6 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) /// Otherwise the destruction of the task won't be ordered with the destruction of the /// storage. pending.push(std::move(item)); - erase_from_active(); has_tasks.notify_one(); item = nullptr; return; @@ -197,18 +198,20 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) } catch (const Exception & e) { - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - if (e.code() == ErrorCodes::ABORTED) /// Cancelled merging parts is not an error - log as info. - LOG_INFO(log, fmt::runtime(getCurrentExceptionMessage(false))); - else - tryLogCurrentException(__PRETTY_FUNCTION__); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + if (e.code() == ErrorCodes::ABORTED) /// Cancelled merging parts is not an error - log as info. + LOG_INFO(log, fmt::runtime(getCurrentExceptionMessage(false))); + else + tryLogCurrentException(__PRETTY_FUNCTION__); + }); } catch (...) { - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - tryLogCurrentException(__PRETTY_FUNCTION__); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + tryLogCurrentException(__PRETTY_FUNCTION__); + }); } @@ -218,9 +221,10 @@ void MergeTreeBackgroundExecutor::routine(TaskRuntimeDataPtr item) /// The thread that shutdowns storage will scan queues in order to find some tasks to wait for, but will find nothing. /// So, the destructor of a task and the destructor of a storage will be executed concurrently. { - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - item->task.reset(); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + item->task.reset(); + }); } item->is_done.set(); @@ -243,7 +247,7 @@ void MergeTreeBackgroundExecutor::threadFunction() TaskRuntimeDataPtr item; { std::unique_lock lock(mutex); - has_tasks.wait(lock, [this](){ return !pending.empty() || shutdown; }); + has_tasks.wait(lock, [this]() TSA_REQUIRES(mutex) { return !pending.empty() || shutdown; }); if (shutdown) break; @@ -256,9 +260,10 @@ void MergeTreeBackgroundExecutor::threadFunction() } catch (...) 
{ - NOEXCEPT_SCOPE; - ALLOW_ALLOCATIONS_IN_SCOPE; - tryLogCurrentException(__PRETTY_FUNCTION__); + NOEXCEPT_SCOPE({ + ALLOW_ALLOCATIONS_IN_SCOPE; + tryLogCurrentException(__PRETTY_FUNCTION__); + }); } } } diff --git a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h index de5af5199c0..a2f97d6bbc6 100644 --- a/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h +++ b/src/Storages/MergeTree/MergeTreeBackgroundExecutor.h @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -15,7 +14,9 @@ #include #include #include +#include #include + namespace DB { namespace ErrorCodes @@ -50,7 +51,8 @@ struct TaskRuntimeData ExecutableTaskPtr task; CurrentMetrics::Metric metric; - std::atomic_bool is_currently_deleting{false}; + /// Guarded by MergeTreeBackgroundExecutor<>::mutex + bool is_currently_deleting{false}; /// Actually autoreset=false is needed only for unit test /// where multiple threads could remove tasks corresponding to the same storage /// This scenario in not possible in reality. @@ -200,19 +202,21 @@ public: private: String name; - size_t threads_count{0}; - size_t max_tasks_count{0}; + size_t threads_count TSA_GUARDED_BY(mutex) = 0; + size_t max_tasks_count TSA_GUARDED_BY(mutex) = 0; CurrentMetrics::Metric metric; void routine(TaskRuntimeDataPtr item); - void threadFunction(); + + /// libc++ does not provide TSA support for std::unique_lock -> TSA_NO_THREAD_SAFETY_ANALYSIS + void threadFunction() TSA_NO_THREAD_SAFETY_ANALYSIS; /// Initially it will be empty - Queue pending{}; - boost::circular_buffer active{0}; + Queue pending TSA_GUARDED_BY(mutex); + boost::circular_buffer active TSA_GUARDED_BY(mutex); mutable std::mutex mutex; - std::condition_variable has_tasks; - std::atomic_bool shutdown{false}; + std::condition_variable has_tasks TSA_GUARDED_BY(mutex); + bool shutdown TSA_GUARDED_BY(mutex) = false; ThreadPool pool; Poco::Logger * log = &Poco::Logger::get("MergeTreeBackgroundExecutor"); }; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 64aaa40bd4c..727ebc9c3cc 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -4942,78 +4942,78 @@ MergeTreeData::DataPartsVector MergeTreeData::Transaction::commit(MergeTreeData: } } - NOEXCEPT_SCOPE; + NOEXCEPT_SCOPE({ + auto current_time = time(nullptr); - auto current_time = time(nullptr); + size_t add_bytes = 0; + size_t add_rows = 0; + size_t add_parts = 0; - size_t add_bytes = 0; - size_t add_rows = 0; - size_t add_parts = 0; + size_t reduce_bytes = 0; + size_t reduce_rows = 0; + size_t reduce_parts = 0; - size_t reduce_bytes = 0; - size_t reduce_rows = 0; - size_t reduce_parts = 0; - - for (const DataPartPtr & part : precommitted_parts) - { - auto part_in_memory = asInMemoryPart(part); - if (part_in_memory && settings->in_memory_parts_enable_wal) + for (const DataPartPtr & part : precommitted_parts) { - if (!wal) - wal = data.getWriteAheadLog(); - - wal->addPart(part_in_memory); - } - - DataPartPtr covering_part; - DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock); - if (covering_part) - { - LOG_WARNING(data.log, "Tried to commit obsolete part {} covered by {}", part->name, covering_part->getNameWithState()); - - part->remove_time.store(0, std::memory_order_relaxed); /// The part will be removed without waiting for old_parts_lifetime seconds. 
- data.modifyPartState(part, DataPartState::Outdated); - } - else - { - if (!txn) - MergeTreeTransaction::addNewPartAndRemoveCovered(data.shared_from_this(), part, covered_parts, NO_TRANSACTION_RAW); - - total_covered_parts.insert(total_covered_parts.end(), covered_parts.begin(), covered_parts.end()); - for (const auto & covered_part : covered_parts) + auto part_in_memory = asInMemoryPart(part); + if (part_in_memory && settings->in_memory_parts_enable_wal) { - covered_part->remove_time.store(current_time, std::memory_order_relaxed); + if (!wal) + wal = data.getWriteAheadLog(); - reduce_bytes += covered_part->getBytesOnDisk(); - reduce_rows += covered_part->rows_count; - - data.modifyPartState(covered_part, DataPartState::Outdated); - data.removePartContributionToColumnAndSecondaryIndexSizes(covered_part); + wal->addPart(part_in_memory); } - reduce_parts += covered_parts.size(); + DataPartPtr covering_part; + DataPartsVector covered_parts = data.getActivePartsToReplace(part->info, part->name, covering_part, *owing_parts_lock); + if (covering_part) + { + LOG_WARNING(data.log, "Tried to commit obsolete part {} covered by {}", part->name, covering_part->getNameWithState()); - add_bytes += part->getBytesOnDisk(); - add_rows += part->rows_count; - ++add_parts; + part->remove_time.store(0, std::memory_order_relaxed); /// The part will be removed without waiting for old_parts_lifetime seconds. + data.modifyPartState(part, DataPartState::Outdated); + } + else + { + if (!txn) + MergeTreeTransaction::addNewPartAndRemoveCovered(data.shared_from_this(), part, covered_parts, NO_TRANSACTION_RAW); - data.modifyPartState(part, DataPartState::Active); - data.addPartContributionToColumnAndSecondaryIndexSizes(part); + total_covered_parts.insert(total_covered_parts.end(), covered_parts.begin(), covered_parts.end()); + for (const auto & covered_part : covered_parts) + { + covered_part->remove_time.store(current_time, std::memory_order_relaxed); + + reduce_bytes += covered_part->getBytesOnDisk(); + reduce_rows += covered_part->rows_count; + + data.modifyPartState(covered_part, DataPartState::Outdated); + data.removePartContributionToColumnAndSecondaryIndexSizes(covered_part); + } + + reduce_parts += covered_parts.size(); + + add_bytes += part->getBytesOnDisk(); + add_rows += part->rows_count; + ++add_parts; + + data.modifyPartState(part, DataPartState::Active); + data.addPartContributionToColumnAndSecondaryIndexSizes(part); + } } - } - if (reduce_parts == 0) - { - for (const auto & part : precommitted_parts) - data.updateObjectColumns(part, parts_lock); - } - else - data.resetObjectColumnsFromActiveParts(parts_lock); + if (reduce_parts == 0) + { + for (const auto & part : precommitted_parts) + data.updateObjectColumns(part, parts_lock); + } + else + data.resetObjectColumnsFromActiveParts(parts_lock); - ssize_t diff_bytes = add_bytes - reduce_bytes; - ssize_t diff_rows = add_rows - reduce_rows; - ssize_t diff_parts = add_parts - reduce_parts; - data.increaseDataVolume(diff_bytes, diff_rows, diff_parts); + ssize_t diff_bytes = add_bytes - reduce_bytes; + ssize_t diff_rows = add_rows - reduce_rows; + ssize_t diff_parts = add_parts - reduce_parts; + data.increaseDataVolume(diff_bytes, diff_rows, diff_parts); + }); } clear(); @@ -6031,8 +6031,10 @@ void MergeTreeData::reportBrokenPart(MergeTreeData::DataPartPtr & data_part) con broken_part_callback(part->name); } } - else + else if (data_part && data_part->getState() == IMergeTreeDataPart::State::Active) broken_part_callback(data_part->name); + else + LOG_DEBUG(log, 
"Will not check potentially broken part {} because it's not active", data_part->getNameWithState()); } MergeTreeData::MatcherFn MergeTreeData::getPartitionMatcher(const ASTPtr & partition_ast, ContextPtr local_context) const diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 0b6e757ab49..7c3bc21f391 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -56,6 +56,9 @@ struct ZeroCopyLock; class IBackupEntry; using BackupEntries = std::vector>>; +class MergeTreeTransaction; +using MergeTreeTransactionPtr = std::shared_ptr; + /// Auxiliary struct holding information about the future merged or mutated part. struct EmergingPartInfo { @@ -669,12 +672,7 @@ public: AlterLockHolder & table_lock_holder); /// Should be called if part data is suspected to be corrupted. - void reportBrokenPart(const String & name) const - { - broken_part_callback(name); - } - - /// Same as above but has the ability to check all other parts + /// Has the ability to check all other parts /// which reside on the same disk of the suspicious part. void reportBrokenPart(MergeTreeData::DataPartPtr & data_part) const; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 85231aca253..3916eae1556 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -383,6 +383,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read( merge_threads, temporary_data_merge_threads, /* storage_has_evenly_distributed_read_= */ false, + /* group_by_use_nulls */ false, std::move(group_by_info), std::move(group_by_sort_description), should_produce_results_in_order_of_bucket_number); @@ -1470,6 +1471,9 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange( { // Do exclusion search, where we drop ranges that do not match + if (settings.merge_tree_coarse_index_granularity <= 1) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Setting merge_tree_coarse_index_granularity should be greater than 1"); + size_t min_marks_for_seek = roundRowsOrBytesToMarks( settings.merge_tree_min_rows_for_seek, settings.merge_tree_min_bytes_for_seek, diff --git a/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp index 280ce82cfce..655ca003deb 100644 --- a/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeInOrderSelectProcessor.cpp @@ -44,7 +44,7 @@ catch (...) { /// Suspicion of the broken part. A part is added to the queue for verification. 
if (getCurrentExceptionCode() != ErrorCodes::MEMORY_LIMIT_EXCEEDED) - storage.reportBrokenPart(data_part->name); + storage.reportBrokenPart(data_part); throw; } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index 8dc353714cb..ac0146edc3a 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -15,12 +15,6 @@ MergeTreeIndexGranularity::MergeTreeIndexGranularity(const std::vector & { } - -MergeTreeIndexGranularity::MergeTreeIndexGranularity(size_t marks_count, size_t fixed_granularity) - : marks_rows_partial_sums(marks_count, fixed_granularity) -{ -} - /// Rows after mark to next mark size_t MergeTreeIndexGranularity::getMarkRows(size_t mark_index) const { diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index 0923a496174..0d1480d68cb 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -20,8 +20,6 @@ private: public: MergeTreeIndexGranularity() = default; explicit MergeTreeIndexGranularity(const std::vector & marks_rows_partial_sums_); - MergeTreeIndexGranularity(size_t marks_count, size_t fixed_granularity); - /// Return count of rows between marks size_t getRowsCountInRange(const MarkRange & range) const; diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index 81026989f95..4ea6ec11ecc 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp index a9dce5b5ebe..9ed8fe0ad14 100644 --- a/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp +++ b/src/Storages/MergeTree/MergeTreeWriteAheadLog.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -126,7 +127,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeWriteAheadLog::restore(const Stor std::unique_lock lock(write_mutex); MergeTreeData::MutableDataPartsVector parts; - auto in = disk->readFile(path, {}); + auto in = disk->readFile(path); NativeReader block_in(*in, 0); NameSet dropped_parts; diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index d48a8b90646..deab5e748c7 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -39,7 +39,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream( metadata_snapshot_, indices_to_recalc, default_codec, - std::move(writer_settings), + writer_settings, index_granularity); auto * writer_on_disk = dynamic_cast(writer.get()); diff --git a/src/Storages/MergeTree/PartitionPruner.cpp b/src/Storages/MergeTree/PartitionPruner.cpp index e85e8e833f8..dce52ef64e9 100644 --- a/src/Storages/MergeTree/PartitionPruner.cpp +++ b/src/Storages/MergeTree/PartitionPruner.cpp @@ -15,9 +15,21 @@ bool PartitionPruner::canBePruned(const DataPart & part) { const auto & partition_value = part.partition.value; std::vector index_value(partition_value.begin(), partition_value.end()); + for (auto & field : index_value) + { + // NULL_LAST + if (field.isNull()) + field = POSITIVE_INFINITY; + } is_valid = partition_condition.mayBeTrueInRange( 
partition_value.size(), index_value.data(), index_value.data(), partition_key.data_types); partition_filter_map.emplace(partition_id, is_valid); + if (!is_valid) + { + WriteBufferFromOwnString buf; + part.partition.serializeText(part.storage, buf, FormatSettings{}); + LOG_TRACE(&Poco::Logger::get("PartitionPruner"), "Partition {} gets pruned", buf.str()); + } } return !is_valid; } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index f6c80baba05..2c32d9f266c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1102,9 +1102,8 @@ bool ReplicatedMergeTreeQueue::isCoveredByFuturePartsImpl(const LogEntry & entry if (future_part.isDisjoint(result_part)) continue; - /// Parts are not disjoint, so new_part_name either contains or covers future_part. - if (!(future_part.contains(result_part) || result_part.contains(future_part))) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected non-disjoint parts: {} and {}", future_part_elem.first, new_part_name); + /// Parts are not disjoint. They can even be intersecting and it's not a problem, + /// because we may have two queue entries producing intersecting parts if there's a DROP_RANGE between them (so virtual_parts are ok). /// We cannot execute `entry` (or upgrade its actual_part_name to `new_part_name`) /// while any covered or covering parts are processed. @@ -2337,6 +2336,12 @@ bool ReplicatedMergeTreeMergePredicate::hasDropRange(const MergeTreePartInfo & n return queue.hasDropRange(new_drop_range_info); } +String ReplicatedMergeTreeMergePredicate::getCoveringVirtualPart(const String & part_name) const +{ + std::lock_guard lock(queue.state_mutex); + return queue.virtual_parts.getContainingPart(MergeTreePartInfo::fromPartName(part_name, queue.format_version)); +} + ReplicatedMergeTreeQueue::SubscriberHandler ReplicatedMergeTreeQueue::addSubscriber(ReplicatedMergeTreeQueue::SubscriberCallBack && callback) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index a88d9182bbf..f4cae7152ef 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -519,8 +519,12 @@ public: /// The version of "log" node that is used to check that no new merges have appeared. 
int32_t getVersion() const { return merges_version; } + /// Returns true if there's a drop range covering new_drop_range_info bool hasDropRange(const MergeTreePartInfo & new_drop_range_info) const; + /// Returns virtual part covering part_name (if any) or empty string + String getCoveringVirtualPart(const String & part_name) const; + private: const ReplicatedMergeTreeQueue & queue; diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index c9c9877cc93..e0e2acc3436 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -31,6 +31,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; extern const int BAD_ARGUMENTS; extern const int POSTGRESQL_REPLICATION_INTERNAL_ERROR; + extern const int QUERY_NOT_ALLOWED; } class TemporaryReplicationSlot @@ -188,6 +189,17 @@ void PostgreSQLReplicationHandler::shutdown() } +void PostgreSQLReplicationHandler::assertInitialized() const +{ + if (!replication_handler_initialized) + { + throw Exception( + ErrorCodes::QUERY_NOT_ALLOWED, + "PostgreSQL replication initialization did not finish successfully. Please check logs for error messages"); + } +} + + void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) { postgres::Connection replication_connection(connection_info, /* replication */true); @@ -239,7 +251,7 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// Throw in case of single MaterializedPostgreSQL storage, because initial setup is done immediately /// (unlike database engine where it is done in a separate thread). - if (throw_on_error) + if (throw_on_error && !is_materialized_postgresql_database) throw; } } @@ -314,6 +326,8 @@ void PostgreSQLReplicationHandler::startSynchronization(bool throw_on_error) /// Do not rely anymore on saved storage pointers. materialized_storages.clear(); + + replication_handler_initialized = true; } @@ -393,12 +407,20 @@ void PostgreSQLReplicationHandler::cleanupFunc() cleanup_task->scheduleAfter(CLEANUP_RESCHEDULE_MS); } +PostgreSQLReplicationHandler::ConsumerPtr PostgreSQLReplicationHandler::getConsumer() +{ + if (!consumer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Consumer not initialized"); + return consumer; +} void PostgreSQLReplicationHandler::consumerFunc() { + assertInitialized(); + std::vector> skipped_tables; - bool schedule_now = consumer->consume(skipped_tables); + bool schedule_now = getConsumer()->consume(skipped_tables); LOG_DEBUG(log, "checking for skipped tables: {}", skipped_tables.size()); if (!skipped_tables.empty()) @@ -603,8 +625,10 @@ void PostgreSQLReplicationHandler::removeTableFromPublication(pqxx::nontransacti void PostgreSQLReplicationHandler::setSetting(const SettingChange & setting) { + assertInitialized(); + consumer_task->deactivate(); - consumer->setSetting(setting); + getConsumer()->setSetting(setting); consumer_task->activateAndSchedule(); } @@ -758,6 +782,15 @@ std::set PostgreSQLReplicationHandler::fetchRequiredTables() { pqxx::nontransaction tx(connection.getRef()); result_tables = fetchPostgreSQLTablesList(tx, schema_list.empty() ? 
postgres_schema : schema_list); + + std::string tables_string; + for (const auto & table : result_tables) + { + if (!tables_string.empty()) + tables_string += ", "; + tables_string += table; + } + LOG_DEBUG(log, "Tables list was fetched from PostgreSQL directly: {}", tables_string); } } } @@ -824,6 +857,8 @@ PostgreSQLTableStructurePtr PostgreSQLReplicationHandler::fetchTableStructure( void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPostgreSQL * materialized_storage, const String & postgres_table_name) { + assertInitialized(); + /// Note: we have to ensure that replication consumer task is stopped when we reload table, because otherwise /// it can read wal beyond start lsn position (from which this table is being loaded), which will result in losing data. consumer_task->deactivate(); @@ -858,7 +893,7 @@ void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPost } /// Pass storage to consumer and lsn position, from which to start receiving replication messages for this table. - consumer->addNested(postgres_table_name, nested_storage_info, start_lsn); + getConsumer()->addNested(postgres_table_name, nested_storage_info, start_lsn); LOG_TRACE(log, "Table `{}` successfully added to replication", postgres_table_name); } catch (...) @@ -876,6 +911,8 @@ void PostgreSQLReplicationHandler::addTableToReplication(StorageMaterializedPost void PostgreSQLReplicationHandler::removeTableFromReplication(const String & postgres_table_name) { + assertInitialized(); + consumer_task->deactivate(); try { @@ -887,7 +924,7 @@ void PostgreSQLReplicationHandler::removeTableFromReplication(const String & pos } /// Pass storage to consumer and lsn position, from which to start receiving replication messages for this table. - consumer->removeNested(postgres_table_name); + getConsumer()->removeNested(postgres_table_name); } catch (...) { @@ -966,7 +1003,7 @@ void PostgreSQLReplicationHandler::reloadFromSnapshot(const std::vectorgetStorageID().getNameForLogs(), nested_sample_block.dumpStructure()); /// Pass pointer to new nested table into replication consumer, remove current table from skip list and set start lsn position. 
- consumer->updateNested(table_name, StorageInfo(nested_storage, std::move(table_attributes)), relation_id, start_lsn); + getConsumer()->updateNested(table_name, StorageInfo(nested_storage, std::move(table_attributes)), relation_id, start_lsn); auto table_to_drop = DatabaseCatalog::instance().getTable(StorageID(temp_table_id.database_name, temp_table_id.table_name, table_id.uuid), nested_context); auto drop_table_id = table_to_drop->getStorageID(); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h index 16e531f5247..89f16457bfe 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.h @@ -18,6 +18,8 @@ class PostgreSQLReplicationHandler : WithContext friend class TemporaryReplicationSlot; public: + using ConsumerPtr = std::shared_ptr; + PostgreSQLReplicationHandler( const String & replication_identifier, const String & postgres_database_, @@ -87,6 +89,8 @@ private: void consumerFunc(); + ConsumerPtr getConsumer(); + StorageInfo loadFromSnapshot(postgres::Connection & connection, std::string & snapshot_name, const String & table_name, StorageMaterializedPostgreSQL * materialized_storage); void reloadFromSnapshot(const std::vector> & relation_data); @@ -97,6 +101,8 @@ private: std::pair getSchemaAndTableName(const String & table_name) const; + void assertInitialized() const; + Poco::Logger * log; /// If it is not attach, i.e. a create query, then if publication already exists - always drop it. @@ -134,7 +140,7 @@ private: String replication_slot, publication_name; /// Replication consumer. Manages decoding of replication stream and syncing into tables. - std::shared_ptr consumer; + ConsumerPtr consumer; BackgroundSchedulePool::TaskHolder startup_task; BackgroundSchedulePool::TaskHolder consumer_task; @@ -146,6 +152,8 @@ private: MaterializedStorages materialized_storages; UInt64 milliseconds_to_wait; + + bool replication_handler_initialized = false; }; } diff --git a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp index 73f0c8bd44e..f831f81cd22 100644 --- a/src/Storages/RabbitMQ/StorageRabbitMQ.cpp +++ b/src/Storages/RabbitMQ/StorageRabbitMQ.cpp @@ -578,16 +578,6 @@ void StorageRabbitMQ::bindQueue(size_t queue_id, AMQP::TcpChannel & rabbit_chann } } - /// Impose default settings if there are no user-defined settings. - if (!queue_settings.contains("x-max-length")) - { - queue_settings["x-max-length"] = queue_size; - } - if (!queue_settings.contains("x-overflow")) - { - queue_settings["x-overflow"] = "reject-publish"; - } - /// If queue_base - a single name, then it can be used as one specific queue, from which to read. /// Otherwise it is used as a generator (unique for current table) of queue names, because it allows to /// maximize performance - via setting `rabbitmq_num_queues`. diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index 3ff4baa0b11..b188cef065e 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index bdb4c392c48..5046a0b6fe0 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -156,6 +156,10 @@ struct SelectQueryInfoBase PrewhereInfoPtr prewhere_info; + /// This is an additional filter applied to current table.
+ /// It is needed only for additional PK filtering. + ASTPtr additional_filter_ast; + ReadInOrderOptimizerPtr order_optimizer; /// Can be modified while reading from storage InputOrderInfoPtr input_order_info; diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index a03acd3731d..ccb88992732 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -535,15 +535,16 @@ StorageLog::StorageLog( const ConstraintsDescription & constraints_, const String & comment, bool attach, - size_t max_compress_block_size_) + ContextMutablePtr context_) : IStorage(table_id_) + , WithMutableContext(context_) , engine_name(engine_name_) , disk(std::move(disk_)) , table_path(relative_path_) , use_marks_file(engine_name == "Log") , marks_file_path(table_path + DBMS_STORAGE_LOG_MARKS_FILE_NAME) , file_checker(disk, table_path + "sizes.json") - , max_compress_block_size(max_compress_block_size_) + , max_compress_block_size(context_->getSettingsRef().max_compress_block_size) { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); @@ -750,9 +751,9 @@ static std::chrono::seconds getLockTimeout(ContextPtr context) return std::chrono::seconds{lock_timeout}; } -void StorageLog::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr context, TableExclusiveLockHolder &) +void StorageLog::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr local_context, TableExclusiveLockHolder &) { - WriteLock lock{rwlock, getLockTimeout(context)}; + WriteLock lock{rwlock, getLockTimeout(local_context)}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); @@ -769,6 +770,7 @@ void StorageLog::truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr marks_loaded = true; num_marks_saved = 0; + getContext()->dropMMappedFileCache(); } @@ -776,14 +778,14 @@ Pipe StorageLog::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & /*query_info*/, - ContextPtr context, + ContextPtr local_context, QueryProcessingStage::Enum /*processed_stage*/, size_t max_block_size, unsigned num_streams) { storage_snapshot->check(column_names); - auto lock_timeout = getLockTimeout(context); + auto lock_timeout = getLockTimeout(local_context); loadMarks(lock_timeout); ReadLock lock{rwlock, lock_timeout}; @@ -817,7 +819,7 @@ Pipe StorageLog::read( bool limited_by_file_sizes = !use_marks_file; size_t row_limit = std::numeric_limits::max(); - ReadSettings read_settings = context->getReadSettings(); + ReadSettings read_settings = local_context->getReadSettings(); Pipes pipes; for (size_t stream = 0; stream < num_streams; ++stream) @@ -848,18 +850,18 @@ Pipe StorageLog::read( return Pipe::unitePipes(std::move(pipes)); } -SinkToStoragePtr StorageLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) +SinkToStoragePtr StorageLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) { - WriteLock lock{rwlock, getLockTimeout(context)}; + WriteLock lock{rwlock, getLockTimeout(local_context)}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); return std::make_shared(*this, metadata_snapshot, std::move(lock)); } -CheckResults StorageLog::checkData(const ASTPtr & /* query */, ContextPtr context) +CheckResults StorageLog::checkData(const ASTPtr & /* query */, ContextPtr local_context) { - ReadLock lock{rwlock, getLockTimeout(context)}; + ReadLock lock{rwlock, 
getLockTimeout(local_context)}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); @@ -1114,7 +1116,7 @@ void registerStorageLog(StorageFactory & factory) args.constraints, args.comment, args.attach, - args.getContext()->getSettings().max_compress_block_size); + args.getContext()); }; factory.registerStorage("Log", create_fn, features); diff --git a/src/Storages/StorageLog.h b/src/Storages/StorageLog.h index 778633440a4..2e677dd3161 100644 --- a/src/Storages/StorageLog.h +++ b/src/Storages/StorageLog.h @@ -12,6 +12,7 @@ namespace DB { + class IBackup; using BackupPtr = std::shared_ptr; @@ -21,7 +22,7 @@ using BackupPtr = std::shared_ptr; * Also implements TinyLog - a table engine that is suitable for small chunks of the log. * It differs from Log in the absence of mark files. */ -class StorageLog final : public IStorage +class StorageLog final : public IStorage, public WithMutableContext { friend class LogSource; friend class LogSink; @@ -40,7 +41,7 @@ public: const ConstraintsDescription & constraints_, const String & comment, bool attach, - size_t max_compress_block_size_); + ContextMutablePtr context_); ~StorageLog() override; String getName() const override { return engine_name; } @@ -49,16 +50,16 @@ public: const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, - ContextPtr context, + ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned num_streams) override; - SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; + SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) override; void rename(const String & new_path_to_table_data, const StorageID & new_table_id) override; - CheckResults checkData(const ASTPtr & /* query */, ContextPtr /* context */) override; + CheckResults checkData(const ASTPtr & query, ContextPtr local_context) override; void truncate(const ASTPtr &, const StorageMetadataPtr &, ContextPtr, TableExclusiveLockHolder &) override; diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 520b5534fe3..219093e8d75 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -1584,8 +1584,6 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry) return true; /// NOTE Deletion from `virtual_parts` is not done, but it is only necessary for merge. 
} - // bool do_fetch = false; - switch (entry.type) { case LogEntry::ATTACH_PART: @@ -1593,7 +1591,6 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry) [[fallthrough]]; case LogEntry::GET_PART: return executeFetch(entry); - // do_fetch = true; case LogEntry::MERGE_PARTS: throw Exception(ErrorCodes::LOGICAL_ERROR, "Merge has to be executed by another function"); case LogEntry::MUTATE_PART: @@ -1609,8 +1606,6 @@ bool StorageReplicatedMergeTree::executeLogEntry(LogEntry & entry) default: throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected log entry type: {}", static_cast(entry.type)); } - - // return true; } @@ -1837,8 +1832,8 @@ void StorageReplicatedMergeTree::executeDropRange(const LogEntry & entry) LOG_TRACE(log, "Executing DROP_RANGE {}", entry.new_part_name); auto drop_range_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range_info.partition_id, drop_range_info.max_block); - part_check_thread.cancelRemovedPartsCheck(drop_range_info); queue.removePartProducingOpsInRange(getZooKeeper(), drop_range_info, entry); + part_check_thread.cancelRemovedPartsCheck(drop_range_info); /// Delete the parts contained in the range to be deleted. /// It's important that no old parts remain (after the merge), because otherwise, @@ -1906,8 +1901,8 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry) if (replace) { getContext()->getMergeList().cancelInPartition(getStorageID(), drop_range.partition_id, drop_range.max_block); - part_check_thread.cancelRemovedPartsCheck(drop_range); queue.removePartProducingOpsInRange(getZooKeeper(), drop_range, entry); + part_check_thread.cancelRemovedPartsCheck(drop_range); } else { @@ -7953,12 +7948,31 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP while (true) { + /// We should be careful when creating an empty part, because we are not sure that this part is still needed. + /// For example, it's possible that part (or partition) was dropped (or replaced) concurrently. + /// We can enqueue part for check from DataPartExchange or SelectProcessor + /// and it's hard to synchronize it with ReplicatedMergeTreeQueue and PartCheckThread... + /// But at least we can ignore parts that are definitely not needed according to virtual parts and drop ranges. 
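The guard that follows implements a simple rule: recreate the lost part as an empty one only if the replication queue still expects a part covering it and no DROP_RANGE covers that part. A minimal standalone C++ sketch of that decision, using hypothetical stand-ins rather than the real MergeTreePartInfo / merge-predicate API:

#include <optional>
#include <string>

struct LostPartDecision
{
    bool create_empty_part = false;
    std::string reason;
};

/// `covering_virtual_part` stands in for pred.getCoveringVirtualPart(lost_part_name),
/// `covered_by_drop_range` for pred.hasDropRange(...) on that covering part.
LostPartDecision decideOnLostPart(const std::optional<std::string> & covering_virtual_part, bool covered_by_drop_range)
{
    if (!covering_virtual_part)
        return {false, "no covering part in replication queue"};
    if (covered_by_drop_range)
        return {false, "covered by DROP_RANGE"};
    return {true, "part is still expected, recreate it as empty"};
}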
+ auto pred = queue.getMergePredicate(zookeeper); + String covering_virtual = pred.getCoveringVirtualPart(lost_part_name); + if (covering_virtual.empty()) + { + LOG_WARNING(log, "Will not create empty part instead of lost {}, because there's no covering part in replication queue", lost_part_name); + return false; + } + if (pred.hasDropRange(MergeTreePartInfo::fromPartName(covering_virtual, format_version))) + { + LOG_WARNING(log, "Will not create empty part instead of lost {}, because it's covered by DROP_RANGE", lost_part_name); + return false; + } Coordination::Requests ops; Coordination::Stat replicas_stat; auto replicas_path = fs::path(zookeeper_path) / "replicas"; Strings replicas = zookeeper->getChildren(replicas_path, &replicas_stat); + ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/log", pred.getVersion())); + /// In rare cases new replica can appear during check ops.emplace_back(zkutil::makeCheckRequest(replicas_path, replicas_stat.version)); @@ -7988,7 +8002,7 @@ bool StorageReplicatedMergeTree::createEmptyPartInsteadOfLost(zkutil::ZooKeeperP } else if (code == Coordination::Error::ZBADVERSION) { - LOG_INFO(log, "Looks like new replica appearead while creating new empty part, will retry"); + LOG_INFO(log, "Looks like log was updated or new replica appeared while creating new empty part, will retry"); } else { diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index bed21a9affc..130bc75a65c 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -587,7 +587,8 @@ public: s3_configuration_.rw_settings, std::nullopt, DBMS_DEFAULT_BUFFER_SIZE, - threadPoolCallbackRunner(IOThreadPool::get())), + threadPoolCallbackRunner(IOThreadPool::get()), + context->getWriteSettings()), compression_method, 3); writer diff --git a/src/Storages/StorageStripeLog.cpp b/src/Storages/StorageStripeLog.cpp index eb8bc9b1d51..e3f477936db 100644 --- a/src/Storages/StorageStripeLog.cpp +++ b/src/Storages/StorageStripeLog.cpp @@ -265,14 +265,15 @@ StorageStripeLog::StorageStripeLog( const ConstraintsDescription & constraints_, const String & comment, bool attach, - size_t max_compress_block_size_) + ContextMutablePtr context_) : IStorage(table_id_) + , WithMutableContext(context_) , disk(std::move(disk_)) , table_path(relative_path_) , data_file_path(table_path + "data.bin") , index_file_path(table_path + "index.mrk") , file_checker(disk, table_path + "sizes.json") - , max_compress_block_size(max_compress_block_size_) + , max_compress_block_size(context_->getSettings().max_compress_block_size) , log(&Poco::Logger::get("StorageStripeLog")) { StorageInMemoryMetadata storage_metadata; @@ -330,9 +331,9 @@ void StorageStripeLog::rename(const String & new_path_to_table_data, const Stora } -static std::chrono::seconds getLockTimeout(ContextPtr context) +static std::chrono::seconds getLockTimeout(ContextPtr local_context) { - const Settings & settings = context->getSettingsRef(); + const Settings & settings = local_context->getSettingsRef(); Int64 lock_timeout = settings.lock_acquire_timeout.totalSeconds(); if (settings.max_execution_time.totalSeconds() != 0 && settings.max_execution_time.totalSeconds() < lock_timeout) lock_timeout = settings.max_execution_time.totalSeconds(); @@ -344,14 +345,14 @@ Pipe StorageStripeLog::read( const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & /*query_info*/, - ContextPtr context, + ContextPtr local_context, QueryProcessingStage::Enum /*processed_stage*/, const size_t /*max_block_size*/, 
unsigned num_streams) { storage_snapshot->check(column_names); - auto lock_timeout = getLockTimeout(context); + auto lock_timeout = getLockTimeout(local_context); loadIndices(lock_timeout); ReadLock lock{rwlock, lock_timeout}; @@ -369,7 +370,7 @@ Pipe StorageStripeLog::read( if (num_streams > size) num_streams = size; - ReadSettings read_settings = context->getReadSettings(); + ReadSettings read_settings = local_context->getReadSettings(); Pipes pipes; for (size_t stream = 0; stream < num_streams; ++stream) @@ -390,9 +391,9 @@ Pipe StorageStripeLog::read( } -SinkToStoragePtr StorageStripeLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr context) +SinkToStoragePtr StorageStripeLog::write(const ASTPtr & /*query*/, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) { - WriteLock lock{rwlock, getLockTimeout(context)}; + WriteLock lock{rwlock, getLockTimeout(local_context)}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); @@ -400,9 +401,9 @@ SinkToStoragePtr StorageStripeLog::write(const ASTPtr & /*query*/, const Storage } -CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, ContextPtr context) +CheckResults StorageStripeLog::checkData(const ASTPtr & /* query */, ContextPtr local_context) { - ReadLock lock{rwlock, getLockTimeout(context)}; + ReadLock lock{rwlock, getLockTimeout(local_context)}; if (!lock) throw Exception("Lock timeout exceeded", ErrorCodes::TIMEOUT_EXCEEDED); @@ -420,6 +421,7 @@ void StorageStripeLog::truncate(const ASTPtr &, const StorageMetadataPtr &, Cont indices_loaded = true; num_indices_saved = 0; + getContext()->dropMMappedFileCache(); } @@ -686,7 +688,7 @@ void registerStorageStripeLog(StorageFactory & factory) args.constraints, args.comment, args.attach, - args.getContext()->getSettings().max_compress_block_size); + args.getContext()); }, features); } diff --git a/src/Storages/StorageStripeLog.h b/src/Storages/StorageStripeLog.h index 3faffff381d..efdf18c0f7b 100644 --- a/src/Storages/StorageStripeLog.h +++ b/src/Storages/StorageStripeLog.h @@ -20,7 +20,7 @@ using BackupPtr = std::shared_ptr; /** Implements a table engine that is suitable for small chunks of the log. * In doing so, stores all the columns in a single Native file, with a nearby index. 
*/ -class StorageStripeLog final : public IStorage +class StorageStripeLog final : public IStorage, public WithMutableContext { friend class StripeLogSource; friend class StripeLogSink; @@ -34,7 +34,7 @@ public: const ConstraintsDescription & constraints_, const String & comment, bool attach, - size_t max_compress_block_size_); + ContextMutablePtr context_); ~StorageStripeLog() override; @@ -44,16 +44,16 @@ public: const Names & column_names, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, - ContextPtr context, + ContextPtr local_context, QueryProcessingStage::Enum processed_stage, size_t max_block_size, unsigned num_streams) override; - SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr context) override; + SinkToStoragePtr write(const ASTPtr & query, const StorageMetadataPtr & metadata_snapshot, ContextPtr local_context) override; void rename(const String & new_path_to_table_data, const StorageID & new_table_id) override; - CheckResults checkData(const ASTPtr & /* query */, ContextPtr /* context */) override; + CheckResults checkData(const ASTPtr & query, ContextPtr ocal_context) override; bool storesDataOnDisk() const override { return true; } Strings getDataPaths() const override { return {DB::fullPath(disk, table_path)}; } diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index bbbad012547..3377af685f0 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -180,9 +180,13 @@ void StorageView::replaceWithSubquery(ASTSelectQuery & outer_query, ASTPtr view_ if (!table_expression->database_and_table_name) { // If it's a view table function, add a fake db.table name. - if (table_expression->table_function && table_expression->table_function->as()->name == "view") - table_expression->database_and_table_name = std::make_shared("__view"); - else + if (table_expression->table_function) + { + auto table_function_name = table_expression->table_function->as()->name; + if ((table_function_name == "view") || (table_function_name == "viewIfPermitted")) + table_expression->database_and_table_name = std::make_shared("__view"); + } + if (!table_expression->database_and_table_name) throw Exception("Logical error: incorrect table expression", ErrorCodes::LOGICAL_ERROR); } diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 5f5a7887e80..d86a0d4f5df 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -131,6 +131,7 @@ const char * auto_contributors[] { "Anton Okhitin", "Anton Okulov", "Anton Patsev", + "Anton Petrov", "Anton Popov", "Anton Tihonov", "Anton Tikhonov", @@ -149,6 +150,7 @@ const char * auto_contributors[] { "Artem Zuikov", "Artemeey", "Artemkin Pavel", + "Arthur Passos", "Arthur Petukhovsky", "Arthur Tokarchuk", "Arthur Wong", @@ -193,7 +195,9 @@ const char * auto_contributors[] { "Chao Ma", "Chao Wang", "CheSema", + "Chebarykov Pavel", "Chen Yufei", + "Cheng Pan", "Chienlung Cheung", "Christian", "Christoph Wurm", @@ -248,6 +252,7 @@ const char * auto_contributors[] { "Dmitry Moskowski", "Dmitry Muzyka", "Dmitry Novik", + "Dmitry Pavlov", "Dmitry Petukhov", "Dmitry Rubashkin", "Dmitry S..ky / skype: dvska-at-skype", @@ -280,6 +285,7 @@ const char * auto_contributors[] { "Evgeniy Udodov", "Evgeny", "Evgeny Konkov", + "Evgeny Kruglov", "Evgeny Markov", "Ewout", "FArthur-cmd", @@ -323,6 +329,7 
@@ const char * auto_contributors[] { "Grigory", "Grigory Buteyko", "Grigory Pervakov", + "GruffGemini", "Guillaume Tassery", "Guo Wei (William)", "Haavard Kvaalen", @@ -330,6 +337,7 @@ const char * auto_contributors[] { "HaiBo Li", "Hamoon", "Han Fei", + "Harry Lee", "Harry-Lee", "HarryLeeIBM", "Hasitha Kanchana", @@ -386,6 +394,7 @@ const char * auto_contributors[] { "Jake Liu", "Jakub Kuklis", "James Maidment", + "James Morrison", "JaosnHsieh", "Jason", "Jason Keirstead", @@ -402,6 +411,7 @@ const char * auto_contributors[] { "John Hummel", "John Skopis", "Jonatas Freitas", + "Jordi Villar", "João Figueiredo", "Julian Gilyadov", "Julian Zhou", @@ -444,6 +454,7 @@ const char * auto_contributors[] { "Larry Luo", "Lars Eidnes", "Latysheva Alexandra", + "Laurie Li", "Lemore", "Leonardo Cecchi", "Leonid Krylov", @@ -516,6 +527,7 @@ const char * auto_contributors[] { "Michael Monashev", "Michael Nutt", "Michael Razuvaev", + "Michael Schnerring", "Michael Smitasin", "Michail Safronov", "Michal Lisowski", @@ -632,6 +644,7 @@ const char * auto_contributors[] { "Pawel Rog", "Peignon Melvyn", "Peng Jian", + "Peng Liu", "Persiyanov Dmitriy Andreevich", "Pervakov Grigorii", "Pervakov Grigory", @@ -643,6 +656,7 @@ const char * auto_contributors[] { "Pxl", "Pysaoke", "Quid37", + "Rafael Acevedo", "Rafael David Tinoco", "Rajkumar", "Rajkumar Varada", @@ -670,6 +684,7 @@ const char * auto_contributors[] { "Roman Nozdrin", "Roman Peshkurov", "Roman Tsisyk", + "Roman Vasin", "Roman Zhukov", "Roy Bellingan", "Ruslan", @@ -685,6 +700,7 @@ const char * auto_contributors[] { "SaltTan", "Sami Kerola", "Samuel Chou", + "San", "Saulius Valatka", "Sean Haynes", "Sean Lafferty", @@ -760,6 +776,7 @@ const char * auto_contributors[] { "Tiaonmmn", "Tigran Khudaverdyan", "Timur Magomedov", + "Timur Solodovnikov", "TiunovNN", "Tobias Adamson", "Tobias Lins", @@ -814,6 +831,8 @@ const char * auto_contributors[] { "Vladimir C", "Vladimir Ch", "Vladimir Chebotarev", + "Vladimir Chebotaryov", + "Vladimir Galunshchikov", "Vladimir Golovchenko", "Vladimir Goncharov", "Vladimir Klimontovich", @@ -823,6 +842,7 @@ const char * auto_contributors[] { "Vladimir Smirnov", "Vladislav Rassokhin", "Vladislav Smirnov", + "Vladislav V", "Vojtech Splichal", "Volodymyr Kuznetsov", "Vsevolod Orlov", @@ -831,6 +851,7 @@ const char * auto_contributors[] { "W", "Wang Fenjin", "WangZengrui", + "Wangyang Guo", "Weiqing Xu", "William Shallum", "Winter Zhang", @@ -838,6 +859,7 @@ const char * auto_contributors[] { "Xianda Ke", "Xiang Zhou", "Xin Wang", + "Xoel Lopez Barata", "Xudong Zhang", "Y Lu", "Yakko Majuri", @@ -855,6 +877,8 @@ const char * auto_contributors[] { "Yong Wang", "Yong-Hao Zou", "Youenn Lebras", + "Yu, Peng", + "Yuko Takagi", "Yuntao Wu", "Yuri Dyachenko", "Yurii Vlasenko", @@ -871,6 +895,7 @@ const char * auto_contributors[] { "Zijie Lu", "Zoran Pandovski", "a.palagashvili", + "aaapetrenko", "abdrakhmanov", "abel-wang", "abyss7", @@ -933,6 +958,7 @@ const char * auto_contributors[] { "chang.chen", "changvvb", "chasingegg", + "chen", "chen9t", "chengy8934", "chenjian", @@ -1110,6 +1136,8 @@ const char * auto_contributors[] { "linceyou", "lincion", "lingo-xp", + "lingpeng0314", + "lirulei", "listar", "litao91", "liu-bov", @@ -1119,10 +1147,13 @@ const char * auto_contributors[] { "liuyimin", "liyang", "liyang830", + "lokax", "lomberts", "loneylee", "long2ice", + "loyispa", "lthaooo", + "ltrk2", "ltybc-coder", "luc1ph3r", "lulichao", @@ -1213,6 +1244,7 @@ const char * auto_contributors[] { "redclusive", "rfraposa", "ritaank", + 
"rnbondarenko", "robert", "robot-clickhouse", "robot-metrika-test", @@ -1225,6 +1257,7 @@ const char * auto_contributors[] { "ryzuo", "s-kat", "santaux", + "santrancisco", "satanson", "save-my-heart", "sdk2", @@ -1327,6 +1360,7 @@ const char * auto_contributors[] { "zhangxiao871", "zhangyifan27", "zhangyuli1", + "zhao zhou", "zhen ni", "zhifeng", "zhongyuankai", diff --git a/src/Storages/System/StorageSystemGrants.cpp b/src/Storages/System/StorageSystemGrants.cpp index 26bd241023a..461efd7f640 100644 --- a/src/Storages/System/StorageSystemGrants.cpp +++ b/src/Storages/System/StorageSystemGrants.cpp @@ -36,8 +36,11 @@ NamesAndTypesList StorageSystemGrants::getNamesAndTypes() void StorageSystemGrants::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_USERS | AccessType::SHOW_ROLES); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_USERS | AccessType::SHOW_ROLES); + std::vector ids = access_control.findAll(); boost::range::push_back(ids, access_control.findAll()); diff --git a/src/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp index 01bba669c0e..1b207d1d165 100644 --- a/src/Storages/System/StorageSystemParts.cpp +++ b/src/Storages/System/StorageSystemParts.cpp @@ -13,6 +13,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Storages/System/StorageSystemPrivileges.cpp b/src/Storages/System/StorageSystemPrivileges.cpp index 8cf1accfe34..70163979f72 100644 --- a/src/Storages/System/StorageSystemPrivileges.cpp +++ b/src/Storages/System/StorageSystemPrivileges.cpp @@ -85,7 +85,7 @@ void StorageSystemPrivileges::fillData(MutableColumns & res_columns, ContextPtr, auto & column_parent_group = assert_cast(assert_cast(*res_columns[column_index]).getNestedColumn()).getData(); auto & column_parent_group_null_map = assert_cast(*res_columns[column_index++]).getNullMapData(); - auto add_row = [&](AccessType access_type, const std::string_view & aliases, Level max_level, AccessType parent_group) + auto add_row = [&](AccessType access_type, std::string_view aliases, Level max_level, AccessType parent_group) { column_access_type.push_back(static_cast(access_type)); diff --git a/src/Storages/System/StorageSystemQuotaLimits.cpp b/src/Storages/System/StorageSystemQuotaLimits.cpp index c98e060a62f..0261d3d2cd9 100644 --- a/src/Storages/System/StorageSystemQuotaLimits.cpp +++ b/src/Storages/System/StorageSystemQuotaLimits.cpp @@ -66,8 +66,11 @@ NamesAndTypesList StorageSystemQuotaLimits::getNamesAndTypes() void StorageSystemQuotaLimits::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_QUOTAS); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. 
const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_QUOTAS); + std::vector ids = access_control.findAll(); size_t column_index = 0; diff --git a/src/Storages/System/StorageSystemQuotaUsage.cpp b/src/Storages/System/StorageSystemQuotaUsage.cpp index 54f403803d6..6ba47a86dbf 100644 --- a/src/Storages/System/StorageSystemQuotaUsage.cpp +++ b/src/Storages/System/StorageSystemQuotaUsage.cpp @@ -78,7 +78,11 @@ NamesAndTypesList StorageSystemQuotaUsage::getNamesAndTypesImpl(bool add_column_ void StorageSystemQuotaUsage::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_QUOTAS); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. + const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_QUOTAS); + auto usage = context->getQuotaUsage(); if (!usage) return; diff --git a/src/Storages/System/StorageSystemQuotas.cpp b/src/Storages/System/StorageSystemQuotas.cpp index 046db151684..17863fa7326 100644 --- a/src/Storages/System/StorageSystemQuotas.cpp +++ b/src/Storages/System/StorageSystemQuotas.cpp @@ -53,8 +53,11 @@ NamesAndTypesList StorageSystemQuotas::getNamesAndTypes() void StorageSystemQuotas::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_QUOTAS); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_QUOTAS); + std::vector ids = access_control.findAll(); size_t column_index = 0; diff --git a/src/Storages/System/StorageSystemQuotasUsage.cpp b/src/Storages/System/StorageSystemQuotasUsage.cpp index fae0629a209..a3c97247111 100644 --- a/src/Storages/System/StorageSystemQuotasUsage.cpp +++ b/src/Storages/System/StorageSystemQuotasUsage.cpp @@ -15,7 +15,11 @@ NamesAndTypesList StorageSystemQuotasUsage::getNamesAndTypes() void StorageSystemQuotasUsage::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_QUOTAS); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. 
+ const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_QUOTAS); + auto all_quotas_usage = context->getAccessControl().getAllQuotasUsage(); StorageSystemQuotaUsage::fillDataImpl(res_columns, context, /* add_column_is_current = */ true, all_quotas_usage); } diff --git a/src/Storages/System/StorageSystemRoleGrants.cpp b/src/Storages/System/StorageSystemRoleGrants.cpp index 94ee28cfe83..cf5a24f88cd 100644 --- a/src/Storages/System/StorageSystemRoleGrants.cpp +++ b/src/Storages/System/StorageSystemRoleGrants.cpp @@ -31,8 +31,11 @@ NamesAndTypesList StorageSystemRoleGrants::getNamesAndTypes() void StorageSystemRoleGrants::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_USERS | AccessType::SHOW_ROLES); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_USERS | AccessType::SHOW_ROLES); + std::vector ids = access_control.findAll(); boost::range::push_back(ids, access_control.findAll()); diff --git a/src/Storages/System/StorageSystemRoles.cpp b/src/Storages/System/StorageSystemRoles.cpp index e5b8d53ce7e..5fda021428a 100644 --- a/src/Storages/System/StorageSystemRoles.cpp +++ b/src/Storages/System/StorageSystemRoles.cpp @@ -27,8 +27,11 @@ NamesAndTypesList StorageSystemRoles::getNamesAndTypes() void StorageSystemRoles::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_ROLES); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_ROLES); + std::vector ids = access_control.findAll(); size_t column_index = 0; diff --git a/src/Storages/System/StorageSystemRowPolicies.cpp b/src/Storages/System/StorageSystemRowPolicies.cpp index 064f610730d..c0bc38edc21 100644 --- a/src/Storages/System/StorageSystemRowPolicies.cpp +++ b/src/Storages/System/StorageSystemRowPolicies.cpp @@ -53,8 +53,11 @@ NamesAndTypesList StorageSystemRowPolicies::getNamesAndTypes() void StorageSystemRowPolicies::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_ROW_POLICIES); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. 
const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_ROW_POLICIES); + std::vector ids = access_control.findAll(); size_t column_index = 0; diff --git a/src/Storages/System/StorageSystemSettingsChanges.cpp b/src/Storages/System/StorageSystemSettingsChanges.cpp new file mode 100644 index 00000000000..e84fd44fcc3 --- /dev/null +++ b/src/Storages/System/StorageSystemSettingsChanges.cpp @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include +#include + +namespace DB +{ +NamesAndTypesList StorageSystemSettingsChanges::getNamesAndTypes() +{ + return { + {"version", std::make_shared()}, + {"changes", + std::make_shared(std::make_shared( + DataTypes{ + std::make_shared(), + std::make_shared(), + std::make_shared(), + std::make_shared()}, + Names{"name", "previous_value", "new_value", "reason"}))}, + }; +} + +void StorageSystemSettingsChanges::fillData(MutableColumns & res_columns, ContextPtr, const SelectQueryInfo &) const +{ + for (auto it = settings_changes_history.rbegin(); it != settings_changes_history.rend(); ++it) + { + res_columns[0]->insert(it->first.toString()); + Array changes; + for (const auto & change : it->second) + changes.push_back(Tuple{change.name, toString(change.previous_value), toString(change.new_value), change.reason}); + res_columns[1]->insert(changes); + } +} + +} diff --git a/src/Storages/System/StorageSystemSettingsChanges.h b/src/Storages/System/StorageSystemSettingsChanges.h new file mode 100644 index 00000000000..283487df51b --- /dev/null +++ b/src/Storages/System/StorageSystemSettingsChanges.h @@ -0,0 +1,28 @@ +#pragma once + +#include + + +namespace DB +{ + +class Context; + + +/** Implements system table "settings_changes", which allows to get information + * about the settings changes through different ClickHouse versions. + */ +class StorageSystemSettingsChanges final : public IStorageSystemOneBlock +{ +public: + std::string getName() const override { return "SystemSettingsChanges"; } + + static NamesAndTypesList getNamesAndTypes(); + +protected: + using IStorageSystemOneBlock::IStorageSystemOneBlock; + + void fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo & query_info) const override; +}; + +} diff --git a/src/Storages/System/StorageSystemSettingsProfileElements.cpp b/src/Storages/System/StorageSystemSettingsProfileElements.cpp index 8013a3f2e9e..565ff5e471e 100644 --- a/src/Storages/System/StorageSystemSettingsProfileElements.cpp +++ b/src/Storages/System/StorageSystemSettingsProfileElements.cpp @@ -37,8 +37,11 @@ NamesAndTypesList StorageSystemSettingsProfileElements::getNamesAndTypes() void StorageSystemSettingsProfileElements::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_SETTINGS_PROFILES); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. 
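Returning briefly to the new system.settings_changes source added above: its fillData walks the per-version history from the newest version to the oldest and emits, for each version, an array of (name, previous_value, new_value, reason) tuples. A rough standalone C++ sketch of that loop, with plain STL containers and an invented example entry standing in for ClickHouse's column types and real history data:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct SettingChange { std::string name, previous_value, new_value, reason; };
using SettingsChangesHistory = std::map<std::string, std::vector<SettingChange>>; /// version -> changes

int main()
{
    /// Invented sample entry, only to show the shape of a row.
    SettingsChangesHistory history = {{"22.7", {{"example_setting", "0", "1", "example reason"}}}};

    for (auto it = history.rbegin(); it != history.rend(); ++it) /// newest version first
    {
        std::cout << it->first << '\n';
        for (const auto & change : it->second)
            std::cout << "  " << change.name << ": " << change.previous_value
                      << " -> " << change.new_value << " (" << change.reason << ")\n";
    }
    return 0;
}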
const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_SETTINGS_PROFILES); + std::vector ids = access_control.findAll(); boost::range::push_back(ids, access_control.findAll()); boost::range::push_back(ids, access_control.findAll()); diff --git a/src/Storages/System/StorageSystemSettingsProfiles.cpp b/src/Storages/System/StorageSystemSettingsProfiles.cpp index d03848ba68b..069c8762154 100644 --- a/src/Storages/System/StorageSystemSettingsProfiles.cpp +++ b/src/Storages/System/StorageSystemSettingsProfiles.cpp @@ -34,8 +34,11 @@ NamesAndTypesList StorageSystemSettingsProfiles::getNamesAndTypes() void StorageSystemSettingsProfiles::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_SETTINGS_PROFILES); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_SETTINGS_PROFILES); + std::vector ids = access_control.findAll(); size_t column_index = 0; diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index cdd04964f55..549ce193137 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -90,10 +90,10 @@ namespace const ucontext_t signal_context = *reinterpret_cast(context); stack_trace = StackTrace(signal_context); - StringRef query_id = CurrentThread::getQueryId(); - query_id_size = std::min(query_id.size, max_query_id_size); - if (query_id.data && query_id.size) - memcpy(query_id_data, query_id.data, query_id_size); + std::string_view query_id = CurrentThread::getQueryId(); + query_id_size = std::min(query_id.size(), max_query_id_size); + if (!query_id.empty()) + memcpy(query_id_data, query_id.data(), query_id_size); /// This is unneeded (because we synchronize through pipe) but makes TSan happy. data_ready_num.store(notification_num, std::memory_order_release); diff --git a/src/Storages/System/StorageSystemUsers.cpp b/src/Storages/System/StorageSystemUsers.cpp index be56abfa3e8..d7cdf280d14 100644 --- a/src/Storages/System/StorageSystemUsers.cpp +++ b/src/Storages/System/StorageSystemUsers.cpp @@ -60,8 +60,11 @@ NamesAndTypesList StorageSystemUsers::getNamesAndTypes() void StorageSystemUsers::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - context->checkAccess(AccessType::SHOW_USERS); + /// If "select_from_system_db_requires_grant" is enabled the access rights were already checked in InterpreterSelectQuery. 
const auto & access_control = context->getAccessControl(); + if (!access_control.doesSelectFromSystemDatabaseRequireGrant()) + context->checkAccess(AccessType::SHOW_USERS); + std::vector ids = access_control.findAll(); size_t column_index = 0; diff --git a/src/Storages/System/attachSystemTables.cpp b/src/Storages/System/attachSystemTables.cpp index a86a04c4444..dbef2df953b 100644 --- a/src/Storages/System/attachSystemTables.cpp +++ b/src/Storages/System/attachSystemTables.cpp @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -100,6 +101,7 @@ void attachSystemTablesLocal(ContextPtr context, IDatabase & system_database) attach(context, system_database, "functions"); attach(context, system_database, "events"); attach(context, system_database, "settings"); + attach(context, system_database, "settings_changes"); attach>(context, system_database, "merge_tree_settings"); attach>(context, system_database, "replicated_merge_tree_settings"); attach(context, system_database, "build_options"); diff --git a/src/Storages/getStructureOfRemoteTable.cpp b/src/Storages/getStructureOfRemoteTable.cpp index 8fa4d02e8e1..8acd7434d51 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -123,6 +123,17 @@ ColumnsDescription getStructureOfRemoteTable( std::string fail_messages; + /// Use local shard as first priority, as it needs no network communication + for (const auto & shard_info : shards_info) + { + if (shard_info.isLocal()) + { + const auto & res = getStructureOfRemoteTableInShard(cluster, shard_info, table_id, context, table_func_ptr); + chassert(!res.empty()); + return res; + } + } + for (const auto & shard_info : shards_info) { try diff --git a/src/Storages/tests/gtest_storage_log.cpp b/src/Storages/tests/gtest_storage_log.cpp index f5fdb606018..3fa2f93b484 100644 --- a/src/Storages/tests/gtest_storage_log.cpp +++ b/src/Storages/tests/gtest_storage_log.cpp @@ -40,7 +40,7 @@ DB::StoragePtr createStorage(DB::DiskPtr & disk) StoragePtr table = std::make_shared( "Log", disk, "table/", StorageID("test", "test"), ColumnsDescription{names_and_types}, - ConstraintsDescription{}, String{}, false, static_cast(1048576)); + ConstraintsDescription{}, String{}, false, getContext().context); table->startup(); diff --git a/src/TableFunctions/ITableFunction.cpp b/src/TableFunctions/ITableFunction.cpp index 639240fd105..82b6230dc30 100644 --- a/src/TableFunctions/ITableFunction.cpp +++ b/src/TableFunctions/ITableFunction.cpp @@ -23,7 +23,12 @@ StoragePtr ITableFunction::execute(const ASTPtr & ast_function, ContextPtr conte ColumnsDescription cached_columns, bool use_global_context) const { ProfileEvents::increment(ProfileEvents::TableFunctionExecute); - context->checkAccess(AccessType::CREATE_TEMPORARY_TABLE | getSourceAccessType()); + + AccessFlags required_access = getSourceAccessType(); + String function_name = getName(); + if ((function_name != "null") && (function_name != "view") && (function_name != "viewIfPermitted")) + required_access |= AccessType::CREATE_TEMPORARY_TABLE; + context->checkAccess(required_access); auto context_to_use = use_global_context ? 
context->getGlobalContext() : context; diff --git a/src/TableFunctions/TableFunctionViewIfPermitted.cpp b/src/TableFunctions/TableFunctionViewIfPermitted.cpp new file mode 100644 index 00000000000..dbc4d40d079 --- /dev/null +++ b/src/TableFunctions/TableFunctionViewIfPermitted.cpp @@ -0,0 +1,113 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "registerTableFunctions.h" + + +namespace DB +{ +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; + extern const int ACCESS_DENIED; +} + + +const ASTSelectWithUnionQuery & TableFunctionViewIfPermitted::getSelectQuery() const +{ + return *create.select; +} + +void TableFunctionViewIfPermitted::parseArguments(const ASTPtr & ast_function, ContextPtr context) +{ + const auto * function = ast_function->as(); + if (!function || !function->arguments || (function->arguments->children.size() != 2)) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Table function '{}' requires two arguments: a SELECT query and a table function", + getName()); + + const auto & arguments = function->arguments->children; + auto * select = arguments[0]->as(); + if (!select) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function '{}' requires a SELECT query as its first argument", getName()); + create.set(create.select, select->clone()); + + else_ast = arguments[1]; + if (!else_ast->as()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function '{}' requires a table function as its second argument", getName()); + else_table_function = TableFunctionFactory::instance().get(else_ast, context); +} + +ColumnsDescription TableFunctionViewIfPermitted::getActualTableStructure(ContextPtr context) const +{ + return else_table_function->getActualTableStructure(context); +} + +StoragePtr TableFunctionViewIfPermitted::executeImpl( + const ASTPtr & /* ast_function */, ContextPtr context, const std::string & table_name, ColumnsDescription /* cached_columns */) const +{ + StoragePtr storage; + auto columns = getActualTableStructure(context); + + if (isPermitted(context, columns)) + { + storage = std::make_shared(StorageID(getDatabaseName(), table_name), create, columns, ""); + } + else + { + storage = else_table_function->execute(else_ast, context, table_name); + } + + storage->startup(); + return storage; +} + +bool TableFunctionViewIfPermitted::isPermitted(const ContextPtr & context, const ColumnsDescription & else_columns) const +{ + Block sample_block; + + try + { + /// Will throw ACCESS_DENIED if the current user is not allowed to execute the SELECT query. + sample_block = InterpreterSelectWithUnionQuery::getSampleBlock(create.children[0], context); + } + catch (Exception & e) + { + if (e.code() == ErrorCodes::ACCESS_DENIED) + return false; + throw; + } + + /// We check that columns match only if permitted (otherwise we could reveal the structure to an user who must not know it). + ColumnsDescription columns{sample_block.getNamesAndTypesList()}; + if (columns != else_columns) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Table function '{}' requires a SELECT query with the result columns matching a table function after 'ELSE'. 
" + "Currently the result columns of the SELECT query are {}, and the table function after 'ELSE' gives {}", + getName(), + columns.toString(), + else_columns.toString()); + } + + return true; +} + +void registerTableFunctionViewIfPermitted(TableFunctionFactory & factory) +{ + factory.registerFunction(); +} + +} diff --git a/src/TableFunctions/TableFunctionViewIfPermitted.h b/src/TableFunctions/TableFunctionViewIfPermitted.h new file mode 100644 index 00000000000..0fd0b050eaf --- /dev/null +++ b/src/TableFunctions/TableFunctionViewIfPermitted.h @@ -0,0 +1,35 @@ +#pragma once + +#include +#include +#include + +namespace DB +{ + +/* viewIfPermitted(query ELSE null('structure')) + * Works as "view(query)" if the current user has the permissions required to execute "query"; works as "null('structure')" otherwise. + */ +class TableFunctionViewIfPermitted : public ITableFunction +{ +public: + static constexpr auto name = "viewIfPermitted"; + std::string getName() const override { return name; } + + const ASTSelectWithUnionQuery & getSelectQuery() const; + +private: + StoragePtr executeImpl(const ASTPtr & ast_function, ContextPtr context, const String & table_name, ColumnsDescription cached_columns) const override; + const char * getStorageTypeName() const override { return "ViewIfPermitted"; } + + void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; + ColumnsDescription getActualTableStructure(ContextPtr context) const override; + + bool isPermitted(const ContextPtr & context, const ColumnsDescription & else_columns) const; + + ASTCreateQuery create; + ASTPtr else_ast; + TableFunctionPtr else_table_function; +}; + +} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index 12ca4abe113..3ef93c9b69d 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -42,6 +42,7 @@ void registerTableFunctions() registerTableFunctionJDBC(factory); registerTableFunctionView(factory); + registerTableFunctionViewIfPermitted(factory); #if USE_MYSQL registerTableFunctionMySQL(factory); diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index 49a1ef60a6b..d7e38403cae 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -40,6 +40,7 @@ void registerTableFunctionODBC(TableFunctionFactory & factory); void registerTableFunctionJDBC(TableFunctionFactory & factory); void registerTableFunctionView(TableFunctionFactory & factory); +void registerTableFunctionViewIfPermitted(TableFunctionFactory & factory); #if USE_MYSQL void registerTableFunctionMySQL(TableFunctionFactory & factory); diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py index dbf5adfe174..4bb7a619b9f 100644 --- a/tests/ci/build_report_check.py +++ b/tests/ci/build_report_check.py @@ -19,7 +19,10 @@ from report import create_build_html_report from s3_helper import S3Helper from get_robot_token import get_best_robot_token from pr_info import PRInfo -from commit_status_helper import get_commit +from commit_status_helper import ( + get_commit, + fail_simple_check, +) from ci_config import CI_CONFIG from rerun_helper import RerunHelper @@ -151,6 +154,12 @@ def main(): needs_data = json.load(file_handler) required_builds = len(needs_data) + # A report might be empty in case of `do not test` label, for example. + # We should still be able to merge such PRs. 
+ all_skipped = needs_data is not None and all( + i["result"] == "skipped" for i in needs_data.values() + ) + logging.info("The next builds are required: %s", ", ".join(needs_data)) gh = Github(get_best_robot_token()) @@ -228,6 +237,8 @@ def main(): total_groups = len(build_results) logging.info("Totally got %s artifact groups", total_groups) if total_groups == 0: + if not all_skipped: + fail_simple_check(gh, pr_info, f"{build_check_name} failed") logging.error("No success builds, failing check") sys.exit(1) @@ -297,6 +308,8 @@ def main(): ) if summary_status == "error": + if not all_skipped: + fail_simple_check(gh, pr_info, f"{build_check_name} failed") sys.exit(1) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index 745284b2b29..334a24ed7af 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -1,72 +1,497 @@ #!/usr/bin/env python3 +""" +A plan: + - TODO: consider receiving GH objects cache from S3, but it's really a few + of requests to API currently + - Get all open release PRs (20.10, 21.8, 22.5, etc.) + - Get all pull-requests between the date of the merge-base for the oldest PR with + labels pr-must-backport and version-specific v21.8-must-backport, but without + pr-backported + - Iterate over gotten PRs: + - for pr-must-backport: + - check if all backport-PRs are created. If yes, + set pr-backported label and finish + - If not, create either cherrypick PRs or merge cherrypick (in the same + stage, if mergable) and create backport-PRs + - If successfull, set pr-backported label on the PR + + - for version-specific labels: + - the same, check, cherry-pick, backport, pr-backported + +Cherry-pick stage: + - From time to time the cherry-pick fails, if it was done manually. In the + case we check if it's even needed, and mark the release as done somehow. +""" import argparse import logging import os -import subprocess +from contextlib import contextmanager +from datetime import date, timedelta +from subprocess import CalledProcessError +from typing import List, Optional -from env_helper import GITHUB_WORKSPACE, TEMP_PATH +from env_helper import TEMP_PATH from get_robot_token import get_best_robot_token +from git_helper import git_runner, is_shallow +from github_helper import ( + GitHub, + PullRequest, + PullRequests, + Repository, +) from ssh import SSHKey -from cherry_pick_utils.backport import Backport -from cherry_pick_utils.cherrypick import CherryPick + + +class Labels: + LABEL_MUST_BACKPORT = "pr-must-backport" + LABEL_BACKPORT = "pr-backport" + LABEL_BACKPORTED = "pr-backported" + LABEL_CHERRYPICK = "pr-cherrypick" + LABEL_DO_NOT_TEST = "do not test" + + +class ReleaseBranch: + CHERRYPICK_DESCRIPTION = """This pull-request is a first step of an automated \ + backporting. +It contains changes like after calling a local command `git cherry-pick`. +If you intend to continue backporting this changes, then resolve all conflicts if any. +Otherwise, if you do not want to backport them, then just close this pull-request. + +The check results does not matter at this step - you can safely ignore them. +Also this pull-request will be merged automatically as it reaches the mergeable state, \ + but you always can merge it manually. +""" + BACKPORT_DESCRIPTION = """This pull-request is a last step of an automated \ +backporting. +Treat it as a standard pull-request: look at the checks and resolve conflicts. +Merge it only if you intend to backport changes to the target branch, otherwise just \ + close it. 
+""" + REMOTE = "" + + def __init__(self, name: str, pr: PullRequest): + self.name = name + self.pr = pr + self.cherrypick_branch = f"cherrypick/{name}/{pr.merge_commit_sha}" + self.backport_branch = f"backport/{name}/{pr.number}" + self.cherrypick_pr = None # type: Optional[PullRequest] + self.backport_pr = None # type: Optional[PullRequest] + self._backported = None # type: Optional[bool] + self.git_prefix = ( # All commits to cherrypick are done as robot-clickhouse + "git -c user.email=robot-clickhouse@clickhouse.com " + "-c user.name=robot-clickhouse -c commit.gpgsign=false" + ) + self.pre_check() + + def pre_check(self): + branch_updated = git_runner( + f"git branch -a --contains={self.pr.merge_commit_sha} " + f"{self.REMOTE}/{self.name}" + ) + if branch_updated: + self._backported = True + + def pop_prs(self, prs: PullRequests): + to_pop = [] # type: List[int] + for i, pr in enumerate(prs): + if self.name not in pr.head.ref: + continue + if pr.head.ref.startswith(f"cherrypick/{self.name}"): + self.cherrypick_pr = pr + to_pop.append(i) + elif pr.head.ref.startswith(f"backport/{self.name}"): + self.backport_pr = pr + to_pop.append(i) + else: + logging.error( + "PR #%s doesn't head ref starting with known suffix", + pr.number, + ) + for i in reversed(to_pop): + # Going from the tail to keep the order and pop greater index first + prs.pop(i) + + def process(self, dry_run: bool): + if self.backported: + return + if not self.cherrypick_pr: + if dry_run: + logging.info( + "DRY RUN: Would create cherrypick PR for #%s", self.pr.number + ) + return + self.create_cherrypick() + if self.backported: + return + if self.cherrypick_pr is not None: + # Try to merge cherrypick instantly + if self.cherrypick_pr.mergeable and self.cherrypick_pr.state != "closed": + self.cherrypick_pr.merge() + # The PR needs update, since PR.merge doesn't update the object + self.cherrypick_pr.update() + if self.cherrypick_pr.merged: + if dry_run: + logging.info( + "DRY RUN: Would create backport PR for #%s", self.pr.number + ) + return + self.create_backport() + return + elif self.cherrypick_pr.state == "closed": + logging.info( + "The cherrypick PR #%s for PR #%s is discarded", + self.cherrypick_pr.number, + self.pr.number, + ) + self._backported = True + return + logging.info( + "Cherrypick PR #%s for PR #%s have conflicts and unable to be merged", + self.cherrypick_pr.number, + self.pr.number, + ) + + def create_cherrypick(self): + # First, create backport branch: + # Checkout release branch with discarding every change + git_runner(f"{self.git_prefix} checkout -f {self.name}") + # Create or reset backport branch + git_runner(f"{self.git_prefix} checkout -B {self.backport_branch}") + # Merge all changes from PR's the first parent commit w/o applying anything + # It will allow to create a merge commit like it would be a cherry-pick + first_parent = git_runner(f"git rev-parse {self.pr.merge_commit_sha}^1") + git_runner(f"{self.git_prefix} merge -s ours --no-edit {first_parent}") + + # Second step, create cherrypick branch + git_runner( + f"{self.git_prefix} branch -f " + f"{self.cherrypick_branch} {self.pr.merge_commit_sha}" + ) + + # Check if there actually any changes between branches. If no, then no + # other actions are required. 
It's possible when changes are backported + # manually to the release branch already + try: + output = git_runner( + f"{self.git_prefix} merge --no-commit --no-ff {self.cherrypick_branch}" + ) + # 'up-to-date', 'up to date', who knows what else (╯°v°)╯ ^┻━┻ + if output.startswith("Already up") and output.endswith("date."): + # The changes are already in the release branch, we are done here + logging.info( + "Release branch %s already contain changes from %s", + self.name, + self.pr.number, + ) + self._backported = True + return + except CalledProcessError: + # There are most probably conflicts, they'll be resolved in PR + git_runner(f"{self.git_prefix} reset --merge") + else: + # There are changes to apply, so continue + git_runner(f"{self.git_prefix} reset --merge") + + # Push, create the cherrypick PR, lable and assign it + for branch in [self.cherrypick_branch, self.backport_branch]: + git_runner(f"{self.git_prefix} push -f {self.REMOTE} {branch}:{branch}") + + self.cherrypick_pr = self.pr.base.repo.create_pull( + title=f"Cherry pick #{self.pr.number} to {self.name}: {self.pr.title}", + body=f"Original pull-request #{self.pr.number}\n\n" + f"{self.CHERRYPICK_DESCRIPTION}", + base=self.backport_branch, + head=self.cherrypick_branch, + ) + self.cherrypick_pr.add_to_labels(Labels.LABEL_CHERRYPICK) + self.cherrypick_pr.add_to_labels(Labels.LABEL_DO_NOT_TEST) + self.cherrypick_pr.add_to_assignees(self.pr.assignee) + self.cherrypick_pr.add_to_assignees(self.pr.user) + + def create_backport(self): + # Checkout the backport branch from the remote and make all changes to + # apply like they are only one cherry-pick commit on top of release + git_runner(f"{self.git_prefix} checkout -f {self.backport_branch}") + git_runner( + f"{self.git_prefix} pull --ff-only {self.REMOTE} {self.backport_branch}" + ) + merge_base = git_runner( + f"{self.git_prefix} merge-base " + f"{self.REMOTE}/{self.name} {self.backport_branch}" + ) + git_runner(f"{self.git_prefix} reset --soft {merge_base}") + title = f"Backport #{self.pr.number} to {self.name}: {self.pr.title}" + git_runner(f"{self.git_prefix} commit -a --allow-empty -F -", input=title) + + # Push with force, create the backport PR, lable and assign it + git_runner( + f"{self.git_prefix} push -f {self.REMOTE} " + f"{self.backport_branch}:{self.backport_branch}" + ) + self.backport_pr = self.pr.base.repo.create_pull( + title=title, + body=f"Original pull-request #{self.pr.number}\n" + f"Cherry-pick pull-request #{self.cherrypick_pr.number}\n\n" + f"{self.BACKPORT_DESCRIPTION}", + base=self.name, + head=self.backport_branch, + ) + self.backport_pr.add_to_labels(Labels.LABEL_BACKPORT) + self.backport_pr.add_to_assignees(self.pr.assignee) + self.backport_pr.add_to_assignees(self.pr.user) + + @property + def backported(self) -> bool: + if self._backported is not None: + return self._backported + return self.backport_pr is not None + + def __repr__(self): + return self.name + + +class Backport: + def __init__(self, gh: GitHub, repo: str, dry_run: bool): + self.gh = gh + self._repo_name = repo + self.dry_run = dry_run + + self._query = f"type:pr repo:{repo}" + self._remote = "" + self._repo = None # type: Optional[Repository] + self.release_prs = [] # type: PullRequests + self.release_branches = [] # type: List[str] + self.labels_to_backport = [] # type: List[str] + self.prs_for_backport = [] # type: PullRequests + self.error = None # type: Optional[Exception] + + @property + def remote(self) -> str: + if not self._remote: + # lines of "origin 
git@github.com:ClickHouse/ClickHouse.git (fetch)" + remotes = git_runner("git remote -v").split("\n") + # We need the first word from the first matching result + self._remote = tuple( + remote.split(maxsplit=1)[0] + for remote in remotes + if f"github.com/{self._repo_name}" in remote # https + or f"github.com:{self._repo_name}" in remote # ssh + )[0] + git_runner(f"git fetch {self._remote}") + ReleaseBranch.REMOTE = self._remote + return self._remote + + def receive_release_prs(self): + logging.info("Getting release PRs") + self.release_prs = self.gh.get_pulls_from_search( + query=f"{self._query} is:open", + sort="created", + order="asc", + label="release", + ) + self.release_branches = [pr.head.ref for pr in self.release_prs] + self.labels_to_backport = [ + f"v{branch}-must-backport" for branch in self.release_branches + ] + logging.info("Active releases: %s", ", ".join(self.release_branches)) + + def receive_prs_for_backport(self): + # The commit is the oldest open release branch's merge-base + since_commit = git_runner( + f"git merge-base {self.remote}/{self.release_branches[0]} " + f"{self.remote}/{self.default_branch}" + ) + since_date = date.fromisoformat( + git_runner.run(f"git log -1 --format=format:%cs {since_commit}") + ) + # To not have a possible TZ issues + tomorrow = date.today() + timedelta(days=1) + logging.info("Receive PRs suppose to be backported") + self.prs_for_backport = self.gh.get_pulls_from_search( + query=f"{self._query} -label:pr-backported", + label=",".join(self.labels_to_backport + [Labels.LABEL_MUST_BACKPORT]), + merged=[since_date, tomorrow], + ) + logging.info( + "PRs to be backported:\n %s", + "\n ".join([pr.html_url for pr in self.prs_for_backport]), + ) + + def process_backports(self): + for pr in self.prs_for_backport: + try: + self.process_pr(pr) + except Exception as e: + logging.error( + "During processing the PR #%s error occured: %s", pr.number, e + ) + self.error = e + + def process_pr(self, pr: PullRequest): + pr_labels = [label.name for label in pr.labels] + if Labels.LABEL_MUST_BACKPORT in pr_labels: + branches = [ + ReleaseBranch(br, pr) for br in self.release_branches + ] # type: List[ReleaseBranch] + else: + branches = [ + ReleaseBranch(br, pr) + for br in [ + label.split("-", 1)[0][1:] # v21.8-must-backport + for label in pr_labels + if label in self.labels_to_backport + ] + ] + if not branches: + # This is definitely some error. There must be at least one branch + # It also make the whole program exit code non-zero + self.error = Exception( + f"There are no branches to backport PR #{pr.number}, logical error" + ) + raise self.error + + logging.info( + " PR #%s is suppose to be backported to %s", + pr.number, + ", ".join(map(str, branches)), + ) + # All PRs for cherrypick and backport branches as heads + query_suffix = " ".join( + [ + f"head:{branch.backport_branch} head:{branch.cherrypick_branch}" + for branch in branches + ] + ) + bp_cp_prs = self.gh.get_pulls_from_search( + query=f"{self._query} {query_suffix}", + ) + for br in branches: + br.pop_prs(bp_cp_prs) + + if bp_cp_prs: + # This is definitely some error. All prs must be consumed by + # branches with ReleaseBranch.pop_prs. 
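The label-to-branch mapping in `process_pr` relies purely on the `v<MAJOR>.<MINOR>-must-backport` naming convention; a tiny self-contained illustration (the label and branch names below are made up):

```python
labels = ["v22.3-must-backport", "v22.7-must-backport", "pr-bugfix"]
release_branches = ["22.3", "22.7", "22.8"]  # hypothetical open release branches
labels_to_backport = [f"v{b}-must-backport" for b in release_branches]

branches = [
    label.split("-", 1)[0][1:]  # "v22.3-must-backport" -> "v22.3" -> "22.3"
    for label in labels
    if label in labels_to_backport
]
assert branches == ["22.3", "22.7"]
```
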
It also make the whole + # program exit code non-zero + self.error = Exception( + "The following PRs are not filtered by release branches:\n" + "\n".join(map(str, bp_cp_prs)) + ) + raise self.error + + if all(br.backported for br in branches): + # Let's check if the PR is already backported + self.mark_pr_backported(pr) + return + + for br in branches: + br.process(self.dry_run) + + if all(br.backported for br in branches): + # And check it after the running + self.mark_pr_backported(pr) + + def mark_pr_backported(self, pr: PullRequest): + if self.dry_run: + logging.info("DRY RUN: would mark PR #%s as done", pr.number) + return + pr.add_to_labels(Labels.LABEL_BACKPORTED) + logging.info( + "PR #%s is successfully labeled with `%s`", + pr.number, + Labels.LABEL_BACKPORTED, + ) + + @property + def repo(self) -> Repository: + if self._repo is None: + try: + self._repo = self.release_prs[0].base.repo + except IndexError as exc: + raise Exception( + "`repo` is available only after the `receive_release_prs`" + ) from exc + return self._repo + + @property + def default_branch(self) -> str: + return self.repo.default_branch def parse_args(): parser = argparse.ArgumentParser("Create cherry-pick and backport PRs") parser.add_argument("--token", help="github token, if not set, used from smm") + parser.add_argument( + "--repo", default="ClickHouse/ClickHouse", help="repo owner/name" + ) parser.add_argument("--dry-run", action="store_true", help="do not create anything") + parser.add_argument( + "--debug-helpers", + action="store_true", + help="add debug logging for git_helper and github_helper", + ) return parser.parse_args() +@contextmanager +def clear_repo(): + orig_ref = git_runner("git branch --show-current") or git_runner( + "git rev-parse HEAD" + ) + try: + yield + except (Exception, KeyboardInterrupt): + git_runner(f"git checkout -f {orig_ref}") + raise + else: + git_runner(f"git checkout -f {orig_ref}") + + +@contextmanager +def stash(): + need_stash = bool(git_runner("git diff HEAD")) + if need_stash: + git_runner("git stash push --no-keep-index -m 'running cherry_pick.py'") + try: + with clear_repo(): + yield + except (Exception, KeyboardInterrupt): + if need_stash: + git_runner("git stash pop") + raise + else: + if need_stash: + git_runner("git stash pop") + + def main(): + if not os.path.exists(TEMP_PATH): + os.makedirs(TEMP_PATH) + args = parse_args() + if args.debug_helpers: + logging.getLogger("github_helper").setLevel(logging.DEBUG) + logging.getLogger("git_helper").setLevel(logging.DEBUG) token = args.token or get_best_robot_token() - bp = Backport( - token, - os.environ.get("REPO_OWNER"), - os.environ.get("REPO_NAME"), - os.environ.get("REPO_TEAM"), - ) - - cherry_pick = CherryPick( - token, - os.environ.get("REPO_OWNER"), - os.environ.get("REPO_NAME"), - os.environ.get("REPO_TEAM"), - 1, - "master", - ) - # Use the same _gh in both objects to have a proper cost - # pylint: disable=protected-access - for key in bp._gh.api_costs: - if key in cherry_pick._gh.api_costs: - bp._gh.api_costs[key] += cherry_pick._gh.api_costs[key] - for key in cherry_pick._gh.api_costs: - if key not in bp._gh.api_costs: - bp._gh.api_costs[key] = cherry_pick._gh.api_costs[key] - cherry_pick._gh = bp._gh - # pylint: enable=protected-access - - def cherrypick_run(pr_data, branch): - cherry_pick.update_pr_branch(pr_data, branch) - return cherry_pick.execute(GITHUB_WORKSPACE, args.dry_run) - - try: - bp.execute(GITHUB_WORKSPACE, "origin", None, cherrypick_run) - except subprocess.CalledProcessError as e: - 
logging.error(e.output) + gh = GitHub(token, per_page=100) + bp = Backport(gh, args.repo, args.dry_run) + bp.gh.cache_path = str(f"{TEMP_PATH}/gh_cache") + bp.receive_release_prs() + bp.receive_prs_for_backport() + bp.process_backports() + if bp.error is not None: + logging.error("Finished successfully, but errors occured!") + raise bp.error if __name__ == "__main__": logging.basicConfig(level=logging.INFO) - if not os.path.exists(TEMP_PATH): - os.makedirs(TEMP_PATH) - - if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""): - with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): + assert not is_shallow() + with stash(): + if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""): + with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): + main() + else: main() - else: - main() diff --git a/tests/ci/cherry_pick_utils/__init__.py b/tests/ci/cherry_pick_utils/__init__.py deleted file mode 100644 index faa18be5bbf..00000000000 --- a/tests/ci/cherry_pick_utils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- diff --git a/tests/ci/cherry_pick_utils/backport.py b/tests/ci/cherry_pick_utils/backport.py deleted file mode 100644 index 1bc910886de..00000000000 --- a/tests/ci/cherry_pick_utils/backport.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- - -import argparse -import logging -import os -import re -import sys - -sys.path.append(os.path.dirname(__file__)) - -from cherrypick import CherryPick -from query import Query as RemoteRepo -from local import Repository as LocalRepo - - -class Backport: - def __init__(self, token, owner, name, team): - self._gh = RemoteRepo( - token, owner=owner, name=name, team=team, max_page_size=60, min_page_size=7 - ) - self._token = token - self.default_branch_name = self._gh.default_branch - self.ssh_url = self._gh.ssh_url - - def getPullRequests(self, from_commit): - return self._gh.get_pull_requests(from_commit) - - def getBranchesWithRelease(self): - branches = set() - for pull_request in self._gh.find_pull_requests("release"): - branches.add(pull_request["headRefName"]) - return branches - - def execute(self, repo, upstream, until_commit, run_cherrypick): - repo = LocalRepo(repo, upstream, self.default_branch_name) - all_branches = repo.get_release_branches() # [(branch_name, base_commit)] - - release_branches = self.getBranchesWithRelease() - - branches = [] - # iterate over all branches to preserve their precedence. - for branch in all_branches: - if branch[0] in release_branches: - branches.append(branch) - - if not branches: - logging.info("No release branches found!") - return - - logging.info( - "Found release branches: %s", ", ".join([br[0] for br in branches]) - ) - - if not until_commit: - until_commit = branches[0][1] - pull_requests = self.getPullRequests(until_commit) - - backport_map = {} - pr_map = {pr["number"]: pr for pr in pull_requests} - - RE_MUST_BACKPORT = re.compile(r"^v(\d+\.\d+)-must-backport$") - RE_NO_BACKPORT = re.compile(r"^v(\d+\.\d+)-no-backport$") - RE_BACKPORTED = re.compile(r"^v(\d+\.\d+)-backported$") - - # pull-requests are sorted by ancestry from the most recent. - for pr in pull_requests: - while repo.comparator(branches[-1][1]) >= repo.comparator( - pr["mergeCommit"]["oid"] - ): - logging.info( - "PR #%s is already inside %s. Dropping this branch for further PRs", - pr["number"], - branches[-1][0], - ) - branches.pop() - - logging.info("Processing PR #%s", pr["number"]) - - assert len(branches) != 0 - - branch_set = {branch[0] for branch in branches} - - # First pass. 
Find all must-backports - for label in pr["labels"]["nodes"]: - if label["name"] == "pr-must-backport": - backport_map[pr["number"]] = branch_set.copy() - continue - matched = RE_MUST_BACKPORT.match(label["name"]) - if matched: - if pr["number"] not in backport_map: - backport_map[pr["number"]] = set() - backport_map[pr["number"]].add(matched.group(1)) - - # Second pass. Find all no-backports - for label in pr["labels"]["nodes"]: - if label["name"] == "pr-no-backport" and pr["number"] in backport_map: - del backport_map[pr["number"]] - break - matched_no_backport = RE_NO_BACKPORT.match(label["name"]) - matched_backported = RE_BACKPORTED.match(label["name"]) - if ( - matched_no_backport - and pr["number"] in backport_map - and matched_no_backport.group(1) in backport_map[pr["number"]] - ): - backport_map[pr["number"]].remove(matched_no_backport.group(1)) - logging.info( - "\tskipping %s because of forced no-backport", - matched_no_backport.group(1), - ) - elif ( - matched_backported - and pr["number"] in backport_map - and matched_backported.group(1) in backport_map[pr["number"]] - ): - backport_map[pr["number"]].remove(matched_backported.group(1)) - logging.info( - "\tskipping %s because it's already backported manually", - matched_backported.group(1), - ) - - for pr, branches in list(backport_map.items()): - statuses = [] - for branch in branches: - branch_status = run_cherrypick(pr_map[pr], branch) - statuses.append(f"{branch}, and the status is: {branch_status}") - logging.info( - "PR #%s needs to be backported to:\n\t%s", pr, "\n\t".join(statuses) - ) - - # print API costs - logging.info("\nGitHub API total costs for backporting per query:") - for name, value in list(self._gh.api_costs.items()): - logging.info("%s : %s", name, value) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - "--token", type=str, required=True, help="token for Github access" - ) - parser.add_argument( - "--repo", - type=str, - required=True, - help="path to full repository", - metavar="PATH", - ) - parser.add_argument( - "--til", type=str, help="check PRs from HEAD til this commit", metavar="COMMIT" - ) - parser.add_argument( - "--dry-run", - action="store_true", - help="do not create or merge any PRs", - default=False, - ) - parser.add_argument( - "--verbose", - "-v", - action="store_true", - help="more verbose output", - default=False, - ) - parser.add_argument( - "--upstream", - "-u", - type=str, - help="remote name of upstream in repository", - default="origin", - ) - args = parser.parse_args() - - if args.verbose: - logging.basicConfig( - format="%(message)s", stream=sys.stdout, level=logging.DEBUG - ) - else: - logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.INFO) - - cherry_pick = CherryPick( - args.token, "ClickHouse", "ClickHouse", "core", 1, "master" - ) - - def cherrypick_run(pr_data, branch): - cherry_pick.update_pr_branch(pr_data, branch) - return cherry_pick.execute(args.repo, args.dry_run) - - bp = Backport(args.token, "ClickHouse", "ClickHouse", "core") - bp.execute(args.repo, args.upstream, args.til, cherrypick_run) diff --git a/tests/ci/cherry_pick_utils/cherrypick.py b/tests/ci/cherry_pick_utils/cherrypick.py deleted file mode 100644 index c844beaee88..00000000000 --- a/tests/ci/cherry_pick_utils/cherrypick.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -Backports changes from PR to release branch. -Requires multiple separate runs as part of the implementation. - -First run should do the following: -1. 
Merge release branch with a first parent of merge-commit of PR (using 'ours' strategy). (branch: backport/{branch}/{pr}) -2. Create temporary branch over merge-commit to use it for PR creation. (branch: cherrypick/{merge_commit}) -3. Create PR from temporary branch to backport branch (emulating cherry-pick). - -Second run checks PR from previous run to be merged or at least being mergeable. If it's not merged then try to merge it. - -Third run creates PR from backport branch (with merged previous PR) to release branch. -""" - - -import argparse -from enum import Enum -import logging -import os -import subprocess -import sys - -sys.path.append(os.path.dirname(__file__)) - -from query import Query as RemoteRepo - - -class CherryPick: - class Status(Enum): - DISCARDED = "discarded" - NOT_INITIATED = "not started" - FIRST_MERGEABLE = "waiting for 1st stage" - FIRST_CONFLICTS = "conflicts on 1st stage" - SECOND_MERGEABLE = "waiting for 2nd stage" - SECOND_CONFLICTS = "conflicts on 2nd stage" - MERGED = "backported" - - def _run(self, args): - out = subprocess.check_output(args).rstrip() - logging.debug(out) - return out - - def __init__(self, token, owner, name, team, pr_number, target_branch): - self._gh = RemoteRepo(token, owner=owner, name=name, team=team) - self._pr = self._gh.get_pull_request(pr_number) - self.target_branch = target_branch - - self.ssh_url = self._gh.ssh_url - - # TODO: check if pull-request is merged. - self.update_pr_branch(self._pr, self.target_branch) - - def update_pr_branch(self, pr_data, target_branch): - """The method is here to avoid unnecessary creation of new objects""" - self._pr = pr_data - self.target_branch = target_branch - self.merge_commit_oid = self._pr["mergeCommit"]["oid"] - - self.backport_branch = f"backport/{target_branch}/{pr_data['number']}" - self.cherrypick_branch = f"cherrypick/{target_branch}/{self.merge_commit_oid}" - - def getCherryPickPullRequest(self): - return self._gh.find_pull_request( - base=self.backport_branch, head=self.cherrypick_branch - ) - - def createCherryPickPullRequest(self, repo_path): - DESCRIPTION = ( - "This pull-request is a first step of an automated backporting.\n" - "It contains changes like after calling a local command `git cherry-pick`.\n" - "If you intend to continue backporting this changes, then resolve all conflicts if any.\n" - "Otherwise, if you do not want to backport them, then just close this pull-request.\n" - "\n" - "The check results does not matter at this step - you can safely ignore them.\n" - "Also this pull-request will be merged automatically as it reaches the mergeable state, but you always can merge it manually.\n" - ) - - # FIXME: replace with something better than os.system() - git_prefix = [ - "git", - "-C", - repo_path, - "-c", - "user.email=robot-clickhouse@yandex-team.ru", - "-c", - "user.name=robot-clickhouse", - ] - base_commit_oid = self._pr["mergeCommit"]["parents"]["nodes"][0]["oid"] - - # Create separate branch for backporting, and make it look like real cherry-pick. - self._run(git_prefix + ["checkout", "-f", self.target_branch]) - self._run(git_prefix + ["checkout", "-B", self.backport_branch]) - self._run(git_prefix + ["merge", "-s", "ours", "--no-edit", base_commit_oid]) - - # Create secondary branch to allow pull request with cherry-picked commit. 
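The `-s ours` merge is what makes the backport branch look like a real cherry-pick later: it records the base commit in history without touching the tree. A small read-only sketch of both properties, assuming it is run right after such a merge and that the merged base commit is passed on the command line:

```python
import subprocess
import sys

BASE = sys.argv[1]  # the commit merged with `-s ours` (assumption: the PR's first parent)

def git(cmd: str) -> None:
    subprocess.check_call(f"git {cmd}", shell=True)

# 1. The merge is recorded, so BASE is now an ancestor of HEAD ...
git(f"merge-base --is-ancestor {BASE} HEAD")  # exit code 0, i.e. no exception raised
# 2. ... but the tree is byte-for-byte the pre-merge (release branch) tree.
git("diff --quiet HEAD@{1} HEAD")             # also exit code 0: no content changes
```
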
- self._run( - git_prefix + ["branch", "-f", self.cherrypick_branch, self.merge_commit_oid] - ) - - self._run( - git_prefix - + [ - "push", - "-f", - "origin", - "{branch}:{branch}".format(branch=self.backport_branch), - ] - ) - self._run( - git_prefix - + [ - "push", - "-f", - "origin", - "{branch}:{branch}".format(branch=self.cherrypick_branch), - ] - ) - - # Create pull-request like a local cherry-pick - title = self._pr["title"].replace('"', r"\"") - pr = self._gh.create_pull_request( - source=self.cherrypick_branch, - target=self.backport_branch, - title=( - f'Cherry pick #{self._pr["number"]} ' - f"to {self.target_branch}: " - f"{title}" - ), - description=f'Original pull-request #{self._pr["number"]}\n\n{DESCRIPTION}', - ) - - # FIXME: use `team` to leave a single eligible assignee. - self._gh.add_assignee(pr, self._pr["author"]) - self._gh.add_assignee(pr, self._pr["mergedBy"]) - - self._gh.set_label(pr, "do not test") - self._gh.set_label(pr, "pr-cherrypick") - - return pr - - def mergeCherryPickPullRequest(self, cherrypick_pr): - return self._gh.merge_pull_request(cherrypick_pr["id"]) - - def getBackportPullRequest(self): - return self._gh.find_pull_request( - base=self.target_branch, head=self.backport_branch - ) - - def createBackportPullRequest(self, cherrypick_pr, repo_path): - DESCRIPTION = ( - "This pull-request is a last step of an automated backporting.\n" - "Treat it as a standard pull-request: look at the checks and resolve conflicts.\n" - "Merge it only if you intend to backport changes to the target branch, otherwise just close it.\n" - ) - - git_prefix = [ - "git", - "-C", - repo_path, - "-c", - "user.email=robot-clickhouse@clickhouse.com", - "-c", - "user.name=robot-clickhouse", - ] - - title = self._pr["title"].replace('"', r"\"") - pr_title = f"Backport #{self._pr['number']} to {self.target_branch}: {title}" - - self._run(git_prefix + ["checkout", "-f", self.backport_branch]) - self._run(git_prefix + ["pull", "--ff-only", "origin", self.backport_branch]) - self._run( - git_prefix - + [ - "reset", - "--soft", - self._run( - git_prefix - + [ - "merge-base", - "origin/" + self.target_branch, - self.backport_branch, - ] - ), - ] - ) - self._run(git_prefix + ["commit", "-a", "--allow-empty", "-m", pr_title]) - self._run( - git_prefix - + [ - "push", - "-f", - "origin", - "{branch}:{branch}".format(branch=self.backport_branch), - ] - ) - - pr = self._gh.create_pull_request( - source=self.backport_branch, - target=self.target_branch, - title=pr_title, - description=f"Original pull-request #{self._pr['number']}\n" - f"Cherry-pick pull-request #{cherrypick_pr['number']}\n\n{DESCRIPTION}", - ) - - # FIXME: use `team` to leave a single eligible assignee. 
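The `reset --soft <merge-base>` followed by a single commit is how both the old and new scripts collapse everything the backport branch accumulated into one commit on top of the release branch; a minimal standalone sketch (the target branch name is an assumed command-line argument):

```python
import subprocess
import sys

TARGET = sys.argv[1]  # e.g. "origin/22.7" - the branch the backport PR targets

def git(cmd: str) -> str:
    return subprocess.check_output(f"git {cmd}", shell=True, encoding="utf-8").strip()

# Move the branch pointer back to the fork point while keeping the combined
# diff staged, then record it as a single commit: history becomes exactly one
# commit on top of the target branch, which is what a backport PR should show.
merge_base = git(f"merge-base {TARGET} HEAD")
git(f"reset --soft {merge_base}")
git("commit --allow-empty -m 'Squashed backport commit'")
```
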
- self._gh.add_assignee(pr, self._pr["author"]) - self._gh.add_assignee(pr, self._pr["mergedBy"]) - - self._gh.set_label(pr, "pr-backport") - - return pr - - def execute(self, repo_path, dry_run=False): - pr1 = self.getCherryPickPullRequest() - if not pr1: - if not dry_run: - pr1 = self.createCherryPickPullRequest(repo_path) - logging.debug( - "Created PR with cherry-pick of %s to %s: %s", - self._pr["number"], - self.target_branch, - pr1["url"], - ) - else: - return CherryPick.Status.NOT_INITIATED - else: - logging.debug( - "Found PR with cherry-pick of %s to %s: %s", - self._pr["number"], - self.target_branch, - pr1["url"], - ) - - if not pr1["merged"] and pr1["mergeable"] == "MERGEABLE" and not pr1["closed"]: - if not dry_run: - pr1 = self.mergeCherryPickPullRequest(pr1) - logging.debug( - "Merged PR with cherry-pick of %s to %s: %s", - self._pr["number"], - self.target_branch, - pr1["url"], - ) - - if not pr1["merged"]: - logging.debug( - "Waiting for PR with cherry-pick of %s to %s: %s", - self._pr["number"], - self.target_branch, - pr1["url"], - ) - - if pr1["closed"]: - return CherryPick.Status.DISCARDED - elif pr1["mergeable"] == "CONFLICTING": - return CherryPick.Status.FIRST_CONFLICTS - else: - return CherryPick.Status.FIRST_MERGEABLE - - pr2 = self.getBackportPullRequest() - if not pr2: - if not dry_run: - pr2 = self.createBackportPullRequest(pr1, repo_path) - logging.debug( - "Created PR with backport of %s to %s: %s", - self._pr["number"], - self.target_branch, - pr2["url"], - ) - else: - return CherryPick.Status.FIRST_MERGEABLE - else: - logging.debug( - "Found PR with backport of %s to %s: %s", - self._pr["number"], - self.target_branch, - pr2["url"], - ) - - if pr2["merged"]: - return CherryPick.Status.MERGED - elif pr2["closed"]: - return CherryPick.Status.DISCARDED - elif pr2["mergeable"] == "CONFLICTING": - return CherryPick.Status.SECOND_CONFLICTS - else: - return CherryPick.Status.SECOND_MERGEABLE - - -if __name__ == "__main__": - logging.basicConfig(format="%(message)s", stream=sys.stdout, level=logging.DEBUG) - - parser = argparse.ArgumentParser() - parser.add_argument( - "--token", "-t", type=str, required=True, help="token for Github access" - ) - parser.add_argument("--pr", type=str, required=True, help="PR# to cherry-pick") - parser.add_argument( - "--branch", - "-b", - type=str, - required=True, - help="target branch name for cherry-pick", - ) - parser.add_argument( - "--repo", - "-r", - type=str, - required=True, - help="path to full repository", - metavar="PATH", - ) - args = parser.parse_args() - - cp = CherryPick( - args.token, "ClickHouse", "ClickHouse", "core", args.pr, args.branch - ) - cp.execute(args.repo) diff --git a/tests/ci/cherry_pick_utils/local.py b/tests/ci/cherry_pick_utils/local.py deleted file mode 100644 index 71923b63c35..00000000000 --- a/tests/ci/cherry_pick_utils/local.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- - -import functools -import logging -import os -import re - -import git - - -class RepositoryBase: - def __init__(self, repo_path): - - self._repo = git.Repo(repo_path, search_parent_directories=(not repo_path)) - - # comparator of commits - def cmp(x, y): - if str(x) == str(y): - return 0 - if self._repo.is_ancestor(x, y): - return -1 - else: - return 1 - - self.comparator = functools.cmp_to_key(cmp) - - def iterate(self, begin, end): - rev_range = f"{begin}...{end}" - for commit in self._repo.iter_commits(rev_range, first_parent=True): - yield commit - - -class Repository(RepositoryBase): - def __init__(self, 
repo_path, remote_name, default_branch_name): - super().__init__(repo_path) - self._remote = self._repo.remotes[remote_name] - self._remote.fetch() - self._default = self._remote.refs[default_branch_name] - - def get_head_commit(self): - return self._repo.commit(self._default) - - def get_release_branches(self): - """ - Returns sorted list of tuples: - * remote branch (git.refs.remote.RemoteReference), - * base commit (git.Commit), - * head (git.Commit)). - List is sorted by commits in ascending order. - """ - release_branches = [] - - RE_RELEASE_BRANCH_REF = re.compile(r"^refs/remotes/.+/\d+\.\d+$") - - for branch in [ - r for r in self._remote.refs if RE_RELEASE_BRANCH_REF.match(r.path) - ]: - base = self._repo.merge_base(self._default, self._repo.commit(branch)) - if not base: - logging.info( - "Branch %s is not based on branch %s. Ignoring.", - branch.path, - self._default, - ) - elif len(base) > 1: - logging.info( - "Branch %s has more than one base commit. Ignoring.", branch.path - ) - else: - release_branches.append((os.path.basename(branch.name), base[0])) - - return sorted(release_branches, key=lambda x: self.comparator(x[1])) - - -class BareRepository(RepositoryBase): - def __init__(self, repo_path, default_branch_name): - super().__init__(repo_path) - self._default = self._repo.branches[default_branch_name] - - def get_release_branches(self): - """ - Returns sorted list of tuples: - * branch (git.refs.head?), - * base commit (git.Commit), - * head (git.Commit)). - List is sorted by commits in ascending order. - """ - release_branches = [] - - RE_RELEASE_BRANCH_REF = re.compile(r"^refs/heads/\d+\.\d+$") - - for branch in [ - r for r in self._repo.branches if RE_RELEASE_BRANCH_REF.match(r.path) - ]: - base = self._repo.merge_base(self._default, self._repo.commit(branch)) - if not base: - logging.info( - "Branch %s is not based on branch %s. Ignoring.", - branch.path, - self._default, - ) - elif len(base) > 1: - logging.info( - "Branch %s has more than one base commit. 
Ignoring.", branch.path - ) - else: - release_branches.append((os.path.basename(branch.name), base[0])) - - return sorted(release_branches, key=lambda x: self.comparator(x[1])) diff --git a/tests/ci/cherry_pick_utils/parser.py b/tests/ci/cherry_pick_utils/parser.py deleted file mode 100644 index 29c05e5328f..00000000000 --- a/tests/ci/cherry_pick_utils/parser.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- - - -class Description: - """Parsed description representation""" - - MAP_CATEGORY_TO_LABEL = { - "New Feature": "pr-feature", - "Bug Fix": "pr-bugfix", - "Improvement": "pr-improvement", - "Performance Improvement": "pr-performance", - # 'Backward Incompatible Change': doesn't match anything - "Build/Testing/Packaging Improvement": "pr-build", - "Non-significant (changelog entry is not needed)": "pr-non-significant", - "Non-significant (changelog entry is not required)": "pr-non-significant", - "Non-significant": "pr-non-significant", - "Documentation (changelog entry is not required)": "pr-documentation", - # 'Other': doesn't match anything - } - - def __init__(self, pull_request): - self.label_name = str() - self._parse(pull_request["bodyText"]) - - def _parse(self, text): - lines = text.splitlines() - next_category = False - category = str() - - for line in lines: - stripped = line.strip() - - if not stripped: - continue - - if next_category: - category = stripped - next_category = False - - category_headers = ( - "Category (leave one):", - "Changelog category (leave one):", - "Changelog category:", - "Category:", - ) - - if stripped in category_headers: - next_category = True - - if category in Description.MAP_CATEGORY_TO_LABEL: - self.label_name = Description.MAP_CATEGORY_TO_LABEL[category] - else: - if not category: - print("Cannot find category in pr description") - else: - print(("Unknown category: " + category)) diff --git a/tests/ci/cherry_pick_utils/query.py b/tests/ci/cherry_pick_utils/query.py deleted file mode 100644 index 917f9901287..00000000000 --- a/tests/ci/cherry_pick_utils/query.py +++ /dev/null @@ -1,532 +0,0 @@ -# -*- coding: utf-8 -*- - -import json -import inspect -import logging -import time -from urllib3.util.retry import Retry # type: ignore - -import requests # type: ignore -from requests.adapters import HTTPAdapter # type: ignore - - -class Query: - """ - Implements queries to the Github API using GraphQL - """ - - _PULL_REQUEST = """ -author {{ - ... on User {{ - id - login - }} -}} - -baseRepository {{ - nameWithOwner -}} - -mergeCommit {{ - oid - parents(first: {min_page_size}) {{ - totalCount - nodes {{ - oid - }} - }} -}} - -mergedBy {{ - ... 
on User {{ - id - login - }} -}} - -baseRefName -closed -headRefName -id -mergeable -merged -number -title -url - """ - - def __init__(self, token, owner, name, team, max_page_size=100, min_page_size=10): - self._PULL_REQUEST = Query._PULL_REQUEST.format(min_page_size=min_page_size) - - self._token = token - self._owner = owner - self._name = name - self._team = team - self._session = None - - self._max_page_size = max_page_size - self._min_page_size = min_page_size - - self.api_costs = {} - - repo = self.get_repository() - self._id = repo["id"] - self.ssh_url = repo["sshUrl"] - self.default_branch = repo["defaultBranchRef"]["name"] - - self.members = set(self.get_members()) - - def get_repository(self): - _QUERY = """ -repository(owner: "{owner}" name: "{name}") {{ - defaultBranchRef {{ - name - }} - id - sshUrl -}} - """ - - query = _QUERY.format(owner=self._owner, name=self._name) - return self._run(query)["repository"] - - def get_members(self): - """Get all team members for organization - - Returns: - members: a map of members' logins to ids - """ - - _QUERY = """ -organization(login: "{organization}") {{ - team(slug: "{team}") {{ - members(first: {max_page_size} {next}) {{ - pageInfo {{ - hasNextPage - endCursor - }} - nodes {{ - id - login - }} - }} - }} -}} - """ - - members = {} - not_end = True - query = _QUERY.format( - organization=self._owner, - team=self._team, - max_page_size=self._max_page_size, - next="", - ) - - while not_end: - result = self._run(query)["organization"]["team"] - if result is None: - break - result = result["members"] - not_end = result["pageInfo"]["hasNextPage"] - query = _QUERY.format( - organization=self._owner, - team=self._team, - max_page_size=self._max_page_size, - next=f'after: "{result["pageInfo"]["endCursor"]}"', - ) - - # Update members with new nodes compatible with py3.8-py3.10 - members = { - **members, - **{node["login"]: node["id"] for node in result["nodes"]}, - } - - return members - - def get_pull_request(self, number): - _QUERY = """ -repository(owner: "{owner}" name: "{name}") {{ - pullRequest(number: {number}) {{ - {pull_request_data} - }} -}} - """ - - query = _QUERY.format( - owner=self._owner, - name=self._name, - number=number, - pull_request_data=self._PULL_REQUEST, - min_page_size=self._min_page_size, - ) - return self._run(query)["repository"]["pullRequest"] - - def find_pull_request(self, base, head): - _QUERY = """ -repository(owner: "{owner}" name: "{name}") {{ - pullRequests( - first: {min_page_size} baseRefName: "{base}" headRefName: "{head}" - ) {{ - nodes {{ - {pull_request_data} - }} - totalCount - }} -}} - """ - - query = _QUERY.format( - owner=self._owner, - name=self._name, - base=base, - head=head, - pull_request_data=self._PULL_REQUEST, - min_page_size=self._min_page_size, - ) - result = self._run(query)["repository"]["pullRequests"] - if result["totalCount"] > 0: - return result["nodes"][0] - else: - return {} - - def find_pull_requests(self, label_name): - """ - Get all pull-requests filtered by label name - """ - _QUERY = """ -repository(owner: "{owner}" name: "{name}") {{ - pullRequests(first: {min_page_size} labels: "{label_name}" states: OPEN) {{ - nodes {{ - {pull_request_data} - }} - }} -}} - """ - - query = _QUERY.format( - owner=self._owner, - name=self._name, - label_name=label_name, - pull_request_data=self._PULL_REQUEST, - min_page_size=self._min_page_size, - ) - return self._run(query)["repository"]["pullRequests"]["nodes"] - - def get_pull_requests(self, before_commit): - """ - Get all merged 
pull-requests from the HEAD of default branch to the last commit (excluding) - """ - - _QUERY = """ -repository(owner: "{owner}" name: "{name}") {{ - defaultBranchRef {{ - target {{ - ... on Commit {{ - history(first: {max_page_size} {next}) {{ - pageInfo {{ - hasNextPage - endCursor - }} - nodes {{ - oid - associatedPullRequests(first: {min_page_size}) {{ - totalCount - nodes {{ - ... on PullRequest {{ - {pull_request_data} - - labels(first: {min_page_size}) {{ - totalCount - pageInfo {{ - hasNextPage - endCursor - }} - nodes {{ - name - color - }} - }} - }} - }} - }} - }} - }} - }} - }} - }} -}} - """ - - pull_requests = [] - not_end = True - query = _QUERY.format( - owner=self._owner, - name=self._name, - max_page_size=self._max_page_size, - min_page_size=self._min_page_size, - pull_request_data=self._PULL_REQUEST, - next="", - ) - - while not_end: - result = self._run(query)["repository"]["defaultBranchRef"]["target"][ - "history" - ] - not_end = result["pageInfo"]["hasNextPage"] - query = _QUERY.format( - owner=self._owner, - name=self._name, - max_page_size=self._max_page_size, - min_page_size=self._min_page_size, - pull_request_data=self._PULL_REQUEST, - next=f'after: "{result["pageInfo"]["endCursor"]}"', - ) - - for commit in result["nodes"]: - # FIXME: maybe include `before_commit`? - if str(commit["oid"]) == str(before_commit): - not_end = False - break - - # TODO: fetch all pull-requests that were merged in a single commit. - assert ( - commit["associatedPullRequests"]["totalCount"] - <= self._min_page_size - ) - - for pull_request in commit["associatedPullRequests"]["nodes"]: - if ( - pull_request["baseRepository"]["nameWithOwner"] - == f"{self._owner}/{self._name}" - and pull_request["baseRefName"] == self.default_branch - and pull_request["mergeCommit"]["oid"] == commit["oid"] - ): - pull_requests.append(pull_request) - - return pull_requests - - def create_pull_request( - self, source, target, title, description="", draft=False, can_modify=True - ): - _QUERY = """ -createPullRequest(input: {{ - baseRefName: "{target}", - headRefName: "{source}", - repositoryId: "{id}", - title: "{title}", - body: "{body}", - draft: {draft}, - maintainerCanModify: {modify} -}}) {{ - pullRequest {{ - {pull_request_data} - }} -}} - """ - - query = _QUERY.format( - target=target, - source=source, - id=self._id, - title=title, - body=description, - draft="true" if draft else "false", - modify="true" if can_modify else "false", - pull_request_data=self._PULL_REQUEST, - ) - return self._run(query, is_mutation=True)["createPullRequest"]["pullRequest"] - - def merge_pull_request(self, pr_id): - _QUERY = """ -mergePullRequest(input: {{ - pullRequestId: "{pr_id}" -}}) {{ - pullRequest {{ - {pull_request_data} - }} -}} - """ - - query = _QUERY.format(pr_id=pr_id, pull_request_data=self._PULL_REQUEST) - return self._run(query, is_mutation=True)["mergePullRequest"]["pullRequest"] - - # FIXME: figure out how to add more assignees at once - def add_assignee(self, pr, assignee): - _QUERY = """ -addAssigneesToAssignable(input: {{ - assignableId: "{id1}", - assigneeIds: "{id2}" -}}) {{ - clientMutationId -}} - """ - - query = _QUERY.format(id1=pr["id"], id2=assignee["id"]) - self._run(query, is_mutation=True) - - def set_label(self, pull_request, label_name): - """ - Set label by name to the pull request - - Args: - pull_request: JSON object returned by `get_pull_requests()` - label_name (string): label name - """ - - _GET_LABEL = """ -repository(owner: "{owner}" name: "{name}") {{ - labels(first: 
{max_page_size} {next} query: "{label_name}") {{ - pageInfo {{ - hasNextPage - endCursor - }} - nodes {{ - id - name - color - }} - }} -}} - """ - - _SET_LABEL = """ -addLabelsToLabelable(input: {{ - labelableId: "{pr_id}", - labelIds: "{label_id}" -}}) {{ - clientMutationId -}} - """ - - labels = [] - not_end = True - query = _GET_LABEL.format( - owner=self._owner, - name=self._name, - label_name=label_name, - max_page_size=self._max_page_size, - next="", - ) - - while not_end: - result = self._run(query)["repository"]["labels"] - not_end = result["pageInfo"]["hasNextPage"] - query = _GET_LABEL.format( - owner=self._owner, - name=self._name, - label_name=label_name, - max_page_size=self._max_page_size, - next=f'after: "{result["pageInfo"]["endCursor"]}"', - ) - - labels += list(result["nodes"]) - - if not labels: - return - - query = _SET_LABEL.format(pr_id=pull_request["id"], label_id=labels[0]["id"]) - self._run(query, is_mutation=True) - - @property - def session(self): - if self._session is not None: - return self._session - retries = 5 - self._session = requests.Session() - retry = Retry( - total=retries, - read=retries, - connect=retries, - backoff_factor=1, - status_forcelist=(403, 500, 502, 504), - ) - adapter = HTTPAdapter(max_retries=retry) - self._session.mount("http://", adapter) - self._session.mount("https://", adapter) - return self._session - - def _run(self, query, is_mutation=False): - # Get caller and parameters from the stack to track the progress - frame = inspect.getouterframes(inspect.currentframe(), 2)[1] - caller = frame[3] - f_parameters = inspect.signature(getattr(self, caller)).parameters - parameters = ", ".join(str(frame[0].f_locals[p]) for p in f_parameters) - mutation = "" - if is_mutation: - mutation = ", is mutation" - print(f"---GraphQL request for {caller}({parameters}){mutation}---") - - headers = {"Authorization": f"bearer {self._token}"} - if is_mutation: - query = f""" -mutation {{ - {query} -}} - """ - else: - query = f""" -query {{ - {query} - rateLimit {{ - cost - remaining - }} -}} - """ - - def request_with_retry(retry=0): - max_retries = 5 - # From time to time we face some concrete errors, when it worth to - # retry instead of failing competely - # We should sleep progressively - progressive_sleep = 5 * sum(i + 1 for i in range(retry)) - if progressive_sleep: - logging.warning( - "Retry GraphQL request %s time, sleep %s seconds", - retry, - progressive_sleep, - ) - time.sleep(progressive_sleep) - response = self.session.post( - "https://api.github.com/graphql", json={"query": query}, headers=headers - ) - result = response.json() - if response.status_code == 200: - if "errors" in result: - raise Exception( - f"Errors occurred: {result['errors']}\nOriginal query: {query}" - ) - - if not is_mutation: - if caller not in self.api_costs: - self.api_costs[caller] = 0 - self.api_costs[caller] += result["data"]["rateLimit"]["cost"] - - return result["data"] - elif ( - response.status_code == 403 - and "secondary rate limit" in result["message"] - ): - if retry <= max_retries: - logging.warning("Secondary rate limit reached") - return request_with_retry(retry + 1) - elif response.status_code == 502 and "errors" in result: - too_many_data = any( - True - for err in result["errors"] - if "message" in err - and "This may be the result of a timeout" in err["message"] - ) - if too_many_data: - logging.warning( - "Too many data is requested, decreasing page size %s by 10%%", - self._max_page_size, - ) - self._max_page_size = int(self._max_page_size * 0.9) - 
return request_with_retry(retry) - - data = json.dumps(result, indent=4) - raise Exception(f"Query failed with code {response.status_code}:\n{data}") - - return request_with_retry() diff --git a/tests/ci/cherry_pick_utils/readme.md b/tests/ci/cherry_pick_utils/readme.md deleted file mode 100644 index 10ae9ca4b0b..00000000000 --- a/tests/ci/cherry_pick_utils/readme.md +++ /dev/null @@ -1,3 +0,0 @@ -# Some scripts for backports implementation - -TODO: Remove copy from utils/github diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py index a53ce6715d5..420ca7a0ff7 100644 --- a/tests/ci/commit_status_helper.py +++ b/tests/ci/commit_status_helper.py @@ -3,8 +3,9 @@ import time import os import csv -from env_helper import GITHUB_REPOSITORY +from env_helper import GITHUB_REPOSITORY, GITHUB_RUN_URL from ci_config import CI_CONFIG +from pr_info import SKIP_SIMPLE_CHECK_LABEL RETRY = 5 @@ -73,3 +74,28 @@ def post_labels(gh, pr_info, labels_names): pull_request = repo.get_pull(pr_info.number) for label in labels_names: pull_request.add_to_labels(label) + + +def fail_simple_check(gh, pr_info, description): + if SKIP_SIMPLE_CHECK_LABEL in pr_info.labels: + return + commit = get_commit(gh, pr_info.sha) + commit.create_status( + context="Simple Check", + description=description, + state="failure", + target_url=GITHUB_RUN_URL, + ) + + +def create_simple_check(gh, pr_info): + commit = get_commit(gh, pr_info.sha) + for status in commit.get_statuses(): + if "Simple Check" in status.context: + return + commit.create_status( + context="Simple Check", + description="Skipped", + state="success", + target_url=GITHUB_RUN_URL, + ) diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index ce5a4195ceb..2e4d54f34c2 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -8,13 +8,16 @@ import sys from github import Github -from env_helper import CACHES_PATH, TEMP_PATH, GITHUB_SERVER_URL, GITHUB_REPOSITORY -from pr_info import FORCE_TESTS_LABEL, PRInfo, SKIP_SIMPLE_CHECK_LABEL +from env_helper import CACHES_PATH, TEMP_PATH +from pr_info import FORCE_TESTS_LABEL, PRInfo from s3_helper import S3Helper from get_robot_token import get_best_robot_token from upload_result_helper import upload_results from docker_pull_helper import get_image_with_version -from commit_status_helper import post_commit_status, get_commit +from commit_status_helper import ( + post_commit_status, + fail_simple_check, +) from clickhouse_helper import ( ClickHouseHelper, mark_flaky_tests, @@ -219,16 +222,5 @@ if __name__ == "__main__": if FORCE_TESTS_LABEL in pr_info.labels and state != "error": print(f"'{FORCE_TESTS_LABEL}' enabled, will report success") else: - if SKIP_SIMPLE_CHECK_LABEL not in pr_info.labels: - url = ( - f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/" - "blob/master/.github/PULL_REQUEST_TEMPLATE.md?plain=1" - ) - commit = get_commit(gh, pr_info.sha) - commit.create_status( - context="Simple Check", - description=f"{NAME} failed", - state="failed", - target_url=url, - ) + fail_simple_check(gh, pr_info, f"{NAME} failed") sys.exit(1) diff --git a/tests/ci/git_helper.py b/tests/ci/git_helper.py index e3ad0eb39c0..77c2fc9cf05 100644 --- a/tests/ci/git_helper.py +++ b/tests/ci/git_helper.py @@ -1,10 +1,13 @@ #!/usr/bin/env python import argparse +import logging import os.path as p import re import subprocess from typing import List, Optional +logger = logging.getLogger(__name__) + # ^ and $ match subline in `multiple\nlines` # \A and \Z match only start and end of the 
whole string RELEASE_BRANCH_REGEXP = r"\A\d+[.]\d+\Z" @@ -55,6 +58,7 @@ class Runner: def run(self, cmd: str, cwd: Optional[str] = None, **kwargs) -> str: if cwd is None: cwd = self.cwd + logger.debug("Running command: %s", cmd) return subprocess.check_output( cmd, shell=True, cwd=cwd, encoding="utf-8", **kwargs ).strip() @@ -70,6 +74,9 @@ class Runner: return self._cwd = value + def __call__(self, *args, **kwargs): + return self.run(*args, **kwargs) + git_runner = Runner() # Set cwd to abs path of git root @@ -109,8 +116,8 @@ class Git: def update(self): """Is used to refresh all attributes after updates, e.g. checkout or commit""" - self.branch = self.run("git branch --show-current") self.sha = self.run("git rev-parse HEAD") + self.branch = self.run("git branch --show-current") or self.sha self.sha_short = self.sha[:11] # The following command shows the most recent tag in a graph # Format should match TAG_REGEXP diff --git a/tests/ci/github_helper.py b/tests/ci/github_helper.py new file mode 100644 index 00000000000..46cf7d2b726 --- /dev/null +++ b/tests/ci/github_helper.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python +"""Helper for GitHub API requests""" +import logging +from datetime import date, datetime, timedelta +from pathlib import Path +from os import path as p +from time import sleep +from typing import List, Optional + +import github +from github.GithubException import RateLimitExceededException +from github.Issue import Issue +from github.PullRequest import PullRequest +from github.Repository import Repository + +CACHE_PATH = p.join(p.dirname(p.realpath(__file__)), "gh_cache") + +logger = logging.getLogger(__name__) + +PullRequests = List[PullRequest] +Issues = List[Issue] + + +class GitHub(github.Github): + def __init__(self, *args, **kwargs): + # Define meta attribute + self._cache_path = Path(CACHE_PATH) + # And set Path + super().__init__(*args, **kwargs) + self._retries = 0 + + # pylint: disable=signature-differs + def search_issues(self, *args, **kwargs) -> Issues: # type: ignore + """Wrapper around search method with throttling and splitting by date. + + We split only by the first""" + splittable = False + for arg, value in kwargs.items(): + if arg in ["closed", "created", "merged", "updated"]: + if hasattr(value, "__iter__") and not isinstance(value, str): + assert [True for v in value if isinstance(v, (date, datetime))] + assert len(value) == 2 + kwargs[arg] = f"{value[0].isoformat()}..{value[1].isoformat()}" + if not splittable: + # We split only by the first met splittable argument + preserved_arg = arg + preserved_value = value + middle_value = value[0] + (value[1] - value[0]) / 2 + splittable = middle_value not in value + continue + assert isinstance(value, (date, datetime, str)) + + inter_result = [] # type: Issues + for i in range(self.retries): + try: + logger.debug("Search issues, args=%s, kwargs=%s", args, kwargs) + result = super().search_issues(*args, **kwargs) + if result.totalCount == 1000 and splittable: + # The hard limit is 1000. 
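The splitting logic boils down to halving a date interval until every sub-range stays under the search cap; a standalone sketch of that recursion over plain dates, where `count` stands in for whatever search call enforces the limit (both names are illustrative):

```python
from datetime import date, timedelta
from typing import Callable, List, Tuple

Range = Tuple[date, date]

def split_ranges(rng: Range, count: Callable[[Range], int], limit: int = 1000) -> List[Range]:
    """Recursively split [start, end] until each part returns fewer than `limit` hits."""
    start, end = rng
    middle = start + (end - start) / 2
    if count(rng) < limit or middle in (start, end):
        return [rng]  # small enough, or cannot be split any further
    # Query [start, middle] and [middle + 1 day, end] so the middle day is not
    # requested twice, mirroring the adjustment done in search_issues above.
    return split_ranges((start, middle), count, limit) + split_ranges(
        (middle + timedelta(days=1), end), count, limit
    )

# Example: pretend any range wider than a week overflows the cap.
ranges = split_ranges(
    (date(2022, 7, 1), date(2022, 7, 31)),
    lambda r: 1500 if (r[1] - r[0]).days > 7 else 10,
)
print(ranges)  # four non-overlapping sub-ranges covering July 2022
```
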
If it's splittable, then we make + # two subrequests requests with less time frames + logger.debug( + "The search result contain exactly 1000 results, " + "splitting %s=%s by middle point %s", + preserved_arg, + kwargs[preserved_arg], + middle_value, + ) + kwargs[preserved_arg] = [preserved_value[0], middle_value] + inter_result.extend(self.search_issues(*args, **kwargs)) + if isinstance(middle_value, date): + # When middle_value is a date, 2022-01-01..2022-01-03 + # is split to 2022-01-01..2022-01-02 and + # 2022-01-02..2022-01-03, so we have results for + # 2022-01-02 twicely. We split it to + # 2022-01-01..2022-01-02 and 2022-01-03..2022-01-03. + # 2022-01-01..2022-01-02 aren't split, see splittable + middle_value += timedelta(days=1) + kwargs[preserved_arg] = [middle_value, preserved_value[1]] + inter_result.extend(self.search_issues(*args, **kwargs)) + return inter_result + + inter_result.extend(result) + return inter_result + except RateLimitExceededException as e: + if i == self.retries - 1: + exception = e + self.sleep_on_rate_limit() + + raise exception + + # pylint: enable=signature-differs + def get_pulls_from_search(self, *args, **kwargs) -> PullRequests: + """The search api returns actually issues, so we need to fetch PullRequests""" + issues = self.search_issues(*args, **kwargs) + repos = {} + prs = [] # type: PullRequests + for issue in issues: + # See https://github.com/PyGithub/PyGithub/issues/2202, + # obj._rawData doesn't spend additional API requests + # pylint: disable=protected-access + repo_url = issue._rawData["repository_url"] # type: ignore + if repo_url not in repos: + repos[repo_url] = issue.repository + prs.append( + self.get_pull_cached(repos[repo_url], issue.number, issue.updated_at) + ) + return prs + + def sleep_on_rate_limit(self): + for limit, data in self.get_rate_limit().raw_data.items(): + if data["remaining"] == 0: + sleep_time = data["reset"] - int(datetime.now().timestamp()) + 1 + if sleep_time > 0: + logger.warning( + "Faced rate limit for '%s' requests type, sleeping %s", + limit, + sleep_time, + ) + sleep(sleep_time) + return + + def get_pull_cached( + self, repo: Repository, number: int, updated_at: Optional[datetime] = None + ) -> PullRequest: + pr_cache_file = self.cache_path / f"{number}.pickle" + if updated_at is None: + updated_at = datetime.now() - timedelta(hours=-1) + + def _get_pr(path: Path) -> PullRequest: + with open(path, "rb") as prfd: + return self.load(prfd) # type: ignore + + if pr_cache_file.is_file(): + cached_pr = _get_pr(pr_cache_file) + if updated_at <= cached_pr.updated_at: + logger.debug("Getting PR #%s from cache", number) + return cached_pr + logger.debug("Getting PR #%s from API", number) + for i in range(self.retries): + try: + pr = repo.get_pull(number) + break + except RateLimitExceededException: + if i == self.retries - 1: + raise + self.sleep_on_rate_limit() + logger.debug("Caching PR #%s from API in %s", number, pr_cache_file) + with open(pr_cache_file, "wb") as prfd: + self.dump(pr, prfd) # type: ignore + return pr + + @property + def cache_path(self): + return self._cache_path + + @cache_path.setter + def cache_path(self, value: str): + self._cache_path = Path(value) + if self._cache_path.exists(): + assert self._cache_path.is_dir() + else: + self._cache_path.mkdir(parents=True) + + @property + def retries(self): + if self._retries == 0: + self._retries = 3 + return self._retries + + @retries.setter + def retries(self, value: int): + self._retries = value diff --git a/tests/ci/rerun_helper.py 
b/tests/ci/rerun_helper.py index 35363593db6..0d523640f56 100644 --- a/tests/ci/rerun_helper.py +++ b/tests/ci/rerun_helper.py @@ -36,3 +36,9 @@ class RerunHelper: ): return True return False + + def get_finished_status(self): + for status in self.statuses: + if self.check_name in status.context: + return status + return None diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py index b6d654c7bed..a39d97ce81d 100644 --- a/tests/ci/run_check.py +++ b/tests/ci/run_check.py @@ -6,7 +6,12 @@ from typing import Tuple from github import Github -from commit_status_helper import get_commit, post_labels, remove_labels +from commit_status_helper import ( + get_commit, + post_labels, + remove_labels, + create_simple_check, +) from env_helper import GITHUB_RUN_URL, GITHUB_REPOSITORY, GITHUB_SERVER_URL from get_robot_token import get_best_robot_token from pr_info import FORCE_TESTS_LABEL, PRInfo @@ -223,12 +228,7 @@ if __name__ == "__main__": if pr_labels_to_remove: remove_labels(gh, pr_info, pr_labels_to_remove) - commit.create_status( - context="Simple Check", - description="Skipped", - state="success", - target_url=GITHUB_RUN_URL, - ) + create_simple_check(gh, pr_info) if description_error: print( diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py index 84ed9e5a124..dd63909ad39 100644 --- a/tests/ci/style_check.py +++ b/tests/ci/style_check.py @@ -1,31 +1,29 @@ #!/usr/bin/env python3 -import logging -import subprocess -import os +import argparse import csv +import logging +import os +import subprocess import sys -from github import Github -from env_helper import ( - RUNNER_TEMP, - GITHUB_WORKSPACE, - GITHUB_REPOSITORY, - GITHUB_SERVER_URL, -) -from s3_helper import S3Helper -from pr_info import PRInfo, SKIP_SIMPLE_CHECK_LABEL -from get_robot_token import get_best_robot_token -from upload_result_helper import upload_results -from docker_pull_helper import get_image_with_version -from commit_status_helper import post_commit_status, get_commit from clickhouse_helper import ( ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse, ) -from stopwatch import Stopwatch +from commit_status_helper import fail_simple_check, post_commit_status +from docker_pull_helper import get_image_with_version +from env_helper import GITHUB_WORKSPACE, RUNNER_TEMP +from get_robot_token import get_best_robot_token +from github_helper import GitHub +from git_helper import git_runner +from pr_info import PRInfo from rerun_helper import RerunHelper +from s3_helper import S3Helper +from ssh import SSHKey +from stopwatch import Stopwatch +from upload_result_helper import upload_results NAME = "Style Check (actions)" @@ -57,7 +55,8 @@ def process_result(result_folder): try: results_path = os.path.join(result_folder, "test_results.tsv") - test_results = list(csv.reader(open(results_path, "r"), delimiter="\t")) + with open(results_path, "r", encoding="utf-8") as fd: + test_results = list(csv.reader(fd, delimiter="\t")) if len(test_results) == 0: raise Exception("Empty results") @@ -68,8 +67,77 @@ def process_result(result_folder): return state, description, test_results, additional_files +def parse_args(): + parser = argparse.ArgumentParser("Check and report style issues in the repository") + parser.add_argument("--push", default=True, help=argparse.SUPPRESS) + parser.add_argument( + "--no-push", + action="store_false", + dest="push", + help="do not commit and push automatic fixes", + default=argparse.SUPPRESS, + ) + return parser.parse_args() + + +def checkout_head(pr_info: PRInfo): + # It works 
ONLY for PRs, and only over ssh, so either + # ROBOT_CLICKHOUSE_SSH_KEY should be set or ssh-agent should work + assert pr_info.number + if not pr_info.head_name == pr_info.base_name: + # We can't push to forks, sorry folks + return + remote_url = pr_info.event["pull_request"]["base"]["repo"]["ssh_url"] + git_prefix = ( # All commits to remote are done as robot-clickhouse + "git -c user.email=robot-clickhouse@clickhouse.com " + "-c user.name=robot-clickhouse -c commit.gpgsign=false " + "-c core.sshCommand=" + "'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'" + ) + fetch_cmd = ( + f"{git_prefix} fetch --depth=1 " + f"{remote_url} {pr_info.head_ref}:head-{pr_info.head_ref}" + ) + if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""): + with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): + git_runner(fetch_cmd) + else: + git_runner(fetch_cmd) + git_runner(f"git checkout -f head-{pr_info.head_ref}") + + +def commit_push_staged(pr_info: PRInfo): + # It works ONLY for PRs, and only over ssh, so either + # ROBOT_CLICKHOUSE_SSH_KEY should be set or ssh-agent should work + assert pr_info.number + if not pr_info.head_name == pr_info.base_name: + # We can't push to forks, sorry folks + return + git_staged = git_runner("git diff --cached --name-only") + if not git_staged: + return + remote_url = pr_info.event["pull_request"]["base"]["repo"]["ssh_url"] + git_prefix = ( # All commits to remote are done as robot-clickhouse + "git -c user.email=robot-clickhouse@clickhouse.com " + "-c user.name=robot-clickhouse -c commit.gpgsign=false " + "-c core.sshCommand=" + "'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'" + ) + git_runner(f"{git_prefix} commit -m 'Automatic style fix'") + push_cmd = ( + f"{git_prefix} push {remote_url} head-{pr_info.head_ref}:{pr_info.head_ref}" + ) + if os.getenv("ROBOT_CLICKHOUSE_SSH_KEY", ""): + with SSHKey("ROBOT_CLICKHOUSE_SSH_KEY"): + git_runner(push_cmd) + else: + git_runner(push_cmd) + + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) + logging.getLogger("git_helper").setLevel(logging.DEBUG) + args = parse_args() stopwatch = Stopwatch() @@ -77,8 +145,10 @@ if __name__ == "__main__": temp_path = os.path.join(RUNNER_TEMP, "style_check") pr_info = PRInfo() + if args.push: + checkout_head(pr_info) - gh = Github(get_best_robot_token()) + gh = GitHub(get_best_robot_token()) rerun_helper = RerunHelper(gh, pr_info, NAME) if rerun_helper.is_already_finished_by_status(): @@ -103,6 +173,9 @@ if __name__ == "__main__": shell=True, ) + if args.push: + commit_push_staged(pr_info) + state, description, test_results, additional_files = process_result(temp_path) ch_helper = ClickHouseHelper() mark_flaky_tests(ch_helper, NAME, test_results) @@ -110,7 +183,7 @@ if __name__ == "__main__": report_url = upload_results( s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME ) - print("::notice ::Report url: {}".format(report_url)) + print(f"::notice ::Report url: {report_url}") post_commit_status(gh, pr_info.sha, NAME, description, state, report_url) prepared_events = prepare_tests_results_for_clickhouse( @@ -124,17 +197,6 @@ if __name__ == "__main__": ) ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) - if state == "error": - if SKIP_SIMPLE_CHECK_LABEL not in pr_info.labels: - url = ( - f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/" - "blob/master/.github/PULL_REQUEST_TEMPLATE.md?plain=1" - ) - commit = get_commit(gh, pr_info.sha) - commit.create_status( - context="Simple Check", - description=f"{NAME} failed", 
- state="failed", - target_url=url, - ) + if state in ["error", "failure"]: + fail_simple_check(gh, pr_info, f"{NAME} failed") sys.exit(1) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index cab6daf3a50..e060535c1ae 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -426,6 +426,27 @@ class SettingsRandomizer: "read_in_order_two_level_merge_threshold": lambda: random.randint(0, 100), "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), + "use_uncompressed_cache": lambda: random.randint(0, 1), + "min_bytes_to_use_direct_io": lambda: 0 + if random.random() < 0.5 + else 1 + if random.random() < 0.2 + else random.randint(1, 1024 * 1024 * 1024), + "min_bytes_to_use_mmap_io": lambda: 0 + if random.random() < 0.5 + else 1 + if random.random() < 0.2 + else random.randint(1, 1024 * 1024 * 1024), + "local_filesystem_read_method": lambda: random.choice( + ["read", "pread", "mmap", "pread_threadpool"] + ), + "remote_filesystem_read_method": lambda: random.choice(["read", "threadpool"]), + "local_filesystem_read_prefetch": lambda: random.randint(0, 1), + "remote_filesystem_read_prefetch": lambda: random.randint(0, 1), + "compile_expressions": lambda: random.randint(0, 1), + "compile_aggregate_expressions": lambda: random.randint(0, 1), + "compile_sort_description": lambda: random.randint(0, 1), + "merge_tree_coarse_index_granularity": lambda: random.randint(2, 32), } @staticmethod @@ -710,7 +731,9 @@ class TestCase: return None - def process_result_impl(self, proc, stdout: str, stderr: str, debug_log: str, total_time: float): + def process_result_impl( + self, proc, stdout: str, stderr: str, debug_log: str, total_time: float + ): description = "" if proc: @@ -1038,7 +1061,9 @@ class TestCase: server_logs_level, client_options ) - result = self.process_result_impl(proc, stdout, stderr, debug_log, total_time) + result = self.process_result_impl( + proc, stdout, stderr, debug_log, total_time + ) result.check_if_need_retry(args, stdout, stderr, self.runs_count) if result.status == TestStatus.FAIL: result.description = self.add_info_about_settings( @@ -1530,7 +1555,8 @@ def collect_build_flags(args): result.append(BuildFlags.RELEASE) value = clickhouse_execute( - args, "SELECT value FROM system.settings WHERE name = 'allow_deprecated_database_ordinary'" + args, + "SELECT value FROM system.settings WHERE name = 'allow_deprecated_database_ordinary'", ) if value == b"1": result.append(BuildFlags.ORDINARY_DATABASE) @@ -1634,7 +1660,9 @@ def do_run_tests(jobs, test_suite: TestSuite, parallel): queue.close() except Full: - print("Couldn't put test to the queue within timeout. Server probably hung.") + print( + "Couldn't put test to the queue within timeout. Server probably hung." 
+ ) print_stacktraces() queue.close() @@ -2071,7 +2099,7 @@ if __name__ == "__main__": group.add_argument( "--backward-compatibility-check", action="store_true", - help="Run tests for further backwoard compatibility testing by ignoring all" + help="Run tests for further backward compatibility testing by ignoring all" "drop queries in tests for collecting data from new version of server", ) parser.add_argument( diff --git a/tests/config/config.d/enable_access_control_improvements.xml b/tests/config/config.d/enable_access_control_improvements.xml index 052858a9519..5a186548098 100644 --- a/tests/config/config.d/enable_access_control_improvements.xml +++ b/tests/config/config.d/enable_access_control_improvements.xml @@ -2,5 +2,7 @@ true true + true + true diff --git a/tests/fuzz/all.dict b/tests/fuzz/all.dict index 356428a0b86..dff62cd68a7 100644 --- a/tests/fuzz/all.dict +++ b/tests/fuzz/all.dict @@ -898,6 +898,7 @@ "parseDateTimeBestEffortUS" "parseDateTimeBestEffortUSOrNull" "parseDateTimeBestEffortUSOrZero" +"parseTimeDelta" "PARTITION" "PARTITION BY" "partitionId" diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index 1bdaed5ee1b..cbcad3c05da 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -68,6 +68,7 @@ "reinterpretAsUInt8" "atanh" "formatReadableTimeDelta" +"parseTimeDelta" "geohashEncode" "atan2" "acos" diff --git a/tests/integration/README.md b/tests/integration/README.md index 2d44ff70861..18d46908524 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -44,7 +44,9 @@ sudo -H pip install \ dict2xml \ hypothesis \ pyhdfs \ - pika + pika \ + meilisearch \ + nats-py ``` (highly not recommended) If you really want to use OS packages on modern debian/ubuntu instead of "pip": `sudo apt install -y docker docker-compose python3-pytest python3-dicttoxml python3-docker python3-pymysql python3-protobuf python3-pymongo python3-tzlocal python3-kazoo python3-psycopg2 kafka-python python3-pytest-timeout python3-minio` diff --git a/tests/integration/helpers/0_common_instance_config.xml b/tests/integration/helpers/0_common_instance_config.xml index b6ea21648bb..64f0ce9e361 100644 --- a/tests/integration/helpers/0_common_instance_config.xml +++ b/tests/integration/helpers/0_common_instance_config.xml @@ -21,5 +21,8 @@ true + true + true + true diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 219bc830a07..7700fc2dffd 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -237,6 +237,18 @@ def enable_consistent_hash_plugin(rabbitmq_id): return p.returncode == 0 +def extract_test_name(base_path): + """Extracts the name of the test based on the path to its test*.py file. + Must be unique in each test directory (because it's used to make the instances dir and to stop docker containers from a previous run) + """ + name = p.basename(base_path) + if name == "test.py": + name = "" + elif name.startswith("test_") and name.endswith(".py"): + name = name[len("test_") : (len(name) - len(".py"))] + return name + + def get_instances_dir(): if ( "INTEGRATION_TESTS_RUN_ID" in os.environ @@ -274,7 +286,7 @@ class ClickHouseCluster: logging.debug("ENV %40s %s" % (param, os.environ[param])) self.base_path = base_path self.base_dir = p.dirname(base_path) - self.name = name if name is not None else "" + self.name = name if name is not None else extract_test_name(base_path) self.base_config_dir = base_config_dir or
os.environ.get( "CLICKHOUSE_TESTS_BASE_CONFIG_DIR", "/etc/clickhouse-server/" @@ -373,6 +385,7 @@ class ClickHouseCluster: self.with_jdbc_bridge = False self.with_nginx = False self.with_hive = False + self.with_coredns = False self.with_minio = False self.minio_dir = os.path.join(self.instances_dir, "minio") @@ -416,6 +429,8 @@ class ClickHouseCluster: self.schema_registry_port = get_free_port() self.kafka_docker_id = self.get_instance_docker_id(self.kafka_host) + self.coredns_host = "coredns" + # available when with_kerberozed_kafka == True self.kerberized_kafka_host = "kerberized_kafka1" self.kerberized_kafka_port = get_free_port() @@ -1090,6 +1105,25 @@ class ClickHouseCluster: ] return self.base_mongo_cmd + def setup_coredns_cmd(self, instance, env_variables, docker_compose_yml_dir): + self.with_coredns = True + env_variables["COREDNS_CONFIG_DIR"] = instance.path + "/" + "coredns_config" + self.base_cmd.extend( + ["--file", p.join(docker_compose_yml_dir, "docker_compose_coredns.yml")] + ) + + self.base_coredns_cmd = [ + "docker-compose", + "--env-file", + instance.env_file, + "--project-name", + self.project_name, + "--file", + p.join(docker_compose_yml_dir, "docker_compose_coredns.yml"), + ] + + return self.base_coredns_cmd + def setup_meili_cmd(self, instance, env_variables, docker_compose_yml_dir): self.with_meili = True env_variables["MEILI_HOST"] = self.meili_host @@ -1253,6 +1287,7 @@ class ClickHouseCluster: with_cassandra=False, with_jdbc_bridge=False, with_hive=False, + with_coredns=False, hostname=None, env_variables=None, image="clickhouse/integration-test", @@ -1337,6 +1372,7 @@ class ClickHouseCluster: with_cassandra=with_cassandra, with_jdbc_bridge=with_jdbc_bridge, with_hive=with_hive, + with_coredns=with_coredns, server_bin_path=self.server_bin_path, odbc_bridge_bin_path=self.odbc_bridge_bin_path, library_bridge_bin_path=self.library_bridge_bin_path, @@ -1501,6 +1537,11 @@ class ClickHouseCluster: ) ) + if with_coredns and not self.with_coredns: + cmds.append( + self.setup_coredns_cmd(instance, env_variables, docker_compose_yml_dir) + ) + if with_meili and not self.with_meili: cmds.append( self.setup_meili_cmd(instance, env_variables, docker_compose_yml_dir) @@ -1617,6 +1658,16 @@ class ClickHouseCluster: "IPAddress" ] + def get_instance_global_ipv6(self, instance_name): + logging.debug("get_instance_ip instance_name={}".format(instance_name)) + docker_id = self.get_instance_docker_id(instance_name) + # for cont in self.docker_client.containers.list(): + # logging.debug("CONTAINERS LIST: ID={} NAME={} STATUS={}".format(cont.id, cont.name, cont.status)) + handle = self.docker_client.containers.get(docker_id) + return list(handle.attrs["NetworkSettings"]["Networks"].values())[0][ + "GlobalIPv6Address" + ] + def get_container_id(self, instance_name): return self.get_instance_docker_id(instance_name) # docker_id = self.get_instance_docker_id(instance_name) @@ -2441,6 +2492,12 @@ class ClickHouseCluster: self.up_called = True self.wait_mongo_to_start(30, secure=self.with_mongo_secure) + if self.with_coredns and self.base_coredns_cmd: + logging.debug("Setup coredns") + run_and_check(self.base_coredns_cmd + common_opts) + self.up_called = True + time.sleep(10) + if self.with_meili and self.base_meili_cmd: logging.debug("Setup MeiliSearch") run_and_check(self.base_meili_cmd + common_opts) @@ -2779,6 +2836,7 @@ class ClickHouseInstance: with_azurite, with_jdbc_bridge, with_hive, + with_coredns, with_cassandra, server_bin_path, odbc_bridge_bin_path, @@ -2862,6 +2920,8 @@ class 
ClickHouseInstance: self.with_cassandra = with_cassandra self.with_jdbc_bridge = with_jdbc_bridge self.with_hive = with_hive + self.with_coredns = with_coredns + self.coredns_config_dir = p.abspath(p.join(base_path, "coredns_config")) self.main_config_name = main_config_name self.users_config_name = users_config_name @@ -3771,6 +3831,11 @@ class ClickHouseInstance: self.kerberos_secrets_dir, p.abspath(p.join(self.path, "secrets")) ) + if self.with_coredns: + shutil.copytree( + self.coredns_config_dir, p.abspath(p.join(self.path, "coredns_config")) + ) + # Copy config.d configs logging.debug( f"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}" diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index 3c638efe7cf..e490634e552 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -1,4 +1,5 @@ import pytest +import asyncio import re import os.path from helpers.cluster import ClickHouseCluster @@ -323,6 +324,42 @@ def test_async(): assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" +@pytest.mark.parametrize("interface", ["native", "http"]) +def test_async_backups_to_same_destination(interface): + create_and_fill_table() + backup_name = new_backup_name() + + ids = [] + for _ in range(2): + if interface == "http": + res = instance.http_query(f"BACKUP TABLE test.table TO {backup_name} ASYNC") + else: + res = instance.query(f"BACKUP TABLE test.table TO {backup_name} ASYNC") + ids.append(res.split("\t")[0]) + + [id1, id2] = ids + + assert_eq_with_retry( + instance, + f"SELECT count() FROM system.backups WHERE uuid IN ['{id1}', '{id2}'] AND status != 'BACKUP_COMPLETE' AND status != 'FAILED_TO_BACKUP'", + "0\n", + ) + + assert ( + instance.query(f"SELECT status FROM system.backups WHERE uuid='{id1}'") + == "BACKUP_COMPLETE\n" + ) + + assert ( + instance.query(f"SELECT status FROM system.backups WHERE uuid='{id2}'") + == "FAILED_TO_BACKUP\n" + ) + + instance.query("DROP TABLE test.table") + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + def test_empty_files_in_backup(): instance.query("CREATE DATABASE test") instance.query( diff --git a/tests/integration/test_backup_restore_on_cluster/test.py b/tests/integration/test_backup_restore_on_cluster/test.py index 8ba06d9a88c..438ab87b5c7 100644 --- a/tests/integration/test_backup_restore_on_cluster/test.py +++ b/tests/integration/test_backup_restore_on_cluster/test.py @@ -423,6 +423,63 @@ def test_replicated_database_async(): assert node2.query("SELECT * FROM mydb.tbl2 ORDER BY y") == TSV(["a", "bb"]) +@pytest.mark.parametrize( + "interface, on_cluster", [("native", True), ("http", True), ("http", False)] +) +def test_async_backups_to_same_destination(interface, on_cluster): + node1.query( + "CREATE TABLE tbl ON CLUSTER 'cluster' (" + "x UInt8" + ") ENGINE=ReplicatedMergeTree('/clickhouse/tables/tbl/', '{replica}')" + "ORDER BY x" + ) + + node1.query("INSERT INTO tbl VALUES (1)") + + backup_name = new_backup_name() + + ids = [] + nodes = [node1, node2] + on_cluster_part = "ON CLUSTER 'cluster'" if on_cluster else "" + for node in nodes: + if interface == "http": + res = node.http_query( + f"BACKUP TABLE tbl {on_cluster_part} TO {backup_name} ASYNC" + ) + else: + res = node.query( + f"BACKUP TABLE tbl {on_cluster_part} TO {backup_name} ASYNC" + ) + 
ids.append(res.split("\t")[0]) + + [id1, id2] = ids + + for i in range(len(nodes)): + assert_eq_with_retry( + nodes[i], + f"SELECT count() FROM system.backups WHERE uuid='{ids[i]}' AND status != 'BACKUP_COMPLETE' AND status != 'FAILED_TO_BACKUP'", + "0\n", + ) + + num_completed_backups = sum( + [ + int( + nodes[i] + .query( + f"SELECT count() FROM system.backups WHERE uuid='{ids[i]}' AND status == 'BACKUP_COMPLETE'" + ) + .strip() + ) + for i in range(len(nodes)) + ] + ) + + assert num_completed_backups == 1 + node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY") + node1.query(f"RESTORE TABLE tbl FROM {backup_name}") + assert node1.query("SELECT * FROM tbl") == "1\n" + + def test_required_privileges(): node1.query( "CREATE TABLE tbl ON CLUSTER 'cluster' (" @@ -434,6 +491,7 @@ def test_required_privileges(): node1.query("INSERT INTO tbl VALUES (100)") node1.query("CREATE USER u1") + node1.query("GRANT CLUSTER ON *.* TO u1") backup_name = new_backup_name() expected_error = "necessary to have grant BACKUP ON default.tbl" @@ -478,6 +536,7 @@ def test_system_users(): backup_name = new_backup_name() node1.query("CREATE USER u2 SETTINGS allow_backup=false") + node1.query("GRANT CLUSTER ON *.* TO u2") expected_error = "necessary to have grant BACKUP ON system.users" assert expected_error in node1.query_and_get_error( diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py index 35cdaeef9ac..01c9736c354 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="aggregate_fixed_key") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=True, diff --git a/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py b/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py index 13dd28ee8af..1e54e6220d7 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_function_state_avg.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="aggregate_state") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=False, diff --git a/tests/integration/test_backward_compatibility/test_convert_ordinary.py b/tests/integration/test_backward_compatibility/test_convert_ordinary.py index 59ceca23a51..c509dade0b8 100644 --- a/tests/integration/test_backward_compatibility/test_convert_ordinary.py +++ b/tests/integration/test_backward_compatibility/test_convert_ordinary.py @@ -1,7 +1,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="convert_ordinary") +cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", image="yandex/clickhouse-server", diff --git a/tests/integration/test_backward_compatibility/test_cte_distributed.py b/tests/integration/test_backward_compatibility/test_cte_distributed.py index 89a565b4b37..7ea0d2d9f21 100644 --- a/tests/integration/test_backward_compatibility/test_cte_distributed.py +++ b/tests/integration/test_backward_compatibility/test_cte_distributed.py @@ -2,7 +2,7 @@ import pytest from 
helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="cte_distributed") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=False) node2 = cluster.add_instance( "node2", diff --git a/tests/integration/test_backward_compatibility/test_data_skipping_indices.py b/tests/integration/test_backward_compatibility/test_data_skipping_indices.py index 60d709c257f..c65dc6d3841 100644 --- a/tests/integration/test_backward_compatibility/test_data_skipping_indices.py +++ b/tests/integration/test_backward_compatibility/test_data_skipping_indices.py @@ -5,7 +5,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="skipping_indices") +cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", image="yandex/clickhouse-server", diff --git a/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py b/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py index cb9929db48b..02fccfae4e5 100644 --- a/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py +++ b/tests/integration/test_backward_compatibility/test_detach_part_wrong_partition_id.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="detach") +cluster = ClickHouseCluster(__file__) # Version 21.6.3.14 has incompatible partition id for tables with UUID in partition key. node_21_6 = cluster.add_instance( "node_21_6", diff --git a/tests/integration/test_backward_compatibility/test_insert_profile_events.py b/tests/integration/test_backward_compatibility/test_insert_profile_events.py index 8047c088e4c..0fd453e57d4 100644 --- a/tests/integration/test_backward_compatibility/test_insert_profile_events.py +++ b/tests/integration/test_backward_compatibility/test_insert_profile_events.py @@ -6,7 +6,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="insert_profile_events") +cluster = ClickHouseCluster(__file__) upstream_node = cluster.add_instance("upstream_node") old_node = cluster.add_instance( "old_node", diff --git a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py index e98894d887a..8bdae54a889 100644 --- a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py +++ b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="aggregate_alias_column") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=False) node2 = cluster.add_instance( "node2", diff --git a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py index 8053ad417ec..17a7282b7b5 100644 --- a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py +++ b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="short_strings") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=False, diff --git 
a/tests/integration/test_broken_detached_part_clean_up/test.py b/tests/integration/test_broken_detached_part_clean_up/test.py index 019ae3d52a1..1321778f2ca 100644 --- a/tests/integration/test_broken_detached_part_clean_up/test.py +++ b/tests/integration/test_broken_detached_part_clean_up/test.py @@ -197,7 +197,10 @@ def test_store_cleanup(started_cluster): node1.exec_in_container(["mkdir", f"{path_to_data}/store/kek"]) node1.exec_in_container(["touch", f"{path_to_data}/store/12"]) - node1.exec_in_container(["mkdir", f"{path_to_data}/store/456"]) + try: + node1.exec_in_container(["mkdir", f"{path_to_data}/store/456"]) + except Exception as e: + print("Failed to create 456/:", str(e)) node1.exec_in_container(["mkdir", f"{path_to_data}/store/456/testgarbage"]) node1.exec_in_container( ["mkdir", f"{path_to_data}/store/456/30000000-1000-4000-8000-000000000003"] @@ -218,7 +221,7 @@ def test_store_cleanup(started_cluster): timeout=60, look_behind_lines=1000, ) - node1.wait_for_log_line("directories from store") + node1.wait_for_log_line("directories from store", look_behind_lines=1000) store = node1.exec_in_container(["ls", f"{path_to_data}/store"]) assert "100" in store diff --git a/tests/integration/test_cluster_copier/test.py b/tests/integration/test_cluster_copier/test.py index 14417f151ee..0aadcadc064 100644 --- a/tests/integration/test_cluster_copier/test.py +++ b/tests/integration/test_cluster_copier/test.py @@ -18,7 +18,7 @@ sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) COPYING_FAIL_PROBABILITY = 0.2 MOVING_FAIL_PROBABILITY = 0.2 -cluster = ClickHouseCluster(__file__, name="copier_test") +cluster = ClickHouseCluster(__file__) def generateRandomString(count): diff --git a/tests/integration/test_cluster_copier/test_three_nodes.py b/tests/integration/test_cluster_copier/test_three_nodes.py index c8039792fe8..31d6c0448f4 100644 --- a/tests/integration/test_cluster_copier/test_three_nodes.py +++ b/tests/integration/test_cluster_copier/test_three_nodes.py @@ -12,7 +12,7 @@ import docker CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) -cluster = ClickHouseCluster(__file__, name="copier_test_three_nodes") +cluster = ClickHouseCluster(__file__) @pytest.fixture(scope="module") diff --git a/tests/integration/test_cluster_copier/test_trivial.py b/tests/integration/test_cluster_copier/test_trivial.py index 84bf39f0d76..785186fded4 100644 --- a/tests/integration/test_cluster_copier/test_trivial.py +++ b/tests/integration/test_cluster_copier/test_trivial.py @@ -19,7 +19,7 @@ sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) COPYING_FAIL_PROBABILITY = 0.1 MOVING_FAIL_PROBABILITY = 0.1 -cluster = ClickHouseCluster(__file__, name="copier_test_trivial") +cluster = ClickHouseCluster(__file__) def generateRandomString(count): diff --git a/tests/integration/test_cluster_copier/test_two_nodes.py b/tests/integration/test_cluster_copier/test_two_nodes.py index 6fdaaeea720..10ab7d03b00 100644 --- a/tests/integration/test_cluster_copier/test_two_nodes.py +++ b/tests/integration/test_cluster_copier/test_two_nodes.py @@ -12,7 +12,7 @@ import docker CURRENT_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.dirname(CURRENT_TEST_DIR)) -cluster = ClickHouseCluster(__file__, name="copier_test_two_nodes") +cluster = ClickHouseCluster(__file__) @pytest.fixture(scope="module") diff --git a/tests/integration/test_concurrent_backups_s3/__init__.py b/tests/integration/test_concurrent_backups_s3/__init__.py new file 
mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml b/tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml new file mode 100644 index 00000000000..9124d583f8c --- /dev/null +++ b/tests/integration/test_concurrent_backups_s3/configs/storage_conf.xml @@ -0,0 +1,34 @@ + + + + + s3 + http://minio1:9001/root/data/ + minio + minio123 + 33554432 + + + local + / + + + + + +

+ s3 +
+ + + + + + + 0 + + + hdd + + + diff --git a/tests/integration/test_concurrent_backups_s3/test.py b/tests/integration/test_concurrent_backups_s3/test.py new file mode 100644 index 00000000000..608144843d9 --- /dev/null +++ b/tests/integration/test_concurrent_backups_s3/test.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +import pytest +import re +import os.path +from multiprocessing.dummy import Pool +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import assert_eq_with_retry +import time + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/storage_conf.xml"], + with_minio=True, +) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def test_concurrent_backups(start_cluster): + node.query("DROP TABLE IF EXISTS s3_test NO DELAY") + columns = [f"column_{i} UInt64" for i in range(1000)] + columns_str = ", ".join(columns) + node.query( + f"CREATE TABLE s3_test ({columns_str}) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='s3';" + ) + node.query( + f"INSERT INTO s3_test SELECT * FROM generateRandom('{columns_str}') LIMIT 10000" + ) + + def create_backup(i): + backup_name = f"Disk('hdd', '/backups/{i}')" + node.query(f"BACKUP TABLE s3_test TO {backup_name} ASYNC") + + p = Pool(40) + + p.map(create_backup, range(40)) + + assert_eq_with_retry( + node, + "SELECT count() FROM system.backups WHERE status != 'BACKUP_COMPLETE' and status != 'FAILED_TO_BACKUP'", + "0", + retry_count=100, + ) + assert node.query("SELECT count() FROM s3_test where not ignore(*)") == "10000\n" diff --git a/tests/integration/test_config_substitutions/configs/config_allow_databases.xml b/tests/integration/test_config_substitutions/configs/config_allow_databases.xml index be727360dcf..ba38a4f250a 100644 --- a/tests/integration/test_config_substitutions/configs/config_allow_databases.xml +++ b/tests/integration/test_config_substitutions/configs/config_allow_databases.xml @@ -19,6 +19,7 @@ default + system diff --git a/tests/integration/test_create_user_and_login/test.py b/tests/integration/test_create_user_and_login/test.py index fd052ba9716..25346c22d3b 100644 --- a/tests/integration/test_create_user_and_login/test.py +++ b/tests/integration/test_create_user_and_login/test.py @@ -81,18 +81,23 @@ EOF""", ["bash", "-c", "rm /etc/clickhouse-server/users.d/user_c.xml"] ) - expected_error = "no user with such name" + expected_errors = ["no user with such name", "not found in user directories"] while True: out, err = instance.query_and_get_answer_with_error("SELECT 1", user="C") - if expected_error in err: - logging.debug(f"Got error '{expected_error}' just as expected") + found_error = [ + expected_error + for expected_error in expected_errors + if (expected_error in err) + ] + if found_error: + logging.debug(f"Got error '{found_error}' just as expected") break if out == "1\n": logging.debug(f"Got output '1', retrying...") time.sleep(0.5) continue raise Exception( - f"Expected either output '1' or error '{expected_error}', got output={out} and error={err}" + f"Expected either output '1' or one of errors '{expected_errors}', got output={out} and error={err}" ) assert instance.query("SELECT name FROM system.users WHERE name='C'") == "" diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py index aa1eb614dd5..2213623379a 100644 --- 
a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_cassandra.py @@ -24,7 +24,7 @@ def setup_module(module): global complex_tester global ranged_tester - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) SOURCE = SourceCassandra( "Cassandra", diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py index b7f8226960f..bb0e3b47414 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_local.py @@ -38,7 +38,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py index 6790d11ed1a..bf4d05a154c 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_clickhouse_remote.py @@ -38,7 +38,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py index 5186139ddf6..6af5fa841c1 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_cache.py @@ -38,7 +38,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py index 63f4ff87cce..dfcf1e4fc64 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_executable_hashed.py @@ -38,7 +38,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py 
index 0147b95c786..e9bf93b3c8e 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_file.py @@ -36,7 +36,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py index 96d17508880..94220d7c698 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_http.py @@ -36,7 +36,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py index 007e318e037..0b7476faf2e 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_https.py @@ -38,7 +38,7 @@ def setup_module(module): ranged_tester.create_dictionaries(SOURCE) # Since that all .xml configs were created - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) main_configs = [] main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py index 4a9d054b08f..55639877ba0 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py @@ -24,7 +24,7 @@ def setup_module(module): global complex_tester global ranged_tester - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) SOURCE = SourceMongo( "MongoDB", "localhost", diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py index c6551e0eb70..84c547b7a6b 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py @@ -24,7 +24,7 @@ def setup_module(module): global complex_tester global ranged_tester - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) SOURCE = SourceMongoURI( "MongoDB", diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py index 96757c58e0c..77b2c0741b5 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mysql.py @@ -24,7 +24,7 @@ def 
setup_module(module): global complex_tester global ranged_tester - cluster = ClickHouseCluster(__file__, name=test_name) + cluster = ClickHouseCluster(__file__) SOURCE = SourceMySQL( "MySQL", diff --git a/tests/integration/test_dictionaries_redis/test_long.py b/tests/integration/test_dictionaries_redis/test_long.py index 19b03322b4d..094df789704 100644 --- a/tests/integration/test_dictionaries_redis/test_long.py +++ b/tests/integration/test_dictionaries_redis/test_long.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster import redis -cluster = ClickHouseCluster(__file__, name="long") +cluster = ClickHouseCluster(__file__) node = cluster.add_instance("node", with_redis=True) diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py index bb587efa7e9..85c45d5df3c 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py @@ -5,7 +5,7 @@ from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseKiller from helpers.network import PartitionManager -cluster = ClickHouseCluster(__file__, name="reading") +cluster = ClickHouseCluster(__file__) dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) main_node = cluster.add_instance( diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py index 7acc26a66e0..92d681698bc 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_string.py @@ -7,7 +7,7 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.test_tools import TSV -cluster = ClickHouseCluster(__file__, name="string") +cluster = ClickHouseCluster(__file__) dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) main_node = cluster.add_instance( diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py index 54c5976f295..1da8fd3325a 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_dict_get_or_default.py @@ -5,7 +5,7 @@ from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseKiller from helpers.network import PartitionManager -cluster = ClickHouseCluster(__file__, name="default") +cluster = ClickHouseCluster(__file__) dictionary_node = cluster.add_instance("dictionary_node", stay_alive=True) main_node = cluster.add_instance( diff --git a/tests/integration/test_disabled_access_control_improvements/configs/config.d/disable_access_control_improvements.xml b/tests/integration/test_disabled_access_control_improvements/configs/config.d/disable_access_control_improvements.xml index 0192e211b68..7969c638fd7 100644 --- a/tests/integration/test_disabled_access_control_improvements/configs/config.d/disable_access_control_improvements.xml +++ b/tests/integration/test_disabled_access_control_improvements/configs/config.d/disable_access_control_improvements.xml @@ -1,5 +1,7 @@ + + diff --git 
a/tests/integration/test_disabled_access_control_improvements/configs/users.d/another_user.xml b/tests/integration/test_disabled_access_control_improvements/configs/users.d/another_user.xml index 19249011968..476072bd138 100644 --- a/tests/integration/test_disabled_access_control_improvements/configs/users.d/another_user.xml +++ b/tests/integration/test_disabled_access_control_improvements/configs/users.d/another_user.xml @@ -13,6 +13,9 @@ default default + + mydb + diff --git a/tests/integration/test_disabled_access_control_improvements/test_select_from_system_tables.py b/tests/integration/test_disabled_access_control_improvements/test_select_from_system_tables.py new file mode 100644 index 00000000000..5d760c9fc2c --- /dev/null +++ b/tests/integration/test_disabled_access_control_improvements/test_select_from_system_tables.py @@ -0,0 +1,162 @@ +import os +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + main_configs=["configs/config.d/disable_access_control_improvements.xml"], + user_configs=[ + "configs/users.d/another_user.xml", + ], +) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + node.query("CREATE DATABASE mydb") + node.query("CREATE TABLE mydb.table1(x UInt32) ENGINE=Log") + node.query("CREATE TABLE table2(x UInt32) ENGINE=Log") + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def reset_after_test(): + try: + node.query("CREATE USER OR REPLACE sqluser") + yield + finally: + pass + + +def test_system_db(): + assert node.query("SELECT count()>0 FROM system.settings") == "1\n" + assert node.query("SELECT count()>0 FROM system.users") == "1\n" + assert node.query("SELECT count()>0 FROM system.clusters") == "1\n" + assert node.query("SELECT count() FROM system.tables WHERE name='table1'") == "1\n" + assert node.query("SELECT count() FROM system.tables WHERE name='table2'") == "1\n" + + assert node.query("SELECT count()>0 FROM system.settings", user="another") == "1\n" + expected_error = "necessary to have grant SHOW USERS ON *.*" + assert expected_error in node.query_and_get_error( + "SELECT count()>0 FROM system.users", user="another" + ) + assert node.query("SELECT count()>0 FROM system.clusters", user="another") == "1\n" + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table1'", user="another" + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table2'", user="another" + ) + == "0\n" + ) + + assert node.query("SELECT count()>0 FROM system.settings", user="sqluser") == "1\n" + expected_error = "necessary to have grant SHOW USERS ON *.*" + assert expected_error in node.query_and_get_error( + "SELECT count()>0 FROM system.users", user="sqluser" + ) + assert node.query("SELECT count()>0 FROM system.clusters", user="sqluser") == "1\n" + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table1'", user="sqluser" + ) + == "0\n" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table2'", user="sqluser" + ) + == "0\n" + ) + + node.query("GRANT SHOW USERS ON *.* TO sqluser") + node.query("GRANT SHOW ON mydb.table1 TO sqluser") + node.query("GRANT SHOW ON table2 TO sqluser") + assert node.query("SELECT count()>0 FROM system.settings", user="sqluser") == "1\n" + assert node.query("SELECT count()>0 FROM system.users", user="sqluser") == "1\n" + assert 
node.query("SELECT count()>0 FROM system.clusters", user="sqluser") == "1\n" + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table1'", user="sqluser" + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table2'", user="sqluser" + ) + == "1\n" + ) + + +def test_information_schema(): + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'" + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'" + ) + == "1\n" + ) + + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="another", + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="another", + ) + == "0\n" + ) + + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="sqluser", + ) + == "0\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="sqluser", + ) + == "0\n" + ) + + node.query("GRANT SHOW ON mydb.table1 TO sqluser") + node.query("GRANT SHOW ON table2 TO sqluser") + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="sqluser", + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="sqluser", + ) + == "1\n" + ) diff --git a/tests/integration/test_distributed_respect_user_timeouts/test.py b/tests/integration/test_distributed_respect_user_timeouts/test.py index 567377aba0b..593843b4e4a 100644 --- a/tests/integration/test_distributed_respect_user_timeouts/test.py +++ b/tests/integration/test_distributed_respect_user_timeouts/test.py @@ -8,8 +8,6 @@ from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager from helpers.test_tools import TSV -cluster = ClickHouseCluster(__file__) - NODES = {"node" + str(i): None for i in (1, 2)} IS_DEBUG = False @@ -131,15 +129,7 @@ def started_cluster(request): def _check_timeout_and_exception(node, user, query_base, query): repeats = EXPECTED_BEHAVIOR[user]["times"] - extra_repeats = 1 - # Table function remote() are executed two times. - # It tries to get table structure from remote shards. 
- # On 'node2' it will firstly try to get structure from 'node1' (which is not available), - # so there are 1 extra connection attempts for 'node2' and 'remote' - if node.name == "node2" and query_base == "remote": - extra_repeats = 2 - - expected_timeout = EXPECTED_BEHAVIOR[user]["timeout"] * repeats * extra_repeats + expected_timeout = EXPECTED_BEHAVIOR[user]["timeout"] * repeats start = timeit.default_timer() exception = node.query_and_get_error(query, user=user) diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/__init__.py b/tests/integration/test_host_regexp_multiple_ptr_records/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/configs/host_regexp.xml b/tests/integration/test_host_regexp_multiple_ptr_records/configs/host_regexp.xml new file mode 100644 index 00000000000..7a2141e6c7e --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records/configs/host_regexp.xml @@ -0,0 +1,11 @@ + + + + + + test1\.example\.com$ + + default + + + \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/configs/listen_host.xml b/tests/integration/test_host_regexp_multiple_ptr_records/configs/listen_host.xml new file mode 100644 index 00000000000..58ef55cd3f3 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records/configs/listen_host.xml @@ -0,0 +1,5 @@ + + :: + 0.0.0.0 + 1 + diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/coredns_config/Corefile b/tests/integration/test_host_regexp_multiple_ptr_records/coredns_config/Corefile new file mode 100644 index 00000000000..0dd198441dc --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records/coredns_config/Corefile @@ -0,0 +1,8 @@ +. { + hosts /example.com { + reload "200ms" + fallthrough + } + forward . 
127.0.0.11 + log +} diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/coredns_config/example.com b/tests/integration/test_host_regexp_multiple_ptr_records/coredns_config/example.com new file mode 100644 index 00000000000..9beb415c290 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records/coredns_config/example.com @@ -0,0 +1 @@ +filled in runtime, but needs to exist in order to be volume mapped in docker \ No newline at end of file diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/test.py b/tests/integration/test_host_regexp_multiple_ptr_records/test.py new file mode 100644 index 00000000000..fa2917411e4 --- /dev/null +++ b/tests/integration/test_host_regexp_multiple_ptr_records/test.py @@ -0,0 +1,91 @@ +import pytest +from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check +import os + +DOCKER_COMPOSE_PATH = get_docker_compose_path() +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + +cluster = ClickHouseCluster(__file__) + +ch_server = cluster.add_instance( + "clickhouse-server", + with_coredns=True, + main_configs=["configs/listen_host.xml"], + user_configs=["configs/host_regexp.xml"], + ipv6_address="2001:3984:3989::1:1111", +) + +client = cluster.add_instance( + "clickhouse-client", + ipv6_address="2001:3984:3989::1:1112", +) + + +@pytest.fixture(scope="module") +def started_cluster(): + global cluster + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def setup_dns_server(ip): + domains_string = "test3.example.com test2.example.com test1.example.com" + example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' + run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True) + + +def setup_ch_server(dns_server_ip): + ch_server.exec_in_container( + (["bash", "-c", f"echo 'nameserver {dns_server_ip}' > /etc/resolv.conf"]) + ) + ch_server.exec_in_container( + (["bash", "-c", "echo 'options ndots:0' >> /etc/resolv.conf"]) + ) + ch_server.query("SYSTEM DROP DNS CACHE") + + +def build_endpoint_v4(ip): + return f"'http://{ip}:8123/?query=SELECT+1&user=test_dns'" + + +def build_endpoint_v6(ip): + return build_endpoint_v4(f"[{ip}]") + + +def test_host_regexp_multiple_ptr_v4_fails_with_wrong_resolution(started_cluster): + server_ip = cluster.get_instance_ip("clickhouse-server") + random_ip = "9.9.9.9" + dns_server_ip = cluster.get_instance_ip(cluster.coredns_host) + + setup_dns_server(random_ip) + setup_ch_server(dns_server_ip) + + endpoint = build_endpoint_v4(server_ip) + + assert "1\n" != client.exec_in_container((["bash", "-c", f"curl {endpoint}"])) + + +def test_host_regexp_multiple_ptr_v4(started_cluster): + server_ip = cluster.get_instance_ip("clickhouse-server") + client_ip = cluster.get_instance_ip("clickhouse-client") + dns_server_ip = cluster.get_instance_ip(cluster.coredns_host) + + setup_dns_server(client_ip) + setup_ch_server(dns_server_ip) + + endpoint = build_endpoint_v4(server_ip) + + assert "1\n" == client.exec_in_container((["bash", "-c", f"curl {endpoint}"])) + + +def test_host_regexp_multiple_ptr_v6(started_cluster): + setup_dns_server(client.ipv6_address) + setup_ch_server(cluster.get_instance_global_ipv6(cluster.coredns_host)) + + endpoint = build_endpoint_v6(ch_server.ipv6_address) + + assert "1\n" == client.exec_in_container((["bash", "-c", f"curl -6 {endpoint}"])) diff --git a/tests/integration/test_keeper_four_word_command/test_allow_list.py 
b/tests/integration/test_keeper_four_word_command/test_allow_list.py index 026bd1d59af..4bf8ae1ab53 100644 --- a/tests/integration/test_keeper_four_word_command/test_allow_list.py +++ b/tests/integration/test_keeper_four_word_command/test_allow_list.py @@ -3,7 +3,7 @@ import pytest from helpers.cluster import ClickHouseCluster import time -cluster = ClickHouseCluster(__file__, name="test_keeper_4lw_allow_list") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", main_configs=["configs/keeper_config_with_allow_list.xml"], stay_alive=True ) diff --git a/tests/integration/test_log_levels_update/test.py b/tests/integration/test_log_levels_update/test.py index 86719390f33..b0c003ea440 100644 --- a/tests/integration/test_log_levels_update/test.py +++ b/tests/integration/test_log_levels_update/test.py @@ -3,7 +3,7 @@ import re from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="log_quries_probability") +cluster = ClickHouseCluster(__file__) node = cluster.add_instance("node", with_zookeeper=False) config = """ diff --git a/tests/integration/test_log_query_probability/test.py b/tests/integration/test_log_query_probability/test.py index d13ecc276cb..0ed7bf2c928 100644 --- a/tests/integration/test_log_query_probability/test.py +++ b/tests/integration/test_log_query_probability/test.py @@ -2,7 +2,7 @@ import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="log_quries_probability") +cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=False) node2 = cluster.add_instance("node2", with_zookeeper=False) diff --git a/tests/integration/test_postgresql_database_engine/test.py b/tests/integration/test_postgresql_database_engine/test.py index 5619c551c71..d07f62f8a80 100644 --- a/tests/integration/test_postgresql_database_engine/test.py +++ b/tests/integration/test_postgresql_database_engine/test.py @@ -349,6 +349,29 @@ def test_postgres_database_old_syntax(started_cluster): node1.query("DROP DATABASE IF EXISTS postgres_database;") +def test_postgresql_fetch_tables(started_cluster): + conn = get_postgres_conn( + started_cluster.postgres_ip, started_cluster.postgres_port, database=True + ) + cursor = conn.cursor() + + cursor.execute("DROP SCHEMA IF EXISTS test_schema CASCADE") + cursor.execute("CREATE SCHEMA test_schema") + cursor.execute("CREATE TABLE test_schema.table1 (a integer)") + cursor.execute("CREATE TABLE test_schema.table2 (a integer)") + cursor.execute("CREATE TABLE table3 (a integer)") + + node1.query( + "CREATE DATABASE postgres_database ENGINE = PostgreSQL('postgres1:5432', 'postgres_database', 'postgres', 'mysecretpassword')" + ) + + assert node1.query("SHOW TABLES FROM postgres_database") == "table3\n" + assert not node1.contains_in_log("PostgreSQL table table1 does not exist") + + cursor.execute(f"DROP TABLE table3") + cursor.execute("DROP SCHEMA IF EXISTS test_schema CASCADE") + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_s3_zero_copy_replication/test.py b/tests/integration/test_s3_zero_copy_replication/test.py index 39be0d564df..7b7fb9d21ad 100644 --- a/tests/integration/test_s3_zero_copy_replication/test.py +++ b/tests/integration/test_s3_zero_copy_replication/test.py @@ -150,6 +150,7 @@ def test_s3_zero_copy_replication(cluster, policy): node2.query("DROP TABLE IF EXISTS s3_test NO DELAY") +@pytest.mark.skip(reason="Test is flaky (and never was 
stable)") def test_s3_zero_copy_on_hybrid_storage(cluster): node1 = cluster.instances["node1"] node2 = cluster.instances["node2"] diff --git a/tests/integration/test_s3_zero_copy_ttl/test.py b/tests/integration/test_s3_zero_copy_ttl/test.py index 14b4664fcc1..9a782aacef6 100644 --- a/tests/integration/test_s3_zero_copy_ttl/test.py +++ b/tests/integration/test_s3_zero_copy_ttl/test.py @@ -68,19 +68,27 @@ def test_ttl_move_and_s3(started_cluster): assert node1.query("SELECT COUNT() FROM s3_test_with_ttl") == "30\n" assert node2.query("SELECT COUNT() FROM s3_test_with_ttl") == "30\n" - time.sleep(5) + for attempt in reversed(range(5)): + time.sleep(5) - print( - node1.query( - "SELECT * FROM system.parts WHERE table = 's3_test_with_ttl' FORMAT Vertical" + print( + node1.query( + "SELECT * FROM system.parts WHERE table = 's3_test_with_ttl' FORMAT Vertical" + ) ) - ) - minio = cluster.minio_client - objects = minio.list_objects(cluster.minio_bucket, "data/", recursive=True) - counter = 0 - for obj in objects: - print("Objectname:", obj.object_name, "metadata:", obj.metadata) - counter += 1 - print("Total objects", counter) + minio = cluster.minio_client + objects = minio.list_objects(cluster.minio_bucket, "data/", recursive=True) + counter = 0 + for obj in objects: + print(f"Objectname: {obj.object_name}, metadata: {obj.metadata}") + counter += 1 + + print(f"Total objects: {counter}") + + if counter == 300: + break + + print(f"Attempts remaining: {attempt}") + assert counter == 300 diff --git a/tests/integration/test_select_access_rights/configs/another_user.xml b/tests/integration/test_select_access_rights/configs/another_user.xml new file mode 100644 index 00000000000..627ebccdada --- /dev/null +++ b/tests/integration/test_select_access_rights/configs/another_user.xml @@ -0,0 +1,16 @@ + + + + + + + ::/0 + + default + default + + mydb + + + + diff --git a/tests/integration/test_select_access_rights/test_from_system_tables.py b/tests/integration/test_select_access_rights/test_from_system_tables.py new file mode 100644 index 00000000000..ac938a9694a --- /dev/null +++ b/tests/integration/test_select_access_rights/test_from_system_tables.py @@ -0,0 +1,192 @@ +import os +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance( + "node", + user_configs=[ + "configs/another_user.xml", + ], +) + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + node.query("CREATE DATABASE mydb") + node.query("CREATE TABLE mydb.table1(x UInt32) ENGINE=Log") + node.query("CREATE TABLE table2(x UInt32) ENGINE=Log") + yield cluster + + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def reset_after_test(): + try: + node.query("CREATE USER OR REPLACE sqluser") + yield + finally: + pass + + +def test_system_db(): + assert node.query("SELECT count()>0 FROM system.settings") == "1\n" + assert node.query("SELECT count()>0 FROM system.users") == "1\n" + assert node.query("SELECT count()>0 FROM system.clusters") == "1\n" + assert node.query("SELECT count() FROM system.tables WHERE name='table1'") == "1\n" + assert node.query("SELECT count() FROM system.tables WHERE name='table2'") == "1\n" + + assert node.query("SELECT count()>0 FROM system.settings", user="another") == "1\n" + + expected_error = ( + "necessary to have grant SELECT for at least one column on system.users" + ) + assert expected_error in node.query_and_get_error( + "SELECT count()>0 FROM 
system.users", user="another" + ) + + expected_error = ( + "necessary to have grant SELECT for at least one column on system.clusters" + ) + assert expected_error in node.query_and_get_error( + "SELECT count()>0 FROM system.clusters", user="another" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table1'", user="another" + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table2'", user="another" + ) + == "0\n" + ) + + assert node.query("SELECT count()>0 FROM system.settings", user="sqluser") == "1\n" + + expected_error = ( + "necessary to have grant SELECT for at least one column on system.users" + ) + assert expected_error in node.query_and_get_error( + "SELECT count()>0 FROM system.users", user="sqluser" + ) + + expected_error = ( + "necessary to have grant SELECT for at least one column on system.clusters" + ) + assert expected_error in node.query_and_get_error( + "SELECT count()>0 FROM system.clusters", user="sqluser" + ) + + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table1'", user="sqluser" + ) + == "0\n" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table2'", user="sqluser" + ) + == "0\n" + ) + + node.query("GRANT SELECT ON system.users TO sqluser") + node.query("GRANT SELECT ON system.clusters TO sqluser") + node.query("GRANT SHOW ON mydb.table1 TO sqluser") + node.query("GRANT SHOW ON table2 TO sqluser") + assert node.query("SELECT count()>0 FROM system.settings", user="sqluser") == "1\n" + assert node.query("SELECT count()>0 FROM system.users", user="sqluser") == "1\n" + assert node.query("SELECT count()>0 FROM system.clusters", user="sqluser") == "1\n" + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table1'", user="sqluser" + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM system.tables WHERE name='table2'", user="sqluser" + ) + == "1\n" + ) + + node.query("REVOKE ALL ON *.* FROM sqluser") + node.query("GRANT SHOW USERS ON *.* TO sqluser") + assert node.query("SELECT count()>0 FROM system.users", user="sqluser") == "1\n" + + +def test_information_schema(): + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'" + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'" + ) + == "1\n" + ) + + expected_error = ( + "necessary to have grant SELECT(table_name) ON information_schema.tables" + ) + assert expected_error in node.query_and_get_error( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="another", + ) + assert expected_error in node.query_and_get_error( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="another", + ) + + assert expected_error in node.query_and_get_error( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="sqluser", + ) + assert expected_error in node.query_and_get_error( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="sqluser", + ) + + node.query("GRANT SELECT ON information_schema.* TO sqluser") + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="sqluser", + ) + == "0\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="sqluser", + ) + == "0\n" + ) + + node.query("GRANT SHOW ON mydb.table1 TO sqluser") + node.query("GRANT SHOW ON table2 TO
sqluser") + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table1'", + user="sqluser", + ) + == "1\n" + ) + assert ( + node.query( + "SELECT count() FROM information_schema.tables WHERE table_name='table2'", + user="sqluser", + ) + == "1\n" + ) diff --git a/tests/integration/test_select_access_rights/test.py b/tests/integration/test_select_access_rights/test_main.py similarity index 100% rename from tests/integration/test_select_access_rights/test.py rename to tests/integration/test_select_access_rights/test_main.py diff --git a/tests/integration/test_table_functions_access_rights/test.py b/tests/integration/test_table_functions_access_rights/test.py index 705150c8bdd..09a05122c07 100644 --- a/tests/integration/test_table_functions_access_rights/test.py +++ b/tests/integration/test_table_functions_access_rights/test.py @@ -65,3 +65,38 @@ def test_merge(): "it's necessary to have grant SELECT ON default.table2" in instance.query_and_get_error(select_query, user="A") ) + + +def test_view_if_permitted(): + assert ( + instance.query( + "SELECT * FROM viewIfPermitted(SELECT * FROM table1 ELSE null('x UInt32'))" + ) + == "1\n" + ) + + expected_error = "requires a SELECT query with the result columns matching a table function after 'ELSE'" + assert expected_error in instance.query_and_get_error( + "SELECT * FROM viewIfPermitted(SELECT * FROM table1 ELSE null('x Int32'))" + ) + assert expected_error in instance.query_and_get_error( + "SELECT * FROM viewIfPermitted(SELECT * FROM table1 ELSE null('y UInt32'))" + ) + + instance.query("CREATE USER A") + assert ( + instance.query( + "SELECT * FROM viewIfPermitted(SELECT * FROM table1 ELSE null('x UInt32'))", + user="A", + ) + == "" + ) + + instance.query("GRANT SELECT ON table1 TO A") + assert ( + instance.query( + "SELECT * FROM viewIfPermitted(SELECT * FROM table1 ELSE null('x UInt32'))", + user="A", + ) + == "1\n" + ) diff --git a/tests/integration/test_zookeeper_config/test_password.py b/tests/integration/test_zookeeper_config/test_password.py index 71f059b3277..55a06cd5f51 100644 --- a/tests/integration/test_zookeeper_config/test_password.py +++ b/tests/integration/test_zookeeper_config/test_password.py @@ -2,7 +2,7 @@ import time import pytest from helpers.cluster import ClickHouseCluster -cluster = ClickHouseCluster(__file__, name="password") +cluster = ClickHouseCluster(__file__) # TODO ACL not implemented in Keeper. node1 = cluster.add_instance( diff --git a/tests/integration/test_zookeeper_config/test_secure.py b/tests/integration/test_zookeeper_config/test_secure.py index f540a504024..1bc7c62f92c 100644 --- a/tests/integration/test_zookeeper_config/test_secure.py +++ b/tests/integration/test_zookeeper_config/test_secure.py @@ -9,7 +9,6 @@ TEST_DIR = os.path.dirname(__file__) cluster = ClickHouseCluster( __file__, - name="secure", zookeeper_certfile=os.path.join(TEST_DIR, "configs_secure", "client.crt"), zookeeper_keyfile=os.path.join(TEST_DIR, "configs_secure", "client.key"), ) diff --git a/tests/performance/low_cardinality_argument.xml b/tests/performance/low_cardinality_argument.xml new file mode 100644 index 00000000000..89ee58ffd07 --- /dev/null +++ b/tests/performance/low_cardinality_argument.xml @@ -0,0 +1,12 @@ + + + CREATE TABLE test_lc (x UInt64, lc LowCardinality(Nullable(String))) ENGINE = MergeTree order by x + + + INSERT INTO test_lc SELECT number, number % 10 ? 
NULL : toString(number % 10000) FROM numbers(1e7) + + SELECT isNullable(lc) FROM test_lc FORMAT Null + SELECT isConstant(lc) FROM test_lc FORMAT Null + SELECT isNull(lc) FROM test_lc FORMAT Null + SELECT isNotNull(lc) FROM test_lc FORMAT Null + diff --git a/tests/queries/0_stateless/00233_position_function_family.reference b/tests/queries/0_stateless/00233_position_function_family.reference index f6cacbc7027..bd970f335e1 100644 --- a/tests/queries/0_stateless/00233_position_function_family.reference +++ b/tests/queries/0_stateless/00233_position_function_family.reference @@ -7306,16509 +7306,3 @@ 1 1 1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 
-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 diff --git a/tests/queries/0_stateless/00233_position_function_family.sql b/tests/queries/0_stateless/00233_position_function_family.sql index 36f8a5535d9..dd7394bc39a 100644 --- a/tests/queries/0_stateless/00233_position_function_family.sql +++ b/tests/queries/0_stateless/00233_position_function_family.sql @@ -1,4 +1,5 @@ SET send_logs_level = 'fatal'; + select 1 = position('', ''); select 1 = position('abc', ''); select 0 = position('', 'abc'); @@ -482,1827 +483,3 @@ select 1 = position('abc', materialize('')) from system.numbers limit 1000; select 1 = position('abab', materialize('ab')); select 1 = position('abababababababababababab', materialize('abab')); select 1 = position('abababababababababababab', materialize('abababababababababa')); - -select 0 = multiSearchAny('\0', CAST([], 'Array(String)')); -select 0 = multiSearchAnyCaseInsensitive('\0', CAST([], 'Array(String)')); -select 0 = multiSearchAnyCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); -select 0 = multiSearchAnyUTF8('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstIndex('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstIndexCaseInsensitive('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstIndexUTF8('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstPosition('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstPositionCaseInsensitive('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstPositionCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); -select 0 = multiSearchFirstPositionUTF8('\0', CAST([], 'Array(String)')); -select [] = multiSearchAllPositions('\0', CAST([], 'Array(String)')); -select [] = multiSearchAllPositionsCaseInsensitive('\0', CAST([], 'Array(String)')); -select [] = multiSearchAllPositionsCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); -select [] = multiSearchAllPositionsUTF8('\0', CAST([], 'Array(String)')); - -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']); -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']); -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']); -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']); -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']); -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']); -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']); - -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']); -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']); - -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']); -select [3] 
= multiSearchAllPositions(materialize('abcdefgh'), ['cd']); -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']); -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']); -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']); -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']); - -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']); -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']); -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']); -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']); -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']); - -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']); -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']); -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']); -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']); - -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']); -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']); -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']); - -select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']); -select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']); - -select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']); - -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 10; - -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']) from system.numbers limit 10; - -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']) from system.numbers limit 10; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']) from system.numbers limit 10; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']) from system.numbers limit 10; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']) from system.numbers limit 10; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 10; -select [3] = 
multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 10; - -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']) from system.numbers limit 10; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']) from system.numbers limit 10; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']) from system.numbers limit 10; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']) from system.numbers limit 10; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']) from system.numbers limit 10; - -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']) from system.numbers limit 10; -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']) from system.numbers limit 10; -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']) from system.numbers limit 10; -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']) from system.numbers limit 10; - -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']) from system.numbers limit 10; -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']) from system.numbers limit 10; -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']) from system.numbers limit 10; - -select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']) from system.numbers limit 10; -select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']) from system.numbers limit 10; - -select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']) from system.numbers limit 10; - -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']) from system.numbers limit 129; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']) from system.numbers limit 129; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']) from system.numbers limit 129; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 129; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 129; -select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 129; - -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']) from system.numbers limit 129; -select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']) from system.numbers limit 129; - -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']) from system.numbers limit 129; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']) from system.numbers limit 129; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']) from system.numbers limit 129; -select [3] = 
multiSearchAllPositions(materialize('abcdefgh'), ['cdef']) from system.numbers limit 129; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 129; -select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 129; - -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']) from system.numbers limit 129; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']) from system.numbers limit 129; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']) from system.numbers limit 129; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']) from system.numbers limit 129; -select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']) from system.numbers limit 129; - -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']) from system.numbers limit 129; -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']) from system.numbers limit 129; -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']) from system.numbers limit 129; -select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']) from system.numbers limit 129; - -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']) from system.numbers limit 129; -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']) from system.numbers limit 129; -select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']) from system.numbers limit 129; - -select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']) from system.numbers limit 129; -select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']) from system.numbers limit 129; - -select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']) from system.numbers limit 129; - -select [2] = multiSearchAllPositions(materialize('abc'), ['b']); -select [2] = multiSearchAllPositions(materialize('abc'), ['bc']); -select [0] = multiSearchAllPositions(materialize('abc'), ['bcde']); -select [0] = multiSearchAllPositions(materialize('abc'), ['bcdef']); -select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefg']); -select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefgh']); - -select [0] = multiSearchAllPositions(materialize('abc'), ['abcdefg']); -select [0] = multiSearchAllPositions(materialize('abc'), ['abcdef']); -select [0] = multiSearchAllPositions(materialize('abc'), ['abcde']); -select [0] = multiSearchAllPositions(materialize('abc'), ['abcd']); -select [1] = multiSearchAllPositions(materialize('abc'), ['abc']); -select [1] = multiSearchAllPositions(materialize('abc'), ['ab']); -select [1] = multiSearchAllPositions(materialize('abc'), ['a']); - -select [3] = multiSearchAllPositions(materialize('abcd'), ['c']); -select [3] = multiSearchAllPositions(materialize('abcd'), ['cd']); -select [0] = multiSearchAllPositions(materialize('abcd'), ['cde']); -select [0] = multiSearchAllPositions(materialize('abcd'), ['cdef']); -select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefg']); -select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefgh']); - -select [0] = multiSearchAllPositions(materialize('abc'), ['defgh']); -select [0] = multiSearchAllPositions(materialize('abc'), ['defg']); -select [0] = multiSearchAllPositions(materialize('abc'), ['def']); -select [0] = multiSearchAllPositions(materialize('abc'), ['de']); -select [0] = multiSearchAllPositions(materialize('abc'), ['d']); - - -select [2] = 
multiSearchAllPositions(materialize('abc'), ['b']) from system.numbers limit 10; -select [2] = multiSearchAllPositions(materialize('abc'), ['bc']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['bcde']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['bcdef']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefg']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; - - -select [0] = multiSearchAllPositions(materialize('abc'), ['abcdefg']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['abcdef']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['abcde']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['abcd']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abc'), ['abc']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abc'), ['ab']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abc'), ['a']) from system.numbers limit 10; - -select [3] = multiSearchAllPositions(materialize('abcd'), ['c']) from system.numbers limit 10; -select [3] = multiSearchAllPositions(materialize('abcd'), ['cd']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abcd'), ['cde']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abcd'), ['cdef']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefg']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; - -select [0] = multiSearchAllPositions(materialize('abc'), ['defgh']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['defg']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['def']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['de']) from system.numbers limit 10; -select [0] = multiSearchAllPositions(materialize('abc'), ['d']) from system.numbers limit 10; - -select [1] = multiSearchAllPositions(materialize('abc'), ['']); -select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 10; -select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 100; -select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 1000; - -select [1] = multiSearchAllPositions(materialize('abab'), ['ab']); -select [1] = multiSearchAllPositions(materialize('abababababababababababab'), ['abab']); -select [1] = multiSearchAllPositions(materialize('abababababababababababab'), ['abababababababababa']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['b']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']); -select 1 = 
multiSearchAny(materialize('abcdefgh'), ['abcdef']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['a']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['c']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['def']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['de']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['d']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['e']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['f']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['g']); -select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['h']); - -select 1 = multiSearchAny(materialize('abcdefgh'), ['b']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['a']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['c']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 10; -select 1 = 
multiSearchAny(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['def']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['de']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['d']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['e']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['f']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['g']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['h']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['b']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['a']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['c']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), 
['defg']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['def']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['de']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['d']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['e']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['f']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['g']) from system.numbers limit 129; -select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abcdefgh'), ['h']) from system.numbers limit 129; - -select 1 = multiSearchAny(materialize('abc'), ['b']); -select 1 = multiSearchAny(materialize('abc'), ['bc']); -select 0 = multiSearchAny(materialize('abc'), ['bcde']); -select 0 = multiSearchAny(materialize('abc'), ['bcdef']); -select 0 = multiSearchAny(materialize('abc'), ['bcdefg']); -select 0 = multiSearchAny(materialize('abc'), ['bcdefgh']); - -select 0 = multiSearchAny(materialize('abc'), ['abcdefg']); -select 0 = multiSearchAny(materialize('abc'), ['abcdef']); -select 0 = multiSearchAny(materialize('abc'), ['abcde']); -select 0 = multiSearchAny(materialize('abc'), ['abcd']); -select 1 = multiSearchAny(materialize('abc'), ['abc']); -select 1 = multiSearchAny(materialize('abc'), ['ab']); -select 1 = multiSearchAny(materialize('abc'), ['a']); - -select 1 = multiSearchAny(materialize('abcd'), ['c']); -select 1 = multiSearchAny(materialize('abcd'), ['cd']); -select 0 = multiSearchAny(materialize('abcd'), ['cde']); -select 0 = multiSearchAny(materialize('abcd'), ['cdef']); -select 0 = multiSearchAny(materialize('abcd'), ['cdefg']); -select 0 = multiSearchAny(materialize('abcd'), ['cdefgh']); - -select 0 = multiSearchAny(materialize('abc'), ['defgh']); -select 0 = multiSearchAny(materialize('abc'), ['defg']); -select 0 = multiSearchAny(materialize('abc'), ['def']); -select 0 = multiSearchAny(materialize('abc'), ['de']); -select 0 = multiSearchAny(materialize('abc'), ['d']); - - -select 1 = multiSearchAny(materialize('abc'), ['b']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abc'), ['bc']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['bcde']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['bcdef']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['bcdefg']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; - - -select 0 = multiSearchAny(materialize('abc'), ['abcdefg']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['abcdef']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['abcde']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['abcd']) from system.numbers limit 10; -select 1 = 
multiSearchAny(materialize('abc'), ['abc']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abc'), ['ab']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abc'), ['a']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abcd'), ['c']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abcd'), ['cd']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abcd'), ['cde']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abcd'), ['cdef']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abcd'), ['cdefg']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; - -select 0 = multiSearchAny(materialize('abc'), ['defgh']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['defg']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['def']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['de']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('abc'), ['d']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('abc'), ['']); -select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 100; -select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 1000; - -select 1 = multiSearchAny(materialize('abab'), ['ab']); -select 1 = multiSearchAny(materialize('abababababababababababab'), ['abab']); -select 1 = multiSearchAny(materialize('abababababababababababab'), ['abababababababababa']); - --- select 'some random tests'; - -select [4, 1, 1, 2, 6, 1, 1, 0, 4, 1, 14, 0, 10, 0, 16, 6] = multiSearchAllPositions(materialize('jmdqwjbrxlbatqeixknricfk'), ['qwjbrxlba', 'jmd', '', 'mdqwjbrxlbatqe', 'jbrxlbatqeixknric', 'jmdqwjbrxlbatqeixknri', '', 'fdtmnwtts', 'qwjbrxlba', '', 'qeixknricfk', 'hzjjgrnoilfkvzxaemzhf', 'lb', 'kamz', 'ixknr', 'jbrxlbatq']) from system.numbers limit 10; -select [0, 0, 0, 2, 3, 0, 1, 0, 5, 0, 0, 0, 11, 10, 6, 7] = multiSearchAllPositions(materialize('coxcctuehmzkbrsmodfvx'), ['bkhnp', 'nlypjvriuk', 'rkslxwfqjjivcwdexrdtvjdtvuu', 'oxcctuehm', 'xcctuehmzkbrsm', 'kfrieuocovykjmkwxbdlkgwctwvcuh', 'coxc', 'lbwvetgxyndxjqqwthtkgasbafii', 'ctuehmzkbrsmodfvx', 'obzldxjldxowk', 'ngfikgigeyll', 'wdaejjukowgvzijnw', 'zkbr', 'mzkb', 'tuehm', 'ue']) from system.numbers limit 10; -select [1, 1, 0, 0, 0, 1, 1, 1, 4, 0, 6, 6, 0, 10, 1, 5] = multiSearchAllPositions(materialize('mpswgtljbbrmivkcglamemayfn'), ['', 'm', 'saejhpnfgfq', 'rzanrkdssmmkanqjpfi', 'oputeneprgoowg', 'mp', '', '', 'wgtljbbrmivkcglamemay', 'cbpthtrgrmgfypizi', 'tl', 'tlj', 'xuhs', 'brmivkcglamemayfn', '', 'gtljb']) from system.numbers limit 10; -select [1, 0, 0, 8, 6, 0, 7, 1, 3, 0, 0, 0, 0, 12] = multiSearchAllPositions(materialize('arbphzbbecypbzsqsljurtddve'), ['arbphzb', 'mnrboimjfijnti', 'cikcrd', 'becypbz', 'z', 'uocmqgnczhdcrvtqrnaxdxjjlhakoszuwc', 'bbe', '', 'bp', 'yhltnexlpdijkdzt', 'jkwjmrckvgmccmmrolqvy', 'vdxmicjmfbtsbqqmqcgtnrvdgaucsgspwg', 'witlfqwvhmmyjrnrzttrikhhsrd', 'pbzsqsljurt']) from system.numbers limit 10; -select [7, 0, 0, 8, 0, 2, 0, 0, 6, 0, 2, 0, 3, 1] = multiSearchAllPositions(materialize('aizovxqpzcbbxuhwtiaaqhdqjdei'), ['qpzcbbxuhw', 'jugrpglqbm', 'dspwhzpyjohhtizegrnswhjfpdz', 'pzcbbxuh', 'vayzeszlycke', 'i', 'gvrontcpqavsjxtjwzgwxugiyhkhmhq', 
'gyzmeroxztgaurmrqwtmsxcqnxaezuoapatvu', 'xqpzc', 'mjiswsvlvlpqrhhptqq', 'iz', 'hmzjxxfjsvcvdpqwtrdrp', 'zovxqpzcbbxuhwtia', 'ai']) from system.numbers limit 10; -select [0, 0, 0, 19, 14, 22, 10, 0, 0, 13, 0, 8] = multiSearchAllPositions(materialize('ydfgiluhyxwqdfiwtzobwzscyxhuov'), ['srsoubrgghleyheujsbwwwykerzlqphgejpxvog', 'axchkyleddjwkvbuyhmekpbbbztxdlm', 'zqodzvlkmfe', 'obwz', 'fi', 'zsc', 'xwq', 'pvmurvrd', 'uulcdtexckmrsokmgdpkstlkoavyrmxeaacvydxf', 'dfi', 'mxcngttujzgtlssrmluaflmjuv', 'hyxwqdfiwtzobwzscyxhu']) from system.numbers limit 10; -select [6, 1, 1, 0, 0, 5, 1, 0, 8, 0, 5, 0, 2, 12, 0, 15, 0, 0] = multiSearchAllPositions(materialize('pyepgwainvmwekwhhqxxvzdjw'), ['w', '', '', 'gvvkllofjnxvcu', 'kmwwhboplctvzazcyfpxhwtaddfnhekei', 'gwainv', 'pyepgwain', 'ekpnogkzzmbpfynsunwqp', 'invmwe', 'hrxpiplfplqjsstuybksuteoz', 'gwa', 'akfpyduqrwosxcbdemtxrxvundrgse', 'yepgwainvmw', 'wekwhhqxxvzdjw', 'fyimzvedmyriubgoznmcav', 'whhq', 'ozxowbwdqfisuupyzaqynoprgsjhkwlum', 'vpoufrofekajksdp']) from system.numbers limit 10; -select [0, 0, 5, 1, 1, 0, 15, 1, 5, 10, 4, 0, 1, 0, 3, 0, 0, 0] = multiSearchAllPositions(materialize('lqwahffxurkbhhzytequotkfk'), ['rwjqudpuaiufle', 'livwgbnflvy', 'hffxurkbhh', '', '', 'xcajwbqbttzfzfowjubmmgnmssat', 'zytequ', 'lq', 'h', 'rkbhh', 'a', 'immejthwgdr', '', 'llhhnlhcvnxxorzzjt', 'w', 'cvjynqxcivmmmvc', 'wexjomdcmursppjtsweybheyxzleuz', 'fzronsnddfxwlkkzidiknhpjipyrcrzel']) from system.numbers limit 10; -select [0, 1, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1] = multiSearchAllPositions(materialize('nkddriylnakicwgdwrfxpodqea'), ['izwdpgrgpmjlwkanjrffgela', '', 'kicw', 'hltmfymgmrjckdiylkzjlvvyuleksikdjrg', 'yigveskrbidknjxigwilmkgyizewikh', 'xyvzhsnqmuec', 'odcgzlavzrwesjks', 'oilvfgliktoujukpgzvhmokdgkssqgqot', 'llsfsurvimbahwqtbqbp', 'nxj', 'pimydixeobdxmdkvhcyzcgnbhzsydx', 'couzmvxedobuohibgxwoxvmpote', 'driylnakicwgdwrf', 'nkddr']) from system.numbers limit 10; -select [0, 0, 0, 3, 0, 15, 0, 0, 12, 7, 0, 0, 0, 0, 5, 0] = multiSearchAllPositions(materialize('jnckhtjqwycyihuejibqmddrdxe'), ['tajzx', 'vuddoylclxatcjvinusdwt', 'spxkhxvzsljkmnzpeubszjnhqczavgtqopxn', 'ckhtjqwycyi', 'xlbfzdxspldoes', 'u', 'czosfebeznt', 'gzhabdsuyreisxvyfrfrkq', 'yihuejibqmd', 'jqwycyihuejibqm', 'cfbvprgzx', 'hxu', 'vxbhrfpzacgd', 'afoaij', 'htjqwycyihu', 'httzbskqd']) from system.numbers limit 10; -select [0, 0, 12, 4, 4, 0, 13, 23, 0, 1, 0, 2, 0, 0, 0, 3, 0, 0] = multiSearchAllPositions(materialize('dzejajvpoojdkqbnayahygidyrjmb'), ['khwxxvtnqhobbvwgwkpusjlhlzifiuclycml', 'nzvuhtwdaivo', 'dkqbnayahygidyr', 'jajvpoo', 'j', 'wdtbvwmeqgyvetu', 'kqbn', 'idyrjmb', 'tsnxuxevsxrxpgpfdgrkhwqpkse', '', 'efsdgzuefhdzkmquxu', 'zejajvpoojdkqbnayahyg', 'ugwfuighbygrxyctop', 'fcbxzbdugc', 'dxmzzrcplob', 'ejaj', 'wmmupyxrylvawsyfccluiiene', 'ohzmsqhpzbafvbzqwzftbvftei']) from system.numbers limit 10; -select [6, 8, 1, 4, 0, 10, 0, 1, 14, 0, 1, 0, 5, 0, 0, 0, 0, 15, 0, 1] = multiSearchAllPositions(materialize('ffaujlverosspbzaqefjzql'), ['lvero', 'erossp', 'f', 'ujlverosspbz', 'btfimgklzzxlbkbuqyrmnud', 'osspb', 'muqexvtjuaar', 'f', 'bzaq', 'lprihswhwkdhqciqhfaowarn', 'ffaujlve', 'uhbbjrqjb', 'jlver', 'umucyhbbu', 'pjthtzmgxhvpbdphesnnztuu', 'xfqhfdfsbbazactpastzvzqudgk', 'lvovjfoatc', 'z', 'givejzhoqsd', '']) from system.numbers limit 10; -select [5, 7, 0, 1, 6, 0, 0, 1, 1, 2, 0, 1, 4, 2, 0, 6, 0, 0] = multiSearchAllPositions(materialize('hzftozkvquknsahhxefzg'), ['ozkvquknsahhxefzg', 'kv', 'lkdhmafrec', '', 'zkvquknsahh', 'xmjuizyconipirigdmhqclox', 'dqqwolnkkwbyyjicsoshidbay', '', '', 
'zf', 'sonvmkapcjcakgpejvn', 'hzftoz', 't', 'zftozkvqukns', 'dyuqohvehxsvdzdlqzl', 'zkvquknsahhx', 'vueohmytvmglqwptfbhxffspf', 'ilkdurxg']) from system.numbers limit 10; -select [1, 7, 6, 4, 0, 1, 0, 0, 0, 9, 7, 1, 1, 0, 0, 0] = multiSearchAllPositions(materialize('aapdygjzrhskntrphianzjob'), ['', 'jz', 'gjzrh', 'dygjzrhskntrphia', 'qcnahphlxmdru', '', 'rnwvzdn', 'isbekwuivytqggsxniqojrvpwjdr', 'sstwvgyavbwxvjojrpg', 'rhskn', 'jzrhskntrp', '', '', 'toilvppgjizaxtidizgbgygubmob', 'vjwzwpvsklkxqgeqqmtssnhlmw', 'znvpjjlydvzhkt']) from system.numbers limit 10; -select [0, 1, 0, 1, 0, 0, 10, 0, 0, 0, 11, 0, 5, 0] = multiSearchAllPositions(materialize('blwpfdjjkxettfetdoxvxbyk'), ['wgylnwqcrojacofrcanjme', 'bl', 'qqcunzpvgi', '', 'ijemdmmdxkakrawwdqrjtrttig', 'qwkaifalc', 'xe', 'zqocnfuvzowuqkmwrfxw', 'xpaayeljvly', 'wvphqqhulpepjjjnxjfudfcomajc', 'ettfetdoxvx', 'ikablovwhnbohibbuhwjshhdemidgreqf', 'fdjjkxett', 'kiairehwbxveqkcfqhgopztgpatljgqp']) from system.numbers limit 10; -select [0, 0, 6, 1, 1, 0, 0, 1, 2, 0, 0, 0, 0, 0] = multiSearchAllPositions(materialize('vghzgedqpnqtvaoonwsz'), ['mfyndhucfpzjxzaezny', 'niejb', 'edqpnqt', '', 'v', 'kivdvealqadzdatziujdnvymmia', 'lvznmgwtlwevcxyfbkqc', 'vghzge', 'gh', 'tbzle', 'vjiqponbvgvguuhqdijbdeu', 'mshlyabasgukboknbqgmmmj', 'kjk', 'abkeftpnpvdkfyrxbrihyfxcfxablv']) from system.numbers limit 10; -select [0, 0, 0, 0, 9, 0, 7, 0, 9, 8, 0, 0] = multiSearchAllPositions(materialize('oaghnutqsqcnwvmzrnxgacsovxiko'), ['upien', 'moqszigvduvvwvmpemupvmmzctbrbtqggrk', 'igeiaccvxejtfvifrmimwpewllcggji', 'wnwjorpzgsqiociw', 'sq', 'rkysegpoej', 'tqsqcnwvmzrnxgacsovxiko', 'ioykypvfjufbicpyrpfuhugk', 's', 'qsqcnwvmzrnxgacsov', 'hhbeisvmpnkwmimgyfmybtljiu', 'kfozjowd']) from system.numbers limit 10; -select [0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 20, 5, 0, 0, 14, 1, 1, 0, 0] = multiSearchAllPositions(materialize('wbjfsevqspsvbwlzrkhcfuhxddbq'), ['ltgjbz', 's', 'qdfnmggupdfxjfnmvwyrqopxtxf', 'sazlkmaikcltojbzbmdfddu', 'yzanifqxufyfwrxzkhngoxkrrph', 'iwskc', 'xkykshryphyfnwcnmjfqjrixykmzmwm', 'wwpenztbhkdbwidfkypqlxivsjs', 'rlkevy', 'qigywtkezwd', '', 'c', 'sevqspsvbwlzrk', 'gwg', 'iduhrjsrtodxdkjykjoghtjtvplrscitxnvt', 'wlzrkhcfuhxddb', '', 'wbjfsev', 'zytusrcvqbazb', 'tec']) from system.numbers limit 10; -select [0, 1, 5, 0, 6, 8, 0, 3, 2, 0, 0, 9, 0, 4, 0, 0] = multiSearchAllPositions(materialize('mxiifpzlovgfozpgirtio'), ['srullnscuzenzhp', '', 'f', 'apetxezid', 'pzlovgf', 'lo', 'ecbmso', 'i', 'xiifpzlovgfozpgir', 'bnefwypvctubvslsesnctqspdyctq', 'tdncmgbikboss', 'o', 'zmgobcarxlxaho', 'ifpzlovgfozpg', 'dwmjqyylvsxzfr', 'pxhrecconce']) from system.numbers limit 10; -select [0, 0, 0, 2, 0, 0, 2, 0, 8, 0, 0, 0, 7, 0, 0, 0, 21, 3, 1, 8] = multiSearchAllPositions(materialize('jtvnrdpdevgnzexqdrrxqgiujexhm'), ['ibkvzoqmiyfgfztupug', 'iqzeixfykxcghlbgsicxiywlurrgjsywwk', 'vzdffjzlqxgzdcrkgoro', 'tvnrdpdevgnzexqdr', 'nqywueahcmoojtyjlhfpysk', 'iqalixciiidvrtmpzozfb', 'tv', 'rxkfeasoff', 'devgnzexqdrrxqgiuj', 'kvvuvyplboowjrestyvdfrxdjjujvkxy', 'shkhpneekuyyqtxfxutvz', 'yy', 'pdevgnz', 'nplpydxiwnbvlhoorcmqkycqisi', 'jlkxplbftfkxqgnqnaw', 'qdggpjenbrwbjtorbi', 'qgiuje', 'vnrdpd', '', 'dev']) from system.numbers limit 10; -select [14, 0, 0, 7, 20, 6, 0, 13, 0, 0, 20, 0, 20, 2, 0, 8, 2, 11, 2, 0] = multiSearchAllPositions(materialize('asjwxabjrwgcdviokfaoqvqiafz'), ['v', 'zqngytligwwpzxhatyayvdnbbj', 'gjicovfzgbyagiirn', 'bjrwgcdviok', 'oqvqiafz', 'abjrwgc', 'wulrpfzh', 'dviokfao', 'esnchjuiufjadqmdtrpcd', 'tkodqzsjchpaftk', 'oqvq', 'eyoshlrlvmnqjmtmloryvg', 'oqv', 'sjwx', 
'uokueelyytnoidplwmmox', 'jrwgcdviokfaoqvqiaf', 'sjwxabjrwgcdviokfaoqvqi', 'gcdviokfa', 'sjwxab', 'zneabsnfucjcwauxmudyxibnmxzfx']) from system.numbers limit 10; -select [0, 16, 8, 0, 10, 0, 0, 0, 0, 1, 0, 6, 0, 1, 0, 4, 0, 6, 0, 0] = multiSearchAllPositions(materialize('soxfqagiuhkaylzootfjy'), ['eveprzxphyenbrnnznpctvxn', 'oo', 'iuhka', 'ikutjhrnvzfb', 'h', 'duyvvjizristnkczgwj', 'ihfrp', 'afpyrlj', 'uonp', 'soxfqagiuhkaylzootfjy', 'qeckxkoxldpzzpmkbvcex', 'agiuhkaylzo', 'tckcumkbsgrgqjvtlijack', '', 'fnfweqlldcdnwfaohqohp', 'fqagiuhkayl', 'pqnvwprxwwrcjqvfsbfimwye', 'agi', 'ta', 'r']) from system.numbers limit 10; -select [3, 7, 1, 6, 0, 1, 0, 11, 0, 9, 17, 1, 18, 12] = multiSearchAllPositions(materialize('ladbcypcbcxahmujwezkvweud'), ['db', 'pcbcxahm', 'lad', 'ypcb', 'atevkzyyxhphtuekymhh', 'lad', 'mltjrwaibetrtwpfa', 'xahmujwezkvweud', 'dg', 'bcxahmujw', 'we', '', 'e', 'ahmujwezkvw']) from system.numbers limit 10; -select [6, 0, 11, 0, 7, 0, 0, 0, 6, 1, 0, 3, 0, 0, 0, 0] = multiSearchAllPositions(materialize('hhkscgmqzmuwltmrhtxnnzsxl'), ['gmqzmuwltmrh', 'qtescwjubeqhurqoqfjauwxdoc', 'uwltmrh', 'qlhyfuspwdtecdbrmrqcnxghhlnbmzs', 'm', 'kcsuocwokvohnqonnfzmeiqtomehksehwc', 'hoxocyilgrxxoek', 'nisnlmbdczjsiw', 'gmqz', '', 'cqzz', 'k', 'utxctwtzelxmtioyqshxedecih', 'ifsmsljxzkyuigdtunwk', 'ojxvxwdosaqjhrnjwisss', 'dz']) from system.numbers limit 10; -select [0, 0, 19, 7, 0, 0, 1, 0, 0, 12, 0, 0, 1, 0, 1, 1, 5, 0, 23, 8] = multiSearchAllPositions(materialize('raxgcqizulxfwivauupqnofbijxfr'), ['sxvhaxlrpviwuinrcebtfepxxkhxxgqu', 'cuodfevkpszuimhymxypktdvicmyxm', 'pqnof', 'i', 'ufpljiniflkctwkwcrsbdhvrvkizticpqkgvq', 'osojyhejhrlhjvqrtobwthjgw', '', 'anzlevtxre', 'ufnpkjvgidirrnpvbsndfnovebdily', 'fwivauupqnofbi', 'rywyadwcvk', 'ltnlhftdfefmkenadahcpxw', '', 'xryluzlhnsqk', 'r', '', 'cqizulxfwivauupqnofb', 'y', 'fb', 'zulxfwivauupqnofbijxf']) from system.numbers limit 10; -select [4, 0, 0, 0, 0, 24, 1, 2, 0, 2, 0, 0, 8, 0] = multiSearchAllPositions(materialize('cwcqyjjodlepauupgobsgrzdvii'), ['q', 'yjppewylsqbnjwnhokzqtauggsjhhhkkkqsy', 'uutltzhjtc', 'pkmuptmzzeqhichaikwbggronli', 'erzgcuxnec', 'dvii', '', 'w', 'fkmpha', 'wcqyjjodlepauupgobsgrz', 'cbnmwirigaf', 'fcumlot', 'odlepauu', 'lthautlklktfukpt']) from system.numbers limit 10; -select [1, 1, 1, 1, 22, 0, 0, 8, 18, 15] = multiSearchAllPositions(materialize('vpscxxibyhvtmrdzrocvdngpb'), ['', '', '', '', 'n', 'agrahemfuhmftacvpnaxkx', 'dqqwvfsrqv', 'byhvtmrdzrocv', 'ocvdn', 'dzrocvdngpb']) from system.numbers limit 10; -select [1, 1, 1, 15, 10, 0, 0, 0, 0, 2] = multiSearchAllPositions(materialize('nfoievsrpvheprosjdsoiz'), ['', 'nfo', '', 'osjd', 'vheprosjdsoiz', 'az', 'blhvdycvjnxaipvxybs', 'umgxmpkvuvuvdaczkz', 'gfspmnzidixcjgjw', 'f']) from system.numbers limit 10; -select [0, 0, 2, 2, 0, 0, 0, 11, 10, 4, 9, 1, 6, 4, 0, 0] = multiSearchAllPositions(materialize('bdmfwdisdlgbcidshnhautsye'), ['uxdceftnmnqpveljer', 'xdnh', 'dmf', 'dmfwdisdlgbc', 'cpwnaijpkpyjgaq', 'doquvlrzhusjbxyqcqxvwr', 'llppnnmtqggyfoxtawnngsiiunvjjxxsufh', 'gbcidshnhau', 'lgbcids', 'f', 'dlgbc', 'bdmfwdisdlgbcids', 'disdlgbcidshnhautsy', 'fwdisdlgbcidshn', 'zfpbfc', 'triqajlyfmxlredivqiambigmge']) from system.numbers limit 10; -select [0, 0, 16, 0, 0, 0, 14, 6, 2, 1, 0, 0, 1, 0, 10, 12, 0, 0, 0, 0] = multiSearchAllPositions(materialize('absimumlxdlxuzpyrunivcb'), ['jglfzroni', 'wzfmtbjlcdxlbpialqjafjwz', 'yrun', 'fgmljkkp', 'nniob', 'fdektoyhxrumiycvkwekphypgti', 'zp', 'um', 'bsimu', '', 'yslsnfisaebuujltpgcskhhqcucdhb', 'xlaphsqgqsfykhilddctrawerneqoigb', '', 
'pdvcfxdlurmegspidojt', 'd', 'xu', 'fdp', 'xjrqmybmccjbjtvyvdh', 'nvhdfatqi', 'neubuiykajzcrzdbvpwjhlpdmd']) from system.numbers limit 10; -select [0, 0, 0, 9, 0, 0, 1, 1, 1, 1] = multiSearchAllPositions(materialize('lvyenvktdnylszlypuwqecohy'), ['ihlsiynj', 'ctcnhbkumvbgfdclwjhsswpqyfrx', 'rpgqwkydwlfclcuupoynwrfffogxesvmbj', 'dnyl', 'coeqgdtbemkhgplprfxgwpl', 'dkbshktectbduxlcaptlzspq', 'l', 'lvyenvktdnylszlypuw', 'lvyenvk', '']) from system.numbers limit 10; -select [1, 0, 0, 0, 0, 1, 2, 22, 8, 17, 1, 13, 0, 0, 0, 0, 0, 5] = multiSearchAllPositions(materialize('wphcobonpgaqwgfenotzadgqezx'), ['', 'qeuycfhkfjwokxgrkaodqioaotkepzlhnrv', 'taehtytq', 'gejlcipocalc', 'poyvvvntrvqazixkwigtairjvxkgouiuva', '', 'phc', 'dg', 'npgaqwg', 'notzadgqe', '', 'wgfe', 'smipuxgvntys', 'qhrfdytbfeujzievelffzrv', 'cfmzw', 'hcywnyguzjredwjbqtwyuhtewuhzkc', 'tssfeinoykdauderpjyxtmb', 'obonpgaqwgfen']) from system.numbers limit 10; -select [0, 0, 0, 0, 0, 6, 6, 0, 0, 2, 0, 5, 2, 0, 6, 3] = multiSearchAllPositions(materialize('qvslufpsddtfudzrzlvrzdra'), ['jxsgyzgnjwyd', 'hqhxzhskwivpuqkjheywwfhthm', 'kbwlwadilqhgwlcpxkadkamsnzngms', 'fxunda', 'nlltydufobnfxjyhch', 'fpsddtfudzrzl', 'fp', 'ykhxjyqtvjbykskbejpnmbxpumknqucu', 'iyecekjcbkowdothxc', 'vslufpsddtfu', 'mjgtofkjeknlikrugkfhxlioicevil', 'uf', 'vslufpsdd', 'cxizdzygyu', 'fpsddtfudzrz', 'slufp']) from system.numbers limit 10; -select [12, 0, 0, 0, 0, 1, 6, 0, 1, 2] = multiSearchAllPositions(materialize('ydsbycnifbcforymknzfi'), ['forymkn', 'vgxtcdkfmjhc', 'ymugjvtmtzvghmifolzdihutqoisl', 'fzooddrlhi', 'bdefmxxdepcqi', '', 'cnif', 'ilzbhegpcnkdkooopaguljlie', '', 'dsbycnifbcforym']) from system.numbers limit 10; -select [0, 2, 4, 1, 1, 3, 0, 0, 0, 7] = multiSearchAllPositions(materialize('sksoirfwdhpdyxrkklhc'), ['vuixtegnp', 'ks', 'oirfwdhpd', 'sksoirf', 'skso', 'soi', 'eoxpa', 'vpfmzovgatllf', 'txsezmqvduxbmwu', 'fw']) from system.numbers limit 10; -select [2, 21, 8, 10, 6, 0, 1, 11, 0, 0, 21, 4, 29, 0] = multiSearchAllPositions(materialize('wlkublfclrvgixpbvgliylzbuuoyai'), ['l', 'ylzbuu', 'clr', 'rvgi', 'lf', 'bqtzaqjdfhvgddyaywaiybk', '', 'vgixpbv', 'ponnohwdvrq', 'dqioxovlbvobwkgeghlqxtwre', 'y', 'ublfclrvgix', 'a', 'eoxxbkaawwsdgzfweci']) from system.numbers limit 10; -select [0, 0, 2, 1, 1, 9, 1, 0, 0, 1] = multiSearchAllPositions(materialize('llpbsbgmfiadwvvsciak'), ['knyjtntotuldifbndcpxzsdwdduv', 'lfhofdxavpsiporpdyfziqzcni', 'lpbsbgmf', 'llpbsbgmfi', 'llpbsbgmfiadwvv', 'fia', '', 'uomksovcuhfmztuqwzwchmwvonk', 'ujbasmokvghmredszgwe', '']) from system.numbers limit 10; -select [3, 0, 0, 0, 6, 1, 7, 0, 2, 1, 1, 0, 7, 0, 1, 0, 1, 1, 5, 11] = multiSearchAllPositions(materialize('hnmrouevovxrzrejesigfukkmbiid'), ['m', 'apqlvipphjbui', 'wkepvtnpu', 'amjvdpudkdsddjgsmzhzovnwjrzjirdoxk', 'ue', '', 'evov', 'qoplzddxjejvbmthnplyha', 'nmrouevovxrz', '', 'hnmrouev', 'hnzevrvlmxnjmvhitgdhgd', 'evovxrzrejesig', 'yvlxrjaqdaizishkftgcuikt', '', 'buyrmbkvqukochjteumqchrhxgtmuorsdgzlfn', '', 'hnmrouevov', 'ouevovx', 'xr']) from system.numbers limit 10; -select [0, 13, 0, 0, 0, 0, 0, 14, 0, 0, 1, 12, 0, 1] = multiSearchAllPositions(materialize('uwfgpemgdjimotxuxrxxoynxoaw'), ['uzcevfdfy', 'otxuxrxxoynxoa', 'xeduvwhrogxccwhnzkiolksry', 'pxdszcyzxlrvkymhomz', 'vhsacxoaymycvcevuujpvozsqklahstmvgt', 'zydsajykft', 'vdvqynfhlhoilkhjjkcehnpmwgdtfkspk', 'txuxrx', 'slcaryelankprkeyzaucfhe', 'iocwevqwpkbrbqvddaob', 'uwfg', 'motxuxrxx', 'kpzbg', '']) from system.numbers limit 10; -select [1, 1, 0, 6, 6, 0, 0, 0, 8, 0, 8, 14, 1, 5, 6, 0, 0, 1] = 
multiSearchAllPositions(materialize('epudevopgooprmhqzjdvjvqm'), ['ep', 'epudevopg', 'tlyinfnhputxggivtyxgtupzs', 'vopgoop', 'v', 'hjfcoemfk', 'zjyhmybeuzxkuwaxtcut', 'txrxzndoxyzgnzepjzagc', 'pgooprmhqzj', 'wmtqcbsofbe', 'pgo', 'm', '', 'evopgooprmhqzjdv', 'vopgooprmhqzjdv', 'gmvqubpsnvrabixk', 'wjevqrrywloomnpsjbuybhkhzdeamj', '']) from system.numbers limit 10; -select [15, 4, 4, 0, 0, 1, 1, 0, 0, 0, 0, 20, 0, 10, 1, 1, 0, 2, 4, 3] = multiSearchAllPositions(materialize('uogsfbdefogwnekfoeobtkrgiceksz'), ['kfoeobtkrgice', 'sfbd', 'sfbdefogwn', 'zwtenhiqavmqoolkvjiqjfb', 'vnjkshyvpwhrauackplqllakcjyamvsuokrxbfv', 'uog', '', 'qtzuhdcdymytgtscvzlzswdlrqidreuuuqk', 'vlridmjlbxyiljpgxsctzygzyawqqysf', 'xsnkwyrmjaaaryvrdgtoshdxpvgsjjrov', 'fanchgljgwosfamgscuuriwospheze', 'btkrgicek', 'ohsclekvizgfoatxybxbjoxpsd', 'ogwnekfoeobtkr', '', '', 'vtzcobbhadfwubkcd', 'og', 's', 'gs']) from system.numbers limit 10; -select [0, 0, 5, 1, 0, 5, 1, 6, 0, 1, 9, 0, 1, 1] = multiSearchAllPositions(materialize('aoiqztelubikzmxchloa'), ['blc', 'p', 'ztelubikzmxchlo', 'aoiqztelubi', 'uckqledkyfboolq', 'ztelubikzmxch', 'a', 'telubikzm', 'powokpdraslpadpwvrqpbb', 'aoiqztelu', 'u', 'kishbitagsxnhyyswn', '', '']) from system.numbers limit 10; -select [5, 11, 0, 0, 0, 5, 0, 0, 0, 1, 16, 0, 0, 0, 0, 0] = multiSearchAllPositions(materialize('egxmimubhidowgnfziwgnlqiw'), ['imubhidowgnfzi', 'dowgnf', 'yqpcpfvnfpxetozraxbmzxxcvtzm', 'xkbaqvzlqjyjoiqourezbzwaqkfyekcfie', 'jjctusdmxr', 'imubhi', 'zawnslbfrtqohnztmnssxscymonlhkitq', 'oxcitennfpuoptwrlmc', 'ac', 'egxmi', 'fziwgn', 'rt', 'fuxfuctdmawmhxxxg', 'suulqkrsfgynruygjckrmizsksjcfwath', 'slgsq', 'zcbqjpehilwyztumebmdrsl']) from system.numbers limit 10; -select [20, 0, 9, 0, 0, 14, 0, 5, 8, 3, 0, 0, 0, 4] = multiSearchAllPositions(materialize('zczprzdcvcqzqdnhubyoblg'), ['obl', 'lzrjyezgqqoiydn', 'vc', 'nbvwfpmqlziedob', 'pnezljnnujjbyviqsdpaqkkrlogeht', 'dn', 'irvgeaq', 'rzdcvcqzqdnh', 'cvcqzqdnh', 'zprzdcv', 'wvvgoexuevmqjeqavsianoviubfixdpe', 'aeavhqipsvfkcynyrtlxwpegwqmnd', 'blckyiacwgfaoarfkptwcei', 'prz']) from system.numbers limit 10; -select [2, 1, 1, 9, 10, 5, 0, 0, 0, 2, 9, 7, 9, 0, 1, 9, 7, 0] = multiSearchAllPositions(materialize('mvovpvuhjwdzjwojcxxrbxy'), ['vo', '', '', 'jwdz', 'wdzj', 'pvu', 'ocxprubxhjnji', 'phzfbtacrg', 'jguuqhhxbrwbo', 'vovpvuhjwd', 'jw', 'u', 'jwdzjwojcx', 'nlwfvolaklizslylbvcgicbjw', '', 'jwd', 'uhjwdz', 'bbcsuvtru']) from system.numbers limit 10; -select [2, 0, 21, 0, 0, 0, 3, 0, 0, 0, 0, 10, 1, 18] = multiSearchAllPositions(materialize('nmdkwvafhcbipwoqtsrzitwxsnabwf'), ['m', 'ohlfouwyucostahqlwlbkjgmdhdyagnihtmlt', 'itwx', 'jjkyhungzqqyzxrq', 'abkqvxxpu', 'lvzgnaxzctaarxuqowcski', 'dkwvafhcb', 'xuxjexmeeqvyjmpznpdmcn', 'vklvpoaakfnhtkprnijihxdbbhbllnz', 'fpcdgmcrwmdbflnijjmljlhtkszkocnafzaubtxp', 'hmysdmmhnebmhpjrrqpjdqsgeuutsj', 'cbipwoqtsrzitwxsna', 'nm', 'srzitwx']) from system.numbers limit 10; -select [17, 5, 0, 13, 0, 0, 10, 1, 0, 19, 10, 8, 0, 4] = multiSearchAllPositions(materialize('gfvndbztroigxfujasvcdgfbh'), ['asvcdgf', 'dbztroigxfujas', 'pr', 'xfujas', 'nxwdmqsobxgm', 'wdvoepclqfhy', 'oigxfu', '', 'flgcghcfeiqvhvqiriciywbkhrxraxvneu', 'vcd', 'oigxfu', 'troigxfuj', 'gbnyvjhptuehkefhwjo', 'ndbz']) from system.numbers limit 10; -select [0, 14, 1, 0, 0, 1, 1, 11, 0, 8, 6, 0, 3, 19, 7, 0] = multiSearchAllPositions(materialize('nofwsbvvzgijgskbqjwyjmtfdogzzo'), ['kthjocfzvys', 'skbqjwyjmtfdo', 'nof', 'mfapvffuhueofutby', 'vqmkgjldhqohipgecie', 'nofwsbv', '', 'ijgs', 'telzjcbsloysamquwsoaso', 'vzgijgskbqjwyjmt', 
'bvvzgijgskbqjwyjmtfd', 'hdlvuoylcmoicsejofcgnvddx', 'fwsbvvzgijgskb', 'wyjm', 'vvzgijg', 'fwzysuvkjtdiufetvlfwf']) from system.numbers limit 10; -select [10, 2, 13, 0, 0, 0, 2, 0, 9, 2, 4, 1, 1, 0, 1, 6] = multiSearchAllPositions(materialize('litdbgdtgtbkyflsvpjbqwsg'), ['tbky', 'itdbgdtgtb', 'yflsvpjb', 'ikbylslpoqxeqoqurbdehlroympy', 'hxejlgsbthvjalqjybc', 'sontq', 'itdbgd', 'ozqwgcjqmqqlkiaqppitsvjztwkh', 'gtbkyf', 'itdbgdtgtbkyfls', 'dbg', 'litdb', '', 'qesbakrnkbtfvwu', 'litd', 'g']) from system.numbers limit 10; -select [0, 0, 1, 1, 5, 0, 8, 12, 0, 2, 0, 7, 0, 6] = multiSearchAllPositions(materialize('ijzojxumpvcxwgekqimrkomvuzl'), ['xirqhjqibnirldvbfsb', 'htckarpuctrasdxoosutyxqioizsnzi', '', '', 'jxu', 'dskssv', 'mpvcxwgekqi', 'xwgek', 'qsuexmzfcxlrhkvlzwceqxfkyzogpoku', 'jzojx', 'carjpqihtpjniqz', 'umpvcxwgekq', 'krpkzzrxxtvfhdopjpqcyxfnbas', 'xumpvcxwg']) from system.numbers limit 10; -select [0, 0, 0, 6, 0, 8, 0, 2, 0, 0, 0, 0, 14, 0, 0, 1, 1, 0, 0, 0] = multiSearchAllPositions(materialize('zpplelzzxsjwktedrrtqhfmoufv'), ['jzzlntsokwlm', 'cb', 'wuxotyiegupflu', 'lzzxsjwkte', 'owbxgndpcmfuizpcduvucnntgryn', 'zxsjwktedrrtqhf', 'kystlupelnmormqmqclgjakfwnyt', 'pple', 'lishqmxa', 'mulwlrbizkmtbved', 'uchtfzizjiooetgjfydhmzbtmqsyhayd', 'hrzgjifkinwyxnazokuhicvloaygeinpd', 'tedrrt', 'shntwxsuxux', 'evrjehtdzzoxkismtfnqp', 'z', '', 'nxtybut', 'vfdchgqclhxpqpmitppysbvxepzhxv', 'wxmvmvjlrrehwylgqhpehzotgrzkgi']) from system.numbers limit 10; - -select [15, 19, 0, 0, 15, 0, 0, 1, 2, 6] = multiSearchAllPositionsUTF8(materialize('зжерхмчсйирдчрришкраоддцфгх'), ['ришкра', 'раоддц', 'фттиалусгоцжлтщзвумрдчи', 'влййи', 'ришкра', 'цгфжуцгивй', 'ккгжхрггчфглх', 'з', 'жерхмчсйи', 'мчсйирдчрришкраоддц']) from system.numbers limit 10; -select [0, 0, 0, 1, 4, 0, 14, 0, 1, 8, 8, 9, 0, 0, 4, 0] = multiSearchAllPositionsUTF8(materialize('етвхйчдобкчукхпщлмжпфайтфдоизщ'), ['амфшужперосрфщфлижйййжжжй', 'ххкбщшзлмщггтшцпсдйкдшйвхскемц', 'ергйплгпнглккшкарещимгапхг', '', 'хйчдо', 'вввбжовшзйбгуоиждепйабаххеквщижтйиухос', 'хпщл', 'жфуомщуххнедзхищнгхрквлпмзауеегз', 'етвхй', 'о', 'о', 'бк', 'цфецккифж', 'аизлокл', 'х', 'слщгеивлевбчнчбтшгфмжрфка']) from system.numbers limit 10; -select [0, 0, 1, 2, 0, 0, 14, 0, 3, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('йбемооабурнирйофшдгпснж'), ['гпфцл', 'нчбперпмцкввдчсщвзйрдфнф', '', 'бем', 'ч', 'жгш', 'йофшдгпснж', 'шасгафчг', 'емооабур', 'пиохцжццгппщчопзйлмуотз', 'рпдомнфвопхкшешйишумбацтл', 'нисиийфррбдоц']) from system.numbers limit 10; -select [1, 18, 12, 0, 0, 1, 1, 3, 7, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('гсщнфийтфзжцйпфбйалущ'), ['', 'алущ', 'цйпфбйал', 'цвбфцйвсвлицсчнргпцнр', 'х', 'гс', '', 'щн', 'й', 'дгйрвцщтп', 'уитвквоффвцхфишрлерйцувф', 'кфтййлпнзжчижвглзкижн']) from system.numbers limit 10; -select [14, 0, 5, 5, 0, 6, 0, 16, 0, 0] = multiSearchAllPositionsUTF8(materialize('ефщнйнуйебнснлрцгкеитбг'), ['лрцгкеитб', 'епклжфцпнфопе', 'йнуйебн', 'й', 'тлт', 'нуйебнснлрцгкеит', 'глечршгвотумкимтлм', 'цгк', 'щгйчой', 'звкцкчк']) from system.numbers limit 10; -select [0, 1, 18, 6, 0, 3, 0, 0, 25, 0, 0, 1, 16, 5, 1, 7, 0, 0] = multiSearchAllPositionsUTF8(materialize('пумгмцшмжштсшлачсжарерфиозиг'), ['чсуубфийемквмоотванухмбрфхжоест', '', 'жар', 'цшмжш', 'жртещтинтвпочнкдткцза', 'м', 'адзгтбаскщгдшжл', 'штфжшллезпджигщфлезфгзчайанхктицштйй', 'о', 'етадаарйсцейдошшцечхзлшлрртсрггцртспд', 'зтвшалрпфлщбцд', 'пу', 'ч', 'мцшмжштсшлачсж', '', 'шмжшт', 'ещтжшйтчзчаноемрбц', 'тевбусешйрйчшзо']) from system.numbers limit 10; -select [7, 10, 0, 
0, 0, 0, 1, 12, 9, 2, 0, 0, 0, 4, 1, 1, 0, 6] = multiSearchAllPositionsUTF8(materialize('дупгвндвйжмаузнллнзл'), ['двйжмаузн', 'жмаузнлл', 'емйжркоблновцгпезрдавкбелцщста', 'щзкгм', 'лебрпцрсутшриащгайвц', 'лзнмл', 'д', 'ауз', 'йжмау', 'упгвндвйж', 'жщсббфвихг', 'всигсеигцбгаелтчкирлнзшзцжещнс', 'рмшиеиесрлщципщхкхтоцщчйоо', 'гвн', '', '', 'йадеоцлпшпвщзещзкхйрейопмажбб', 'ндв']) from system.numbers limit 10; -select [0, 0, 0, 8, 3, 10, 22, 0, 13, 11, 0, 1, 18, 0, 1, 0] = multiSearchAllPositionsUTF8(materialize('жшзфппавввслфцлнщшопкдшку'), ['саоткнхфодзаа', 'кйхванкзаисйбврщве', 'бчоуучватхфукчф', 'вввслфц', 'з', 'вслфцлнщшопк', 'дшк', 'из', 'фцл', 'с', 'зртмцтпощпщхк', 'жшзфппавввслфц', 'шопк', 'збтхрсдтатхпрзлхдооощифачхчфн', '', 'жщшийугз']) from system.numbers limit 10; -select [2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 21, 0, 6, 0] = multiSearchAllPositionsUTF8(materialize('пчботухвгдчекмжндбоожш'), ['чботухвгдчекмжндб', 'от', 'гвсжжйлбтщчучнхсмдйни', 'жцжзмшлибшефуоуомпацбщщу', 'онхфлуцйлхтбмц', 'йтепжу', 'хтдрпвкщрли', 'аддайф', 'нхегщккбфедндоацкиз', 'йгкцзртфжгв', 'буелрщмхйохгибжндфшщвшрлдччрмфмс', 'цщцтзфнщ', 'уч', 'пчб', 'жш', 'пнфббтшйгхйрочнлксщпгвжтч', 'ухвг', 'лсцппузазщрйхймщбзоршощбзленхп']) from system.numbers limit 10; -select [0, 0, 4, 11, 0, 0, 0, 0, 0, 11, 2, 4, 6, 0, 0, 1, 2, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('тжрмчпваухрхуфбгнифгбопфт'), ['дпмгкекщлнемссаицщпащтиуцхкфчихтц', 'акйиуоатунтчф', 'мчпва', 'рхуфбгнифгб', 'кнаишж', 'пчвотенеафкухжцешбцхг', 'опеа', 'ушчадфтчхечеуркбтел', 'ашшптаударчжчмвалтдхкимищпф', 'рхуфбгниф', 'ж', 'мчпваухрхуфбгнифг', 'пваухрху', 'зргачбтцдахвймсбсврбндзтнущхвп', 'асбфцавбгуолг', 'тж', 'жрмчпваухрх', 'мрвзцгоб', 'чрцснчсдхтзжвнздзфцвхеилишдбж', 'кчт']) from system.numbers limit 10; -select [0, 2, 4, 0, 6, 0, 0, 0, 0, 19, 7, 1, 0, 1, 0, 0, 2, 10, 0, 1] = multiSearchAllPositionsUTF8(materialize('опрурпгабеарушиойцрхвбнсщ'), ['йошуоесдщеж', 'пр', 'урпгабеарушиой', 'хщиаршблашфажщметчзи', 'пгабеарушиойцрхвб', 'щцбдвц', 'еечрззвкожзсдурйщувмцйшихдц', 'офхачгсзашфзозрлба', 'айдфжджшжлрргмабапткбцпиизигдтс', 'рх', 'габ', '', 'цнкдбфчщшмчулврбцчакщвзхлазфа', '', 'екбтфпфилсаванхфкмчнпумехиищди', 'епвщхаклшомвцжбф', 'прурпгабе', 'еарушиойцрхв', 'црвтгрзтитц', 'опрурпг']) from system.numbers limit 10; -select [0, 10, 1, 0, 0, 0, 0, 0, 10, 0, 15, 2] = multiSearchAllPositionsUTF8(materialize('угпщлзчжшбзвууцшатпщцр'), ['цоуарцжсз', 'бз', '', 'пщфтзрч', 'лфуипмсдмнхнгйнтк', 'айжунцйбйцасчфдхй', 'щдфщлцптплсачв', 'грв', 'бзвууц', 'бумййшдшфашцгзфвчвзвтсувнжс', 'цшатпщ', 'гпщлзчжшб']) from system.numbers limit 10; -select [0, 15, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 0, 0] = multiSearchAllPositionsUTF8(materialize('цнлеодлмдцдйснитвдчтхжизв'), ['ивкчсзшугоцжчохщцабл', 'итвдчт', 'кнх', '', 'одлм', 'ктшфзбщзцуймагсоукщщудвуфо', 'ххеаефудгчхр', 'одлмдцдйснитвдчт', 'умцлпкв', 'зщсокйтцзачщафвбповжгнлавсгйг', 'бкибм', '', 'охсоихнцчцшевчеележтука', 'фаийхгжнсгищгщц']) from system.numbers limit 10; -select [0, 0, 0, 2, 0, 0, 0, 0, 3, 2, 3, 6, 0, 0, 0, 12, 4, 1] = multiSearchAllPositionsUTF8(materialize('бгдбувдужщвоошлтчрбй'), ['щвбаиф', 'итчднесжкчжвпжйвл', 'мм', 'г', 'хктзгтзазфгщшфгбеулцмдмдбдпчзх', 'сфуак', 'злйфцощегзекщб', 'фшлдтолрщфзжчмих', 'дбувдужщ', 'гдб', 'дбувдужщ', 'в', 'лчищкечнжщисцичбнзшмулпмлп', 'чжцсгмгфвлиецахзнрбмщин', 'обпжвй', 'о', 'бувдужщвоош', '']) from system.numbers limit 10; -select [0, 2, 5, 3, 2, 0, 1, 0, 0, 4, 2, 0, 0, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('шсушлорзфжзудбсейенм'), 
['чнзпбновтршеумбвщчлх', 'су', 'лорзфж', 'ушлорзфжзудб', 'сушлорзфжзудбсейенм', 'ткдрхфнб', '', 'пщд', 'чбдцмщ', 'шлорзфж', 'су', 'сккигркедчожжемгнайвйчтдмхлтти', 'мц', 'пхнхрхйцйсйбхчлктз', 'иафжстлйфцр', 'алщщлангнбнйхлшлфшйонщек']) from system.numbers limit 10; -select [12, 1, 0, 5, 0, 10, 1, 0, 7, 4, 0, 1, 12, 1, 1, 1, 0, 1, 15, 0] = multiSearchAllPositionsUTF8(materialize('ощзллчубоггцвжриуардрулащйпу'), ['цвжр', '', 'нмзкаиудзтиффззшзканжвулт', 'лчубоггцвжриуардрулащйпу', 'чтцлзшуижолибаоххвшихбфжйхетивп', 'ггцвжри', '', 'йдгнвс', 'у', 'л', 'зпщнжуойдлдвхокцжнзйсйзе', '', 'цв', '', '', '', 'ехлцзгвф', '', 'риу', 'уйжгтжноомонгщ']) from system.numbers limit 10; -select [0, 12, 13, 20, 0, 1, 0, 0, 3, 4] = multiSearchAllPositionsUTF8(materialize('цбкифйтшузажопнжщарбштвдерзтдш'), ['щлмлижтншчсмксгтнсврро', 'жопнжщарбштвд', 'опнжщарб', 'бштвдерзтд', 'пуфслейщбкжмпнш', 'ц', 'маве', 'кмйхойрдлшцхишдтищвйбцкщуигваещгтнхйц', 'кифй', 'и']) from system.numbers limit 10; -select [0, 6, 0, 0, 0, 8, 0, 3, 6, 0] = multiSearchAllPositionsUTF8(materialize('еачачгбмомоххкгнвштггпчудл'), ['ндзчфчвжтцщпхщуккбеф', 'г', 'рткнфвчтфннхлжфцкгштймгмейжй', 'йчннбщфкщф', 'лсртщиндшшкичзррущвдйвнаркмешерв', 'момоххк', 'рфафчмсизлрхзуа', 'ч', 'гбмомоххкгнвштг', 'валжпошзбгзлвевчнтз']) from system.numbers limit 10; -select [0, 0, 10, 0, 8, 13, 0, 0, 19, 15, 3, 1] = multiSearchAllPositionsUTF8(materialize('зокимчгхухшкшмтшцчффвззкалпва'), ['цалфжажщщширнрвтпвмщжннрагвойм', 'оукзрдцсадешжмз', 'хшкшмтшцч', 'ауилтсаомуркпаркбцркугм', 'хухшкшмтшцчффв', 'шмтшцч', 'зщгшпцхзгцншднпеусмтжбцшч', 'щлраащсйлщрд', 'ффвзз', 'тшцчффвззкалпв', 'кимчгхухшкш', '']) from system.numbers limit 10; -select [0, 0, 1, 0, 6, 0, 6, 0, 5, 0, 13, 0, 0, 6] = multiSearchAllPositionsUTF8(materialize('йдйндиибщекгтчбфйдредпхв'), ['тдршвтцихцичощнцницшдхйбогбчубие', 'акппакуцйсхцдххнотлгирввоу', '', 'улщвзхохблтксчтб', 'и', 'ибейзчшклепзриж', 'иибщекгт', 'шидббеухчпшусцнрз', 'диибщекгтчбфйд', 'дейуонечзйзлдкшщрцйбйклччсцуй', 'тч', 'лшицлшме', 'чйнжчоейасмрщегтхвйвеевбма', 'ии']) from system.numbers limit 10; -select [15, 3, 3, 2, 0, 11, 0, 0, 0, 2, 0, 4, 0, 1, 1, 3, 0, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('нхгбфчшджсвхлкхфвтдтлж'), ['хфвтдтлж', 'гбфчшд', 'гбфчш', 'х', 'ачдгбккжра', 'вхлк', 'мщчвещлвшдщпдиимлшрвнщнфсзгщм', 'жчоббгшзщлгеепщжкчецумегпйчт', 'жжд', 'хг', 'мтсааролшгмоуйфйгщгтрв', 'бфчшд', 'чейрбтофпшишгуасоодлакчдф', 'н', 'нхгбфч', 'гбф', 'гдежсх', 'йифжацзгжбклх', 'ещпзущпбаолплвевфиаибшйубйцсзгт', 'жезгчжатзтучжб']) from system.numbers limit 10; -select [0, 10, 1, 0, 0, 0, 4, 0, 13, 1, 12, 1, 0, 6] = multiSearchAllPositionsUTF8(materialize('акбдестрдшерунпвойзв'), ['нркчх', 'шерунп', '', 'зжвахслфббтоиоцрзаззасгнфчх', 'шлжмдг', 'тлйайвцжчсфтцйрчосмижт', 'дестрдшерунп', 'мвамйшцбдщпчлрщд', 'у', 'акбдестрд', 'рунпвойз', '', 'айздцоилсйшцфнчтхбн', 'с']) from system.numbers limit 10; -select [1, 0, 0, 3, 2, 1, 0, 0, 1, 10, 7, 0, 5, 0, 8, 4, 1, 0, 8, 1] = multiSearchAllPositionsUTF8(materialize('кйхпукаеуддтйччхлнпсуклрф'), ['кйхпукаеуддтйччхл', 'йатлрйкстлхфхз', 'фгихслшкж', 'хпу', 'йхпукаеу', '', 'сруакбфоа', 'оажуз', 'кйхпукаеуддтйччх', 'ддтйччхлн', 'аеуддтйччхл', 'тмажиойщтпуцглхфишеиф', 'укаеуддтйччхлнпс', 'ретифе', 'еуддтйччхлнпсуклр', 'пукаеуд', 'кйхпу', 'таппфггвджлцпжшпишбпциуохсцх', 'еуд', '']) from system.numbers limit 10; -select [2, 3, 3, 16, 5, 13, 0, 0, 0, 18, 0, 6, 0, 16, 0, 10, 3, 0] = multiSearchAllPositionsUTF8(materialize('плврйщовкзнбзлбжнсатрцщщучтйач'), ['лврйщовкзнбзлбж', 'врйщовкзнбзлбжнса', 
'врйщовкзнбз', 'жнсатрцщщучтйач', 'йщовкзнбзлбжнсатрцщщуч', 'злбжнсатрцщ', 'ввтбрдт', 'нжйапойг', 'ннцппгперхйвдхоеожупйебочуежбвб', 'сатрцщщу', 'деваийтна', 'щ', 'вкжйгкужжгтевлцм', 'жнс', 'датг', 'знбзлбжнсатрцщщучтйа', 'врйщовк', 'оашмкгчдзщефм']) from system.numbers limit 10; -select [3, 1, 19, 1, 0, 0, 0, 0, 11, 3, 0, 0] = multiSearchAllPositionsUTF8(materialize('фчдеахвщжхутхрккхасвсхепщ'), ['деах', '', 'свсхепщ', '', 'анчнсржйоарвтщмрж', 'нечбтшщвркгд', 'вштчцгшж', 'з', 'у', 'деахвщ', 'ххкцжрвзкжзжчугнфцшуиаклтмц', 'фцкжшо']) from system.numbers limit 10; -select [16, 0, 0, 1, 8, 14, 0, 12, 12, 5, 0, 0, 16, 0, 11, 0] = multiSearchAllPositionsUTF8(materialize('щмнжчввбжцчммчшсрхйшбктш'), ['срхйшбк', 'йлзцнржчууочвселцхоучмщфчмнфос', 'еижлафатшхщгшейххжтубзвшпгзмзцод', '', 'бжцчммчшсрхй', 'чшсрхй', 'влемчммйтителщвзган', 'ммч', 'ммчшсрх', 'чввбж', 'нобзжучшошмбщешлхжфгдхлпнгпопип', 'цгт', 'срхйш', 'лкклмйжтеа', 'чммчшсрхйшбктш', 'йежффзнфтнжхфедгбоахпг']) from system.numbers limit 10; -select [1, 12, 9, 5, 1, 0, 6, 3, 0, 1] = multiSearchAllPositionsUTF8(materialize('кжнщсашдзитдмщцхуоебтфжл'), ['', 'дмщцхуоебт', 'зитдмщцхуоебт', 'сашдзитдмщцхуое', 'кжнщ', 'тхкйтшебчигбтмглшеужззоббдилмдм', 'ашдзитдмщцхуоебтф', 'нщсашдз', 'аузщшр', 'кжнщсашдз']) from system.numbers limit 10; -select [2, 0, 0, 0, 1, 0, 2, 0, 0, 17, 0, 8, 7, 14, 0, 0, 0, 7, 9, 23] = multiSearchAllPositionsUTF8(materialize('закуфгхчтшивзчжаппбжнтслщввущ'), ['а', 'днойвхфрммтж', 'внтлжрхзрпчбтуркшдатннглечг', 'ахиеушжтфкгцщтзхмжнрхдшт', '', 'тцчгрззржмдшйщфдцрбшжеичч', 'а', 'ктиечцпршнфнбчуолипацчдсосцнлфаццм', 'аусрлхдцегферуо', 'ппбжнт', 'жкццуосгвп', 'чтшивзчжаппб', 'хчтшивзчжаппб', 'чжаппбжнтслщ', 'ччрлфдмлу', 'щзршффбфчзо', 'ущуймшддннрхзийлваежщухч', 'хчтши', 'тшивзчжаппбжнтсл', 'слщв']) from system.numbers limit 10; -select [1, 1, 9, 2, 0, 3, 7, 0, 0, 19, 2, 2, 0, 8] = multiSearchAllPositionsUTF8(materialize('мвкзккупнокченйнзкшбдрай'), ['м', '', 'н', 'вкз', 'гдпертшйбртотунур', 'к', 'упнокченйнзкшбдр', 'нфшрг', 'нмждрйббдцлйемжпулдвкещхтжч', 'ш', 'вкзккупнокченйнзкшбдр', 'вкзккупнокченйнзкшбдрай', 'адииксвеавогтйторчтцвемвойшпгбнз', 'пнокченй']) from system.numbers limit 10; -select [15, 0, 0, 1, 12, 1, 0, 0, 1, 11, 0, 4, 0, 2] = multiSearchAllPositionsUTF8(materialize('отарлшпсабждфалпшножид'), ['лпшно', 'вт', 'лпжшосндутхорлиифжаакш', 'отарлшпсабждфалпшнож', 'дфал', '', 'бкцжучншжбгзжхщпзхирртнбийбтж', 'уцвцкшдзревпршурбсвйнемоетчс', '', 'ждфал', 'тлскхрнпмойчбцпфущфгф', 'рлшпсабж', 'нхнмк', 'тарлшпса']) from system.numbers limit 10; -select [0, 2, 0, 20, 0, 17, 18, 0, 1, 1, 21, 1, 0, 1, 6, 26] = multiSearchAllPositionsUTF8(materialize('ачйвцштвобижнзжнчбппйеабтцнйн'), ['сзхшзпетншйисщкшрвйшжуогцвбл', 'чйвцштво', 'евз', 'пй', 'хуждапрахитйажрищуллйзвчт', 'чбппйе', 'бппйеабтцнйн', 'схш', 'а', 'ачйвцштвобижнзжнчбпп', 'йеабтцнй', '', 'ег', '', 'штвобижнзжнчбпп', 'цн']) from system.numbers limit 10; -select [1, 0, 0, 3, 4, 12, 0, 9, 0, 12, 0, 0, 8, 0, 10, 3, 4, 1, 1, 9] = multiSearchAllPositionsUTF8(materialize('жмхоужежйуфцзеусеоднчкечфмемба'), ['', 'идосйксзнщйервосогф', 'тхмсйлвкул', 'хоужежйуф', 'оужежйуфцзеусеоднчкечфм', 'цзеусеоднчкеч', 'бецвдиубххвхйкажуурщщшщфбзххт', 'йуфцзеусеодн', 'мглкфтуеайсржисстнпкгебфцпа', 'цзеусео', 'уехцфучецчгшйиржтсмгхакчшввохочжпухс', 'дчвмсбткзталшбу', 'жйуфцзеусеоднчке', 'ччшщтдбпвчд', 'уфцзеусеоднчкечфмем', 'хоужежйуфцзеусеоднчкечф', 'оуже', '', 'жмхоужежйуфцзеу', 'й']) from system.numbers limit 10; -select [0, 0, 0, 3, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 0, 0, 0, 6] = 
multiSearchAllPositionsUTF8(materialize('лшпцхкмтресзпзйвцфрз'), ['енрнцепацлщлблкццжсч', 'ецжужлуфаееоггрчохпчн', 'зхзнгасхебнаейбддсфб', 'пцхкмтресзпзйв', 'фчетгеодщтавиииухцундпнхлчте', 'шшгсдошкфлгдвкурбуохзчзучбжйк', 'мцщщцп', 'рх', '', 'зйошвщцгхбж', '', 'ввлпнамуцвлпзеух', '', 'шпцхкмтре', 'маабтруздрфйпзшлсжшгож', 'фдчптишмштссщшдшгх', 'оллохфпкаем', 'кмтресзпз']) from system.numbers limit 10; -select [2, 5, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 1, 12, 0, 0, 0, 4, 8] = multiSearchAllPositionsUTF8(materialize('есипзсвшемлхчзмйрсфз'), ['с', 'з', 'пщчсмаиахппферзжбпвиибаачй', 'гтщкзоиежав', 'свшемлхчзм', 'шийанбке', 'зхе', 'авркудфаусзквкфффйцпзжщввенттб', 'ножцваушапиж', 'иизкежлщиафицкчщмалнпсащсднкс', 'вчмв', 'кщеурмуужжлшррце', '', '', 'х', 'алзебзпчеложихашжвхмйхрицн', 'тпзмумчшдпицпдшиаог', 'сулксфчоштаййзбзшкджббщшсей', 'пзсвшемлхчзм', 'ш']) from system.numbers limit 10; -select [0, 1, 2, 4, 0, 0, 14, 1, 13, 4, 0, 0, 1, 1] = multiSearchAllPositionsUTF8(materialize('сзиимонзффичвфжоеулсадону'), ['зфтшебтршхддмеесчд', '', 'зиимонзф', 'имон', 'езбдйшжичценлгршщшаумайаицй', 'птпщемтбмднацлг', 'фжоеулса', '', 'вфжоеулсадону', 'имонзфф', 'йщвдфдиркважгйджгжашарчучйххйднпт', 'дй', '', '']) from system.numbers limit 10; -select [12, 0, 24, 0, 9, 0, 1, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('ижсщщрзжфнгццпзкфбвезгбохлж'), ['ццпзкфбвез', 'ацррвхоптаоснулнжкщжел', 'охлж', 'тнсхбпшщнб', 'фнг', 'урйвг', '', 'цохс', 'щбйрйкжчмйзачуефч', 'афа']) from system.numbers limit 10; -select [9, 0, 0, 0, 1, 0, 7, 7, 0, 0, 1, 0, 7, 0, 0, 8, 0, 3, 0, 0] = multiSearchAllPositionsUTF8(materialize('рерфвирачйнашхрмцебфдйааеммд'), ['чйнашхрмцебфдйааеммд', 'сжщзснвкущлжплцзлизаомдизцнжлмййбохрцч', 'еппбжджмримфчйеаолидпцруоовх', 'едтжкоийггснехшсчйлвфбкцжжрчтш', '', 'пжахфднхсотй', 'ра', 'рач', 'вчримуцнхбкуйжрвфиугзфсзг', 'кщфехрххциаашщсифвашгйцвхевцщнйахтбпжщ', '', 'ртщиобчжстовйчфабалзц', 'рачйнашхрмцебфдйаае', 'ощгжосччфкуг', 'гехвжнщжссидмрфчйтнепдсртбажм', 'а', 'ицжлсрсиатевбвнжрдмзцувввтзцфтвгвш', 'рф', 'прсмлча', 'ндлхшцааурмзфгверуфниац']) from system.numbers limit 10; -select [2, 14, 10, 0, 6, 15, 1, 0, 0, 4, 5, 17, 0, 0, 3, 0, 3, 0, 9, 0] = multiSearchAllPositionsUTF8(materialize('влфощсшкщумчллфшшвбшинфппкчуи'), ['лфощ', 'лфшшвбшинфпп', 'умчллфшшвбшинф', 'слмтнг', 'сшкщумчллфшшвбшинф', 'фшшвб', '', 'рчфбчййсффнодцтнтнбцмолф', 'щфнщокхжккшкудлцжрлжкнп', 'ощ', 'щсшкщумчлл', 'швбшинфппкч', 'септзкщотишсехийлоцчапщжшжсфмщхсацг', 'нт', 'фощсшкщумчллфшшвбшинфп', 'нщпдш', 'фощс', 'мивсмча', 'щумч', 'щчйнткжпмгавфтйтибпхх']) from system.numbers limit 10; -select [0, 10, 0, 0, 0, 0, 0, 3, 0, 0, 0, 2, 0, 11, 0, 0] = multiSearchAllPositionsUTF8(materialize('еаиалмзхцгфунфеагшчцд'), ['йнш', 'гфун', 'жлйудмхнсвфхсуедспщбтутс', 'елмуийгдйучшфлтхцппамфклйг', 'евйдецц', 'пчтфцоучфбсйщпвдацмчриуцжлтжк', 'нстмпумчспцвцмахб', 'иалмз', 'зифчп', 'чогфщимоопт', 'фдйблзеп', 'аиа', 'щугмзужзлйдктш', 'фунфеагшч', 'нйхшмсгцфжчхжвхгдхцуппдц', 'асмвмтнрейшгардллмсрзгзфйи']) from system.numbers limit 10; -select [23, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, 5, 7, 1, 9, 4] = multiSearchAllPositionsUTF8(materialize('зузйфзлхходфрхгтбпржшрктпйхеоп'), ['ктпйхео', 'лжитуддикчсмкглдфнзцроцбзтсугпвмхзллжж', 'х', 'меуфтено', 'фтдшбшрпоцедктсийка', 'кхтоомтбчвеонксабшйптаихжбтирпзшймчемжим', 'чиаущлрдкухцрдумсвивпафгмр', 'фрнпродв', 'тдгтишхйсашвмдгкчбмшн', 'йфзлхходфрхгтбпржшр', 'бежшлрйврзмумеуооплкицхлйажвцчнчсеакм', 'ф', 'лхходфрхгтб', '', 'ходфрхгтбпржшр', 'й']) from system.numbers limit 10; -select [0, 0, 0, 1, 0, 1, 22, 1, 0, 0, 
0, 0, 18, 1, 0, 0, 0, 1] = multiSearchAllPositionsUTF8(materialize('чфгвчхчпщазтгмбнплдгщикойчднж'), ['мштцгтмблаезочкхзвхгрбпкбмзмтбе', 'канбжгсшхшз', 'кзинвщйччажацзйнсанкнщ', 'чфгвчхчпщазтгмбнп', 'етйцгтбнщзнржнйхж', '', 'ик', '', 'еизщвпрохдгхир', 'псумйгшфбвгщдмхжтц', 'слмжопинйхнштх', 'йшралцицммбщлквмгхцввизопнт', 'л', 'чфгвчхчпщазтгмбнплдгщ', 'пбзмхжнпгикиищжтшботкцеолчцгхпбвхи', 'хзкцгрмшгхпхуоцгоудойнжлсоййосссмрткцес', 'ажуофйпщратдйцбржжлжнжащцикжиа', '']) from system.numbers limit 10; -select [6, 0, 2, 5, 2, 9, 10, 0, 0, 4, 0, 6, 3, 2] = multiSearchAllPositionsUTF8(materialize('ишогпсисжашфшлйичлба'), ['сисжашфшлй', 'пднещбгзпмшепкфосовбеге', 'шогп', 'пс', 'шогпси', 'жаш', 'аш', 'деисмжатуклдшфлщчубфс', 'грмквкщзур', 'гпсис', 'кйпкбцмисчхдмшбу', 'сисжашф', 'о', 'шо']) from system.numbers limit 10; -select [8, 15, 13, 0, 1, 2, 5, 2, 9, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('нсчщчвсанпрлисблснокзагансхм'), ['анпрлисблснокзагансхм', 'блснокз', 'исб', 'дрмгвснпл', '', 'счщчвса', 'чвсанпрлисблснокзагансх', 'счщчвсанпрлис', 'нпрли', 'пциишуецнймуодасмжсойглретиефо', 'фхимщвкехшлг', 'слщмаимшжчфхзпрцмхшуниврлуйлжмфжц']) from system.numbers limit 10; -select [0, 5, 0, 0, 14, 0, 12, 0, 2, 3, 0, 3, 21, 5] = multiSearchAllPositionsUTF8(materialize('хажуижанндвблищдтлорпзчфзк'), ['щуфхл', 'и', 'фцежлакчннуувпаму', 'щесщжрчиктфсмтжнхекзфс', 'ищдтлорпзчф', 'дееичч', 'блищ', 'гиефгйзбдвишхбкбнфпкддмбтзиутч', 'ажуижа', 'жуижанндвблищдтлорпзчфзк', 'чщщдзетвщтччмудвзчгг', 'ж', 'пзчфз', 'ижанн']) from system.numbers limit 10; -select [0, 0, 0, 9, 15, 0, 0, 0, 1, 3, 0, 0, 1, 0, 10, 0, 4, 0, 0, 7] = multiSearchAllPositionsUTF8(materialize('россроапцмцагвиигнозхзчотус'), ['ошажбчвхсншсвйршсашкм', 'пфдчпдчдмауцгкйдажрйефапввшжлшгд', 'иеаочутввжмемчушлуч', 'цмцагвиигно', 'ииг', 'ммпжщожфйкакбущчирзоммагеиучнщмтвгихк', 'укррхбпезбжууеипрзжсло', 'ншопзжфзббилйбувгпшшиохврнфчч', '', 'ссроап', 'лийщфшдн', 'йчкбцциснгначдцйчпа', 'россроапцмцагвииг', 'кштндцтсшорввжсфщчмщчжфжквзралнивчзт', 'мца', 'нбтзетфтздцао', 'сроа', 'мщсфие', 'дткодбошенищйтрподублжскенлдик', 'апцмцагвиигноз']) from system.numbers limit 10; -select [16, 0, 0, 2, 1, 1, 0, 1, 9, 0, 0, 3] = multiSearchAllPositionsUTF8(materialize('тйсдйилфзчфплсджбарйиолцус'), ['жбарйиолцу', 'цназщжждефлбрджктеглщпунйжддгпммк', 'хгжоашцшсзкеазуцесудифчнощр', 'йс', '', 'тйсдйилфзчфп', 'ивфсплшвслфмлтххжчсстзл', '', 'зчфплсдж', 'йртопзлодбехрфижчдцйс', 'цлащцкенмшеоерееиуноп', 'с']) from system.numbers limit 10; -select [3, 2, 1, 1, 0, 0, 0, 14, 6, 0] = multiSearchAllPositionsUTF8(materialize('нсцннйрмщфбшщховвццбдеишиохл'), ['цннйр', 'сцннйрм', 'н', 'нс', 'двтфхйзгеиеиауимбчхмщрцутф', 'пчтмшйцзсфщзшгнхщсутфжтлпаввфгххв', 'лшмусе', 'ховвццбд', 'йрмщфбшщховвццбдеи', 'гндруущрфзсфжикшзцжбил']) from system.numbers limit 10; -select [0, 18, 0, 1, 2, 0, 0, 0, 1, 7, 10, 0, 1, 0, 2, 0, 0, 18] = multiSearchAllPositionsUTF8(materialize('щидмфрсготсгхбомлмущлаф'), ['тлтфхпмфдлуоцгчскусфжчкфцхдухм', 'мущла', 'емлвзузхгндгафги', '', 'идмфрсготсгхбомлмущла', 'зфаргзлщолисцфдщсеайапибд', 'кдхоорхзжтсйимкггйлжни', 'лчгупсзждплаблаеклсвчвгвдмхклщк', 'щидмфр', 'сготсгхбомлму', 'тсгхбомлмущла', 'хсзафйлкчлди', '', 'й', 'ид', 'щлйпмздйхфзайсщсасейлфцгхфк', 'шдщчбшжбмййзеормнрноейй', 'мущ']) from system.numbers limit 10; -select [0, 13, 0, 0, 1, 0, 7, 7, 8, 0, 2, 0, 3, 0, 0, 13] = multiSearchAllPositionsUTF8(materialize('трцмлщввадлжввзчфипп'), ['хшзйийфжмдпуигсбтглй', 'ввзчфи', 'нсцчцгзегммтсшбатщзузпкшрг', 'гувйддежзфилйтш', '', 'хгзечиа', 'ввадлжввз', 
'ввадлжввзчфи', 'ва', 'щтшсамклегш', 'рцмлщ', 'учзмиерфбтцучйдглбщсз', 'цмлщввадлжввзчфи', 'орйжччцнаррбоабцжзйлл', 'квпжматпцсхзузхвмйч', 'ввзчфип']) from system.numbers limit 10; -select [0, 1, 1, 0, 11, 4, 1, 2, 0, 0] = multiSearchAllPositionsUTF8(materialize('инкщблбвнскцдндбмсщщш'), ['жхрбсусахрфкафоилмецчебржкписуз', 'инкщблбвнс', '', 'зисгжфлашймлджинаоджруй', 'кцднд', 'щблбвнскцдндбмсщщ', 'инкщблбвнс', 'н', 'зб', 'фчпупшйфшбдфенгитатхч']) from system.numbers limit 10; -select [6, 0, 4, 20, 1, 0, 5, 0, 1, 0] = multiSearchAllPositionsUTF8(materialize('рзтецуйхлоорйхдбжашнларнцт'), ['у', 'бпгййекцчглпдвсцсещщкакцзтцбччввл', 'ецуйхлоо', 'нлар', 'рз', 'ккнжзшекфирфгсгбрнвжчл', 'цуйхлоорйхдбжашн', 'йнучгрчдлйвводт', 'рзте', 'нткрввтубчлщк']) from system.numbers limit 10; - -select [1, 1, 0, 0, 1, 0, 0, 3, 3, 3, 1, 0, 8, 0, 8, 1, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('OTMMDcziXMLglehgkklbcGeAZkkdh'), ['', 'OTmmDCZiX', 'SfwUmhcGTvdYgxlzsBJpikOxVrg', 'ngqLQNIkqwguAHyqA', '', 'VVZPhzGizPnKJAkRPbosoNGJTeO', 'YHpLYTVkHnhTxMODfABor', 'mMdcZi', 'MmdCZI', 'MMdCZixmlg', '', 'hgaQHHHkIQRpPjv', 'ixMLgLeHgkkL', 'uKozJxZBorYWjrx', 'i', '', 'WSOYdEKatHkWiCtlwsCbKRnXuKcLggbkBxoq', '']) from system.numbers limit 10; -select [4, 15, 0, 0, 0, 0, 5, 0, 5, 1, 0, 1, 13, 0, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('VcrBhHvWSFXnSEdYCYpU'), ['bhhVwSfXnSEd', 'DycyP', 'kEbKocUxLxmIAFQDiUNoAmJd', 'bsOjljbyCEcedqL', 'uJZxIXwICFBPDlUPRyDHMmTxv', 'BCIPfyArrdtv', 'hHv', 'eEMkLteHsuwsxkJKG', 'hHVWsFxNseDy', '', 'HsFlleAQfyVVCoOSLQqTNTaA', '', 'sEDY', 'UMCKQJY', 'j', 'rBhHvw']) from system.numbers limit 10; -select [1, 1, 0, 0, 1, 0, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('wZyCLyiWnNNdNAPWeGSQZcdqk'), ['w', '', 'vlgiXgFTplwqRbnwBumAjHvQuM', 'QoIRVKDHMlapLNiIZXvwYxluUivjY', 'WZY', 'gAFpUfPDAwgzARCIMrtbZUsNcR', 'egkLWqqdNiETeETsMG', 'dzSlJaoHKlQmENIboow', 'vPNBhcaIfsgLH', 'mlWPTCBDVTdKHxlvIUVcJXBrmTcJokAls']) from system.numbers limit 10; -select [0, 10, 0, 1, 7, 1, 6, 1, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pqliUxqpRcOOKMjtrZSEsdW'), ['YhskuppNFdWaTaZo', 'Coo', 'mTEADzHXPeSMCQaYbKpikXBqcfIGKs', 'PQLiUxq', 'qpRCoOK', 'PQLIu', 'XQPrcoOK', '', 'pR', 'cTmgRtcSdRIklNQVcGZthwfarLtAYh']) from system.numbers limit 10; -select [16, 1, 1, 1, 1, 4, 17, 0, 0, 0, 1, 0, 0, 0, 20, 0] = multiSearchAllPositionsCaseInsensitive(materialize('kJyseeDFCeUWoqMfubYqJqWA'), ['fub', 'kJY', '', '', 'Kj', 's', 'uBYQJq', 'sUqCmHUZIBtZPswObXSrYCwrdxdznM', 'mtZDCJENYuikJnCcJfRcSCDYDPXU', 'IDXjRjHhmjqXmCOlQ', '', 'jiEwAxIsJDu', 'YXqcEKbHxlgUliIALorSKDMlGGWeCO', 'OstKrLpYuASEUrIlIuHIRdwLr', 'qJq', 'tnmvMTFvjsW']) from system.numbers limit 10; -select [11, 3, 1, 0, 9, 0, 0, 0, 0, 8, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('EBSPtFpDaCIydASuyreS'), ['iyD', 'sptfpdAciyDAsuyR', 'EbS', 'IJlqfAcPMTUsTFXkvmtsma', 'AcIYda', 'fbWuKoCaCpRMddUr', 'srlRzZKeOQGGLtTLOwylLNpVM', 'ZeIgfTFxUyNwDkbnpeiPxQumD', 'j', 'daciydA', 'sp', 'dyGFtyfnngIIbcCRQzphoqIgIMt']) from system.numbers limit 10; -select [6, 0, 0, 0, 10, 0, 1, 4, 0, 15, 0, 2, 2, 6] = multiSearchAllPositionsCaseInsensitive(materialize('QvlLEEsgpydemRZAZcYbqPZHx'), ['eSgpYDEMRzAzcyBQPzH', 'NUabuIKDlDxoPXoZOKbUMdioqwQjQAiArv', 'pRFrIAGTrggEOBBxFmnZKRPtsUHEMUEg', 'CDvyjef', 'YdEMrzaZc', 'BO', '', 'leEsgPyDEmRzaZCYBqPz', 'EzcTkEbqVXaVKXNuoxqNWHM', 'Z', 'cuuHNcHCcLGb', 'V', 'vllEes', 'eS']) from system.numbers limit 10; -select [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 5, 7, 5, 0, 11, 1] = 
multiSearchAllPositionsCaseInsensitive(materialize('eiCZvPdGJSmwxMIrZvEzfYFOFJmV'), ['lSydrmJDeXDYHGFFiFOOJGyCbCCDbLzbSbub', 'ewsAVflvcTBQFtvWBwuZOJKkrUArIg', 'fpEkBWaBkRWypFWtMz', 'YatSURyNtcSuerWWlTBSdBNClO', 'YO', 'CZvpdg', 'uoH', 'gtGwQSVqSJDVROmsBIxjuVNfrQnxDhWGXLBH', 'IKNs', 'HElLuRMlsRgINaNp', 'V', 'DGjsMW', 'vPDgJSmW', 'SGCwNiAmNfHSwLGZkRYEqrxBTaDRAWcyHZYzn', 'mWXMiRZvezfYf', '']) from system.numbers limit 10; -select [23, 1, 0, 17, 0, 0, 9, 3, 0, 2] = multiSearchAllPositionsCaseInsensitive(materialize('BizUwoENfLxIIYVDflhOaxyPJw'), ['yPJ', '', 'gExRSJWtZwOptFTkNlBGuxyQrAu', 'FLH', 'hCqo', 'oVGcArersxMUCNewhTMmjpyZYAIU', 'FlXIiYVdflHoAX', 'ZuWOe', 'bhfAfNdgEAtGdHylxkjgvU', 'IZUWo']) from system.numbers limit 10; -select [0, 9, 0, 0, 0, 0, 1, 0, 0, 1, 3, 0, 13, 0, 3, 5] = multiSearchAllPositionsCaseInsensitive(materialize('loKxfFSIAjbRcguvSnCdTdyk'), ['UWLIDIermdFaQVqEsdpPpAJ', 'ajBrcg', 'xmDmuYoRpGu', 'wlNjlKhVzpC', 'MxIjTspHAQCDbGrIdepFmLHgQzfO', 'FybQUvFFJwMxpVQRrsKSNHfKyyf', '', 'vBWzlOChNgEf', 'DiCssjczvdDYZVXdCfdSDrWaxmgpPXDiD', '', 'kxFFSIAjBRCGUVSNcD', 'LrPRUqeehMZapsyNJdu', 'cGuVSNcdTdy', 'NmZpHGkBIHVSoOcj', 'KxffSIAjBr', 'ffsIaJB']) from system.numbers limit 10; -select [14, 0, 11, 0, 10, 0, 0, 0, 13, 1, 2, 11, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('uijOrdZfWXamCseueEbq'), ['sE', 'VV', 'AmcsEu', 'fUNjxmUKgnDLHbbezdTOzyLaknQ', 'XAmCsE', 'HqprIpxIcOTkDIKcVK', 'NbmirQlNsTHnAVKlF', 'VVDNOxFKSnQGKPsTqgtwLhZnIPkL', 'c', '', 'IJ', 'aM', 'rDzF', 'YFwP']) from system.numbers limit 10; -select [0, 8, 17, 0, 1, 0, 0, 0, 0, 0, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('PzIxktujxHZsaDlwSGQPgvA'), ['zrYlZdnUxlPrVJJeZEASwdCHlNEm', 'jxhZS', 'sGQPgV', 'MZMChmRBgsxhdgspUhALoxmrkZVp', 'pzIxktuJxHzsADlw', 'xavwOAibQuoKg', 'vuuETOrWLBNLhrMeWLgGQpeFPdcWmWu', 'TZrAgmdorqZIdudhyCMypHYKFO', 'ztcCyGxRKrcUTv', 'OUvwdMZrcZuwGtjuEBeGU', 'k', 'rFTpnfGIOCfwktWnyOMeXQZelkYwqZ']) from system.numbers limit 10; -select [3, 1, 4, 1, 0, 17, 13, 0, 0, 0, 0, 0, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pUOaQLUvgmqvxaMsfJpud'), ['OaqLUvGm', '', 'aQ', '', 'VajqJSlkmQTOYcedjiwZwqNH', 'f', 'xaMsfj', 'CirvGMezpiIoacBGAGQhTJyr', 'vucKngiFjTlzltKHexFVFuUlVbey', 'ppalHtIYycBCEjsgsXbFeecpkQMNr', 'nEgIYVoGkhTsFgBUSHJvIcYCYbuOBP', 'efjBVRVzknGrikGHxExlFEtYf', 'v', 'QgRBCaGlwNYWRslDylOrfPxZxAOF']) from system.numbers limit 10; -select [14, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 20, 5, 0, 4, 0] = multiSearchAllPositionsCaseInsensitive(materialize('WZNWOCjFkCAAzIptkUtyPCyC'), ['iPTkuT', 'BngeNlFbKymzMYmNPfV', 'XKEjbLtADFMqS', 'dbRQKJGSFhzljAiZV', 'wZnwoCjFKCAAzIPTKuTYpc', 'yBaUvSSGOEL', 'iEYopROOYKxBwPdCgbPNPAsMwVksHgagnO', 'TljXPJVebHqrnhSiTGwpMaNeKy', 'wzNWocjF', 'bLxLrZnOCeIfxkfZEOcqDteUvc', 'CtHYpAZDANEv', '', 'XMAMpGYMiOb', 'y', 'o', 'floswnnFjXDTxantSvDYPSnaORL', 'WOcjFkcAaZIp', 'buqBHbZsLDnCUDhLdgd']) from system.numbers limit 10; -select [0, 20, 14, 0, 2, 0, 1, 14, 0, 0, 0, 1, 0, 26, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('XJMggEHaxfddDadtwKMCcPsMlSFVJ'), ['NzbUAZvCsnRnuzTglTsoT', 'ccP', 'ADTwKmc', 'JaUzcvWHMotuEMUtjsTfJzrsXqKf', 'jMGgEHaXfdddAdTWKMCcpsM', 'SMnb', '', 'AdTWkMccPSMlsfv', 'fVjPVafkp', 'goqsYAFqhhnCkGwhg', 'CNHNPZHZreFwhRMr', '', 'vcimNhmdbtoiCgVzNuvdgZG', 'sfvJ', 'AqKmroxmRMSFAKjfhwrzxmNSSjMHxKow', 'Xhub']) from system.numbers limit 10; -select [0, 0, 7, 0, 1, 1, 0, 0, 13, 0, 1, 1, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('VQuEWycGbGcTcCCvWkujgdoWjKgVYy'), ['UevGaXmEAtBdWsPhBfqp', 'aQOrNMPmoVGSu', 'c', 
'TMhzvbNJCaxtGNUgRBmTFEqgNBIBpSJ', '', 'vq', 'pVNUTCqXr', 'QSvkansbdPbvVmQpcQXDk', 'cCCvwkUjgdOWjKgVYy', 'EtCGaEzsSbJ', 'V', '', 'WycgBgCTCcCvwkujgdoWJKgv', 'xPBJqKrZbZHJawYvPxgqrgxPN']) from system.numbers limit 10; -select [4, 1, 0, 0, 0, 0, 0, 0, 0, 18] = multiSearchAllPositionsCaseInsensitive(materialize('LODBfQsqxfeNuoGtzvrUMRVWNKUKKs'), ['Bf', 'lOdbfQs', 'ZDSDfKXABsFiZRwsebyU', 'DT', 'GEUukPEwWZ', 'GNSbrGYqEDWNNCFRYokZbZEzGzc', 'kYCF', 'Kh', 'jRMxqdmGYpTkePeReXJNdnxagceitMJlmbbro', 'VrumrvWnKU']) from system.numbers limit 10; -select [1, 1, 3, 1, 10, 0, 9, 2, 2, 0, 0, 0, 0, 0, 8, 0, 1, 11, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('lStPVtsQypFlZQoQhCuP'), ['', '', 'tpV', 'L', 'PF', 'pGPggwbkQMZandXugTpUorlPOubk', 'yPFlz', 'sTPVTsQyPfLzQOqhCU', 'StPVtSq', 'cbCxBjAfJXYgueqMFNIoSguFm', 'AosIZKMPduRfumDZ', 'AGcNTHObH', 'oPaGpsQ', 'kwQCczyY', 'q', 'HHUYdzGAzVJyn', '', 'fLZQoqHcUp', 'q', 'SSonzfqLVwIGzdHtj']) from system.numbers limit 10; -select [0, 1, 2, 0, 0, 0, 13, 1, 27, 1, 0, 1, 3, 1, 0, 1, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('NhKJtvBUddKWpseWwRiMyBsTWmlk'), ['toBjODDZoRAjFeppAdsne', '', 'HKjTvBu', 'QpFOZJzUHHQAExAqkdoBpSbXzPnTzuPd', 'gE', 'hLmXhcEOwCkatUrLGuEIJRkjATPlqBjKPOV', 'Ps', 'NH', 'l', '', 'aSZiWpmNKfglqAbMZpEwZKmIVNjyJTtDianY', 'NhKJTvBUDDkwpS', 'KJtvbUDDKWPSewwrimYbstwm', 'NHKJTvbudDKwpSEwwR', 'hmMeWEpksVAaXd', 'NHkJTvBUDd', 'kjTvbudd', 'kmwUzfEpWSIWkEylDeRPpJDGb']) from system.numbers limit 10; -select [0, 5, 0, 0, 0, 1, 1, 15, 2, 3, 4, 5] = multiSearchAllPositionsCaseInsensitive(materialize('NAfMyPcNINKcgsShJMascJunjJva'), ['ftHhHaJoHcALmFYVvNaazowvQlgxwqdTBkIF', 'yp', 'zDEdjPPkAdtkBqgLpBfCtsepRZScuQKbyxeYP', 'yPPTvdFcwNsUSeqdAUGySOGVIhxsJhMkZRGI', 'JQEqJOlnSSam', 'nAFmy', '', 'sHJmaScjUnJj', 'afmY', 'FmYpcnINKCg', 'MYPCniNkcgSS', 'YPCNiNkCgSsHjmasCJuNjJ']) from system.numbers limit 10; -select [0, 0, 6, 3, 2, 0, 8, 2, 2, 10, 0, 0, 14, 0, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('hgpZVERvggiLOpjMJhgUhpBKaN'), ['Nr', 'jMcd', 'e', 'PZVeRvggiLOPjmjh', 'GpZVe', 'cVbWQeTQGhYcWEANtAiihYzVGUoHKH', 'VGgilOPj', 'GPZVervgGiLopjmjHGuHp', 'GP', 'gil', 'fzwDPTewvwuCvpxNZDi', 'gLLycXDitSXUZTgwyeQgMSyC', 'PJmjh', 'bTQdrFiMiBtYBcEnYbKlqpTvGLmo', 'ggHxiDatVcGTiMogkIWDxmNnKyVDJth', 'pzv']) from system.numbers limit 10; -select [7, 1, 9, 3, 0, 0, 2, 0, 1, 11] = multiSearchAllPositionsCaseInsensitive(materialize('xUHVawrEvgeYyUZGmGZejClfinvNS'), ['RevGeYyuz', 'XUHvAWrev', 'Vg', 'hvawR', 'eRQbWyincvqjohEcYHMwmDbjU', 'nuQCxaoxEdadhptAhZMxkZl', 'UhVAwREvGEy', 'lHtwTFqlcQcoOAkujHSaj', '', 'eYYUzgMgzEjCLfIn']) from system.numbers limit 10; -select [0, 0, 8, 5, 9, 1, 0, 4, 12, 6, 4, 0, 0, 12] = multiSearchAllPositionsCaseInsensitive(materialize('DbtStWzfvScJMGVPQEGkGFoS'), ['CSjYiEgihaqQDxZsOiSDCWXPrBdiVg', 'aQukOYRCSLiildgifpuUXvepbXuAXnYMyk', 'fvsCjmgv', 'TWZFV', 'VscjMgVpQ', 'dBtSTwZfVsCjmGVP', 'wqpMklzJiEvqRFnZYMfd', 'StwZfVScJ', 'j', 'wzfVsCjmGV', 'STWZfVS', 'kdrDcqSnKFvKGAcsjcAPEwUUGWxh', 'UtrcmrgonvUlLnzWXvZI', 'jMgvP']) from system.numbers limit 10; -select [0, 0, 0, 0, 7, 3, 0, 11, 1, 10, 0, 0, 7, 1, 4, 0, 17, 3, 15, 0] = multiSearchAllPositionsCaseInsensitive(materialize('YSBdcQkWhYJMtqdEXFoLfDmSFeQrf'), ['TnclcrBJjLBtkdVtecaZQTUZjkXBC', 'SPwzygXYMrxKzdmBRTbppBQSvDADMUIWSEpVI', 'QnMXyFwUouXBoCGLtbBPDSxyaLTcjLcf', 'dOwcYyLWtJEhlXxiQLRYQBcU', 'KWhYjMtqdEXFo', 'BD', 'nnPsgvdYUIhjaMRVcbpPGWOgVjJxoUsliZi', 'j', '', 'YjmtQdeXF', 'peeOAjH', 'agVscUvPQNDwxyFfXpuUVPJZOjpSBv', 'kWh', '', 'dcQKWHYjmTQD', 
'qjWSZOgiTCJyEvXYqaPFqbwvrwadJsGVTOhD', 'xfoL', 'b', 'DeXf', 'HyBR']) from system.numbers limit 10; -select [4, 0, 0, 13, 1, 0, 3, 13, 16, 1, 0, 1, 16, 1, 12, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('SoVPMQNqmaTGuzYxDvZvapSuPiaP'), ['pMqNQMAtGuzYxDVz', 'TEJtgLhyredMnIpoZfmWvNwpkxnm', 'XRWmsfWVOCHhk', 'u', '', 'HvkXtxFdhVIyccpzFFSL', 'VPM', 'uZyXDVzvAPsUpIaP', 'xDvzV', 'sovpmqNQmATguZYx', 'wEG', 'soVPmQnQ', 'XDVzV', '', 'GUZyXdvzva', 'FetUahWwGtwEpVdlJCJntL', 'B', 'lSCUttZM']) from system.numbers limit 10; -select [1, 0, 1, 2, 15, 0, 0, 0, 1, 0] = multiSearchAllPositionsCaseInsensitive(materialize('zFWmqRMtsDjSeWBSFoqvWsrV'), ['', 'GItrPyYRBwNUqwSaUBpbHJ', '', 'f', 'BsfOQvWsR', 'JgvsMUZzWaddD', 'wxRECkgoCBPjSMRorZpBwuOQL', 'xHKLLxUoWexAM', '', 'YlckoSedfStmFOumjm']) from system.numbers limit 10; -select [11, 1, 1, 1, 0, 0, 1, 0, 4, 0, 0, 0, 1, 0, 5, 8] = multiSearchAllPositionsCaseInsensitive(materialize('THBuPkHbMokPQgchYfBFFXme'), ['KpqGchyfBF', '', '', 'TH', 'NjnC', 'ssbzgYTybNDbtuwJnvCCM', 'tHbupKHBMOkPQgcHy', 'RpOBhT', 'uPKHbMoKpq', 'oNQLkpSKwocBuPglKvciSjttK', 'TaCqLisKvOjznOxnTuZe', 'HmQJhFyZrcfeWbXVXsnqpcgRlg', 'tHB', 'gkFGbYje', 'pkhbMokPq', 'Bm']) from system.numbers limit 10; -select [7, 10, 0, 0, 9, 0, 0, 3, 0, 10] = multiSearchAllPositionsCaseInsensitive(materialize('ESKeuHuVsDbiNtvxUrfPFjxblv'), ['uvsDBiNtV', 'DbInTvxu', 'YcLzbvwQghvrCtCGTWVuosE', 'cGMNo', 'SDb', 'nFIRTLImfrLpxsVFMBJKHBKdSeBy', 'EUSiPjqCXVOFOJkGnKYdrpuxzlbKizCURgQ', 'KeUHU', 'gStFdxQlrDcUEbOlhLjdtQlddJ', 'DBInTVx']) from system.numbers limit 10; -select [1, 0, 2, 18, 1, 3, 15, 8, 0, 0, 1, 3, 0, 23, 2, 0, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('TzczIDSFtrkjCmDQyHxSlvYTNVKjMT'), ['', 'AmIFsYdYFaIYObkyiXtxgvnwMVZxLNlmytkSqAyb', 'ZcZI', 'HXsLVYTnvKjm', '', 'CZiDsFtRKJ', 'DQYhxSl', 'fTRKjCmdqYHxsLvYtNvk', 'hxVpKFQojYDnGjPaTNPhGkRFzkNhnMUeDLKnd', 'RBVNIxIvzjGYmQBNFhubBMOMvInMQMqXQnjnzyw', '', 'c', 'vcvyskDmNYOobeNSfmlWcpfpXHfdAdgZNXzNm', 'ytnvKJM', 'ZcZidsFtRKjcmdqy', 'IRNETsfz', 'fTR', 'POwVxuBifnvZmtBICqOWhbOmrcU']) from system.numbers limit 10; -select [14, 16, 10, 2, 6, 1, 0, 8, 0, 0, 12, 1, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('tejdZOLhjpFLkGBWTGPfmk'), ['GBWtgPF', 'Wt', 'PflkgBWTgpFmK', 'ejdZOLhJPFlKgb', 'o', 'TejDZ', 'HlQfCP', 'hJP', 'ydiyWEfPGyRwcKGfGVdYxAXmkY', 'QsOyrgkTGMpVUAmLjtnWEIW', 'LKGBw', 'tejDzolHJpFLKgbWT', 'IK', '', 'WrzLpcmudcIJEBapkToDbYSazKTwilW', 'DmEWOxoieDsQHYsLNelMc']) from system.numbers limit 10; -select [9, 0, 1, 4, 13, 0, 0, 1, 3, 7, 9, 0, 1, 1, 0, 7] = multiSearchAllPositionsCaseInsensitive(materialize('ZWHpzwUiXxltWPAIGGxIcJB'), ['XxLTWpA', 'YOv', '', 'pzwUIXXl', 'wp', 'lpMMLDAuflLnWMFrETXRethzCUZOWfQ', 'la', '', 'HPZ', 'UixxlTw', 'xXLTWP', 'YlfpbSBqkbddrVwTEmXxgymedH', '', '', 'QZWlplahlCRTMjmNBeoSlcBoKBTnNZAS', 'UiXxlTwPAiGG']) from system.numbers limit 10; -select [0, 9, 6, 0, 4, 0, 3, 0, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('NytxaLUvmiojEepjuCzwUYPoWL'), ['LcOnnmjbZSifx', 'm', 'lUvMIOjeE', 'vuZsNMSsutiLCDbClPUSsrziohmoZaQeXtKG', 'XaLuvm', 'hlUevDfTSEGOjvLNdRTYjJQvMvwrMpwy', 'TXALuVmioJeePjUczw', 'pKaQKZg', 'PAdX', 'FKLMfNAwNqeZeWplTLjd', 'DODpbzUmMCzfGZwfkjH', 'HMcEGRHLspYdJIiJXqwjDUBp']) from system.numbers limit 10; -select [2, 1, 0, 16, 8, 1, 6, 0, 0, 1, 8, 0, 7, 0, 9, 1, 1, 0, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('WGVvkXuhsbzkLqiIEOuyiRfomy'), ['GVv', '', 'VbldWXHWzdziNcJKqIkDWrO', 'iEOUyIRFomy', 'hsBZklqiieOuy', '', 'X', 'emXjmIqLvXsNz', 'rxhVkujX', 
'wgvvK', 'HsBzKLQiie', 'wVzJBMSdKOqjiNrXrfLEjjXozolCgYv', 'UHsbzklQiiEouyirf', 'UOvUsiKtUnwIt', 'SBZKLqiIEoUYIrfom', 'wg', '', 'BefhETEirL', 'WyTCSmbKLbkQ', '']) from system.numbers limit 10; -select [8, 1, 2, 8, 1, 0, 5, 0, 0, 4, 0, 1, 14, 0, 0, 7, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('uyWhVSwxUFitYoVQqUaCVlsZN'), ['XufitYOVqqUACVlszn', '', 'ywH', 'XUFIT', 'uywHvSWXuFIt', 'dGhpjGRnQlrZhzGeInmOj', 'vswXuFitYovqQuA', 'dHCfJRAAQJUZeMJNXLqrqYCygdozjAC', 'rojpIwYfNLECl', 'hVswxufiTYov', 'bgJdgRoye', '', 'ovQ', 'AdVrJlq', 'krJFOKilvBTGZ', 'WxuFITYOV', 'AsskQjNPViwyTF', 'u']) from system.numbers limit 10; -select [0, 2, 0, 0, 0, 6, 0, 5, 0, 15, 0, 0, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BEKRRKLkptaZQvBxKoBL'), ['HTwmOxzMykTOkDVKjSbOqaAbg', 'eKrRKl', 'UrLKPVVwK', 'TyuqYmTlQDMXJUfbiTCr', 'fyHrUaoMGdq', 'KLkPtaZq', 'cPUJp', 'RKLk', 'yMnNgUOpDdP', 'BX', 'tXZScAuxcwYEfSKXzyfioYPWsrpuZz', 'dsiqhlAKbCXkyTjBbXGxOENd', 'k', 'juPjORNFlAoEeMAUVH']) from system.numbers limit 10; -select [9, 0, 0, 0, 1, 4, 2, 0, 0, 0, 0, 8, 0, 2, 0, 3, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('PFkLcrbouhBTisTkuUcO'), ['UhBtistKU', 'ioQunYMFWHD', 'VgYHTKZazRtfgRtvywtIgVoBqNBwVn', 'ijSNLKch', 'pFKlcrBOuhbtIsTku', 'lCRboUHBtI', 'fKLCRBOu', 'XTeBYUCBQVFwqRkElrvDOpZiZYmh', 'KzXfBUupnT', 'OgIjgQO', 'icmYVdmekJlUGSmPLXHc', 'OuH', 'BWDGzBZFhTKQErIRCbtUDIIjzw', 'F', 'LuWyPfSdNHIAOYwRMFhP', 'kL', 'PQmvXDCkEhrlFBkUmRqqWBxYi', 'kLcrbo']) from system.numbers limit 10; -select [0, 1, 1, 6, 14, 3, 0, 1, 9, 1, 9, 0, 1, 10, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pfynpJvgIjSqXWlZzqSGPTTW'), ['ZzeqsJPmHmpoYyTnKcWJGReOSUCITAX', '', 'P', 'jvGIj', 'wLZzQsgP', 'YnPjVGij', 'DmpcmWsyilwHwAFcKpLhkiV', '', 'I', 'pFy', 'IjsqxwLZzqSgpT', 'pKpe', 'PfynpJvgiJSqXwlzZ', 'jsQXwLZZqs', 'onQyQzglEOJwMCO', 'GV']) from system.numbers limit 10; -select [1, 17, 1, 20, 0, 0, 5, 0, 0, 0, 24, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BLNRADHLMQstZkAlKJVylmBUDHqEVa'), ['bLnRaDhLm', 'kJVYlmbuD', 'bLnr', 'yLMbU', 'eAZtcqAMoqPEgwtcrHTgooQcOOCmn', 'jPmVwqZfp', 'aDHlmqS', 'fmaauDbUAQsTeijxJFhpRFjkbYPX', 'aqIXStybzbcMjyDKRUFBrhfRcNjauljlqolfDX', 'WPIuzORuNbTGTNb', 'uDhqeVa', 'fQRglSARIviYABcjGeLK']) from system.numbers limit 10; -select [2, 0, 4, 5, 1, 15, 1, 9, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BEwjMzphoTMoGikbrjTVyqDq'), ['E', 'sClgniMsZoGTEuLO', 'jmzphotmoGIKBRjtv', 'MZPhOtmo', '', 'Kb', '', 'otm', 'tVpxYRttoVpRLencV', 'SJAhAuMttGaeMsalRjeelAGG']) from system.numbers limit 10; -select [1, 0, 0, 0, 0, 0, 4, 0, 0, 19, 0, 7] = multiSearchAllPositionsCaseInsensitive(materialize('yNnYRQfcyemQdxUEPOiwRn'), ['', 'SJteoGNeIAMPWWBltkNKMrWDiVfR', 'kKnnKQhIPiekpnqTXJuyHfvWL', 'GPDUQEMWKzEEpvjLaIRYiuNfpzxsnSBX', 'oPrngRKwruyH', 'ukTSzFePSeVoeZeLQlAaOUe', 'yRqfcyemQDXUepo', 'CwmxidvpPHIbkJnVfSpbiZY', 'FUxmQdFVISApa', 'iwr', 'ciGHzDpMGNQbytsKpRP', 'Fcy']) from system.numbers limit 10; -select [0, 1, 0, 11, 2, 0, 1, 3, 0, 0, 0, 21] = multiSearchAllPositionsCaseInsensitive(materialize('EgGWQFaRsjTzAzejYhVrboju'), ['DVnaLFtCeuFJsFMLsfk', '', 'thaqudWdT', 'Tzazejy', 'GGW', 'RolbbeLLHOJpzmUgCN', '', 'gwqfarsjtzaZeJYHvR', 'KkaoIcijmfILoe', 'UofWvICTEbwVgISstVjIzkdrrGryxNB', 'UJEvDeESWShjvsJeioXMddXDkaWkOiCV', 'B']) from system.numbers limit 10; -select [0, 5, 2, 0, 0, 7, 0, 0, 0, 11, 0, 12, 22, 10, 0, 12] = multiSearchAllPositionsCaseInsensitive(materialize('ONgpDBjfRUCmkAOabDkgHXICkKuuL'), ['XiMhnzJKAulYUCAUkHa', 'dbj', 'nGpDbJFRU', 'xwbyFAiJjkohARSeXmaU', 
'QgsJHnGqKZOsFCfxXEBexQHrNpewEBFgme', 'JFruCM', 'DLiobjNSVmQk', 'vx', 'HYQYzwiCArqkVOwnjoVNZxhbjFaMK', 'Cm', 'ckHlrEXBPMrVIlyD', 'M', 'xI', 'UcmkAOabdKg', 'jursqSsWYOLbXMLQAEhvnuHclcrNcKqB', 'mKaoaBdKghxiCkkUUL']) from system.numbers limit 10; -select [0, 1, 0, 1, 0, 0, 0, 0, 7, 21] = multiSearchAllPositionsCaseInsensitive(materialize('WhdlibCbKUmdiGbJRshgdOWe'), ['kDPiHmzbHUZB', '', 'CukBhVOzElTdbEBHyrspj', '', 'QOmMle', 'wiRqgNwjpdfgyQabxzksjg', 'RgilTJqakLrXnlWMn', 'bSPXSjkbypwqyazFLQ', 'CBkuMDiGbJRShGdOWe', 'dow']) from system.numbers limit 10; -select [0, 8, 0, 1, 1, 0, 1, 7, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('oOccAoDDoPzHUyRqdWhJxNmATEqtE'), ['LFuvoQkVx', 'DoPzh', 'YaBSTdWvmUzlgRloppaShkRmLC', 'oO', '', 'eeEpOSLSXbyaOxTscOPoaTcKcchPmSGThk', '', 'dDO', 'oFXmyIJtmcSnebywDlKruvPUgmPFzEnMvA', 'vCs', 'MsxHLTgQcaQYZdPWJshIMWbk', 'yqrjIzvrxd']) from system.numbers limit 10; -select [0, 16, 0, 0, 0, 0, 7, 1, 0, 0, 1, 2, 1, 4, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('FtjOSBIjcnZecmFEoECoep'), ['FQQwzxsyauVUBufEBdLTKKSdxSxoMFpL', 'EOecoEP', 'HGWzNTDfHxLtKrIODGnDehl', 'ZxirLbookpoHaxvASAMfiZUhYlfuJJN', 'mKh', 'GZaxbwVOEEsApJgkLFBRXvmrymSp', 'Ij', '', 'X', 'AnCEVAe', 'fTj', 'tjOSbIjcNZECMfeoEC', '', 'OsBIjcN', 'LtdJpFximOmwYmawvlAIadIstt', 'JOsBiJCNzEc']) from system.numbers limit 10; -select [0, 2, 0, 0, 19, 0, 0, 12, 1, 0, 3, 1, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('ugpnWWncvqSLsYUCVXRZk'), ['yOWnQmZuhppRVZamgmRIXXMDQdeUich', 'gPNww', 'jlyFSbvmjaYPsMe', 'fQUeGVxgQdmPbVH', 'rZk', 'ariCX', 'grAffMPlefMQvugtAzN', 'LsYuCVX', '', 'jZFoQdWEWJFfSmNDqxIyNjvxnZJ', 'P', 'UgPN', 'JmKMsbegxNvusaiGGAZKglq', 'qArXLxzdYvabPv']) from system.numbers limit 10; -select [0, 0, 0, 0, 0, 0, 8, 0, 0, 1, 1, 15, 0, 1, 7, 0] = multiSearchAllPositionsCaseInsensitive(materialize('nxwotjpplUAXvoQaHgQzr'), ['ABiEhaADbBLzPwhSfhu', 'TbIqtlkCnFdPgvXAYpUuLjqnnDjDD', 'oPszWpzxuhcyuWxiOyfMBi', 'fLkacEEeHXCYuGYQXbDHKTBntqCQOnD', 'GHGZkWVqyooxtKtFTh', 'CvHcLTbMOQBKNCizyEXIZSgFxJY', 'PlUAxVoQah', 'zrhYwNUzoYjUSswEFEQKvkI', 'c', 'NXWOt', '', 'qAhG', 'JNqCpsMJfOcDxWLVhSSqyNauaRxC', '', 'PpLuaxV', 'DLITYGE']) from system.numbers limit 10; -select [2, 0, 0, 1, 0, 0, 28, 1, 16, 1] = multiSearchAllPositionsCaseInsensitive(materialize('undxzJRxBhUkJpInxxJZvcUkINlya'), ['ndxzjRxbhuKjP', 'QdJVLzIyWazIfRcXU', 'oiXcYEsTIKdDZSyQ', 'U', 'dRLPRY', 'jTQRHyW', 'Y', '', 'nxxJZVcU', '']) from system.numbers limit 10; -select [1, 4, 1, 0, 4, 1, 0, 1, 16, 1, 0, 0, 0, 8, 12, 14, 0, 2] = multiSearchAllPositionsCaseInsensitive(materialize('lrDgweYHmpzOASVeiFcrDQUsv'), ['', 'gwEYhMP', 'LrDGwEyHmPzOaSVEifC', 'oMN', 'gwEYhMpZO', 'lrdGWEy', 'pOKrxN', 'lrDgwEyhmpZoaSv', 'eifcrdqU', 'LrDgw', 'dUvarZ', 'giYIvswNbNaBWprMd', 'pPPqKPhVaBhNdmZqrBmb', 'hmPzoASVEiF', 'O', 'SVEi', 'gIGLmHnctIkFsDFfeJWahtjDzjPXwY', 'rDGweyHmP']) from system.numbers limit 10; -select [0, 0, 11, 1, 1, 1, 0, 16, 0, 1, 5, 0, 0, 0, 2, 0, 2, 0] = multiSearchAllPositionsCaseInsensitive(materialize('XAtDvcDVPxZSQsnmVSXMvHcKVab'), ['bFLmyGwEdXiyNfnzjKxUlhweubGMeuHxaL', 'IhXOeTDqcamcAHzSh', 'ZSQsNMvsxmVHcK', '', '', '', 'dbrLiMzYMQotrvgwjh', 'MvsxMV', 'zMp', 'XaTDvCdvpXzsqSNMVSxm', 'v', 'LkUkcjfrhyFmgPXPmXNkuDjGYlSfzPi', 'ULpAlGowytswrAqYdaufOyWybVOhWMQrvxqMs', 'wGdptUwQtNaS', 'ATdVcdVPXzSqsnmVSXMvHcKVab', 'JnhhGhONmMlUvrKGjQcsWbQGgDCYSDOlor', 'atdvCdvpXzsqSnMVSxMVhCkvAb', 'ybNczkKjdlMoOavqBaouwI']) from system.numbers limit 10; -select [8, 0, 0, 0, 4, 0, 0, 5, 5, 2] = 
multiSearchAllPositionsCaseInsensitive(materialize('XPquCTjqgYymRuwolcgmcIqS'), ['qgyYMruW', 'tPWiStuETZYRkfjfqBeTfYlhmsjRjMVLJZ', 'PkTdqDkRpPpQAMksmkRNXydKBmrlOAzIKe', 'wDUMtn', 'UcTJQgYYMRuWoLCgMcI', 'PieFD', 'kCBaCC', 'Ct', 'C', 'pQuctjqgyymRuwOLCgmc']) from system.numbers limit 10; - -select [1, 0, 7, 1, 0, 24, 17, 0, 0, 0, 2, 0, 1, 7, 4, 1, 12, 8] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('гГБаДнФбпнЩврЩшЩЩМщЕБшЩПЖПчдт'), ['', 'таОХхрзИДжЛСдЖКЧжБВЩжЛкКХУКждАКРеаЗТгч', 'Ф', '', 'ЙЩИФМфАГщХзКЩЧТЙжмуГшСЛ', 'ПЖпчдТ', 'ЩМщЕбшЩПжПч', 'ФгА', 'гУД', 'зУцкжРоППЖчиШйЗЕшаНаЧаЦх', 'гбаДНФбПНЩВРЩШЩщМЩеБшЩпжПЧд', 'РДЧЖАбрФЦ', 'гГ', 'ФбпНщвр', 'адНфБПнщвРщШщщМщЕбШщ', 'ггб', 'ВРЩ', 'бПНщврЩш']) from system.numbers limit 10; -select [0, 12, 8, 0, 12, 0, 0, 10, 0, 8, 4, 6] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('айРВбЧБжКВИхБкчФЖЖНВнпФйФБДфЗ'), ['ЛрЦфуУДВК', 'хБкчфЖжНвнпфйфБдФ', 'жКВИХБкчФЖжНвнПф', 'кЖчвУцВСфЗБТИфбСжТИдРкшгзХвщ', 'хбк', 'штДезйААУЛчнЖофМисНЗо', 'нлнШЧВЙхОПежкцевчлКрайдХНчНб', 'вИХбкчфжжНВН', 'ЩдзЦТуоЛДСеШГфЦ', 'ЖКВихбКЧфжЖ', 'вбЧбЖкВихБкЧфЖжНВ', 'Чб']) from system.numbers limit 10; -select [18, 15, 0, 0, 0, 0, 5, 0, 14, 1, 0, 0, 0, 0, 0, 15] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('пМИОкоЗжГйНТПЙацччЧАЩгЕВБбЕ'), ['ЧЧАЩгЕВБ', 'а', 'ФбРВщшййпХдфаЗЖлЛСЗПРШПпАОинЧКзЩхждН', 'ЛфРКДЙВСУСЙОчтнИкРЗбСГфкЩреИхЛлчХчШСч', 'ШйвБПАдФдФепЗТкНУрААйеЧПВйТоЧмБГДгс', 'ФтЙлЖЕсИАХИФЗаЕМшсшуцлцАМФМгбО', 'КО', 'лиШБнлпОХИнБаФЩдмцпжЗИЛнвсЩЙ', 'йацччЧАщгевбБЕ', 'ПмИоКозжГйНТП', 'ИГНннСчКАИСБщцП', 'ПнжмЙЛвШтЩейХЛутОРЩжифбЗчгМУЛруГпх', 'ХжЗПлГЖЛйсбпрЩОТИеБвулДСиГзлЛНГ', 'учклЦНЕгжмщлжАшщжМд', 'ЩеПОЙтЖзСифОУ', 'АЦЧ']) from system.numbers limit 10; -select [10, 0, 1, 1, 6, 1, 7, 6, 0, 0, 0, 2, 12, 0, 6, 0, 4, 8, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('квхБнцхйзЕпйИмтЙхфзвгдФ'), ['еПйИМт', 'хгкиМжСБИТНщенЩИщНСкй', '', 'Квхб', 'цхЙЗЕПйИмТйХФЗ', 'к', 'хйЗЕПЙИмтй', 'Цх', 'нКлШбМЖГйШкРзадрЛ', 'ДштШвБШТг', 'СЦКйЕамЦщПглдСзМлоНШарУтМднЕтв', 'ВхБнцхйЗЕПйимТ', 'йимтЙХФЗВГД', 'жчссунЙаРцМкЖУЦщнцОЕхнРж', 'цХЙЗЕП', 'ОгНФдМЛПТИдшцмХИеКйРЛД', 'бнЦхЙ', 'ЙЗе', 'згЩищШ', 'фХлФчлХ']) from system.numbers limit 10; -select [0, 0, 0, 12, 0, 0, 27, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('хНпсРТХВдтоЦчдлеФПвнЛгЗКлПйнМВ'), ['ШиБфЗШПДЧхОЩшхфщЗЩ', 'иГйСЧЗтШЛуч', 'АЗХЦхедхОуРАСВЙС', 'цчдЛЕфП', 'СДбйГйВЕРмЙЩЛщнжен', 'НДлцСфТшАщижгфмуЖицжчзегЕСЕНп', 'й', '', 'йлчМкРИЙиМКЙжссЦТцРГзщнхТмОР', 'ПРцГувЧкйУХггОгЖНРРсшГДрлЧНжГМчрХЗфЧЕ']) from system.numbers limit 10; -select [0, 0, 2, 0, 10, 7, 1, 1, 0, 9, 0, 2, 0, 17, 0, 0, 0, 6, 5, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЙзЗжпжДЕСУхчйдттСЙзоЗо'), ['щОЙУшееЧщкхГККреБкВ', 'жВ', 'ззЖпждЕсУХчЙДТТсЙ', 'ЙЦШЦЙЖзХШРвнкЕд', 'УхчйДтТсйЗОз', 'дЕСу', '', '', 'дсцеррищндЗдНкжаНЦ', 'сУхчЙдттсйзОзО', 'ЦЖРжмц', 'ЗЗ', 'СгЛГАГЕЖНгщОеЖЦДмБССцЩафзЗ', 'Сйзоз', 'ЦГХТЕвЕЗБМА', 'пмВоиеХГжВшдфАЖАшТйуСщШчИДРЙБнФц', 'Оа', 'ждЕ', 'ПжДесу', 'ЗзЖПждЕСУ']) from system.numbers limit 10; -select [0, 0, 0, 0, 5, 1, 0, 6, 0, 1, 17, 15, 1, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('уФШЙбШТоХВбзЦцЖОЕКТлщхнЖГ'), ['цЛ', 'ууМ', 'ТИгЙолМФсибтЕМнетквЦИЩИБккйн', 'оФОаМогсХЧЦооДТПхб', 'бШтОХВбЗцЦЖоЕКтЛ', 'уфШйбШтоХ', 'фдтщрФОЦсшигдПУхЛцнХрЦл', 'ШтО', 'НИкИТрбФБГИДКфшзЕмЙнДЖОсЙпЩцщкеЖхкР', 'уфШЙБш', 'екТлщ', 'ЖоекТл', 'уфШйБшТоХвбз', 'ТуОхдЗмгФеТаафЙм']) from system.numbers limit 10; -select [0, 1, 6, 1, 0, 1, 0, 0, 0, 0] = 
multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чМЩБЛЛПРлщкВУбПефХВФлАЗШао'), ['гаТкЛВнрвдПМоеКПОйр', 'ч', 'ЛпрЛЩКвуБпе', 'ЧмЩб', 'ц', '', 'жгаччЖйГЧацмдсИИВЩЩжВЛо', 'йГеЙнБзгнкЦЛБКдОЕЧ', 'ПоЦРвпЕЗСАШж', 'ЙОНЦОбиееО']) from system.numbers limit 10; -select [2, 0, 17, 1, 0, 0, 0, 5, 0, 4, 0, 0, 0, 0, 0, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЕаЩичщМЦЖиЗБЛЧжуНМЧК'), ['АЩиЧЩ', 'ИлУсшДБнжщаатуРТтраПОЙКЩйТГ', 'НМЧк', 'Еа', 'зАВФЛЩбФрМВШбПФГгВЕвЖббИТйе', 'РЗНРБЩ', 'ЦдЙНГпефзЛчпУ', 'ч', 'НШШчПЗР', 'ИчЩмЦжИЗБлЧЖУНМч', 'аннвГДлмОнТЖЗЙ', 'ШдчЩшЕБвхПУсШпг', 'гФИШНфЖПжймРчхАБШкЖ', 'ЖзгЖАБлШЗДпд', 'Д', 'ащиЧ']) from system.numbers limit 10; -select [4, 1, 0, 7, 0, 7, 1, 1, 0, 3, 7, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('иОцХКЙвувМИжШдУМУЕйНсБ'), ['ХкйвуВмИжШдУм', '', 'звМАОМЩщЙПшкиТчЩдгТЦмфзеИ', 'вуВМиж', 'КДщчшЙВЕ', 'в', '', 'ИоЦхКЙВувМижШ', 'ЕвТАРи', 'цхКЙвувмИЖШДумуе', 'вУвМи', 'зПШИХчУщШХУвврХйсуЙЗеВЧКНмКШ']) from system.numbers limit 10; -select [0, 5, 0, 0, 0, 0, 0, 12, 0, 11] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЦОфбчУФсвТймЦчдщгЩжИАБ'), ['йлрк', 'ЧуФсвтйМцчдЩгщ', 'МНлЕжорв', 'иНзТЖМсмх', 'шЕМЖжпИчсБжмтЧЙчщФХб', 'жШХДнФКАЩГсОЩвЕаам', 'НпКЦХулЛвФчШЕЗкхХо', 'мЦчДЩгЩжиАб', 'мпцгВАЕ', 'Й']) from system.numbers limit 10; -select [1, 0, 0, 0, 8, 0, 2, 0, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чТХЙНщФфцИНБаеЖкОвлиУДР'), ['', 'рВХмжКцНцИЙраштМппсодЛнЧАКуЩ', 'ИХфХЖЧХВкзЩВЙхчфМрчдтКздиОфЙжУ', 'Гзлр', 'фЦи', 'абПф', 'тХЙНщффЦИн', 'нссГбВеЖх', 'амлЗщрсУ', 'фФ']) from system.numbers limit 10; -select [0, 9, 11, 0, 11, 1, 0, 0, 0, 1, 6, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зДЗпщАцвТгРдврщхЩфЖл'), ['йХЛ', 'Т', 'рд', 'АИЦщгниДфВОе', 'Р', 'здзпщ', 'вКТвВШмгч', 'ввирАйбЗЕЕНПс', 'тХиХоОтхПК', '', 'аЦВТгРДврщ', '', 'уЗЗЖвУЕйтчудноЕКМЖцВРаНТЙЗСОиЕ', 'оЕфПхЕДжАаНхЕцЖжжофЦхкШоБЙр']) from system.numbers limit 10; -select [1, 1, 0, 0, 1, 7, 0, 0, 0, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('йЛПЛшмЦШНЖРрЧрМцкЖзЕНжЧДелФжАн'), ['', 'йЛПлшМЦшНЖррч', 'ПНКдфтДейуиШзЗХАРУХизВ', 'ПценмщЧОФУСЙЖв', '', 'ЦшнжрРчрМЦКЖЗе', 'МрПзЕАгжРбТЧ', 'ЕДФмаФНвТЦгКТЧЦжцЛбещЛ', 'УтПУвЛкТасдЦкеИмОещНИАоИжЖдЛРгБЩнвЖКЛЕП', 'Л']) from system.numbers limit 10; -select [1, 5, 1, 1, 0, 0, 1, 1, 0, 2, 19, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('сйДпмжнДРщКБгфцЖОчтГНБ'), ['', 'МЖнДРщ', 'Сй', '', 'пУщ', 'йгВИАЦнозаемТиХВвожКАПТдкПИаж', 'Сйд', 'СЙДпмжНдРщ', 'ФПщБцАпетаЙФГ', 'ЙдпМжНдрЩКбГфЦжОЧТГНб', 'т', 'гллрБВМнвУБгНаЙцМцТйЙФпзЧОЙЛвчЙ', 'йДПМжндРЩкБ', 'ЗмфОмГСНПщшЧкиССдГБУсчМ']) from system.numbers limit 10; -select [0, 18, 10, 5, 0, 2, 8, 1, 4, 11] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ХпИРддХрмВНйфчвгШиЧМКП'), ['хЗФДлДУБЙаЦтжРБЗсуйнЦпш', 'иЧмК', 'внЙ', 'д', 'зиМУЩГиГ', 'ПИр', 'РМвнЙфчвгШич', '', 'РдДхРМ', 'нЙфчВГШИ']) from system.numbers limit 10; -select [18, 0, 0, 1, 0, 0, 6, 0, 0, 9] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('нГгФкдуФШуИТбшпХфтаГт'), ['Таг', 'рРпшУйчГд', 'гК', '', 'лаВНбездпШШ', 'ЕБРйаНрОБожкКИсв', 'ДУфШУитБ', 'ГРиГШфШтйфЖлРФзфбащМЗ', 'мхЩжЛнК', 'ШуИтБШ']) from system.numbers limit 10; -select [13, 0, 0, 7, 0, 15, 0, 0, 15, 0, 0, 5, 6, 0, 18, 21, 11, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('рлобшдПЦИхжФуХщжгПФукшзт'), ['УхщжГ', 'ТВщЦфФсчЩГ', 'ЕжФШойжуЛРМчУвк', 'пцИХжфуХЩж', 'бР', 'щЖГПфуКШЗТ', 'йжРГгЛуШКдлил', 'ТщЖГкбШНИщЩеЩлаАГхрАфЙНцЦгВкб', 'щжГПфУ', 'бкаДБЛХ', 'АЗ', 'шДПЦихжфух', 'дП', 
'вфнЙобСцвЩмКОбЦсИббФКзЩ', 'пФУкшзТ', 'К', 'жфу', '']) from system.numbers limit 10; -select [12, 19, 8, 1, 0, 0, 0, 15, 0, 0, 12, 2, 0, 4, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЦкЛЗепкЕХЩГлКФрБдТрлвйАхдООШ'), ['лК', 'рЛв', 'Ехщ', '', 'еаПКБгЦЩАоЗВонйТЗгМхццСАаодМЕЩГ', 'ишОНиеБидфбФБЖриУЩЩ', 'дуж', 'РбДТ', 'пЗсГХКсгРущкЙРФкАНЩОржФвбЦнЩНЖЩ', 'щрОУАГФащзхффКвЕйизцсйВТШКбнБПеОГ', 'лкФрБдТРлвЙа', 'КЛзеп', 'УЛФЗРшкРщзеФуМвгПасШЧЛАЦр', 'зеПКеХщглкфР', 'ЦЖЗдХеМЕ', 'зЖжрт', 'уЩФрйрЖдЦз', 'МфцУГЩтвПАЦжтМТоеищЕфнЖй']) from system.numbers limit 10; -select [0, 0, 1, 0, 1, 0, 0, 7, 0, 5, 1, 6, 1, 1, 1, 5, 6, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РННЕШвжМКФтшДЙлфЛИзЙ'), ['ГаМРош', 'Дтфс', '', 'еБбиаКщГхххШвхМЖКзЛАезФУчХо', 'РНн', 'сВбТМ', 'ЖЗЦПБчиСйе', 'жМкфтШДЙл', 'нЖХуеДзтЧтулиСХпТпеМлИа', 'ШВжМкФТШдЙлфл', '', 'вЖМКфТ', '', '', '', 'швЖМКфтШДЙЛфлИЗй', 'вЖмКФТ', 'еМ']) from system.numbers limit 10; -select [0, 0, 15, 1, 0, 0, 8, 1, 0, 0, 0, 4, 8, 10] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РиучГийдХутДЕЙДпфиуд'), ['ЩмгцлЖрц', 'ЕСжСлЩЧИЖгЗЛлф', 'дП', '', 'щГЦаБтПШВзЦСрриСЙбД', 'тдРгОЛТШ', 'д', '', 'КЕбЗКСХЦТщЦДЖХпфаЧйоХАл', 'мТвзелНКрЖЧЦПпЕЙвдШтеШйБ', 'ЙОТКрБСШпШд', 'ЧГ', 'ДХУТДЕЙд', 'УТд']) from system.numbers limit 10; -select [0, 0, 0, 0, 15, 0, 0, 0, 11, 0, 0, 5, 1, 1, 0, 2, 3, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('МшазшргхОПивОлбмДоебАшцН'), ['ЦИшштН', 'еМСЗкФЕКДйОНМ', 'ЛСчГрбеРЕбЩМПМЗЦИп', 'ХнИПЧжЗдзФщЗ', 'бмдоЕ', 'гМОдйсбТСЦЩбФВЗШзшщбчегаЕмЕБаХаРР', 'фщнР', 'щмТчФчсМАОгчБщшг', 'иВ', 'УщцГОшТзпУХКоКЖБеМШ', 'мйаАЛцАегСмПОаСТИСфбЧДБКоИВчбЦЙ', 'шРгхоп', '', '', 'еИпАЩпнЛцФжЩХИрЧаИИТЛвшиСНЩ', 'шаЗ', 'АЗ', 'ФгдтфвКЩБреногуир', 'ДБжШгщШБЩпЖИЛК', 'ЧдРЩрбфЛзЙклхдМСФУЙЛн']) from system.numbers limit 10; -select [5, 0, 0, 18, 13, 0, 2, 7, 0, 0, 1, 15, 1, 0, 0, 0, 3, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('хщеКЗПчуНЙтрЧЩгфСбоКЕАДТййАрр'), ['зп', 'хчПЦшпДбзСфНВЧзНжЕМФОП', 'ЧЖхЕУк', 'БОКеАдтЙЙа', 'чЩГфС', 'шллддЩщеМжШйкЩн', 'щЕкзпЧуНЙТ', 'ЧунйтРЧщгФс', 'ввНздЙуоТЖРаВЙчМИчхРвфЛЖБН', 'ЗХМХПщПкктцАзщЙкдпжф', '', 'ГФСбОкеАДтйЙа', '', 'МБХВЕчпБМчуххРбнИМЛТшЩИщЙгаДцзЛАМвйаО', 'ЛкОзц', 'ЕцпАДЗСРрсЕвтВщДвцбЗузУннТИгХжхрцПДРДПм', 'екЗПЧунЙТРчщгФсбоК', 'шпИфЕчгШжцГВСйм', 'ЛхйЧбЧД', 'ВзЗоМцкЩНХГж']) from system.numbers limit 10; -select [0, 0, 6, 20, 0, 10, 0, 0, 0, 9, 10, 3, 23, 1, 0, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('лцапШиХчЛДшдксСНИбшгикзчЙанми'), ['ХууатТдтбодМГЧгщЧнклШтЗПНчкЦОаЙг', 'МЦЧчпИхКЛаФхщХдРУДщжУчфлжахц', 'иХЧлдшдкСсНИбШГикзЧЙ', 'гикЗчйА', 'ГсТзЛОфИББлекЩАсЛвмБ', 'Д', 'ЦХрТЖощНрУШфнужзжецсНХВфЩБбДУоМШШиГйж', 'йуВдЕзоггПВДЖб', 'ЙфБГйХМбжоакЖЛфБаГИаБФСнБЖсТшбмЗЙТГОДКИ', 'ЛДШдКССНИБшГикзч', 'ДШдКССниБ', 'аПШИХчЛДШДКсс', 'з', '', 'ФоохПЩОГЖоУШлКшзЙДоуп', 'хАДХЩхлвУИсшчрбРШУдФА', 'ЦА', 'гвптУФлчУуРхпрмЖКИрБеЩКчН']) from system.numbers limit 10; -select [0, 4, 5, 7, 15, 3, 3, 17, 7, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зЗАЩлЕЕЕПИохЧчШвКЧйрсКХдд'), ['пКРбуШОНТЙБГНзИРвЖБсхрЛщчИрлЧУ', 'ЩЛЕЕЕПиоХЧ', 'ЛеЕеп', 'Еепио', 'швкЧйрС', 'ащЛеееПИох', 'АЩлеЕЕпиОхЧЧШвкЧЙРсК', 'КчйРскхД', 'ЕЕПИохччшВКчй', 'у']) from system.numbers limit 10; -select [1, 12, 0, 8, 1, 1, 0, 1, 5, 0, 1, 0, 0, 0, 0, 3, 1, 0, 4, 5] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ПмКСйСКЖККмШеоигЙчПфжТ'), ['', 'Шео', 'РчвлдЙЙлПщуКмтН', 'жкКмшЕоИГЙЧ', '', '', 'йРмМЖнПиЙ', '', 'йс', 'тфФРСцл', '', 'щлЩХиКсС', 'кпнТЖпФЩиЙЛ', 'абкКптбИВгмЧкцфЦртЛДЦФФВоУхЗБн', 
'чНшоВСГДМйДлтвфмхХВВуеЩЦВтЖтв', 'кС', '', 'фидБлйеЙЧШРЗЗОулщеЕЩщЙсЙшА', 'СЙс', 'йсКжкКМшЕо']) from system.numbers limit 10; -select [0, 0, 1, 0, 2, 2, 1, 2, 7, 0, 1, 2, 1, 0, 6, 8] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('УгЖЕугАЩХйчидаррлжНпфФГшр'), ['утвШ', 'кЕвФч', 'угжеУг', 'тШлТвЕШЗчЖеЛНджЦазЩХцж', 'гЖеугаЩхй', 'ГжЕугаЩХйЧидАР', 'УгжЕУГаЩХЙЧИда', 'гЖеу', 'ащхЙчИ', 'мЧлщгкЛдмЙЩРЧДИу', '', 'ГжеугАщХйЧиДаРРЛЖНП', '', 'зЕМвИКбУГКЩФшоГЧГ', 'ГАЩХйчИДАррлЖНпФфг', 'ЩХЙчИдАррЛЖНпфФгш']) from system.numbers limit 10; -select [1, 0, 0, 7, 0, 6, 0, 11, 0, 0, 0, 2, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЗЕГЛЩПцГНтзЕЦШЧхНКГТХЙЙФШ'), ['', 'шзкиЗсаИщАБмаз', 'Ж', 'ц', 'гШуЕжЛСПодРнхе', 'пцГНтЗЕЦ', 'щРкЩАеНржЙПМАизшщКвЗщглТкКИф', 'ЗеЦшчхнКГтхЙЙ', 'пелгЩКкцвтфнжЖУуКосЙлкЛ', 'рф', 'хНШчНрАХМШщфЧкЩБНзХУкилЙмП', 'ЕгЛЩПЦгнтзецШЧ', 'ЩУчБчРнЖугабУоиХоИККтО', 'СГмЦШтФШЛмЙЩ', 'ауТПЛШВадоХМПиБу', 'ЩЩйр']) from system.numbers limit 10; -select [2, 2, 1, 0, 0, 0, 0, 0, 1, 0, 7, 9, 0, 15, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('гЙЧЙФХнЖБвомгАШГбОВГксИйцз'), ['ЙЧйфхНЖбвО', 'Й', 'гЙЧйфхнЖбв', 'хсЩмШЙЙММВЦмУБТчгзУЛР', 'зктшп', 'дЕоиЖлгШж', 'хКкаНЛБ', 'ЗКйСчсоЗшскГЩбИта', '', 'у', 'НжбВОмгашГ', 'БВо', 'ещфРШлчСчмаЖШПЧфоК', 'шгбо', 'ЙСтШШДЩшзМмдпЧдЙЖевТвоУСЕп', 'Л']) from system.numbers limit 10; -select [0, 9, 0, 0, 18, 13, 13, 11, 0, 0, 4, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЙЛмоЦСдТаоФчШКЖЦСНРаРЦзоС'), ['ДфгЗАасВфаМмшхчлмР', 'аоФчШкЖцСнРАРЦзОС', 'зЩзнйтФРТЙжУлхФВт', 'чЦкШВчЕщДУМкхЛУЩФшА', 'н', 'Шк', 'шКЖцсНРаРцЗос', 'фчшкЖцснрАРЦз', 'лку', 'пЧШМЦквоемЕщ', 'о', 'йЛМоцСДТАофЧшкжЦСнРаРЦзос']) from system.numbers limit 10; -select [21, 0, 0, 17, 1, 11, 0, 2, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кЧЖнЕбМЛпШЗХиЙжиМщлнСФрПЧЖВН'), ['сФ', 'гцХаШЛсаШЛкшфЧОКЛцзешХСиЩоаЕОш', 'Г', 'МщЛНСФРпч', '', 'зХ', 'ОАДепНпСГшгФАЦмлуНуШШЗфдЧРШфрБЛчРМ', 'чЖне', 'СфЕАбФн', 'М']) from system.numbers limit 10; -select [4, 0, 1, 1, 0, 2, 4, 16, 3, 6, 5, 0, 0, 6, 1, 0, 5, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кдАпЩСШИСцРхтеСиФЖЧСсОоц'), ['пщСшиСцрХТЕсифЖчССоОц', 'рхнкикДТКДВШчиЖЦнВм', '', '', 'жПЛСнЦцн', 'дА', 'ПщсШИсцрХтЕс', 'иФжЧсСоОЦ', 'ап', 'с', 'щсШИ', 'МАзашДРПЩПзРТЛАсБцкСШнЕРЙцИЩлТЛеУ', 'ичцпДбАК', 'сшИСЦрхтЕсифжчСсООц', 'КдАПЩСшИСЦРХТЕсИфЖЧСсо', 'ЛнБсИПоМЩвЛпиЩЗЖСд', 'щс', 'шщДНБаСщЗАхкизжнЛАХЙ']) from system.numbers limit 10; -select [0, 13, 0, 2, 16, 1, 3, 0, 9, 0, 2, 0, 1, 4, 0, 0, 0, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('иНхеЕкхЩщмгзМГхсгРБсхОКцУгуНБ'), ['ДиоУлФЖЛисУСЕтсЕалщн', 'МгХсгрБСХО', 'ЖХНцршПшгйО', 'нХЕЕкхЩ', 'сГРбсхОКцУг', '', 'х', 'Ж', 'щМгЗмгхСг', 'СрпхДГОУ', 'НхеЕкХщ', 'ПМтБцЦЙЖАЙКВБпФ', 'ИнхеЕ', 'еЕКхЩ', 'мМГлРзш', 'гтдоЙБСВещкЩАЩЦйТВИгоАЦлчКнНРНПДЖшСЧа', 'ЖшеН', '']) from system.numbers limit 10; -select [1, 5, 0, 0, 3, 0, 2, 0, 14, 14, 1, 0, 17, 13, 3, 25] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('айлзсЗБоГйтГжЙРККФхКшлНРОрЦкфо'), ['', 'с', 'Д', 'шиБраНИЦЧуИжп', 'Лз', 'ДРБСУфКСшцГДц', 'йЛЗСЗбОгЙтГЖйРК', 'ЕЙЦсвРЕШшщЕЗб', 'ЙркКфхкшЛнРОР', 'ЙРкКФхкШ', 'а', 'ГдоДКшСудНл', 'КФхКшлНРоР', 'ж', 'лзСзБогйТГЖйрККф', 'оР']) from system.numbers limit 10; -select [6, 0, 8, 10, 1, 0, 1, 13, 0, 0, 0, 2, 2, 0, 4, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РучУлрХчЗУпИчДТЕфщИЙщрНлн'), ['РХЧ', 'оДсГСЛЙшйиЧРСКзчХВоХарцНШ', 'ЧЗУпИ', 'УПичдТе', 'Р', 'ВЙЩхжАутПСНЦфхКщеЩИуЧдчусцАесзМпмУв', '', 'ЧдТ', 'ООсШИ', 'ФШсВжХтБУШз', 
'ЕЩуДдшкМУРЕБшщпДОСАцйауи', 'УЧ', 'УЧУЛрХчзуПИчдТеФщий', 'йнЦцДСхйШВЛнШКМСфмдЩВйлнеЖуВдС', 'улрхчзупиЧдтефщИ', 'СХТЧШшГТВвлЕИчНОВи']) from system.numbers limit 10; -select [0, 0, 0, 2, 1, 1, 0, 1, 19, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('УецжлЦЦщМшРГгЩЩдБмхЖЗЧзШЙб'), ['НзИуАузуРЗРуКфоТМмлПкрсмЕЕЕнТ', 'ЕЩГХхЧш', 'ХоЙпООчфЖввИжЙшЖжЕФОтБхлВен', 'ЕЦЖЛЦцщ', '', '', 'ухогСИФвемдпаШЗуЛтлизОЧ', 'УецЖ', 'ХЖзЧЗ', 'П', 'мБкзХ', 'уБуОБхШ']) from system.numbers limit 10; -select [6, 1, 15, 5, 0, 0, 0, 3, 2, 4, 0, 12, 0, 2, 0, 3, 1, 6, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ГЖФеачМаКчПСпкВкхсПтг'), ['чмАкЧ', '', 'ВкХс', 'ачМА', 'КлтжУлОЛршБЕблФЩ', 'тцуМфж', 'л', 'фе', 'Жф', 'ЕАЧМак', 'лЖЕРТнФбЧЙТййвзШМСплИхбЙЛЖзДпм', 'СпкВК', 'ЩзчжИш', 'жФеАчМ', 'КбЦбйЕШмКтЩЕКдуЩтмпИЕВТЖл', 'ФЕаЧмАКчПСПквкхспТ', 'гжФеАЧмаКчпСп', 'ЧмАК', 'дцкДННМБцйЕгайхшжПГх', 'ТЩбвЦЖАНшрАШФДчОщй']) from system.numbers limit 10; -select [1, 6, 0, 1, 0, 0, 3, 1, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('МФННЧйОнцЛИЧЕПШПЧйоГФО'), ['', 'йОн', 'шУлгИЛЛРЙАсфЗоИЙЗРхуПбОЙсшдхо', 'МФННчЙоНц', 'лзВжбЦзфкзтуОйзуЗ', 'ЖГДщшЦзсжщцЦЖеЧвРфНИНОСАОщг', 'ННчйОНЦлИчЕПШ', '', 'Ф', 'ЩрИдНСлЙуАНЗвЕчмчАКмФУипндиП']) from system.numbers limit 10; -select [5, 0, 8, 13, 0, 0, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зВйймХЩМзЦГЕкЕКфоСтхПблуКМхц'), ['МХщмз', 'НАНрШоНДмурМлО', 'мзцгЕкек', 'кеКфоСтХПбЛУК', 'СУУксО', 'ЦоШжЧфйШЦаГЧйбЛШГЙггцРРчт', 'НбтвВбМ', '', 'тЩФкСтоСЧЦЦЙаСДЩСГЙГРИФЗОЗфбТДЙИб', 'ВГж']) from system.numbers limit 10; -select [0, 0, 0, 8, 19, 0, 3, 12, 1, 4] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ДпбЙЖНЗбПнЛбахБаХТуабШ'), ['цИаЩвгеИР', 'Ф', 'РЖиА', 'БпнЛб', 'У', 'Тфн', 'Б', 'БА', '', 'ЙЖНзБПнлбАхбаХ']) from system.numbers limit 10; -select [0, 0, 0, 0, 0, 1, 0, 17, 1, 0, 1, 1, 1, 11, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ТЦмЩОинХзоДДпПНЩигрРщОзКц'), ['ЕжЙВпПл', 'ВКфКТ', 'ШкДсЖхшфоПИадУбхФЩБчОАкпУеБхи', 'НТЕЙОШЦЖоЩбзВзшс', 'учГгуКФзлУдНУУуПУлкаЦЕ', '', 'фАПМКуЧйБЧзСоЗргШДб', 'ИГРрщОзк', '', 'йупОМшУйзВиВрЛЩЕеЩмп', '', '', '', 'дДППнщИгРР', 'ШФвИЧакеЦвШ', 'ТцМЩоинхЗОДдппнЩ', 'мрОгЩшЩеЧ', 'еЖРиркуаОТсАолЩДББВАМБфРфпШшРРРм']) from system.numbers limit 10; -select [3, 0, 0, 0, 0, 0, 1, 0, 0, 14, 0, 1, 0, 1, 1, 1, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('аОкиЛгКйхаОГОУзЦЛрбцш'), ['кИЛГкйхАогоУЗЦл', 'щЧДпХИхбпсГвфДФХкчХ', 'ШвАмБЗлДОИПткжхФТФН', 'щфсхФмЦсЛеувЙО', 'лВУЖц', 'еИщРшозЖАдцтКииДУлДОУФв', 'а', 'ХгЦРШ', 'ФзрЖкРЗЩЧИеЧцКФИфЧЧжаооИФк', 'уЗ', 'фЦФдцжжМчЗЖлиСЧзлщжжЦт', '', 'МдхжизИХфвбМААрйФНХдЕжп', 'аОкиЛг', 'АОКИЛгкйХАОГОУЗЦ', '', 'МбЖйрсумщиеОЩк', 'КйХАоГоУЗцлРБЦШ']) from system.numbers limit 10; -select [0, 0, 2, 1, 0, 0, 12, 0, 17, 0, 0, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('КУчЛХФчЛХшвбМЦинРвНрФМРкмиеЕп'), ['ТБЩБзхАмщПщЧПИФПашгЕТиКЦМБМпСЩСуЩМчтшеш', 'йлВЕЙшфшаШЗШЩВХЦчЛБс', 'УЧл', '', 'ЛДсЖщмНЦсКуфЗуГиука', 'РТТОТфГЕлЩЕгЛтДфлВЖШГзЦЖвнЗ', 'БМцИНРвнРф', 'ОЕИЕдИсАНаифТПмузЧчЖфШЕуеЩсслСШМоЖуЩЛМп', 'рвНРфМркМи', 'ЦзБМСиКчУжКУЩИИПУДвлбдБИОЙКТЛвтз', 'злСГе', 'ВдтцвОИРМЕжХО', 'учЛХфЧл', 'БшччШбУзЕТзфКпиШжнезвоеК']) from system.numbers limit 10; -select [0, 7, 0, 0, 0, 0, 7, 6, 0, 16, 12, 12, 15, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('оЖиогсфклШМСДрбхРбМбрЕщНЙЗйод'), ['иПмДКейууОклНХГЗсбаЙдШ', 'ФКлШмсДрБХРбМбрещНЙЗЙОд', 'арчжтСТнк', 'чбТНЛЕжооЗшзОУ', 'ощАЩучРСУгауДхГКлмОхЙцЕо', 'аЛбкиЦаКМбКхБМДнмФМкйРвРр', 'ФКлШмСДрбХРбм', 'СфклШ', 
'еДйилкУлиИчХЙШтхцЗБУ', 'хрБ', 'СДрбХрбМБР', 'СдрбхРБ', 'бхрБМБРЕщНйз', 'КИб']) from system.numbers limit 10; -select [22, 1, 8, 0, 0, 1, 0, 3, 0, 6, 20, 0, 0, 0, 4, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЕЖДФбКужЙЦЦмсЖГГжБзеЙнПйЙри'), ['НПййР', '', 'Жй', 'Щ', 'ФхУО', 'ЕЖДфБКУЖйЦЦмСжГГ', 'НФЙзщЩГЧпфсфЦШОМЕЗгцрс', 'д', 'ЦтщДДЖтбвкгКонСк', 'кУЖЙЦЦм', 'ЕйНПййРИ', 'РчеЙйичФбдЦОтпчлТЖИлДучЙПгЗр', 'внчзшЗзОнФфхДгфзХТеНПШРшфБТЖДйф', 'кНснгмулМуГНурщЕББСузВмбнЧаХ', 'фбКУЖйЦцМсЖГгЖб', 'ЩСЕ']) from system.numbers limit 10; -select [0, 0, 0, 1, 10, 4, 0, 0, 5, 0, 1, 0, 7, 0, 3, 7, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чБхлжгКЖХлЙнкКЦфжЕгЖАндЧ'), ['ПдмРрЖАтВнСдСБШпПЗГгшИ', 'цшцг', 'тчАЙЧОеЕАвГпЗцЖЧгдХуЛСЛНрвАЖщ', '', 'Лй', 'Л', 'ОйррцУжчуЦБАжтшл', 'вХУКк', 'жгКжхЛЙН', 'уцбЕЕОЧГКУПуШХВЕчГБнт', '', 'ПсАжБИКштЕаН', 'КжхлЙН', 'ЩгШухЦПАТКежхгХксгокбщФЙПсдТНШФЦ', 'Х', 'кЖХЛйНккЦФжЕГЖ', 'ЙзРДСПднаСтбЧЖхощ', 'пАПОУЧмИпслБЗПфУ']) from system.numbers limit 10; -select [0, 0, 0, 5, 2, 16, 4, 4, 11, 0, 0, 3, 3, 0, 0, 6] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кпМаоуГГфвощолЦЩщЧПРОКепеА'), ['ЗзуФжНшщПТнЧЦКВОиАУсЧХОШбк', 'тмПкАпеайзуХсурШй', 'АЕЦавбШиСДвВДумВкиИУБШЕ', 'о', 'ПМаОУггФВощоЛЦЩЩЧПрокЕПеа', 'щЩ', 'аоУг', 'аОуГгФВ', 'оЩоЛЦЩщчПРОК', 'виХЛшчБсщ', 'УчАМаЦкйДЦфКСмГУЧт', 'мАоУ', 'МАО', 'щФФА', 'Н', 'У']) from system.numbers limit 10; -select [0, 3, 10, 8, 3, 0, 4, 0, 9, 4, 1, 9] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('щЙЧРпшИцхпргЦНуДййусЧЧнЖ'), ['ДлУцтееЖБКХгМзСВжА', 'чРпШИЦ', 'пргЦнУДЙЙУ', 'Ц', 'ЧРПш', 'нЩрЕвмрМеРйхтшЩче', 'РпШИЦхПРГцнУд', 'ПНоЙтПкоаОКгПОМЦпДЛФЩДНКПбСгЗНЗ', 'ХПРГцНудЙЙ', 'рПши', '', 'ХПРГ']) from system.numbers limit 10; -select [11, 4, 1, 0, 1, 0, 0, 0, 0, 12, 0, 9, 5, 0, 16, 0, 12, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('пкзщщЛНОНбфЦноИЧфхбФ'), ['ф', 'щщл', 'ПКзЩщЛНОн', 'ЩшФйЧБНДОИзМхеЖНЦцеЛлУЧ', '', 'сЗоЙТклйДШкДИЗгЖ', 'орЛФХПвБбУхНс', 'доЗмЩВу', 'ШиЕ', 'ЦНО', 'ндЩдРУЖШМпнзНссЖШДЦФвпТмуМЙйцН', 'НбФЦнОИч', 'ЩлНонБФ', 'ЛдРжКММЙм', 'чфх', 'ЦматДйиСфЦфааЦо', 'ЦНОИчФх', 'иржЦщн']) from system.numbers limit 10; -select [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 3, 0, 5] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чЖажцВбшЛттзДааАугШщАйПгщП'), ['ШгУТсчГОВЦЦеЛАСфдЗоЗЦВЛйлТДзчвЛва', 'УшЕшищЖткрвРСйиФЗйТФТЛЗаЗ', 'ВдикЙббщузоФХщХХГтЗоДпхбЕкМщц', 'срйеХ', 'рАшуПсЙоДнхчВкПЖ', '', 'гНЗбКРНСБВрАВФлнДШг', 'фХЧгмКнлПШлЩР', 'мкйЗбИФрЗахжгАдвЕ', 'чжаЖцВБШлТ', 'лХЕСрлПрОс', '', 'ЗЧПтчЙОцвОФУФО', 'ажцвБшЛТт', 'уНчЖШчМЕА', 'ц']) from system.numbers limit 10; -select [7, 1, 0, 7, 1, 19, 8, 6, 3, 0, 2, 13, 6, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('НТКПпмБжДцбАКПНСЖоиТФД'), ['б', '', 'аУщЛМХЖбвИтНчГБМГдДнч', 'Б', 'НТкппм', 'и', 'Жд', 'МБждЦбАкП', 'кппмБждцБа', 'ПЕрнЦпМЦВгЧЧгГ', 'ткПпМБЖДцбаКпнСжО', 'кПнСЖоИ', 'МБжДцБакпН', 'гхОХжГуОвШШАкфКМщсшФДШеИжоАйг']) from system.numbers limit 10; - -select 0 = multiSearchAny(materialize('mpnsguhwsitzvuleiwebwjfitmsg'), ['wbirxqoabpblrnvvmjizj', 'cfcxhuvrexyzyjsh', 'oldhtubemyuqlqbwvwwkwin', 'bumoozxdkjglzu', 'intxlfohlxmajjomw', 'dxkeghohv', 'arsvmwwkjeopnlwnan', 'ouugllgowpqtaxslcopkytbfhifaxbgt', 'hkedmjlbcrzvryaopjqdjjc', 'tbqkljywstuahzh', 'o', 'wowoclosyfcuwotmvjygzuzhrery', 'vpefjiffkhlggntcu', 'ytdixvasrorhripzfhjdmlhqksmctyycwp']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('qjjzqexjpgkglgxpzrbqbnskq'), ['vaiatcjacmlffdzsejpdareqzy', 'xspcfzdufkmecud', 'bcvtbuqtctq', 'nkcopwbfytgemkqcfnnno', 'dylxnzuyhq', 
'tno', 'scukuhufly', 'cdyquzuqlptv', 'ohluyfeksyxepezdhqmtfmgkvzsyph', 'ualzwtahvqvtijwp', 'jg', 'gwbawqlngzcknzgtmlj', 'qimvjcgbkkp', 'eaedbcgyrdvv', 'qcwrncjoewwedyyewcdkh', 'uqcvhngoqngmitjfxpznqomertqnqcveoqk', 'ydrgjiankgygpm', 'axepgap']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('fdkmtqmxnegwvnjhghjq'), ['vynkybvdmhgeezybbdqfrukibisj', 'knazzamgjjpavwhvdkwigykh', 'peumnifrmdhhmrqqnemw', 'lmsnyvqoisinlaqobxojlwfbi', 'oqwfzs', 'dymudxxeodwjpgbibnkvr', 'vomtfsnizkplgzktqyoiw', 'yoyfuhlpgrzds', 'cefao', 'gi', 'srpgxfjwl', 'etsjusdeiwbfe', 'ikvtzdopxo', 'ljfkavrau', 'soqdhxtenfrkmeic', 'ktprjwfcelzbup', 'pcvuoddqwsaurcqdtjfnczekwni', 'agkqkqxkfbkfgyqliahsljim']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['', 'lpc', 'rxpkrfybb', 'crxp', '', 'pkr', 'jxzxlpcrxpkrf', '', 'xzxlpcr', 'xpk', 'fyb', 'xzxlpcrxpkrfybbfk', 'k', 'lpcrxp', 'ljxzxlpcr', 'r', 'pkr', 'fk']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('rbrizgjbigvzfnpgmpkqxoqxvdj'), ['ee', 'cohqnb', 'msol', 'yhlujcvhklnhuomy', 'ietn', 'vgmnlkcsybtokrepzrm', 'wspiryefojxysgrzsxyrluykxfnnbzdstcel', 'mxisnsivndbefqxwznimwgazuulupbaihavg', 'vpzdjvqqeizascxmzdhuq', 'pgvncohlxcqjhfkm', 'mbaypcnfapltsegquurahlsruqvipfhrhq', 'ioxjbcyyqujfveujfhnfdfokfcrlsincjbdt', 'cnvlujyowompdrqjwjx', 'wobwed', 'kdfhaoxiuifotmptcmdbk', 'leoamsnorcvtlmokdomkzuo', 'jjw', 'ogugysetxuqmvggneosbsfbonszepsatq']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('uymwxzyjbfegbhgswiqhinf'), ['lizxzbzlwljkr', 'ukxygktlpzuyijcqeqktxenlaqi', 'onperabgbdiafsxwbvpjtyt', 'xfqgoqvhqph', 'aflmcwabtwgmajmmqelxwkaolyyhmdlc', 'yfz', 'meffuiaicvwed', 'hhzvgmifzamgftkifaeowayjrnnzw', 'nwewybtajv', 'ectiye', 'epjeiljegmqqjncubj', 'zsjgftqjrn', 'pssng', 'raqoarfhdoeujulvqmdo']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('omgghgnzjmecpzqmtcvw'), ['fjhlzbszodmzavzg', 'gfofrnwrxprkfiokv', 'jmjiiqpgznlmyrxwewzqzbe', 'pkyrsqkltlmxr', 'crqgkgqkkyujcyoc', 'endagbcxwqhueczuasykmajfsvtcmh', 'xytmxtrnkdysuwltqomehddp', 'etmdxyyfotfyifwvbykghijvwv', 'mwqtgrncyhkfhjdg', 'iuvymofrqpp', 'pgllsdanlhzqhkstwsmzzftp', 'disjylcceufxtjdvhy']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('mznihnmshftvnmmhnrulizzpslq'), ['nrul', 'mshftvnmmhnr', 'z', 'mhnrulizzps', 'hftvnmmhnrul', 'ihnmshftvnmmhnrulizzp', 'izz', '', 'uli', 'nihnmshftvnmmhnru', 'hnrulizzp', 'nrulizz']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('ruqmqrsxrbftvruvahonradau'), ['uqmqrsxrbft', 'ftv', 'tvruvahonrad', 'mqrsxrbftvruvahon', 'rbftvruvah', 'qrsxrbftvru', 'o', 'ahonradau', 'a', 'ft', '', 'u', 'rsxrbftvruvahonradau', 'ruvahon', 'bftvruvahonradau', 'qrsxrbftvru', 't', 'vahonrada', 'vruvahonradau', 'onra']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('gpsevxtcoeexrltyzduyidmtzxf'), ['exrltyzduyid', 'vxtcoeexrltyz', 'xr', 'ltyzduyidmt', 'yzduy', 'exr', 'coeexrltyzduy', 'coeexrltyzduy', 'rlty', 'rltyzduyidm', 'exrltyz', 'xtcoeexrlty', 'vxtcoeexrltyzduyidm', '', 'coeexrl', 'sevxtcoeexrltyzdu', 'dmt', '']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('dyhycfhzyewaikgursyxfkuv'), ['sktnofpugrmyxmbizzrivmhn', 'fhlgadpoqcvktbfzncxbllvwutdawmw', 'eewzjpcgzrqmltbgmhafwlwqb', 'tpogbkyj', 'rtllntxjgkzs', 'mirbvsqexscnzglogigbujgdwjvcv', 'iktwpgjsakemewmahgqza', 'xgfvzkvqgiuoihjjnxwwpznxhz', 'nxaumpaknreklbwynvxdsmatjekdlxvklh', 'zadzwqhgfxqllihuudozxeixyokhny', 'tdqpgfpzexlkslodps', 
'slztannufxaabqfcjyfquafgfhfb', 'xvjldhfuwurvkb', 'aecv', 'uycfsughpikqsbcmwvqygdyexkcykhbnau', 'jr']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('vbcsettndwuntnruiyclvvwoo'), ['dwuntnru', '', 'ttndwuntnruiyclvv', 'ntnr', 'nruiyclvvw', 'wo', '', 'bcsettndwuntnruiycl', 'yc', 'untnruiyclvvw', 'csettndwuntnr', 'ntnruiyclvvwo']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('pqqnugshlczcuxhpjxjbcnro'), ['dpeedqy', 'rtsc', 'jdgla', 'qkgudqjiyzvlvsj', 'xmfxawhijgxxtydbd', 'ebgzazqthb', 'wyrjhvhwzhmpybnylirrn', 'iviqbyuclayqketooztwegtkgwnsezfl', 'bhvidy', 'hijctxxweboq', 't', 'osnzfbziidteiaifgaanm']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('loqchlxspwuvvccucskuytr'), ['', 'k', 'qchlxspwu', 'u', 'hlxspwuvv', 'wuvvccucsku', 'vcc', 'uyt', 'uvv', 'spwu', 'ytr', 'wuvvccucs', 'xspwuv', 'lxspwuvvccuc', 'spwuvvccu', 'oqchlxspwuvvccucskuy']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('pjjyzupzwllshlnatiujmwvaofr'), ['lnatiujmwvao', '', 'zupzwllsh', 'nati', 'wllshl', 'hlnatiujmwv', 'mwvao', 'shlnat', 'ati', 'wllshlnatiujmwvao', 'wllshlnatiujmwvaofr', 'nat']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('iketunkleyaqaxdlocci'), ['nkleyaqaxd', 'etunkleyaq', 'yaqaxdlocci', 'tunkleyaq', 'eyaqaxdlocc', 'leyaq', 'nkleyaqaxdl', 'tunkleya', 'kleyaqa', 'etunkleya', 'leyaqa', 'dlo', 'yaqa', 'leyaqaxd', 'etunkleyaq', '']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('drqianqtangmgbdwruvblkqd'), ['wusajejyucamkyl', 'wsgibljugzrpkniliy', 'lhwqqiuafwffyersqjgjvvvfurx', 'jfokpzzxfdonelorqu', 'ccwkpcgac', 'jmyulqpndkmzbfztobwtm', 'rwrgfkccgxht', 'ggldjecrgbngkonphtcxrkcviujihidjx', 'spwweavbiokizv', 'lv', 'krb', 'vstnhvkbwlqbconaxgbfobqky', 'pvxwdc', 'thrl', 'ahsblffdveamceonqwrbeyxzccmux', 'yozji', 'oejtaxwmeovtqtz', 'zsnzznvqpxdvdxhznxrjn', 'hse', 'kcmkrccxmljzizracxwmpoaggywhdfpxkq']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('yasnpckniistxcejowfijjsvkdajz'), ['slkpxhtsmrtvtm', 'crsbq', 'rdeshtxbfrlfwpsqojassxmvlfbzefldavmgme', 'ipetilcbpsfroefkjirquciwtxhrimbmwnlyv', 'knjpwkmdwbvdbapuyqbtsw', 'horueidziztxovqhsicnklmharuxhtgrsr', 'ofohrgpz', 'oneqnwyevbaqsonrcpmxcynflojmsnix', 'shg', 'nglqzczevgevwawdfperpeytuodjlf']) from system.numbers limit 10; -select 0 = multiSearchAny(materialize('ueptpscfgxhplwsueckkxs'), ['ohhygchclbpcdwmftperprn', 'dvpjdqmqckekndvcerqrpkxen', 'lohhvarnmyi', 'zppd', 'qmqxgfewitsunbuhffozcpjtc', 'hsjbioisycsrawktqssjovkmltxodjgv', 'dbzuunwbkrtosyvctdujqtvaawfnvuq', 'gupbvpqthqxae', 'abjdmijaaiasnccgxttmqdsz', 'uccyumqoyqe', 'kxxliepyzlc', 'wbqcqtbyyjbqcgdbpkmzugksmcxhvr', 'piedxm', 'uncpphzoif', 'exkdankwck', 'qeitzozdrqopsergzr', 'hesgrhaftgesnzflrrtjdobxhbepjoas', 'wfpexx']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('ldrzgttlqaphekkkdukgngl'), ['gttlqaphekkkdukgn', 'ekkkd', 'gttlqaphe', 'qaphek', 'h', 'kdu', 'he', 'phek', '', 'drzgttlqaphekkkd']) from system.numbers limit 10; -select 1 = multiSearchAny(materialize('ololo'), ['ololo', 'ololo', 'ololo']); - -select 1 = multiSearchAnyUTF8(materialize('иечбпрхгебилцмпфвжцс'), ['лцмпфвж', 'ечбпрхгебилц', 'фвж', 'мпфвж', 'вжцс', 'пфвжцс', 'ц', 'чбпрхгебил', 'илцмп', 'фвж', 'ечбпрхгеби', '', 'б', 'хгеб', '', '', 'ил', 'ебилцмпфвжцс']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('змейдмоодкшуищвеишчддуцпх'), ['здсщесгдкзмчбжчщчиоо', 'чфззцмудщхтфрмсзрвшйщ', 'рлунбнзрфубуббдочтвлзмпгскузохк', 'ктзлебцам', 'вчспмж', 
'нгкк', 'гпзйа', 'щпйкччнабакцтлапсбваихншхфридб', 'афсузжнайхфи', 'йрздеучфдбсвпжохрз', 'ошбечпзлг', 'полшхидфр']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('лшнуухевгплвйужчошгнкнгбпщф'), ['гбпщф', 'б', 'ф', 'чошгнкнг', 'йужчо', 'гплвйужчошгнкн', 'бпщф', 'плвйужч', 'шгнкнг', 'хевгплвй', 'плвйужчошгн', 'вй', 'лвйужчошгнкнгбпщф', 'лвйужчошгнкн']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('кцпгуоойвщталпобщафибирад'), ['ойвщталпобща', 'щта', 'пгуоойвщтал', 'ф', 'общ', 'цпгуоойвщталпобща', 'побщ', 'ф', 'цпгуоойвщталпобщафиб', 'побщаф', 'лпобщафи', 'цпгуоойвщталпобщафи', 'пгуоойвщталпобщаф', 'талпоб', 'уоойвщталпо', 'гуоойвщтал', 'уоойвщталп', 'щ', '', 'цпгуоойвщталпобщафибирад']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('фвгйсеккзбщвфтмблщходео'), ['еккзбщвфтмблщходе', 'йсеккзбщвфтм', 'вфтмблщходео', 'вгйсеккзбщ', '', 'йсеккзбщвфт', 'бщвфтмблщход', 'ккзбщвфтмблщход', 'ккзбщвфтм', 'еккзбщвфтмблщходе', 'еккзбщвфтмблщх', 'вгйсеккзбщвф', 'оде', 'оде', '', 'бщвфтмблщх', 'б', 'йсеккзбщвфтмблщходео', 'вфтмблщ', 'кзбщ']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('хбаипфшнкнлтбшрскшщдувчтг'), ['хгшгднфуркшщвфгдглххс', 'цогчщки', 'тдмщшйзйхиквмб', 'етелфмшвмтзгеурнтбгчнщпмйпйжжциш', 'чсбк', 'ибащлшздеуревжйфуепфхкузбзао', 'дкмбщдсбжййсвгкхбхпшноншлщ', 'щхбеехнцегрфжжу', 'збфлпгсмащр', 'скчдигцнсзфрещйлвзнбнл', 'освзелагррдоортлрз', 'утхрч', 'йкбрвруенчччпшрнгмхобщимантешищщбж', 'жгивтеншхкцаргдасгирфанебкзаспбдшж', 'ййекжшщцщ', 'ефдсфбунйчдбуй', 'бвжцирзшмзщ', 'випжцщйзхнгахчсцвфгщзкдтвчйцемшлй', 'лдрфгвднеиопннтчсйффвлхемввег', 'бмтцжжеоебщупфчазпгхггцегнрутр']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('фбуоойпцщишщлбхчрсллзвг'), ['уччхщ', 'вщчсарфмйшгшпйфгмжугмщжкцщгйжзфл', 'кклл', 'лпнжирпсиуо', 'нчипзфщхнтштхйхщрпзитко', 'вйпсдергвцзсцсгмхпбз', 'чфщдфоилгцевпц', 'чааиае', 'чгингршжтчпу', 'щетбнгутшйсгмвмучдхстнбрптничихб']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('лйвзжфснтлгбгцерлзсжфещ'), ['зсжф', '', 'бгц', 'зжфснтлгбгц', 'л', 'цер', 'жфснтлгбгц', 'тлгбг', 'це', 'гбгцерл', 'нтлгбгцерлзсж', 'жфещ', 'взжфснтлг', 'фснтлгбгцерлзсжфещ', 'нтлгбгцерлзсж', 'зжфснтлгбг', 'взжфснтлгбгцерлз', 'взжфснтлгбгце']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('нфдцжбхуучеинивсжуеблмйрзцршз'), ['чеинивсжуеблм', 'жуебл', 'блмйрзцрш', 'цр', 'м', 'фдцжбхуучеинивсжуеблмйрзцр', 'нивсж', 'ивсжуеблмй', 'й', 'всжуеблмйрзцршз']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('всщромуцйсхрпчщрхгбцмхшуиоб'), ['муцйсхрп', '', 'уцйсхрп', 'сщромуцйсхрпчщ', 'схрпчщр', 'сщромуцйсхрп', '', 'уцйсхрпчщрхгбцмх', '', 'цмхшуиоб', 'гбц', 'пчщр', 'цйсхрпчщр', 'омуцйсхрпч', 'схрпчщрхгбцм', 'йсхрпчщрхгбцм', '', 'пчщрхгбцм', 'уцйсхрпчщрхгбцмх', 'омуцйсхрпчщ']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('уузшсржоцчтсачтедебозцвчвс'), ['бомбсзхйхкх', 'отвгстзихфойукарацуздшгбщеховпзкй', 'мфнев', 'вйийшшггилцохнзбхрлхи', 'втинбтпсщрбевзуокб', 'оиойвулхкзлифкзиххт', 'зацччзвибшицщрзиптвицзхщхкбйгшфи', 'кнузршшднмвтощрцвтрулхцх', 'рчбкагчкпзжвтбажиабиркдсройцл', 'щргчкзожийтпдзфч', 'щбошгщзсжтнжцтлкщитеееигзцлцсмч', 'сцкк']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('щчбслгзвйдйжрнщчвфшй'), ['пдашзбалйнзвузкдвймц', 'щхтшйоч', 'фднвфигозржаз', 'рйфопхкшщвщдвл', 'цдкйхтусожпешпджпатфуиткп', 'щпбчсслгщййлвскшц', 'жпснс', 
'уиицуувешвмчмиеднлекшснчлйц', 'пххаедштхмчщчбч', 'ичтмжз', 'лсбкчу', 'бгфдвпзрл', 'йицц', 'цфйвфлнвопкмщк', 'бгщцвбелхефв', 'мймсвзаелхнжйчохомлизенфш', 'трйднхндшсщмпвщомашчнгхд', 'жфцнифлгдзйе', 'зспкшщщенбцжгл', 'рщтб']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('шщпееасбтхогвгвцниуевисгшгбч'), ['гпа', 'стимсркзебхрвфпиемзчзу', 'нзгофухвекудблкадбшшусбеулрлмгфнйгиух', 'кфиашфобакщворувгвкчавфзшх', 'гфпгщгедкмтгрдодфпуйддхзчждихгрчтб', 'тцтжр', 'рцйна', 'йцбпбдрреаолг', 'житсфосшлтгсщдцидгсгфтвлз', 'жвтнжедцфцтхжчщч']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('вхкшгфпфмнщаохтмизпврйопцуйзмк'), ['дтчбкхащаткифружжейабфйкйтрскбощиеч', 'фтоуабхмдааиснрбраттклмйонлфна', 'цадзиднщймшкщолттпгщбх', 'кштбчжтждпкцнтщвмухнлби', 'микудпдпумцдцгфахгб', 'ирик', 'емлжухвмк', 'чгуросфйдцшигцхжрухжпшдкфгдклмдцнмодкп', 'ттбнллквдувтфжвчттжщажзчлнбждчщцонцлуж', 'елцофйамкхзегхклйгглаувфтуувее', 'двкзчсифвтекб', 'шсус']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('йхцглкцвзтшщочпзмнчтуеао'), ['йечдай', 'дащжщзлосмй', 'афуккгугаазшрчпцнхщцтмлфф', 'чфтфскрфйщк', 'жлччкцшнфижтехппафхвщфс', 'бзжчв', 'щкщймнкщлпедидсу', 'оцбажцзшзйпптгщтфекртдпдзшодвойвох', 'йжддбссерхичгнчлкидвгбдзуфембрц', 'ктщвшкрщмдшчогхфхусдотсщтцхтищ', 'пшстккамнбнардпзчлшечхундргтоегцзр', 'нсрнфузгжррчнжначучиелебрб', 'шгжмквршжтккднгаткзтпвкгзхшйр', 'змквцефтулхфохбнхбакдичудфмйчп']) from system.numbers limit 10; -select 1 = multiSearchAnyUTF8(materialize('шждйрчйавщбйфвмнжоржмвдфжх'), ['ор', '', 'йрчйавщбйфвмнжо', 'вщбйфвмнжорж', 'ждйрчйавщбйфвмнжорж', 'йавщбйф', 'дф', 'вщбйф', 'бйфвмнжорж', 'мнж']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('кдшнсйршгвлицбенйбцфрсаччетфм'), ['асмун', 'йогкдчодиф', 'лштйбжнзфкикмпбитжшгкбоослщгзнщо', 'улштжцисцажзчштгжтфффабйлофедуфме', 'дрпгкчджихшзммймиамзфнуиорлищзгйвху', 'йиоршнйоввквбдвдзасма', 'члмвасмфрхжсхрбцро', 'лшкизщушборшчшастйсцкжцбонсшейрщ', 'масдфкршлупасвйфщфважсуфсейшзлащхрж', 'дгхшщферодщцнйна', 'цзфзждбавкжрткст', 'рфбожзееаце', 'кошомвгпрщсдквазчавожпечдиуйлщадфкгфи', 'бшпхнхсгшикеавааизцсйажсдийаачбхч']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('хтиелйтарквурйлжпеегфш'), ['зпмйвзуднцпвжкбмйрпушдуавднвцх', 'фбссчгчвжакуагдвижйтщтшоабпхабжш', 'щхшибаскрщбшрндххщт', 'сммрсцзмптисвим', 'цсргщфж', 'восжбшйштезвлкммвдхд', 'вбсапкефецщжквплуо', 'даеуфчвеби', 'бтптлжпин', 'шчддтнсйкщйщ', 'фжхщецпзчбйкц', 'цсвфпздхрщхцбуцвтег']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('апрчвзфжмбутццрйщкар'), ['индхжз', 'жилцовщччгстби', 'ажс', 'фктйамйтаг', 'шммнзачггоннксцушпчн', 'чдлйтзтоцдгзццисц', 'пнбтувщцдсчнщмсакрлгфмгрй', 'овмсйнщзушвщгуитщрхвйодф', 'бзлштезвлаижхбмигйзалчолшеунлц', 'фкжпеввгшгащз', 'тменбщжмсхщсогттршгек', 'чап', 'х', 'шкомегурлнйпшбщглав']) from system.numbers limit 10; -select 0 = multiSearchAnyUTF8(materialize('двхопооллаеийтпцчфжштнргкк'), ['йймчнздешхбццбжибопгктрнркевпиз', 'фйрохсамщцнмф', 'ййхфдпецжзгнуорвбплоахрфиле', 'пкллкацнвдббогг', 'йщдезамтжйзихщжмцлх', 'гдзувмщиеулиддердшпитвд', 'фхтунйшзхтщжтзхгцорошднпбс', 'фнситбеелцдкйщойлатиуухгффдвищсше', 'нзщщщндцрнищпхйвтбвмцтнуадцбву', 'вбщкапшнв', 'зйлмуимчскщнивтшлчмуузщепшйр', 'шжбвйдр', 'гддждбкначдттфшжшхпфиклртпгм', 'еншащцфафчнгбнщххнзочбтпушщорегшцзб', 'уунеущкззоетбучкц', 'щасифзоажребийещ', 'пщбххсдгйтт', 'хшсчуотрт', 'жкднйрозбцшужчшбкккагрщчхат', 'шачефцгч']) from system.numbers limit 10; - -select 0 = 
multiSearchAnyCaseInsensitive(materialize('QWyWngrQGrDmZxgRnlOMYHBtuMW'), ['ZnvckNbkeVHnIBwAwpPZIr', 'NCzFhWQmOqIGQzMORw', 'tDYaxfQXWpKNLsawBUUOmik', 'IMveCViyAvmoTEQqmbcTbdfjULnnl', 'NRvsdotmmfwumsDpDtZU', 'mnqVnwWOvMiD', 'HXpHrMvGQpbuhVgnUkfFPqjpoRdhXBrFB', 'awtr', 'IMIdOmMHZccbOZHhWOKcKjkwwgkJSfxHDCzR', 'jPLISbIwWJEKPwgvajTxVLws', 'HBfRrzEC', 'VXsysGnAsFbqNOvIaR', 'upCaeaIOK', 'GUDFkrzBiqrbZVnS', 'MoCOePXRlVqCQpSCaIKpEXkH', 'rfF', 'fjhMEpySIpevBVWLOpqi', 'KdeskLSktU', 'vjUuNUlBEGkQyRuojZLyrmf', 'SvSxotkTKCeVzNICcSZLsScKsf']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('gcDqqBCNqhQgVVgsxMXkevYIAxNl'), ['BHnoKRqOoKgmOVkjtehGSsInDvavDWOhkKAUL', 'nYqpmKPTWGdnyMcg', 'TIplHzsSXUz', 'SiQwpQgEdZ', 'YoJTWBJgsbJvq', 'CwyazvXERUFMCJWhTjvltxFBkkvMwAysRLe', 'tXUxqmPbYFeLUlNrNlvKFKAwLhCXg', 'vUbNusJGlwsOyAqxPS', 'ME', 'ASUzpELipnYwAknh', 'VtTdMpsQALpibryKQfPBzDFNLz', 'KmujbORrULAYfSBDyYvA', 'BaLGNBliWdgmqnzUx', 'IzwKIbbSUiwhFQrujMgRcigX', 'pnS', 'UKSZbRGwGtFyLMSxcinKvBvaX']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('HCPOPUUEVVsuZDbyRnbowGuOMhQ'), ['UzDbYrNBoWgUo', '', 'pUUEVVsUzdByrNB', 'nBO', 'SUZdbYrNbOWgUoMH', 'pOpuUevVSUZDbYRnb', 'bowGUoMh', 'VsUZDbyrNbo', 'suzdBYrN', 'uueVvsUZDBYRnBoW', 'gUom', 'eVvsuzDBYRNBoWgUOM']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('RIDPJWYYSGBFWyXikHofbTcZAnj'), ['aFxQyVe', 'OcnZBgPsA', 'iBQaH', 'oesSvsWtgQprSSIPaDHdW', 'EfytiMfW', 'qHiFjeUvQRm', 'LfQkfmhTMUfoTOmGJUnJpevIoPpfpzMuKKjv', 'scYbCYNzJhEMMg', 'yTLwClSbqklywqDiSKmEdyfU', 'HYlGFMM', 'TMQhjOMTImXbCv', 'AVtzpxurFkmpVkddQANedlyVlQsCXWcRjEr']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('GEsmYgXgMWWYsdhZaVvikXZiN'), ['wySd', 'smYgxGMWWYsDHZ', 'vIk', 'smyGxgmwWysDHzAvvikxZi', 'WYsdHZAvVI', 'YGxGmwWYSDhzavvI', 'XzI', 'ySDhZAvvIK', '', 'myGXgmwWySdHz', 'MYGxgmwWySdHZaVvik', 'wYsDhzAvvikXz', 'wwYsdHzav', 'Z']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('XKCeCpxYeaYOWzIDcreyPWJWdrck'), ['tTRLUYJTkSWOabLJlIBshARIkwVRKemt', 'jQgn', 'wdNRsKIVunGlvwqkwn', 'BsbKGBJlkWQDBwqqeIjENvtkQue', 'yLuUru', 'zoLGzThznNmsitmJFIjQ', 'WFKnfdrnoxOWcXBqxkvqrFbahQx', 'QHbgRXcfuESPcMkwGJuDN', 'NPqfqLS', 'bi', 'HnccYFPObXjeGYtrmAEHDZQiXTvbNcOiesqRPS', 'KobVCJewfUsjBXDfgSnPxzeJhz', 'AqYNUPOYDZjwXx', 'xbZydBGZFFYFsFHwm']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('AnIhBNnXKYQwRSuSqrDCnI'), ['', 'HBNNxkyqWRS', 'xKyqwrSUSQR', 'yQwr', 'ihbnnxKYQWrsUS', 'bnnXkYqwrSuS', 'qWRs', 'nXKyqWRSUS', 'qrdcN', 'NiHBnNXkYQWrS', 'NnXkYQwRSUsqRDCn', 'rSusqRd']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('OySHBUpomaqcWHcHgyufm'), ['lihJlyBiOyyqzeveErImIJuJlfl', 'WyfAXSwZPcxOEDtiCGBJvkCHNnYfA', 'hZ', 'fDQzngAutwHSVeoGVihUyvHXmAE', 'aCpcZqWKdNqTdLwBnQENgQptIyRuOT', 'PFQVrlctEwb', 'ggpNUNnWqoubvmAFdjhLXzohmT', 'VFsfaLwcwNME', 'nHuIzNMciJjmK', 'OryyjtFfIaxViPXRyzKiMu', 'XufDMKXzqKjYynmmZzZHcDm', 'xWbDgq', 'ArElRZqdLQmN', 'obzvBzKQuJXZHMVmEBgFdnnQvtZSV', 'ZEHSnSmlbfsjc', 'gjmWPiLylEkYMTFCOVFB']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('NwMuwbdjhSYlzKoAZIceDx'), ['ZKOaZ', 'wBDJhsYlZKo', 'hSy', 'MUwbDjHsyl', 'sYlzK', 'ylZKOAZ', 'y', 'lZKoaZICEdX', 'azIce', 'djHSylZkoAzice', 'djHsYLZKoAzi', 'dJHSYlZK', 'muWbDJHsYLzKOaziC', 'zi']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('gtBXzVqRbepHJVsMocOxn'), 
['DidFXiqhRVBCHBVklLHudA', 'yEhumIpaYXlj', 'iaEmViTRLPM', 'vTwKBlbpaJZGYGdMifOVd', 'zvgfzWeLsMQNLutdAdCeuAgEBhy', 'Ca', 'iHabiaRoIeiJgSx', 'EBfgrJnzHbuinysDBKc', 'kT', 'SGIT', 'BTRuKgHDuXMzxwwEgvE', 'OWJIeTLqLfaPT', 'BQM', 'yMimBqutKovoBIvMBok', 'zIBCYVNYAwu', 'EFDEFWGqvuxygsLszSwSiWYEqJu', 'QJDIXvPOYtvhPyfIKqebhTfL', 'ssALaXRxjguUIVKMCdWRPkivww']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('MowjvqBkjnVTelCcXpoSuUowuzF'), ['Su', 'vqBkJNvTelC', 'Elccxp', 'vtElc', 'JVqBkJnVTELCcxpOsU', 'OsUuOWUz', 'ElccxPoSU', 'wJVQbkJNVtElCC', 'xpOSUUo', 'VQbkJnvTELCCXp', '', 'TeLcCxPOsuuO']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('VfVQmlYIDdGBpRyfoeuLffUUpMordC'), ['vqMLyIddgBPrYFoEulFFu', 'lyIDdgBPrYFOeul', 'dGBPRYFOeUlffUupmOrD', 'OEulffU', 'pMordc', 'FVqmlyiDdgBpRyFoeUlFfuUpMOrD', 'PmO', 'o', 'YiDDgbPRYFOe', 'DGBPryfoeU', 'yIDdgbpRyFOeULfFU', 'lyIddgBPryfoeulfFuU', 'gbPrYfOeUlFfuupmO', 'yFoeULF']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('CdnrzjzmwtMMPLjgcXWsbtrBs'), ['RfgIUeerlPIozKpRQR', 'QRoYzjZlgngJxX', 'mEbqlBIzTQH', 'UmrfJxKyTllktPfyHA', 'ukoZeOPA', 'pbbRaUcJijcxt', 'Rg', 'lSBG', 'HvuwuiqVy', 'Fo', 'aGpUVjaFCrOwFCvjc', 'zKhfkgymcWmXdsSrqAHBnxJhvcpplgUecg', 'ioTdwUnrJBGUEESnxKuaRM', 'QciYRCjRDUxPkafN']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('miTQkQcxbKMwGOyzzRJpfXLyGx'), ['yMwgQQJkeshUugm', 'wGVe', 'XncShWqjp', 'KWjGQCOsfMKWRcgCfebkXZwZ', 'SFWbU', 'WdFDMIcfWeApTteNfcDsHIjEB', 'XRuUJznPOCQbK', 'tibBMGZHiIKVAKuUAIwuRAAfG', 'VVCqVGGObZLQsuqUjrXrsBSQJKChGpZxb', 'bWYAOLuwMcwWYeECkpVYLGeWHRrIp', 'SLzCgfkRWmZQQcQzP', 'VvfOhFBhfiVezUSPdIbr']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('KXoTIgVktxiXoEwfoLCENiEhz'), ['oLCENie', 'xix', 'en', 'IgvktxIXoEWFOLCEnieHz', 'xOEWFoL', 'LC', 'ktxIxoEwfolCenie', 'ce', 'oTIGvktXIXOE', 'eW', 'otigVKTXIXOEwFolC', 'E', 'CEni', 'gVKtxIxoEwfOLCENieh']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('DXKzSivrdLuBdCrEYfMEgPhOZ'), ['', 'sIVRDlUBdcr', 'luBDcrE', 'rDLUbDCreY', 'KzSiVRdLuBDCr', 'dcREYFme', 'lUbdCReyFMEgph', 'sivrDlubdCr', 'BdcreYfMEgP', 'ZSiVrdluBDCrEYfmegpHOZ']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('lTvINMXVojkokvNBXPZOm'), ['ZQOJMEJfrjm', 'vIpmXnGlmWze', 'wbdDKcjrrIzBHypzJU', 'omotHOYbZjWfyVNeNtyOsfXPALJG', 'SXxu', 'yZPDFsZq', 'OVYVWUjQDSQTKRgKoHSovXbROLRQ', 'RnXWZfZwHipewOJimTeRoNRYIdcZGzv', 'sizoEJibbfzwqFb', 'vgFmePQYlajiqSyBpvaKdmMYZohM', 'ENsFoFCxDQofsBSkLZRtOcJNU', 'nG']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('LsTqxiGRdvQClVNBCGMOUHOAmOqPEC'), ['NdFuUQEUWaxS', 'fdOHzUzineBDnWJJvhPNZgB', 'rYAWGIBPxOLrjuquqGjLLoIHrHqSFmjh', 'IVgYBJARY', 'ToivVgUJAxRJoCIFo', 'yQXGrRjhIqFtC', 'PNYdEPsWVqjZOhanGNAq', 'nrQIDDOfETr', 'usJcPtiHKhgKtYO', 'vPKqumGhPbmAJGAoiyZHJvNBd', 'eXINlP', 'WQeESQJcJJV']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitive(materialize('gRzzQYOwLNiDcMFjXzSFleV'), ['XZSfLe', 'wLnIdcMFjxZSf', 'F', 'm', 'Le', 'qYoWLNidcMFjXzsf', 'zqyoWlNIdcMFj', '', 'oWlnIDCMfJxzsfL', 'wlNIdCmfjXzS']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitive(materialize('cYnMXJMJCdibMXoUQHEw'), ['BFrGFZRgzwHGkUVbBiZMe', 'piORdVIWHMBsBDeJRLbGZAHGBrzNg', 'bmDePbTPnFQiCFfBJUxAEYNSbgrOoM', 'gtzeAGwqjFrasTQUgAscfcangexE', 'okLG', 'l', 'EBkkGYNZZURgFgJPlb', 'HDQVngp', 'vEHhtBqWhZHCOrqEKO', 'fgqdFc', 
'COig', 'VftTpSXAmTmvnShHJqJTdEFcyKPUN', 'WDI', 'knBm']) from system.numbers limit 10; - -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('мтдчЛВЖАгвзщущвкфИКмТбжВ'), ['щУщвкФИкМ', 'чЛвжАГвЗЩуЩвКФикм', 'ДчлвЖАГвзЩУЩвКфИКМтБЖВ', 'ЖагвзщуЩВКФикМТБжВ', 'ВжагВзЩУ', 'гВЗщущвкфИКмТБж', 'ГвЗщ', 'щВкФикМТБЖВ', 'вЖАГВзщущ', 'взЩуЩвКФИкМТ', 'ЧЛВЖагвЗщуЩВк', 'тДчлвЖагвзЩуЩвкфИк', 'ТДЧлвжаГВзЩущВ', 'тДчлВжАГВЗЩУ']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('дтрцФхИнпиОШфдАгзктвбУвсб'), ['чТрВиУРФсРпДЩОащчзЦНцхИДА', 'ЗжмПВтмиойУГхАЦПиДУЦноНуййЩХаФТофшЩ', 'уБшлОЙцМПгетЖЧетШжу', 'ЧзИАУХобФрачТеХОШбМщЖСамиМВАКРщАЦ', 'ВйвТзхЙФЧоАЖвщиушАз', 'ЦшИфххкжиФйСЛЛНЛчВоЙВПпхиИ', 'ОатЕтщкЦпбСБйцОшГШРОшхБцщЙЧиУЩЕеФлщ', 'цСПпЧА', 'ШЧНфПмФсКМКДВЦАоФчОУеТЦИзЦ', 'зАбдЛНДГИ', 'фхЩлЗДНСсКЖИФлУАбЛеТФЕпЖлпПхЙиТЕ', 'иВшкНслТКМШЗиДПйфвйНкМЛхеФДзИм', 'лпушПБванпцев', 'ЧОшЧЧмшЦЛЙйГСДФйЛАв']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('нщМаНдЧЛмиВврПокПШмКБичкхвРГ'), ['АЙбаЙйШЛЙРЦмЗчВеИЕощсЦ', 'щЦФдВжчТСЩВКЦСпачЙсумщАтЩувеиниХПДоМС', 'иоАкДРршуойиЩищпрфВаЦПж', 'еЖПйШкГжЧтоГЙМВ', 'ЩПалиБ', 'ТвВлт', 'оХжйЛФеКчхЗВвЕ', 'ерцЩ', 'ШХЖОАрзеп', 'ККМрфктКГишпГЩхаллхДиВИИЛЗДеКйХмжШ']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('вШЙчоМгОттЧАЕнЧаВеЦщчЧошМУ'), ['ЧОмГотТчАЕН', 'ОмГотТчАЕнчАвецЩчч', 'ЧАВецЩч', 'ТЧАеНЧаВ', 'ттчаЕнча', 'ТчАЕ', 'мготтЧАенчавЕЦЩ', 'НЧаВец', 'тТЧаенчАвецщчЧошм', 'Ав', 'ТЧаЕнчавецщчЧоШму', 'аЕнЧав', 'АеНЧав', 'шйЧомГОТТчаЕнчАВЕ', 'шйчоМгОтТЧаЕНчаВеЦщЧчош', 'МУ', 'ошМ', 'гОТтЧаеНЧА']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('фйадзЧмщЖШйЖЛшцГигцШ'), ['НТХеМРшДНУЗгадцуЧИ', 'жпСИКЩМлНлиоктлЦИвНЛ', 'КхшКРчХ', 'кгТЗаШИарХЧЛЖмСЖм', 'ОмиЛй', 'жЕРбФЩНуЕКЕАВоБМОнАЕнКщшзйПкОЗ', 'гиЗдадкбжХМЗслшВИШай', 'двтЗйЙНгПуТзД', 'ТНкмаВЕФ', 'Шеа']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШЕшхмеЦХеАСКощеКИфлсТЧИЗЛ'), ['КифЛсТ', 'ХеаСКощЕк', 'КифлсТЧ', 'шХМеЦхЕаскОЩеКИ', 'ЕшхмЕцХеаСК', 'ХЕасКоЩ', 'чИ', 'ЕцхеАсКОЩек', 'ЩЕкИфлс', 'асКощЕкифЛсТ']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('шоКнВЕрОЖЛпУйХзСугКПВжиРсЙпо'), ['игВербфНахчжЙггч', 'лтимрдфЕг', 'нкеаЖАшНБвйСдКИВГДшАГиАТнФШ', 'МжсТЙМГОииУКВГнцткДнцсоАд', 'ХтпгУСдБдцАЖЛАННоЕцзЕшштккз', 'ншУЦгФСЖшмс', 'нЩшМ', 'гоЖхМшаЕмаДРЧБЛИТпмЗОоД', 'фГКШхчФбЕГЛйкчПИЙххуМГНШзхг', 'ХпХщПЦАзщтг']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('кЧбоЗХфвБХхусмШгНаШШаГзш'), ['Ури', 'лММшткфНзцЦСВАдЩПМШфйОМшефигЖлуЕП', 'сМтЕдчЦафйСТЖЗфлРЙПЦдипжШскцВКХЦЖ', 'АУкжИФцшЛБЦЧм', 'ФПлнАаДСХзфоХПСБоСгМТОкЗЧйЛ', 'ЦшСГЛрцДмнНнХщивППттжв', 'жзЕгнциФ', 'МШЛсЙЧтЛАГжд', 'уИиЕжцоРНх', 'ЧбйГуХтшОНкрЧИеПД', 'ЦдЩЕкКвРЦжщЧциекЗРйхрббЖуЧ', 'иВжен', 'ГчОржвБГсжштРЕБ', 'ШоЖдуЙфчсЧегумщс', 'йчЙГ', 'РДедвТ']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ткРНбЩаРкгГчХшецИкНЕнСЖкйзАуУЖ'), ['ХлЖхУИллрРННйЗйсРуШЧвМбЧЧщфФЦц', 'СЛчКБцСФДшлфщаФлЙСзШабмбхуБжТСТ', 'УКУиввЗЩуВМцпчбпнДГбпЕЖрПбИДркМРОеЧмЧдГ', 'ПчщвШЩвГсЛмММГБ', 'хКЦЧсчжХЩИЖХеНнтоФЦлнмЛЧРФКпмСшгСЧДБ', 'удсЗйУДНЧУнтЕйЦЗЖзВСх', 'хПЖЙИрцхмУкКоСмГсвПаДОаЦНЖПп', 'сВОей', 'ЩЦжщоабнСгдчрХнЩиМХзжЩмФцррвД', 'ЦИсйнЦДоЕДглЕЦД', 'жзйПфБфУФоцзмКЩГПЧХхщщПТпдодмап', 'ДНХГНипжШлСхХхСнШЩЛИснУйЧЩЖДССФфиС', 'ОйЩНнйЕшцФчБГЛвхЖ', 'КЧРВшИуШйВфрпБНМсУмнСЦРпхЗАщЗУСвЧйБХтшХЧ', 'зЛбНу', 'ЗСрзпшЕйРржПСсФсШиМдйМЦГхдйтРКЩКНцкбмгС', 
'СУццБуКнчОищГ', 'уЕГЧлЗБНпУисЕЛ']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ВЦХсЖЗЧЙБЗНбРитщстеМНжвВ'), ['итщст', 'ЧйБЗНбрИтщстЕМнЖ', 'ХСЖЗЧйбзНБриТщ', 'Темнж', 'сЖзЧЙБзнб', 'хСжЗчйБзнБрИтЩстЕм', 'БзнБРиТщ', 'ЗчЙбзНбрИТщ', 'чйбЗНбри', 'зЧйбзНБРИ', 'нБРитщсТе', 'зНб', 'цхСжзчйБЗнБРИТЩСтЕм', 'жЗЧЙБЗнбрит']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ХцМШКАБАОххЕижгГХЩГиНциД'), ['ОРАБЕРВомЛфГНМИКупбхЛаАкЗдМзтш', 'лЗУЩнлбмиЛАфсгМРкцВтлснййишИНАС', 'ТлжлУоУгжукФжЖва', 'жоСШПоУНЩшРМГшОЛзЦБЛиЛдТхПДнфжн', 'чнСУЗбДаГогжДфвШКеЙПБПутрРпсалцоБ', 'ЙозоПщчакщаАлРХбЦгац', 'иаИСсчЙЧБШорлгЧТнчцйзоВБХбхЙФтоЩ', 'ПСзсБЗЕщурфДЛХйГИеПНрмииаРнвСФч', 'ЦйЖЕуТфЖбхЩМтйсЙОгЛбхгтКЕЩСАЩ', 'гтЗуЩлужДУцФВПЛмрБТсСНпА', 'тГвлбчЗМасМЖхдЕгхмЩксоЩдрквук', 'ВРаг']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('тУйВЖдНнщцЗЖфКгфжГфиХСБЕЩ'), ['КгФЖГФи', 'сБе', 'ЖФ', 'гфжгФИхсбе', 'ВЖДНнщЦзжфКГфЖгфИхсбещ', 'ВЖДНнЩЦзжфкГ', 'вЖДННЩЦзжФКГфЖгФ', 'ф', 'НщЦЗж', 'нщЦЗЖФк', 'Их', 'дННщцзЖФКгф', '', 'нщцзжФкг']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШкКРаоПеЗалРсТОиовРжгЙЧМКЛШ'), ['рчсажЕК', 'пЧТМфУрУММждЛйжзУрбкмам', 'бАШеНмВШзлзтушШШсхОсцрчЙПКИБнКжфЧЕХ', 'ЖМЛшбсУМкшфзочщАЖцМбмШСбВб', 'гтРХсщхАИОащчлИЧуйиСпСДФПбРл', 'ЧуОРУаоойГбУппМйЩФДКПВ', 'уУпугйРЕетвцБес', 'ЙЖЦТбСЖж', 'ИБКЛ', 'ТДтвОШСХГКУИПСмФМтНМзвбЦрднлхвДРсРФ', 'вВгНЙХИрвйЕЗпчРГЩ', 'ПчмТуивШб']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('РлчгхзуВШежХЦуМмнВЙщдцО'), ['ХшвМЦДШпЩОСшЦПдруа', 'ФИЦчУвРкпнПшИЕСЧАувиХд', 'фшвбЦОИЗфпИУМщзОЧЗфВцЙПнмтаТгг', 'мЖЩйавтнМСЛ', 'НВбШ', 'ааФДДрВвЙТдПд', 'ЗнчЧущшхЙС', 'рзуСзнеДфЩПуХЙЕл', 'ШСЩсАгдЦбНиШмшКрКс', 'ггнЕфБГзрОнАГЙзЧеИП', 'вшТИпЧдЖРкМНшзпиоиЩчзДмлШКТдпЦчж', 'фЦТЙц', 'ОтУшмбптТКЗеПлЧцЛОкЩБпккфгИн', 'ЩпвхпЗлШБЦ']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЙбйнхНщЧЖщчГОАпчФнЛШФбгЛа'), ['щчг', '', 'апЧфНЛШфб', 'ЙнхНЩЧЖщчгОАПЧф', 'ХНщЧжЩЧгоАпч', 'ХНщЧжщчГо', 'нщЧжщчГОа', 'чЖЩЧГоапЧФНл', 'оапчФ', 'щЧГОАпЧФНлшФ', 'ЩЧГОАпЧФНЛшфБг', 'БЙНхнщчЖщчГоаПЧФНЛШФБгЛ', 'ОапЧфн', 'ф', 'БглА', 'ш', 'шфбГ', 'ХнЩЧЖщчГоА', 'ХНщчжщЧгоапч', 'хНЩчжщЧГоапчфнлшФбгЛ']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('кдЙДТЩеВЕфйКЩЦДиКБМф'), ['щфЛ', 'фЧЩЩичрКйЦКхеИИАпоБВЙЗбДАФио', 'мИтиЦРоВЙсБбСлНзиЛЧОфФевТмижщК', 'тЙгнКШфНТЕБЛцтГШЦхШхБ', 'уаабРГрМЙпМаБуЗпБЙчНивЦеДК', 'мпВЛНДеКПУгРЛЛинзуЕщиВШ', 'ЩжКйШшпгллщУ', 'пршЙПцхХЗжБС', 'нбЗНЙШБш', 'йцхИщиоцаМРсвнНфКБекзЛкчТ', 'хсмЦмнТрЩкДТЖиХщцкЦМх', 'ГмЛАбМщЗцЦйаОНвзуЗмЕКПБЙмАЕЛГ', 'ОЦХРЗРмкжмРИЖИЙ', 'з', 'лЕТкпкдЗчЗшжНфо', 'ИТПфйгЖЛзУТсЩ', 'ОфрбЛпГА', 'МЖооШпЦмсуГцАвМЕ']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЩГТРШКИОРБРеНЖПКиуМОкхЛугИе'), ['брЕнЖ', 'РбрЕНЖпКиУМокХЛу', 'ГТрШКИорБРеНЖпКиУМ', 'рШКиоРбрЕнЖпкИУМОК', 'ИорбрЕнЖПК', 'Окхл', 'шкИоРБРеНЖПк', 'ТРШкИоРБрЕнжПКИУМОкхл', 'КИОРБРЕнжпкиУм', 'Н', 'КиОРбРЕнЖпкИУмоКхл', 'к', 'ГтРшКИоРБРЕнЖпк', 'гтрШкиорбрЕНЖпк']) from system.numbers limit 10; -select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШНвпкфЗвгДжУЙГлрТШаШЛгНЗг'), ['нЗБенВшщрЛАрблцщшБАдзччммсцКЖ', 'бЗЩхзЗЗбФЕйМоазщугБбмМ', 'рЙсВжВсхдйлЩгБтХлчсщФ', 'пиБшКРнбВБгЕуЖ', 'жПшнхпШзУБрУЛРНЩДиаГШщКдЕвшоуПС', 'чЕщкЗмДуузуСдддзгКлИнгРмЙщВКТчхзЗЛ', 'кЖУЗЖС', 'щххОВМшуажвН', 'фбцЖМ', 'ДШитЧЩДсйНбдШеООУдг', 'ЛХПфБВХЦТИаФПЕвгкпкпщлхмЙхГбц', 'чЦсщЗщрМ']) from 
system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ФРХгаСлчЧОцкШгзмКЗшФфББвЧ'), ['кзШфФб', 'ГАслЧЧОцкшг', 'ФфббВЧ', 'ЦкШ', '', 'АслчЧОЦКШгзМкЗШффбБвч', 'РХгаслЧчОЦКШГз', 'РхгаслчЧОцКШгзМкзшФфБбВ', 'Шг', 'Ф', 'ХГАслчЧоцКШГзМкзш', 'ШгЗмКЗшфФб']) from system.numbers limit 10; -select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЧдйШкхОлалщНйбССХКаФзОМрКЕЙР'), ['бссХкафзОм', 'ХОЛаЛщнйБссХкаФз', 'лаЛщнйБсСХ', 'ЩнЙбСсхКаФЗО', 'йБСсХКАФЗОмР', 'йшкХолаЛЩНйбсСхК', 'С', '', 'ЙшкхОлалщНЙБсСхКаФзом', 'Йр', 'щнЙБссхКАфзоМрК', 'рКе']) from system.numbers limit 10; - -select 1 = multiSearchFirstIndex(materialize('alhpvldsiwsydwhfdasqju'), ['sydwh', 'dwh', 'dwhfdasqj', 'w', 'briozrtpq', 'fdasq', 'lnuvpuxdhhuxjbolw', 'vldsiws', 'dasqju', 'uancllygwoifwnnp', 'wfxputfnen', 'hzaclvjumecnmweungz']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('kcwchxxyujbhrxkxgnomg'), ['jmvqipszutxfnhdfaxqwoxcz', 'nrgzkbsakdtdiiyphozjoauyughyvlz', 'qbszx', 'sllthykcnttqecpequommemygee', 'bvsbdiufrrrjxaxzxgbd', 'hdkpcmpdyjildw', 'frxkyukiywngfcxfzwkcun', 'dmvxf', 'esamivybor', 'eoggdynqwlnlxr']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('zggbeilrfpkleafjjldgyfgn'), ['rpypxkpgvljhqthneremvabcd', 'qchzlnsctuwkdxqcrjgihvtfxhqxfqsxm', 'vtozkivjyqcqetmqenuihq', 'fixcvjyzbzejmwdivjf', 'lydoolvnuuamwlnzbyuuwpqqjaxf', 'elkodwthxqpcybwezm', 'wpiju', 'wdzuuwumlqfvga', 'iokphkai', 'wkbwdstplhivjyk', 'wxfbhfturuqoymwklohawgwltptytc', 'jehprkzofqvurepbvuwdqj']) from system.numbers limit 10; -select 9 = multiSearchFirstIndex(materialize('bwhfigqufrbwsrnnkjdzjhplfck'), ['v', 'ovusuizkdn', 'ttnsliwvxbvck', 'uh', 'lfourtjqblwdtvbgtbejkygkdurerqqdwm', 'snmtctvqmyyqiz', 'ckpixecvternrg', 'gluetlfyforxcygqnj', 'igqufrbwsr', 'om', 'huwazltjsnohsrcbfttzwyvcrobdixsuerkle', 'gqufrbwsrnnkjdzj', 'hfigqufrbwsrn', 'lhhyosbtznyeqzsddnqkfxayiyyajggxb', 'igqufrbwsrnnkjdzjhplf', 'pl', 'jtbqaqakbkesnazbvlaaojppxlbxccs', 'gqufrbwsrnnkjdz']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('yevfiumtjatfdnqixatbprvzwqlfgu'), ['ozghvskaixje', 'vmdrvdjhwxdvajmkpcxigsjzmtuhdxgllhzrpqd', 'qfhnxpcmtzpociajidwlcvobjfyxfcugsxy', 'pgamvhedjibcghinjrnowqzkfzibmfmh', 'bcmrdzpcczhquy', 'czosacvwfsbdvwwyirpvbve', 'qu', 'fdkobwlnmxbpvjkapextlbcrny', 'bqutjqobkyobhtpevjvewyksnoqyjunnnmtocr', 'kjlgff', 'oitltmhdburybwfxrjtxdiry', 'kiokuquyllpeagxygqugfmtm', 'wlbkl', 'khubpmstqjzzjzmsvfmrbmknykszqvue', 'lqrbmyndsztyrkcgqxcsnsanqjigimaxce', 'nitnyonuzedorrtkxhhgedohqcojbvtvjx']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('wmvuoeqphsycrvtxghrcozortmdnh'), ['hv', 'ugcmpebvlzgdtcmgkbgzyfel', 'qvmofayljsvybupvvnbhhibsz', 'zvlihxmyxlxwbffwjzjrfjgimmltftqqre', 'mwassqvxptav', 'jrumvqzkiaewngoufhrleakcfrsaxhpxyg', 'sxlxwhvkpavgfhxrxcbnqbstyrejtosxwe', 'psnlqakyfhcupryqatrmwqlswwjylpaiqammx', 'ivozojwldsgtnxpvsi', 'epyzjs', 'legi', 'sdqxxahfbddhacqrglgdcmlslraxfaahhfyodon']) from system.numbers limit 10; -select 12 = multiSearchFirstIndex(materialize('lebwdwxfdzwquhqhbvmte'), ['mwhruilzxvlyrgxivavxbbsq', 'ubuiizuasp', 'xpkzcsf', 'qpeqitoqqqeivohajzhmjbo', 'kbftixqmqgonemmbfpazcvf', 'iyhluioqs', 'hws', 'tupfdksgc', 'ows', 'pngzkoedabstewcdtdc', 'zdmyczldeftgdlwedcjfcoqycjcivf', '', 'xt', 'syuojejhbblohzwvjzzedzgmwc']) from system.numbers limit 10; -select 7 = multiSearchFirstIndex(materialize('wcrqaoecjwkhnskrbahqxfqgf'), ['qegldkdmyaznlmlhzvxfgoukngzbatnuq', 'khgcvgrifwtc', 'hkwcpogbbdqulizrycmneqmqynvj', 'zkqjf', 'xfduxyy', 
'ructdekcoywfxsvpumfefoglljptsuwd', 'wkhnskrbahq', 'crqaoecjwkh', 'ikmpbunpguleinptzfelysiqc', 'lhldcci', 'nooepfypkoxxbriztycqam', 'uxeroptbiqrjartlnxzhhnlvjp']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('psgkkcwttitgrjsobiofheyohadu'), ['achfrepey', 'minlzeiwgjfvvmhnevisky', 'oxfghfdthtyczzveppcoxrued', 'ydhaupodnezvxhcqahfkwtpvxnymriixf', 'slxsbxidylxyurq', 'socyyabwbjdabnuqswrtjtqogirctqsk', 'lvbnacirctyxxspjmispi', 'oj', 'ihmmuuqlosorrwhfxvpygfrzsqpmilcvjodmcz', 'idmtmemqfyrlbwhxz', 'hsqfsfdzvslwbtlwrfavez', 'gszl', 'ei', 'pnywjnezncpjtyazuudpaxulyv', 'iqgavdjfqmxufapuziwwzkdmovdprlhfpl', 'yigk', 'mjidozklrpedutllijluv', 'vixwko']) from system.numbers limit 10; -select 3 = multiSearchFirstIndex(materialize('xtjxvytsseiqrpkbspwipjns'), ['bwmoghrdbaeybrmsnucbd', 'zoslqabihtlcqatlczbf', 'sseiqrpkbspwipjn', 'mdnbzcvtayycqfbycwum', 'npueimpsprhfdfnbtyzcogqsb', 'ytsseiqrpkbspwipj', 'fzvhcobygkwqohwutfyauwocwid', 'naacyhhkirpqlywrrpforhkcjrjsnz', 'vezbzderculzpmsehxqrkoihfoziaxhghh', 'mvvdfqzskcyomjbaxjfrtmbduvm', 'pwipjns', 'tsseiqrpkbspwipjn', 'sseiqrpkbspwip', 'qgrtbcdqcbybzevizw', 'isjouwql', 'rlbeidykltcyopzsfstukduxabothywwbq']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('zxmeusmehplcgbqabjof'), ['hqxgrw', 'fydjyrr', 'cocwtbazwjrswygttvrna', 'wpkvowuq', 'mwnzdxihrxihzhqtl', 'ljkjtmrfbonhqkioyzotyeegrw', 'ofxo', 'rjubwtpbweratrelqlrqotl', 'wvxkcil', 'qvolxxgqs', 'afqlhjnlvxowtnuuzywxuob', 'slwbmq']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('tjcmtoisgbilkygushkpuxklis'), ['bkdohwx', 'dfohgzhcjqirlbrokwy', 'zaemgqgxltznvkccyumhgsftnfigbol', 'otgcaybejwe', 'qn', 'gvfzcyhvmsnbgkulsqrzeekmjkc', 'cajuyauvmhkrriehgwfmtqbkupysudle', 'pmcupysyllzpstolkfpdvieffxaupqtjty', 'elhlzvescbfpayngnnalzixxgunqdhx', 'cvxpgdnqcxeesk', 'etlewyipypeiiowuoewulkpalvcfe', 'ordhwrkwqq', 'wnroixlkrqnydblfrtlbywc', 'xshujuttvcdxzbetuvifiqi', 'meqqxqhntkvzwoptnwskdgsxsgjdawe', 'dnmicrfshqnzosxhnrftxxeifoqlnfdhheg']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('iepqqbvekaflprupsmnpoijrld'), ['kqomoeysekwcplpegdwcdoeh', 'mwdvr', 'aobviioktzwzmpilblbdwstndhimabfgct', 'vqustluciruiyfkoontehnwylnauwpol', 'utcqnitztcgr', 'ityszrqmlwzspnrwdcvdhtziob', 'hmll', 'ilfzvuxbkyppwejtp', 'euxdzqcqutnfeiivw', 'rbcjlmjniiznzaktsuawnfjzqjri', 'fzyxlzzretsshklrkwru', 'jrujmdevqqojloz']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('cufztqffwjhtlkysekklpaywemm'), ['cpawuauqodogaitybtvplknjrsb', 'ynsocxfnxshzwnhlrfilynvz', 'ylrpytgcvtiumdckm', 'mvgrkueaslpgnjvvhzairgldtl', 'iliorsjypskmxfuuplfagktoycywb', 'drvwngp', 'zviuhcxaspwmqqz', 'qfgmrmhycskus', 'szj', 'rooivliiqufztcqlhrqyqvp', 'tufdmsmwue', 'cssowtldgwksbzlqyfereodcpuedighwd', 'odcjdffchhabtaxjvnr', 'o']) from system.numbers limit 10; -select 7 = multiSearchFirstIndex(materialize('zqwvlarwmhhtjjgwrivwfpsjkvx'), ['zcwhagxehtswbdkey', 'okezglmrjoim', 'ilwdviqimijzgoopmxdswouh', 'aqztpsntwjqpluygrvwdyz', 'uzxhjuhiwpz', 'akgc', 'larwmhhtjjgwrivwfpsj', 'isqghxsmcrwlgyloslmlyeboywtttgejdyma', 'arwmhhtjjgwri', 'rwmhhtjj']) from system.numbers limit 10; -select 9 = multiSearchFirstIndex(materialize('fuddujwwcewlhthgwsrn'), ['shtzrrtukxmdovtixf', 'rkcnzzzojqvvysm', 'jlamctgphjqcxlvmpzyxtghnoaq', 'pthrwvbheydmrot', 'kpniaqbcrgtxdyxxdxonbbltbdo', 'igulngxgtauumhckvbdt', 'khgrmskijoxruzzzaigjxonsc', 'rxzeykfxwssltw', 'hthg', '']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('jtgvvkggpkqhbxptjgoy'), 
['nplzawmacgtqfxsp', 'oosw', 'akw', 'hnsenqoqwiydiufozomkyirgjepeqw', 'fpafgahvfdxukzvskbuy', 'tqimmsqffiqfoni', 'rrxkjklmkdhxqwcpfyutqzxu', 'esfqeujcbqxwnvodkwwdbsyozptaf', 'rqnyguyz', 'fftl', 'ccfyavxtxrpi', 'wftpsblszgovfgf']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('steccxkwnptybaddcuau'), ['qagxfznhjaxtyclxdsi', 'rtxwptfyzgthkwrx', 'rmcoxxs', 'vlubx', 'siecygstzivz', 'tksiagm', 'kq', 'dgsqrobxegmdbjkanb', 'lxokyvhveklvdakrxyiqokr', 'tgpmehwdrirpfjonqzhqshbo', 'cqmkargvsfjoxrguymtzsfwkg', 'avkmufhoywprjw', 'xzywtvlpoozmgkrcavevwebv', 'hfiuwslapamiceaouznxm', 'tmfjhqddafhhjbybfphlbwu', 'mrigvhmjvdpny']) from system.numbers limit 10; -select 0 = multiSearchFirstIndex(materialize('ccbgxzoivbqtmyzqyooyepnmwufizz'), ['lcclseplkhxbrrzlnani', 'xggxivwqlpxmpypzovprdkmhrcgjkro', 'dbbmiegotfxjxybs', 'hqtcowpupsyqfx', 'znatfzjbeevbaqbmpofhywbyfxn', 'mnditiygex', 'lazqapwjswhkuimwmjoyseyucllnrfxrwnzj', 'jg', 'dmqwnuvsufgffuubhqeugwcanvflseorrydyyxvr', 'wpjfcfwfgjiybncrw', 'joucnvxxcyjyqlwhrzwnstyj', 'babtxkzasyaffxzd', 'wgcfdyhwxjoytbxffdxbdfinolbltnhqkvyzybc', 'yhrgwbdwopznltjtyidxawqg', 'bvrrt', 'bcwmsys', 'ijdjojhhzaiyjyai', 'eevxwppogogdbmqpbeqtembiqxeiwf']) from system.numbers limit 10; -select 2 = multiSearchFirstIndex(materialize('xrwjeznohtbdvijwsbdksf'), ['hwdfufmoemohatqafdrcvdk', 'tbdvijwsbdks', 'xzwjczbuteujfjifzkbxvezs', 'bdvijwsbd', 'eznohtbdvijwsbdks', 'xadezwhbbmlqz', 'b', 'socrdjxsibkb', 'dk', 'eznohtbdvijws', 'pavsosnncajr', 'jixlmxxmxnnbpebjhitvtsaiwzmtqq', 'yuxmmnrqz', 'mpzytweuycabvu', 'tbdvi', 'ip']) from system.numbers limit 10; - -select 0 = multiSearchFirstIndexUTF8(materialize('црвтгмсрооацволепкщкпнгшкамщ'), ['гйцбсханрейщжнфбхтщбйала', 'дирдфнжпнччхаоцшрийнйнечллтгцбфедгсш', 'жфйндбффаилбндмлточиирасдзйлжбдзег', 'жвоуйфсйойфцвгзшцитсчпкч', 'ршонтбгщжооилчхрзшгсдцпзчесххцп', 'пйучихссгнхщлутвменлмм', 'хишгешегдефесо', 'знупгж', 'щчфу', 'знвтжифбнщсибеноожжметачаохфхсжосдзйуп', 'ггтоцйпгхчсбохлрчлваисивжбшбохдурввагш', 'щлийбчштбсч']) from system.numbers limit 10; -select 5 = multiSearchFirstIndexUTF8(materialize('опднхссгртрхтотлпагхжипхпитраб'), ['шфршсцешушклудефцугщцмйщлошечедзг', 'нйумйхфщцгщклдожхвосочжжислцрц', 'згтпвзцбхйптцбагсвцгтнф', 'пшичси', 'ссгртрхтотлпа', 'апзазогвсбежзрйгщоитмдкн', 'непгайтзкгштглхифмзданоихц', 'пднхссгртрхтотлпагхжипхпитр', 'ждднфлрзалшптсбтущвошрйтхкцнегшхрсв', 'брп', 'сгртрхтотлпагхжипх', 'нхссгртрхтотлпагхжипхп', 'пагхж', 'мфкжм']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('овччцнтчайомсйййоуйуучщххиффсб'), ['жжрддцпнехйр', 'шзбвуооинпаххесйкпкошжмцзгхе', 'ррсннилщлщжгцтйрпхабкехахззнтщемагдйшпсч', 'пуфугнказепщ', 'гддхтплвд', 'сщсчи', 'бйрсахедщфкхиевкетнс', 'йфжцжшпхлййхачзхнфоц', 'цтмтжлщдщофисзрвтбо', 'кщсевбоуйб', 'щгаапзкн', 'осймщовшчозцййизм', 'фкмаат', 'бкзцсдонфгттнфтаглпрцтбхбсок', 'жлмичлйнйсжбгсейбсиезщдмутационжгмзп', 'нбищижнлпмтморлхцхвеибщщлкйкндлтпбд']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('фдситчщдвхмфйтхшдтуцтщжрочщо'), ['ейшфдннтйечгк', 'фуйщгбйшдцирзб', 'ехйцмчщрсртнк', 'увтцмдорщжфгцгзущпувтщкнрфсйбщрзй', 'хчщпхвуарнббпзсцшчщуносйгщпсбтх', 'жтдчрхфмхцххккзппзбнуббс', 'тчохнмбаваошернеймгготлузвсбрщезднеил', 'стссчкшрчррйбхдуефвеепщшзмербгц', 'жбезжпещ', 'вйтсрхптлкшвавдаакгохжцоощд', 'искеубочвчмдхе', 'щмлочпзбунщнхлрдлщтбеощчшчхцелшоп', 'екуийтсйукцн', 'дочахгжошвшйжцпчзвжйкис', 'лтеенешпсболгчиожпжобка', 'букзппщрчбпшвпопвйцач']) from system.numbers limit 10; -select 0 = 
multiSearchFirstIndexUTF8(materialize('гопвмрутфпфбхмидшлуб'), ['цнхшдойгщн', 'дкаежщрапщпщеа', 'фмогимдничрфтхмсцмчпдфтиофнтйц', 'фчмсщисхщуп', 'ощмвдчефозйжбеесбмещочевцчд', 'апкбцйщжщабвппофм', 'мтйоддлфцгдуммптднпщшрн', 'икхнсмжчбхнфхнссгл', 'ущмунинлбпрман', 'ллкнечрезп', 'ажтнвбиччджсзтйешйффдгдрувер', 'йрщ', 'чигдкйшфщжужзлвщулквдфщхубги', 'иккшсмаеодейнкмгхбдлоижххдан']) from system.numbers limit 10; -select 12 = multiSearchFirstIndexUTF8(materialize('срлцчуийдлрзтейоцгиз'), ['жщлнвбубжпф', 'оклвцедмиср', 'нлзхмчдзрззегщ', 'хоу', 'шайиуд', 'ерслщтзцфзвмйтжвфеблщдурстмйжо', 'жмгуйузнчгтт', 'стеглмрдмирйрумилвшнзззр', 'втедлчрчайвщнллнцдмурутш', 'цимхргмрвмщиогврнпиччубцйе', 'ктчтцбснзцйцймридвш', 'ейоц']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('лрицжленфилзсжпжйнцжжупупдфз'), ['чпбрмлрнцмвеуфу', 'рмпизмпжчшбхдудчшохтжш', 'гргцжчпгщищннусв', 'ийщтщвзчшпдзитщубакусхавслрсбткб', 'бйбакижцтибгбгхжцвйчжжщжсжкзф', 'чгрп', 'чуносжусжфчмфжхрщзлщрдвбашажаанча', 'чекршбш', 'лбцкхйсооцц', 'сгвнлегвфмпчтййлрмд', 'наатущркхйимхщщг', 'щпзоеимфощулбзхафпц', 'дцабцхлврк', 'умидмчуегтхпу', 'дщнаойрмчсуффиббдйопдииуефосжхнлржрйлз', 'щзжетезвндхптпфлк', 'бгчемкццдбжп', 'иихуеоцедгрсеужрииомкбззцнгфифоаневц']) from system.numbers limit 10; -select 3 = multiSearchFirstIndexUTF8(materialize('бхжвчашрощбмсбущлхевозожзуцгбе'), ['амидхмуеийхрнчйейтущлуегрртщрхвг', 'фнисцщггбщйа', 'хжвчашрощбмсбу', 'фщвщцнеспдддцчччекчвеещ', 'ущуджсшежчелмкдмщхашв', 'цкуфбиз', 'евозожз', 'ппт', 'лвцнелшхцш', 'ощбмсбущлхев', 'ефхсзишшвтмцжнвклцуо', 'цржсржмчвмфмнеещхмиркчмцойвйц', 'ашрощбмсбущлхевозожзу', 'гхщншфрщзтнтжкмлщанв', '', 'хевозо', 'ощбмсбущлхевозожзуц', 'возожзуц']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('мзчатйжщгтзлвефчшмлшт'), ['гхшфрунирйдзтеафщгк', 'ймхмфлц', 'звуумивмвштчтнтеобзщесакийгк', 'чщжетзнцишхрммтбцакиббчп', 'блмидикавущщдпгпчхйаатйанд', 'цмщшбклгцгмчредмущаофпткеф', 'бнетввйцзпдерхщ', 'ицйнцрввемсвтштчфрпжнатаихцклкц', 'дзлщсштофвздтмчвсефишс', 'пбзртдцвгкглцфесидлвваисщр', 'ммеилбзфнчищч', 'жш', 'лздиззтпемкх', 'байлужднфугмкшгвгулффмщзхомпав', 'рсзнббедсчзущафббзбйоелид', 'цфшйкцксйгуйо']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('жжмзмащйфжщлрффбпврзнидииейщ'), ['ржфзнлйщсздйткаоцруйцгцт', 'илинксщмгщшещееифвпданмйлж', 'кг', 'гпааймцщпмсочтеиффосицхйпруйшнццвс', 'кнзфгжйирблщлл', 'ищуушфчорзлкбцппидчннцвхщщжййнкфтлрдчм', 'тбтдчлвцилргоргжсфбоо', 'ехаех', 'нехщмдлйджждмрцпйкбрнщсифхфщ', 'тцжпснйофцжфивзфбхзузщтмдкцжплавозмше']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('биаризлрвххжкпщтккучфизуршткпн'), ['йбручвндбщвссаеха', 'ол', 'еузкмпогщзгзафшдшоплбфнфдккх', 'ибзихщйфбтаз', 'ибрчиейш', 'нафрпбснзрузнтмнйиомтечтшзбкпзутдилтф', 'тщтбапцчдий', 'щкнггмфцжрзщцзжвлкчбммхтхтуж', 'ваам', 'цкфиушзигбжтацнчдлжжзфшщммтнлж', 'туфовжтнкзщсщщизмрйкхкпц', 'пирзксзикфтшодожшчцг', 'жфчфцфвлйбмеглжйдазгптзщгж', 'тутириждкзчвтсоажп', 'мотзусбхту', 'слщкгхжщфщоцкцтрлгп', 'бругтбфесвсшцхнтулк', 'восур', 'ссежгнггщдтишхйнн', 'вгзосзгоукмтубахжнзгшн']) from system.numbers limit 10; -select 8 = multiSearchFirstIndexUTF8(materialize('мчслвбжвманджййсикнврцдчмш'), ['рлбмй', 'иб', 'жажлцсзхйфдцудппефвжфк', 'огггхзгтцфслхацбщ', 'дзтцкогаибевсйещпг', 'зпцтйзфмвгщшуоилл', 'етщзгцпдйчзмфнхпфцен', 'нджййсик', 'сикнврцдчмш', 'жййсикн', 'икнврцдч', 'паокаочввеулщв', '', '', 'кечзсшип', 'вбжвманджййсикнвр']) from system.numbers limit 10; -select 2 = 
multiSearchFirstIndexUTF8(materialize('нвррммппогдйншбшнехнвлхм'), ['нфошцншблеооту', 'лх', 'цртд', 'огдйншбшн', 'уулддйдщицчпшбоиоцшй', '', 'дрдужзжпцкслетгвп', 'й', 'мппогдйншбшнех', 'дйншб', 'лжвофчзвдд', 'рммппогдйншб', 'ехнв', 'втущсщзбчсжцмаанчлнасп']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('удехбкабиацхпгзнхжелшц'), ['фмнбтйезсфоахофофдблкжщжфмгхтзс', 'тщтамзафозхлз', 'цшжфсбл', 'йзгзилупшллвипучхавшнмщафзмнк', 'лу', 'гтебпднцчвмктщсзи', 'лпщлмцийгуеджекшд', 'пцдхфоецфрунзм', 'зис', 'хпж', 'цтцплхцжишфнплуеохн', 'впх', 'чцчдацлуецрчцжижфиквтйийкез', 'гчшмекотд', 'пйгкцчафеавзихзтххтсмкал', 'сжфхпцгдфицжслрдчлдхлсувчнрогнву']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('щщвфчгамтжашнуошбзшуйчтшх'), ['дийу', 'жеомлсжщймемрсччошдфажцтдп', 'нгопнцквбф', 'хопб', 'ив', 'чвфвшфрдфелрдбтатшвейтг', 'вхкцадмупдчбаушшлдксйв', 'жтжбсвмшшсйеуфдпбдлкквдиовж', 'гтсдолснхесйцкйкмищгсзедх', 'ошплп', 'ифпуррикбопйгиччи', 'чдфймудаибвфчжтзглс', 'зпцмвпнлтунвйж', 'еждрйитхччещлцч', 'вмофсужхгрнзехкх', 'щжгквкрфжмжжсефпахст']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('рфгигуужжцфмоаешщечувщгонт'), ['слащченщлуоцргврбаб', 'тцизут', 'лйрсцолзклжбчрзгббммоищщ', 'уицмлоилзф', 'зпхмшвфйккфщщп', 'ймижрпдщмшв', 'пуощжлрмжлщхмкйгщшщивдпчпжчл', 'ойахшафнж', 'гксомбвцрсбжепхкхжхнсббци', 'панлраптщмцмйфебцщемйахенг', 'сохлгожштлднчсзпгтифсйгфмфп', 'аждчвзну', 'дхшуфд', 'борзизцхнийбщгхепрнзшй', 'фщшздруггрке', 'оевупрйщктнолшбкунзжху']) from system.numbers limit 10; -select 8 = multiSearchFirstIndexUTF8(materialize('кщзпапйднучлктхжслмищ'), ['апмдйлсафхугшдезксш', 'кйрм', 'цйивайчшуалгащсхйш', 'злорнмхекг', 'сгщврурфопжнлхкбилдч', 'бнлпщшнвубддрлижпайм', 'нукдонццнрмовфнбгзщсшщшдичежффе', 'йднучлктхжс', 'зпапйднучлктхж', 'затйотдсмпбевлжаиутсуг']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('жцажссефррршнфмнупщаоафгкщваа'), ['жфпщкгзкрмтщчцтжйчпйдошбшоцд', 'бхгйлйдробптвущшппзуиидежнлтпбжащткцф', 'хлещазйцепдханпажчизнхгншйуазщхй', 'ашцк', 'фрбммхдднчзшс', 'нжцанилзжаречвучозрущцдщаон', 'длмчзцрмжщбневрхуонпйейм', 'шкбщттврлпреабпоиожнууупшмкере', 'вуцпщдиифпеоурчвибойбпкпбкйбшхдбхнаббж', 'нртжвкдйтнлншцанцпугтогщгчигзтоищпм', 'цкплнкщлкшемощмстздхпацефогтск', 'цвждйбсмпгацфн', 'шсжшрзрардтпщлгчфздумупд', 'цйииткглчжйвуейеиииинврщу', 'унлодтулшпймашоквббчйнибтвалалрвбцж', 'нбнфнвйишйжлзхкахчмнлшзуеенк', 'бшлпсщжквпцахигчдтибкййб', 'фчакпзовтрлкншзцулшщмпзж']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexUTF8(materialize('иматеччдфлггшпучумджпфпзмвх'), ['дахахпчлцлаачгцгтфпнжлшчйуцбшсг', 'атжйувхец', 'грдсбвиднницдвшпйршгмегцаоопнжгй', 'чзлхречмктфащмтеечуиагоуб', 'савбхлпилийщтихутйчдгфсойй', 'вбгочбзистзщшденусцофит', 'мар', 'дфшажхдсри', 'тжлмщшж', 'птсрсщгшммв', 'ре', 'зратамкткфкинййй', 'гуцмсизулвазужфдмхнелфнжббдтрудчтнфцр', 'нйчинеучкхнпчгнйвчвсвлгминуцахгщввжц', 'ечагчнуулфббгбел', 'йшжуговрйкащцофдокфчушжктнптйеззушфо']) from system.numbers limit 10; -select 11 = multiSearchFirstIndexUTF8(materialize('азтммйтшхцхлгдрнтхфжбдрлцхщ'), ['нпучщфвспндщшспзмшочгсщжчйгжбжзжжтн', 'хккдйшабисдузфртнллщпбоуооврайцз', 'йпхрфжждгпнйаспйппвхбргшйвжччт', 'ффеее', 'кежцновв', 'еххрчштарзмкпйззсйлмплхбчбулзибвчбщ', 'шфжйдотрщттфхобббг', 'ожоцжущопгоцимсфчйщцддзнфи', 'цуимеимймкфччц', 'прммщмтбт', 'хцхлгдрнтхфж', 'лгд', 'цжбдаичхпщзцасбиршшикджцунйохдлхй', 'пидхцмхйнспйокнттмййвчщпхап', 'йтйзмеаизкшйошзвфучйирг', 'хцхлгдр']) from system.numbers limit 10; 
- -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('gyhTlBTDPlwbsznFtODVUzGJtq'), ['seSqNDSccPGLUJjb', 'xHvtZaHNEwtPVTRHuTPZDFERaTsDoSdX', 'QCeZOYqoYDU', 'bsybOMriWGxpwvJhbPfYR', 'FFHhlxfSLzMYwLPPz', 'tvDAJjaLNCCsLPbN', 'kOykGaSibakfHcr', 'mWAZaefkrIuYafkCDegF', 'ILrFDapnEDGCZWEQxSDHjWnjJmeMJlcMXh', 'zHvaaTgspUDUx', 'tss', 'laUe', 'euUKFLSUqGCjgj', 'Kd', 'MxyBG', 'qRXMsQbNsmFKbYSfEKieYGOxfVvSOuQZw', 'PdBrNIsprvTHfTuLgObTt', 'kMekbxI']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('ZxTznPEbfoBfLElYOrRiHrDLMmTpIh'), ['bJhYwKLeeLvLmXwWvQHWFkDQp', 'dLyZmUicTZmUfjfsFjxxgOiMJn', 'UCYbbGcY', 'kpPiwfWHEuh', 'jviwmHeiTQGxlTKGVEnse', 'cVnEyLFjKXiLebXjjVxvVeNzPPhizhAWnfCFr', 'gkcoAlFFA', 'ahZFvTJLErKpnnqesNYueUzI', 'VIJXPlFhp', 'rxWeMpmRFMZYwHnUP', 'iFwXBONeEUkQTxczRgm', 'ZnbOGKnoWh', 'SokGzZpkdaMe', 'EfKstISJNTmwrJAsxJoAqAzmZgGCzVRoC', 'HTmHWsY', 'CpRDbhLIroWakVkTQujcAJgrHHxc']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('VELfidaBvVtAghxjkrdZnG'), ['fvEFyRHvixuAYbuXygKeD', 'zFNHINreSOFksEGssBI', 'hcdWEcKDGWvfu', 'KczaFjvN', 'nZLTZAYSbfqcNWzWuGatDPUBYaRzuMBO', 'UdOdfdyPWPlUVeBzLRPMnqKLSuHvHgKX', 'DgVLuvxPhqRdSHVRSeoJwWeJQKQnKqFM', 'NNfgQylawNsoRJNpmFJVjAtoYy', 'tWFyALHEAyladtnPaTsmFJQfafkFjL', 'lYIXNiApypgtQuziDNKYfjwAqT', 'QjbTezRorweORubheFFrj', 'htIjVIFzLlMJDsPnBPF', 'ltDTemMQEgITf', 'fprmapUHaSQNLkRLWAfhOZNy', 'dOJMvPoNCUjEk', 'm', 'vEEXwfF', 'aVIsuUeKGAcmBcxOHubKuk']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('kOzLaInSCOFHikwfkXaBfkyjdQ'), ['t', 'emHGfAiZSkZaVTSfplxRiPoDZUTT', 'YHnGJDTzxsboDsLPGHChMHwrHHICBIs', 'gbcbVHSlVeVDOeILWtSLkKfVVjG', 'fPaJjbnNthEwWZyg', 'qS', 'PCQxoLaSdQOjioMKPglmoWR', 'KLMNszm', 'TCErEFyxOvqnHs', 'dRbGzEJqvIGAcilZoHlXtZpjmLLZfsYueKqo', 'iKHmNSbGgaJYJEdMkbobXTdlFgAGEJMQ', 'mUGB']) from system.numbers limit 10; -select 1 = multiSearchFirstIndexCaseInsensitive(materialize('JGcICnWOGwFmJzHjtGJM'), ['fmJzHj', 'LhGTreYju', 'yCELHyNLiAJENFOLKOeuvEPxDPUQj', 'kWqx', 'OBnNMuaeQWmZqjWvQI', 'ektduDXTNNeelv', 'J', 'iCNwoGwfMJzhjtGJ', 'uiIipgCRWeKm', 'bNIWEfWyZlLd']) from system.numbers limit 10; -select 7 = multiSearchFirstIndexCaseInsensitive(materialize('fsoSePRpplvNyBVQYjRFHHIh'), ['ZqGBzyQJYuhTupkOLLqgXdtIkhZx', 'pouH', 'mzCauXdgBdEpuzzFkfJ', 'uOrjMmsHkPpGAhjJwVOFw', 'KbKrrCJrTtiuu', 'jxbLtHIrwYXDERFHfMzVJxgUAofwUrB', 'PLvNyBVQYjRfhhi', 'wTPkeRGqqYiIxwExFu', 'PplvNybvqyJ', 'qOWuzwzvWrvzamVTPUZPMmZkIESq', 'ZDGM', 'nLyiGwqGIcr', 'GdaWtNcVvIYClQBiomWUrBNNKWV', 'QQxsPMoliytEtQ', 'TVarlkYnCsDWm', 'BvqYJr', 'YJr', 'sePrPPLVNYbvqYJRFhh', 'ybvq', 'VQYjrFHh']) from system.numbers limit 10; -select 3 = multiSearchFirstIndexCaseInsensitive(materialize('aliAsDgMSDPISdriLduBFnuWaaRej'), ['gWOFTxMrQGQaLrpJamvRhgeHwk', 'iWsBLzLycWvbJXBNlBazmJqxNlaPX', 'Ri', 'FPLRURSsjvsySncekcxaWQFGKn', 'wgXSTVzddtSGJQWxucYorRjnQQlJcd', 'wOLJWZcjHEatZWYfIwGIqnuzdcHKSFqfARfNLky', 'eEECZMNmWcoEnVeSrDNJxcOKDz', 'duBF', 'EhfLOjeEOQ', 'dUbFNUWA']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('EUzxPFYxMsJaTDzAKRXgZIVSFXU'), ['TDKAgICICjzBKHRqgFAuPCSODemldGGd', 'LvMluSJTIlgL', 'srbRhQKjPIchsipVHsjxwhK', 'vdurVsYkUWiFQVaDOnoNIJEX', 'UzZsZqAUNjMvWJaTqSWMHpzlDhVOaLzHPZfV', 'XcnnPXXEJJv', 'JSwFBNnYzNbIRZdeMfYiAfxzWfnCQFqoTUjns', 'HBMeqdLkrhebQeYfPzfJKAZgtuWHl', 'cMfSOnWgJvGhFPjgZdMBncnqdX', 'orDafpQXkrADEikyLVTHYmbVxtD', 'Vz', 'bfYwQkUC', 'q', 'YqomKpmYpHGv']) from system.numbers limit 10; -select 4 = 
multiSearchFirstIndexCaseInsensitive(materialize('mDFzyOuNsuOCSzyjWXxePRRIAHi'), ['TfejIlXcxqqoVmNHsOocEogH', 'clyblaTFmyY', 'JQfxMAWVnQDucIQ', 'jw', 'fGetlRA', 'uWwCOCd', 'rInhyxSIFiogdCCdTPqJNrqVaKIPWvLFI', 'mimSJjfCWI', 'jqnJvNZXMEPorpIxpWkhCoiGzlcfqRGyWxQL', 'bxCJeVlWhqGHoakarZcK', 'unsUOcSZyjwxxe', 'E', 'PR', 'nsUoCSZyjwxXEPr', 'sfotzRPMmalUSjHkZDDOzjens', 'zYJwxx', 'DFzyouNsUocsZ', 'QBaQfeznthSEMIPFwuvtolRzrXjjhpUY', 'sQPVBaoeYlUyZRHtapfGM', 'lPiZLi']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('VOAJfSkbDvNWAZNLIwqUgvBOddX'), ['pHrGGgJ', 'VohjtPdQZSNeKAlChDCnRTelroghFbZXVpnD', 'rnWebvdsmiFypMKL', 'NtKRiJOfAkWyKvubXrkOODgmZxvfOohsnHJEO', 'nxsDisKarasSZwESIInCJnYREUcoRUTXHBUH', 'mXYYr', 'jujScxeTBWujKhKyAswXPRszFcOKMSbk', 'INEegRWNgEoxqwNaGZV', 'VVyjMXVWVyuaOwiVnEsYN', 'mkLXSmXppxJhFsmH', 'pRVnBrWjqPeUDHvhVuDbzUgy', 'PzchFdPTkOCIVhCKml', 'KXaGWnzqoHBd', 'PhzQVqIOLleqDSYNHLjAceHLKYPhCVq', 'aixxTqAtOAOylYGSYwtMkZbrKGnQLVxnq', 'ruEiaxeRaOOXGggRSPlUOGWSjxh', 'prSULtHvDMw', 'vEpaIIDbGvIePYIHHZVNSPYJl']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('ZHcEinZEFtfmHBLuCHntUhbIgY'), ['GKElMPEtmkLl', 'mkrzzjSRfXThuCQHkbZxRbhcymzTxcn', 'PREwQjxBJkpkiyuYEvtMZNFELgbINWsgf', 'lFEGlPtaDJSyoXzwREiRfpzNpsaBYo', 'tmVTuLPhqhgnFNhHvqpmc', 'NtijVhVfAwpRsvkUTkhwxcHJ', 'O', 'FSweqlUXdDcrlT', 'uljEFtKVjIzAEUBUeKZXzCWmG', 'dBIsjfm', 'CNaZCAQdKGiRUDOGMtUvFigloLEUr', 'yWjizKZ', 'QqPVdyIFXcweHz', 'uPmgGWGjhzt']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('AYMpbVsUQqAfoaMiJcYsulujYoSIx'), ['aXECumHNmAEefHPJy', 'hTosrERBdVCIilCYcMdHwaRh', 'PVDBpwrc', 'uFvQRPePvmzmocOauvEjqoxMhytzOwPSOCjmtm', 'kQqIlSCHDmWXCKN', 'ybAHGYDEDvvOJsF', 'WpkANi', 'cFGuzEcdahZtTdLFNBrRW', 'EBaybUFxO', 'mRlZUzHzMsMAgvtRtATEDLQvXZnZHw', 'uqxckjqpCBHiLgSPRz', 'Lv', 'AJcRfAvBmQVMOjaFfMfNHJt', 'FYsPM', 'pkKXTPgijOHFclqgVq', 'Ck']) from system.numbers limit 10; -select 11 = multiSearchFirstIndexCaseInsensitive(materialize('gmKSXWkNhKckrVNgvwiP'), ['bdJMecfCwQlrsgxkqA', 'NTgcYkMNDnTiQj', 'fmRZvPRkvNFnamMxyseerPoNBa', 'rfcRLxKJIVkLaRiUSTqnKYUrH', 'YSUWAyEvbUHc', 'PridoKqGiaCKp', 'quwOidiRRFT', 'yHmxxUyeVwXKnuAofwYD', 'gichY', 'QlNKUQpsQPxAg', 'knhkCKRVNGvWIp', 'jAuJorWkuxaGcEvpkXpqetHnWToeEp', 'KnHKCKrvNgVW', 'tCvFhhhzqegmltWKea', 'luZUmrtKmmgasVXS', 'mageZacuFgxBOkBfHsfJVBeAFx', 'hKC', 'hkRCMCgJScJusY', 'MKSXWknHkckrVNgv', 'osbRPcYXDxgYjSodlMgV']) from system.numbers limit 10; -select 15 = multiSearchFirstIndexCaseInsensitive(materialize('lcXsRFUrGxroGIcpdeSJGiSseJldX'), ['pBYVjxNcQiyAFfzBvHYHhheAHZpeLcieaTu', 'SQSQp', 'OQePajOcTpkOhSKmoIKCAcUDRGsQFln', 'AYMDhpMbxWpBXytgWYXjq', 'gkUC', 'oWcNKfmSTwoWNxrfXjyMpst', 'fQSqkjRNiBGSfceVgJsxgZLSnUu', 'LRrhUjQstxBlmPWLGFMwbLCaBEkWdNJ', 'cZnaActZVoCZhffIMlkMbvbT', 'Uxg', 'vlKdriGMajSlGdmrwoAEBrdI', 'Fl', 'XzcNdlUJShjddbUQiRtR', 'AqowAuWqVQMppR', 'SRFUrGXrOgiCP', 'k']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitive(materialize('KhwhbOzWvobUwJcteCHguFCn'), ['LkDYrpvDfPL', 'CIaTaShobVIaWjdbsNsCMdZKlGdtWuJmn', 'zYcsxxFyfuGrPdTPgEvGbXoYy', 'vDIeYpJbLMGMuRkIrPkAnqDDkqXPzy', 'Ievib', 'CREiuEsErFgvGEkQzThHtYtPmcL', 'JjRWKyALtSkoGmRxh', 'JxPhpijkDOpncCKyDEyXvKNua', 'jo', 'mKpFscuBEABMAlQO', 'qiFTgJpcnUMRKzTEuKY', 'pXBtITxCPRaXijM', 'guYVLpIbu', 'tSKYIxv', 'oDnWaFAmsXGRdGvRPhbCIvFSFQNlSVYB', 'phdckINUiYL']) from system.numbers limit 10; -select 14 = multiSearchFirstIndexCaseInsensitive(materialize('pXFoUGwVTAItBqgbBaQwAqmeh'), 
['LfBevBpGnaSlmGhbeZ', 'NtBYzEksiXvYI', 'jMeRw', 'omtaduY', 'BsWyvNdkfXsTBxf', 'CtoOIvaesuca', 'pgJcRIBVbyaPBgGsNKP', 'bAwdUMnwKvMXfFHQWrtfMeqcORIJH', 'GDxZblrqWSxUJFjEuXArPtfHPdwSNGGL', 'LLxcfp', 'NrLghkFpwCdvHJBfPBgiMatNRaDKjO', 'XCzr', 'cCojPpfLkGZnaWBGpaZvrGMwgHNF', 'BaQWAQmE', 'AQ', 'RtxxEZDfcEZAgURg']) from system.numbers limit 10; -select 5 = multiSearchFirstIndexCaseInsensitive(materialize('KoLaGGWMRbPbKNChdKPGuNCDKZtWRX'), ['FBmf', 'QJxevrlVWhTDAJetlGoEBZWYz', 'tKoWKKXBOATZukMuBEaYYBPHuyncskOZYD', 'kgjgTpaHXji', '', 'xOJWVRvQoAYNVSN', 'YApQjWJCFuusXpTLfmLPinKNEuqfYAz', 'GXGfZJxhHcChCaoLwNNocnCjtIuw', 'ZLBHIwyivzQDbGsmVNBFDpVaWkIDRqsl', 'Kp', 'EyrNtIFdsoUWqLcVOpuqJBdMQ', 'AggwmRBpbknCHdKPgun', 'xNlnPtyQsdqH', 'hDk']) from system.numbers limit 10; -select 6 = multiSearchFirstIndexCaseInsensitive(materialize('OlyNppgrtlubvhpJfxeWsRHpr'), ['slbiGvzIFnqPgKZbzuh', 'fakuDHZWkYbXycUwNWC', 'HnVViUypZxAsLJocdwFFPgTDIkI', 'bLx', 'fmXVYOINsdIMmTJAQYWbBAuX', 'pjFXews', 'BG', 'vrSQLb', 'ub', 'pREPyIjRhXGKZovTqlDyYIuoYHewBH', 'hnNQpJmOKnGMlVbkSOyJxoQMdbGhTAsQU', 'UwaNyOQuYpkE', 'yHNlFVnuOLUxqHyzAtNgNohLT', 'YJRazuUZkP', 'z', 'lUbVhpjFxEWsRhP']) from system.numbers limit 10; -select 6 = multiSearchFirstIndexCaseInsensitive(materialize('ryHzepjmzFdLkCcYqoFCgnJh'), ['cLwBRJmuspkoOgKwtLXLbKFsj', 'YSgEdzTdYTZAEtaoJpjyfwymbERCVvveR', 'RzdDRzKjPXQzberVJRry', 'HUitVdjGjxYwIaLozmnKcCpFOjotfpAy', 'LWqtEkIiSvufymDiYjwt', 'FDlKCCYqoFCGNj', 'jmZfdlKCcyQOFcGnJ', 'OZCPsxgxYHdhqlnPnfRVGOJRL', 'JfhoyhbUhmDrKtYjZDCDFDcdNs', 'KCCYqo', 'EPJMzFDLKcCYQ', 'zLQb', 'qsqFDGqVnDX', 'MzfdLkCCyQOFc']) from system.numbers limit 10; -select 5 = multiSearchFirstIndexCaseInsensitive(materialize('oQLuuhKsqjdTaZmMiThIJrtwSrFv'), ['MsfVCGMIlgwomkNhkKn', 'fBzcso', 'meOeEdkEbFjgyAaeQeuqZXFFXqIxBkLbYiPk', 'tNV', 'i', 'EwuTkQnYCWktMAIdZEeJkgl', '', 'hUo', 'dtAzmMITHijRtwsrFV', 'vhnipYCl', 'puor', 'TazMmiTh', 'ITHIJRTWSrf', 'luuHksqJDTaz', 'uHkSQjDtazMMiThIjrtwSRFV', 'gpWugfu', 'QjdtazmmIthIjRTWSRFV', 'ZdJpc']) from system.numbers limit 10; - -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ИпрУщйжббКВНИчйацпцоЛП'), ['цШСкЕвеГЕЗЦщруИБтЦсБГАу', 'Хнщта', 'БшА', 'СалШйР', 'ЩфДГРРчшБДММГЧоноЖСчдпВХшшгйН', 'бЕжПШЦддожнЧоЕишчшЕЙфСщиВПФМ', 'ТЗзГФх', 'Чфл', 'КнНкнЖЕкППварНрхдгЙкДешмСКИЛкеО', 'ЖИсЧПСФФМДиТШХЦфмЗУпфрУщСЛщсфмвШ', 'ллЙумпхчОсЦМщУ', 'ГМУНЦФшНУбРжоПвШШщлВФАтоРфИ', 'БХцжеНЗкжЗЗшЦзфгдЖОзЗЖщКМИШАтЦАп', 'мтСкЕнбХШнЛхХГР']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('цмйвГЖруДлдЦавхЖАлоЕхЗКд'), ['ХфБПМДВХЙ', 'саЗваАбднХбЦттмКсМбШбВМУйНКСЖжХЦНц', 'плиЩщШАцЖсхГ', 'ЗнУЕФЗВаНА', 'ЧДйСаЗГЕшойСжбсуЩуЩщбПР', 'ЧЕуЩкФБВвчмабШЦтЖбОрЗп', 'йХбМсрТАФм', 'РЖСЗвЦлНВПЧщГУцЖ', 'ГГлЩрОХКнШРТуДФ', 'шСабРжла', 'ЕчБвгаРЧифаЙщХПпГЦхчШ', 'дайшйцВНЩЧуцйдМХг', 'УнзНКЧххВрцЩМлАнЖСДОДцбИгЛЛР', 'сЛЗзПбиАгзК']) from system.numbers limit 10; -select 2 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('дфЧлзОжММФкЖгиЗЩлоШжФТкцк'), ['ЗРТцИрсФСбПрщГЗ', '', 'ЖГИЗщлОш', 'АДПН', '', 'чЛЗОЖмМфКжг', 'Мфкж', 'ндаовк', 'зГЛРГАНШмСмШМефазшеБкзДвЕШиЖСЗЧПИфо', 'ФЧЛзОЖммфКжгиЗЩ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ИИКДМЛхРчнвЙЕкВЧелТйЛВТ'), ['АчшОЛтНЙуЦЛЙфАКУйуТЗМеЗщОХТМЗеТА', 'НЦУДбчфРТОпЛкОгВпоО', 'неДавнНРеАУфТтфАнДчтнУМЛПШнроАчжш', 'бГржВПЧлЛтСВТтаМЦШШ', 'БщГщРнБхЕЛоЛсмЙцВЕГ', 'цбАжЦРеу', 'ХсЦРаНиН', 'нббДдВЗРС', 'змОПпеЛЖзушлнДЛфчЗлцЙЛфЖрЛКг', 'фШиЖСУоаНПйИВшшаоуЙУА', 'ЛктХиШРП', 'МапщВйцХч', 'жмУТкуГбУ', 'сйпзДЩоНдШЕТбПзФтсрмАФГСз', 
'ЛБУвйладЕижрКзШУАгНЩчЕмАа', 'мЧпФлМчРбШРблмтмПМоС']) from system.numbers limit 10; -select 8 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ПоДУЗАтХншЦатИшХвмИЖчГнжчНцух'), ['жЛЧХзкжлиЛцЩбЧСнЛУжЖпКРвиСРН', 'шадмЩеУШБврУдЕБЗИгмЗЕФШчЦБСзПидтАлб', 'йпГмШСз', 'хЖФЙиПГЗЩавиЗЩйПнБЗЦЩмАЧ', 'ХесщтлбСИуЦ', 'вар', 'ЙкМаСхаЩаЗнФЩфКжПщб', 'ОдУзАТХншЦатИШхвМиЖчгнЖч', 'ЗВЗДБпФфцвжУКвНсбухссбЙКЙйккЛиим', 'гХхсГЛшдфЖЛбгчоЕмоЧр']) from system.numbers limit 10; -select 7 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ихзКЖЩсЧРСЖсЖжЛАшкТхИйТгМБпск'), ['ДРОБм', 'нз', 'тОЛ', 'щРзуЖрТ', 'Мдд', 'АЦГРК', 'Чрсжсжжл', 'чРсжсЖжл', 'ктхИйтГмБ', 'аАзЙддМДЦЩФкРТЧзЧПУойоТхБиЧПлХДв', 'иЙтгМбп', 'РицлПн', 'йДГнЧкЕв', 'ВМЩцАш', 'хКЩнДшуБЕЛТФГВгцБПРихШЙХгГД', 'иЙТГМ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('жггкщцзщшамдбРЗжйТзвхшАпХСбе'), ['лВТвтлРБжиЛЦвРЦкАЦаНБгуОН', 'рШаавцжзМрзВЧДРСузб', 'оемрЗМгФБНмжп', 'ЛбмХбФЧШГЛХИуТСрфхп', 'ЖшТдтЧйчМР', 'ЧнИМбфУпмЙлШЗТрТИкКИЩОЧеМщПЩлдБ', 'ГвРдПжГдБаснилз', 'уТнТчТРЗИЛ', 'ИТЕВ', 'дИСЖпПнПСНОвсЩЩшНтХЧшВ', 'штабтлМнсчРЗтфсТЩублЕЧйцеЦТтХ', 'ХбхгУШвАзкшЖ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('нсЩЙЕМмЧЛСйФцГВМиатГХш'), ['КсОПЧИкВсКшРхнкхБжду', 'мШмпТащжФ', 'ББЖнианЧЦпмрГЩГМаЛКжА', 'арИжзжфГТУДИРРРбцил', 'дфдмшМИщТиЗПруКфОнСЦ', 'Рцч', 'гмДгВДАтсщКЗлхвжЦУеФДАТГЙЦЧОЗвРш', 'чфХЩсДбУбВжАМшРлКРщв', 'нцБйсУ', 'фасДЕчвчДмбтЖХвоД', 'аБЧшЖшЖАКргОИшпШЧзТбтфйвкЕц', 'ЗжжсмкжЛд', 'щщлПзг', 'бП']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('сКиурчоиаЦйхгаУДПфчИтИК'), ['МЧПцУАМрХКЧмАЦннУшмРчкЖКХвху', 'РвДуВиашрРКкмжшЖНШБфлцжБЦР', 'йМУиУчНЧчРшДйБЗфЩЦйПсцгкДС', 'НсмаЛзЧвНЦШФуВРпзБГзйКцп', 'ЖлМЛУХОБллСЗСКвМКМдГчЩ', 'ЩХПШиобЛх', 'аФАЖВтРиЦнжбкСожУЖЙипм', 'аУГжУНуМУВФлж', 'ШБчтЗкЖНЙк', 'ЩоГПГчНП', 'мВЗйЛаХПоЕМХиИйДлшРгзугЙЖлнМппКЦ', 'вчмДФхНеЦйЗсЗйкфпОщПтШпспИМдГйВМх', 'ИЗИжЧжаГЩСуцСЩдкскздмЖЦ', 'дАмфЕбгс', 'ГМттнхчЩжМЧДфщШБкфчтЧ', 'ШЕииФБпщЙИДцРиЖжЩл', 'ОпуОлБ', 'хБ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('риШМбгиПЖннНоЧргзГзеДпЛиНт'), ['икДкбйдройВУсвФзрПСусДнАШо', 'чуУеТкУВФхз', 'ЕГпйчехЗвЛлБблЧПДм', 'зеоЩЧожКЛбШЩдАрКБНйшКВШаЗгПш', 'виФКуЗОтгВмТкБ', 'цДрЙгЗРаЧКаМДдБЕЧзСРщВФзПВЧГвЩрАУшс', 'мБЗИУдчХХжТж', 'ФТНМмгЖилуЛйМ', 'ЗегЩЦнЦщцИк', 'оГОусхФсДЖДЩИЕХЗпсПЩХБТГЕп', 'АУКНзАДНкусВЧХвАж', 'КвКрбсВлНАоЗсфХОйЦхТ', 'вФдеХацЧБкрхМЖЗЧчКшпфВчс', 'йХшиОвХЗжТпДТбвУрпшЕ']) from system.numbers limit 10; -select 11 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('МойрЙлтЖйБдББЛЕЕЦузЛфпИЕГт'), ['ПОжЦЩа', 'СШзЧФтСЗохЦЗдФтцНТу', 'вЕдТ', 'ечУФаМДнХщЕНУи', 'вмеосТзБАБуроЙУЛгФжДсЧщтчЕзлепгк', 'ИЧтБрцПмРаВрйИвНЛСйпЖжУВдНрурКшоКХП', 'ЕН', 'щКЦЩгФБСХпкпит', 'ей', 'ЕахшеОМРдЕГХуГЖчвКХМЕ', 'Гт', 'НужЛЛЙОАл']) from system.numbers limit 10; -select 11 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('еззЦАвУаДнзИКЙнЙдртРоП'), ['КгЩбшПЛКвтИРцйчккгЧчЧмтГ', 'кЛппСФщзМмТйВЕтбЩЦлО', 'ШпдзиЖх', 'иИХ', 'пУаАФгсмтофНФХиЦЕтТЗсОШЗЙ', 'фаКАБТцФМиКЖрИКшГБЗБ', 'идЖЙдЦММУнХЦЦфсФМ', 'МиЦечЖЦЙмРВЙОХсБРНнрлйЙшц', 'ТфдСтМгтмимТМАучтхПНЦлуф', 'бейККЛСггУЦБсокЕЙпнРЧ', 'цавУАДНЗИКЙнЙд', 'ЩйЕЖчЧщаПшжФсхХЛЕТчвмЙнуце', 'РТРОП', 'цАВуАДнзИкЙНЙдРтРо', 'аЩПИд', 'ОСчКшОАчВмр', '', 'уЙЛИуЕУвцДшНОгбТбИШв', 'АВУаднзИКЙНйдР', 'жТйоП']) from system.numbers limit 10; -select 12 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('шйМЦУщвфщшбмлТНВохСЖНУ'), ['хшТАпТоШхКНсДпвДЕчДМНбАНччд', 
'ХКуПСтфСйРжмБглОШЙлйДкСФВйВ', 'хпмНЦМУШеАД', 'чзмЧВвлбЧкАщПкзТгеуГущб', 'шзжрДд', 'еЗГОЙНйИБЗДщИИНицмсЙЗгФУл', 'кнщЙхооДТООе', 'всзЙнТшжФЗДБДрщВДлбвулДИаз', 'мп', 'уБОйцзнМпИсксхефбдЕЛйгИмГШГЗЩ', 'ОМпзШШщчФФнвУЧгжчиндЧч', 'щВФЩШбмЛТн', 'бм', 'БпфнкнйЗцПдЧЩбВ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('НЗБлОбшмОПктткоччКиКрФсбкШАХ'), ['нффЕББУЖГшЖвГфЦФГЕСщсЩЧлфнАшшктизУ', 'нСмпцхшИои', 'ЧИчЗУтйЦхГезппФРХХШуцЗШВ', 'РИнщН', 'НЩдВТсЙсОдхРбМФнСпАбОПкудБФСчмб', 'йхглпдКтртгош', 'ибгУРАБцх', 'ИЕиЛрИДафмЗИкТвАуГчШугбЧмЛШщсОЧбБкП', 'ЩСМуХМ', 'АУсмдЗБвКфЩ', 'пгбТНОйц', 'МоИ', 'КОйкзОЕИЗМЩ', 'чщттЛРНнГхЗхХй', 'ЩшцЧРКмШЖЩЦемтЧУЛГкХтВНзОжУХТпН', 'ЕшбБНчрДпЩЧМлераУЖХйфйдчтсчПШ', 'дбФйтИАшДйЩтбФйШуПиРлГмВОШаСлШЧИвфЖщгж', 'ОДжТЦщпщИжфуеЩмн', 'ПЛНЕзжСчВКДттуФРУ', 'БбмеГЩХшжрцОжХНииВКВлдиХБДСмнНфХЛТХ']) from system.numbers limit 10; -select 4 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ЕКаЖСЗЗЕЗгПдШкфцЙТцл'), ['ЙКМИХРОХ', 'НвМУХзфчДбАРЙДу', 'чмщжФшшжсЗТв', 'жСЗзеЗг', 'ЛФсКзВСдЦД', 'АЖсЗЗЕЗГ', 'Пдшкфц', 'усйсКщшрДрвнФЛедуГХ', '', 'цйтЦ', 'Ощс', 'ЕЗГпдшКф', 'ззеЗгп', 'УгЛйхШТтшрЛ', 'ЗзЕЗгП', 'КЛмТЩРтрзБбЩРгФбиОБазУнтУЦ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('чЕжАфАрБпКбДмшАшТШККауЩИхНВО'), ['ЧЙпЗЧЧлйПЙЖЙшККг', 'зйхуМЩАИПГЗА', 'ЙцехноХниИбзБЧ', 'чВомЗОфУроС', 'дбРхХЗрзоДДШщЕДжиФаЙ', 'еЛзТцЩДиДГрдМОНЧУнеТуДЩЧЦпГЕщПОРсйпЧ', 'ФчнпМРЧцПЙЩЩвфДХПнУхцЩСИ', 'цлШеУкМБнжЧлУцСуСЙуотшМфйс', 'лугГлкщКщкзЛйпбдсишргДДшОувр', 'ЗРИаФЛЗФрСзм', 'аЗвжВгхЩоЦ', 'чГКлеБНДнИЖЧеШЧДнИвсГДЖЖфБМНсУЦосВс', 'щЦнПУзЧщнЩЕ', 'рВУв']) from system.numbers limit 10; -select 20 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('анктгЦВВкЧвЖиБпфТйлр'), ['НшДПчтсСЧпкидаХжаЙчаДчЦГшГ', 'ХнцЛШИрХВаРхнЧИЙрОЛЛИТпППфгЖЩФ', 'ФАЛущПупмдМБмтйзУшрВМзцзШжгД', 'ГчЛЧеЛДХеипдшЦЦмаШНаРшУТ', 'фОЕфжО', 'ТНсУАнчшУЛЦкцчЙ', 'ЛйЦКБЗГЦйКЩиОПуТЦкБкБувснЙи', 'Бунф', 'ИтХЛШСУНЦВйРСЙчДчНвйшЗЦй', 'АцСКнзБаЖУДЖегавйБгужШАДЙтжИВк', 'ЦцХщфирДПрСуХзхЖМЕщ', 'кфдБЖКншвУФкЗДКуЙ', 'СкиСЦЗЦРмгЦНпБхфХДЙщЛзХ', 'йУепВЖАПНбАЩуЛжвЧпхМ', 'БпЧшпДочУвибщерйхйтОБАСПнЧМИОЩ', 'чФгНЗщвхавбшсООоВштбЧ', 'уДиЕцнЙХВЕйИАГдЕ', 'тп', 'ЧЕРЖсгВ', 'вЖибПФТЙЛ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ипозйпхЛОЛТлСМХЩдМвМгШИвГиЛп'), ['ФСГзиГррБДНКГЛХбААФхИ', 'гегпАвхДЕ', 'ЦХжзщХИвхп', 'ЗЖ', 'ХОКцКзЩо', 'абИОрГПМТКшБ', 'кмХТмФихСЦсшУдхВбИШМНАНмпмХОЗйПЩч', 'еОжТСкфЕТУУжГ', 'НтщМЕПЧИКЙКйй', 'ежСикИвйЛж', 'ушЩФОтпБзЩЛЗЦЧЙиВгБЧоПХНгОуАДТЙж', 'фМЕРефнутпнцФРнрГЖ', 'хшДЧзнХпфорвЩжмГРЦуХГ', 'ЧЖн', 'вВзгОСхгНумм', 'ЗДоВлСжпфщСКсщХаолЛнЛЗбСхвЩвЩНоЩЩМ']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('МрЗтВФуЖРеЕШЧхПФбжжхчД'), ['щжОожЦндцШйТАй', 'йуРСЦУЗФУЦПвРфевСлфдРещЦтИтЩЩТг', 'ЕГЧдмХмРАлнЧ', 'йнкФизГСЗнуКбЙВЙчАТТрСхаЙШтсдгХ', 'ЧПрнРЖЙцХИщ', 'зЕ', 'СжВЩчГзБХбйТиклкдШШИееАлЧЩН', 'МШщГйБХжЙпйЕЗТзКмпе', 'НКбНщОМДзлдЧОс', 'НчзВХОпХХШМОХФумБгсрРЧИчВтгутВЩо']) from system.numbers limit 10; -select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('упТУЖелФкЧЧУЦРжоБтХсжКщД'), ['щКшуОЖааЖйнЕбДИжМК', 'ЕкнШцХРВтНйШоНбЙйУоЧщУиРпШЧхмКЧжх', 'рвЩЗоЗхшЗвлизкСзебЩКМКжбша', 'ДииБНСШвцЦбаСсИжЕЗмхмВ', 'СЦоБЗПМтмшрУлрДТФГЖиувШЗууШзв', 'ЦЗБЕзВХЙбйВОмЗпхндЗ', 'ЗНизЧВШкГВтпсЖж', 'уШиБПЙЧтРаЕгИ', 'ЙшпПА', 'ЧоММаАйМСфбхуФкефФштгУА']) from system.numbers limit 10; - -select 0 = multiSearchFirstPosition(materialize('abcdefgh'), ['z', 'pq']) from system.numbers limit 10; -select 1 = 
multiSearchFirstPosition(materialize('abcdefgh'), ['a', 'b', 'c', 'd']) from system.numbers limit 10; -select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['defgh', 'bcd', 'abcd', 'c']) from system.numbers limit 10; -select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['', 'bcd', 'bcd', 'c']) from system.numbers limit 10; -select 2 = multiSearchFirstPosition(materialize('abcdefgh'), ['something', 'bcd', 'bcd', 'c']) from system.numbers limit 10; -select 6 = multiSearchFirstPosition(materialize('abcdefgh'), ['something', 'bcdz', 'fgh', 'f']) from system.numbers limit 10; - -select 0 = multiSearchFirstPositionCaseInsensitive(materialize('abcdefgh'), ['z', 'pq']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionCaseInsensitive(materialize('aBcdefgh'), ['A', 'b', 'c', 'd']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionCaseInsensitive(materialize('abCDefgh'), ['defgh', 'bcd', 'aBCd', 'c']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionCaseInsensitive(materialize('abCdeFgH'), ['', 'bcd', 'bcd', 'c']) from system.numbers limit 10; -select 2 = multiSearchFirstPositionCaseInsensitive(materialize('ABCDEFGH'), ['something', 'bcd', 'bcd', 'c']) from system.numbers limit 10; -select 6 = multiSearchFirstPositionCaseInsensitive(materialize('abcdefgh'), ['sOmEthIng', 'bcdZ', 'fGh', 'F']) from system.numbers limit 10; - -select 0 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['л', 'ъ']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['а', 'б', 'в', 'г']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['гдежз', 'бвг', 'абвг', 'вг']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['', 'бвг', 'бвг', 'в']) from system.numbers limit 10; -select 2 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['что', 'в', 'гдз', 'бвг']) from system.numbers limit 10; -select 6 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['з', 'бвгя', 'ежз', 'з']) from system.numbers limit 10; - -select 0 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['Л', 'Ъ']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['А', 'б', 'в', 'г']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['гДеЖз', 'бВг', 'АБВг', 'вг']) from system.numbers limit 10; -select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['', 'бвг', 'Бвг', 'в']) from system.numbers limit 10; -select 2 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['что', 'в', 'гдз', 'бвг']) from system.numbers limit 10; -select 6 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежЗ'), ['З', 'бвгЯ', 'ЕЖз', 'з']) from system.numbers limit 10; - -select 1 = multiSearchAny(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaa']); - -select 1 = multiSearchFirstIndex(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaaa', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaab']); - -select 1 = multiSearchAny(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaaa', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaab']); - -select 1 = multiSearchFirstPosition(materialize('aaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaaa', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaab']); - -select 1 = multiSearchFirstPosition(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaab', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaa']); - -select 0 = multiSearchAny(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'b']); - --- 254 -select -[ -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 -] = -multiSearchAllPositions(materialize('string'), -['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); - -select 254 = multiSearchFirstIndex(materialize('string'), -['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); - - -select -[ -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 -] = -multiSearchAllPositions(materialize('string'), -['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); - -select 255 = multiSearchFirstIndex(materialize('string'), -['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); - -select multiSearchAllPositions(materialize('string'), -['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError 42 } - -select multiSearchFirstIndex(materialize('string'), -['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', -'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError 42 } - - -select [1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]= multiSearchAllPositions(materialize('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'), -['aaaa', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', 
-'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab', -'aaaa']); diff --git a/tests/queries/0_stateless/00534_filimonov.data b/tests/queries/0_stateless/00534_filimonov.data index f07d79ed1bf..911a8e4d1f3 100644 --- a/tests/queries/0_stateless/00534_filimonov.data +++ b/tests/queries/0_stateless/00534_filimonov.data @@ -176,6 +176,7 @@ SELECT toInt16OrZero(NULL); SELECT formatReadableSize(NULL); SELECT formatReadableQuantity(NULL); SELECT formatReadableTimeDelta(NULL); +SELECT parseTimeDelta(NULL); SELECT concatAssumeInjective(NULL); SELECT toString(NULL); SELECT MACStringToNum(NULL); diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index 89c9d1c4279..6a682210489 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -9,6 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} -q "drop user if exists u_00600" ${CLICKHOUSE_CLIENT} -q "create user u_00600 settings max_execution_time=60, readonly=1" +${CLICKHOUSE_CLIENT} -q "grant select on system.numbers to u_00600" function wait_for_query_to_start() { diff --git a/tests/queries/0_stateless/00849_multiple_comma_join_2.sql b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql index eabede3ff00..eb803450ff7 100644 --- a/tests/queries/0_stateless/00849_multiple_comma_join_2.sql +++ b/tests/queries/0_stateless/00849_multiple_comma_join_2.sql @@ -1,5 +1,6 @@ SET enable_optimize_predicate_expression = 0; SET convert_query_to_cnf = 0; +SET cross_to_inner_join_rewrite = 1; DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; diff --git a/tests/queries/0_stateless/00950_test_gorilla_codec.sql b/tests/queries/0_stateless/00950_test_gorilla_codec.sql index a6e0f1d7b11..e9582480bcb 100644 --- a/tests/queries/0_stateless/00950_test_gorilla_codec.sql +++ b/tests/queries/0_stateless/00950_test_gorilla_codec.sql @@ -1,5 +1,7 @@ DROP TABLE IF EXISTS codecTest; +SET cross_to_inner_join_rewrite = 1; + CREATE TABLE codecTest ( key UInt64, name String, diff --git a/tests/queries/0_stateless/01095_tpch_like_smoke.sql b/tests/queries/0_stateless/01095_tpch_like_smoke.sql index 5971178ade5..1ac9ec229f0 100644 --- 
a/tests/queries/0_stateless/01095_tpch_like_smoke.sql +++ b/tests/queries/0_stateless/01095_tpch_like_smoke.sql @@ -7,6 +7,8 @@ DROP TABLE IF EXISTS lineitem; DROP TABLE IF EXISTS nation; DROP TABLE IF EXISTS region; +SET cross_to_inner_join_rewrite = 1; + CREATE TABLE part ( p_partkey Int32, -- PK diff --git a/tests/queries/0_stateless/01133_begin_commit_race.sh b/tests/queries/0_stateless/01133_begin_commit_race.sh index 7dadb35ccff..2b266527541 100755 --- a/tests/queries/0_stateless/01133_begin_commit_race.sh +++ b/tests/queries/0_stateless/01133_begin_commit_race.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-ordinary-database CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/01167_isolation_hermitage.sh b/tests/queries/0_stateless/01167_isolation_hermitage.sh index 7f495801dd0..3f2c8308216 100755 --- a/tests/queries/0_stateless/01167_isolation_hermitage.sh +++ b/tests/queries/0_stateless/01167_isolation_hermitage.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-fasttest, no-replicated-database +# Tags: long, no-fasttest, no-replicated-database, no-ordinary-database # Looks like server does not listen https port in fasttest # FIXME Replicated database executes ALTERs in separate context, so transaction info is lost diff --git a/tests/queries/0_stateless/01168_mutations_isolation.sh b/tests/queries/0_stateless/01168_mutations_isolation.sh index 888858edf32..ebfdffdaeee 100755 --- a/tests/queries/0_stateless/01168_mutations_isolation.sh +++ b/tests/queries/0_stateless/01168_mutations_isolation.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-replicated-database +# Tags: no-fasttest, no-replicated-database, no-ordinary-database # Looks like server does not listen https port in fasttest # FIXME Replicated database executes ALTERs in separate context, so transaction info is lost diff --git a/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh index 263b2c84de7..32ad78dead6 100755 --- a/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh +++ b/tests/queries/0_stateless/01169_alter_partition_isolation_stress.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-replicated-database +# Tags: long, no-replicated-database, no-ordinary-database # shellcheck disable=SC2015 diff --git a/tests/queries/0_stateless/01170_alter_partition_isolation.sh b/tests/queries/0_stateless/01170_alter_partition_isolation.sh index 2db178fb6d1..6ac95713800 100755 --- a/tests/queries/0_stateless/01170_alter_partition_isolation.sh +++ b/tests/queries/0_stateless/01170_alter_partition_isolation.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: no-fasttest, no-replicated-database +# Tags: no-fasttest, no-replicated-database, no-ordinary-database # Looks like server does not listen https port in fasttest # FIXME Replicated database executes ALTERs in separate context, so transaction info is lost diff --git a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh index 538f586a673..12b654f4215 100755 --- a/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh +++ b/tests/queries/0_stateless/01171_mv_select_insert_isolation_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-parallel +# Tags: long, no-parallel, no-ordinary-database # Test is too heavy, avoid parallel run in Flaky Check 
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) diff --git a/tests/queries/0_stateless/01172_transaction_counters.sql b/tests/queries/0_stateless/01172_transaction_counters.sql index 8e04b6c89bd..ed40ec3f4b6 100644 --- a/tests/queries/0_stateless/01172_transaction_counters.sql +++ b/tests/queries/0_stateless/01172_transaction_counters.sql @@ -1,4 +1,4 @@ --- Tags: no-s3-storage, no-tsan +-- Tags: no-s3-storage, no-tsan, no-ordinary-database -- FIXME this test fails with S3 due to a bug in DiskCacheWrapper -- FIXME It became flaky after upgrading to llvm-14 due to obscure freezes in tsan drop table if exists txn_counters; diff --git a/tests/queries/0_stateless/01173_transaction_control_queries.sql b/tests/queries/0_stateless/01173_transaction_control_queries.sql index e23b5ec8657..03c98f50cc4 100644 --- a/tests/queries/0_stateless/01173_transaction_control_queries.sql +++ b/tests/queries/0_stateless/01173_transaction_control_queries.sql @@ -1,3 +1,5 @@ +-- Tags: no-ordinary-database + drop table if exists mt1; drop table if exists mt2; diff --git a/tests/queries/0_stateless/01174_select_insert_isolation.sh b/tests/queries/0_stateless/01174_select_insert_isolation.sh index cf1bb23f702..dc5c1d7a722 100755 --- a/tests/queries/0_stateless/01174_select_insert_isolation.sh +++ b/tests/queries/0_stateless/01174_select_insert_isolation.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-ordinary-database # shellcheck disable=SC2015 @@ -36,14 +36,15 @@ function thread_insert_rollback() function thread_select() { while true; do - # Result of `uniq | wc -l` must be 1 if the first and the last queries got the same result + # The first and the last queries must get the same result $CLICKHOUSE_CLIENT --multiquery --query " BEGIN TRANSACTION; - SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt; + SET throw_on_unsupported_query_inside_transaction=0; + CREATE TEMPORARY TABLE tmp AS SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt FORMAT Null; SELECT throwIf((SELECT sum(n) FROM mt) != 0) FORMAT Null; SELECT throwIf((SELECT count() FROM mt) % 2 != 0) FORMAT Null; - SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt; - COMMIT;" | uniq | wc -l | grep -v "^1$" ||: + select throwIf((SELECT * FROM tmp) != (SELECT arraySort(groupArray(n)), arraySort(groupArray(m)), arraySort(groupArray(_part)) FROM mt)) FORMAT Null; + COMMIT;" done } diff --git a/tests/queries/0_stateless/01317_no_password_in_command_line.sh b/tests/queries/0_stateless/01317_no_password_in_command_line.sh index 5b95f077ea2..7f2e91201a3 100755 --- a/tests/queries/0_stateless/01317_no_password_in_command_line.sh +++ b/tests/queries/0_stateless/01317_no_password_in_command_line.sh @@ -10,6 +10,7 @@ set -e user=user_$CLICKHOUSE_TEST_UNIQUE_NAME $CLICKHOUSE_CLIENT --query "DROP USER IF EXISTS $user" $CLICKHOUSE_CLIENT --query "CREATE USER $user IDENTIFIED WITH PLAINTEXT_PASSWORD BY 'hello'" +$CLICKHOUSE_CLIENT --query "GRANT SELECT ON system.numbers TO $user" trap '$CLICKHOUSE_CLIENT --query "DROP USER $user"' EXIT # Wait for query to start executing. At that time, the password should be cleared. 
diff --git a/tests/queries/0_stateless/01475_read_subcolumns.sql b/tests/queries/0_stateless/01475_read_subcolumns.sql index 4724bec9eff..c287f6d55fa 100644 --- a/tests/queries/0_stateless/01475_read_subcolumns.sql +++ b/tests/queries/0_stateless/01475_read_subcolumns.sql @@ -1,4 +1,4 @@ --- Tags: no-s3-storage +-- Tags: no-s3-storage, no-random-settings SET use_uncompressed_cache = 0; diff --git a/tests/queries/0_stateless/01479_cross_join_9855.sql b/tests/queries/0_stateless/01479_cross_join_9855.sql index 0b549619489..6dc76f22057 100644 --- a/tests/queries/0_stateless/01479_cross_join_9855.sql +++ b/tests/queries/0_stateless/01479_cross_join_9855.sql @@ -1,3 +1,5 @@ +SET cross_to_inner_join_rewrite = 1; + SELECT count() FROM numbers(4) AS n1, numbers(3) AS n2 WHERE n1.number > (select avg(n.number) from numbers(3) n); diff --git a/tests/queries/0_stateless/01533_multiple_nested.sql b/tests/queries/0_stateless/01533_multiple_nested.sql index 03724ce0b46..a61f13fc807 100644 --- a/tests/queries/0_stateless/01533_multiple_nested.sql +++ b/tests/queries/0_stateless/01533_multiple_nested.sql @@ -4,6 +4,7 @@ DROP TABLE IF EXISTS nested; SET flatten_nested = 0; SET use_uncompressed_cache = 0; +SET local_filesystem_read_method='pread'; CREATE TABLE nested ( diff --git a/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql b/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql index ee0617e42a3..dadd7eaba6c 100644 --- a/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql +++ b/tests/queries/0_stateless/01643_replicated_merge_tree_fsync_smoke.sql @@ -1,6 +1,5 @@ --- Tags: no-parallel, no-s3-storage +-- Tags: no-parallel -- no-parallel -- for flaky check and to avoid "Removing leftovers from table" (for other tables) --- no-s3-storage -- hangs now, need investigation -- Temporarily skip warning 'table was created by another server at the same moment, will retry' set send_logs_level='error'; diff --git a/tests/queries/0_stateless/01780_column_sparse_tuple.sql b/tests/queries/0_stateless/01780_column_sparse_tuple.sql index e3dfc16fc74..da679f2c7eb 100644 --- a/tests/queries/0_stateless/01780_column_sparse_tuple.sql +++ b/tests/queries/0_stateless/01780_column_sparse_tuple.sql @@ -1,4 +1,3 @@ --- Tags: no-s3-storage DROP TABLE IF EXISTS sparse_tuple; CREATE TABLE sparse_tuple (id UInt64, t Tuple(a UInt64, s String)) diff --git a/tests/queries/0_stateless/01911_logical_error_minus.sql b/tests/queries/0_stateless/01911_logical_error_minus.sql index 9813c1a8a5d..3dcdedd38f5 100644 --- a/tests/queries/0_stateless/01911_logical_error_minus.sql +++ b/tests/queries/0_stateless/01911_logical_error_minus.sql @@ -1,6 +1,8 @@ -- This test case is almost completely generated by fuzzer. -- It appeared to trigger assertion. 
+SET cross_to_inner_join_rewrite = 1; + DROP TABLE IF EXISTS codecTest; CREATE TABLE codecTest ( diff --git a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql index 64c31472e89..9e14bb9e138 100644 --- a/tests/queries/0_stateless/01961_roaring_memory_tracking.sql +++ b/tests/queries/0_stateless/01961_roaring_memory_tracking.sql @@ -1,4 +1,4 @@ -- Tags: no-replicated-database SET max_memory_usage = '100M'; -SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(2000000000) GROUP BY n; -- { serverError 241 } +SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(2000000000) GROUP BY n FORMAT Null; -- { serverError 241 } diff --git a/tests/queries/0_stateless/02098_with_types_use_header.sh b/tests/queries/0_stateless/02098_with_types_use_header.sh index 5d88a994052..457182a08f2 100755 --- a/tests/queries/0_stateless/02098_with_types_use_header.sh +++ b/tests/queries/0_stateless/02098_with_types_use_header.sh @@ -19,9 +19,9 @@ echo -e "y\tz\tx\nString\tDate\tUInt32\ntext\t2020-01-01\t1" | $CLICKHOUSE_CLIEN echo -e "x\tz\ty\nUInt32\tString\tDate\n1\ttext\t2020-01-01" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CustomSeparatedWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' echo "CSVWithNamesAndTypes" -echo -e "'x','y','z'\n'String','Date','UInt32'\n'text','2020-01-01',1" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' -echo -e "'y','z','x'\n'String','Date','UInt32'\n'text','2020-01-01',1" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" && echo 'OK' || echo 'FAIL' -echo -e "'x','z','y'\n'UInt32','String',Date'\n1,'text','2020-01-01'" | $CLICKHOUSE_CLIENT --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' +echo -e "'x','y','z'\n'String','Date','UInt32'\n'text','2020-01-01',1" | $CLICKHOUSE_CLIENT --format_csv_allow_single_quotes=1 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' +echo -e "'y','z','x'\n'String','Date','UInt32'\n'text','2020-01-01',1" | $CLICKHOUSE_CLIENT --format_csv_allow_single_quotes=1 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" && echo 'OK' || echo 'FAIL' +echo -e "'x','z','y'\n'UInt32','String',Date'\n1,'text','2020-01-01'" | $CLICKHOUSE_CLIENT --format_csv_allow_single_quotes=1 --input_format_with_names_use_header=1 --input_format_with_types_use_header=1 -q "INSERT INTO test_02098 FORMAT CSVWithNamesAndTypes" 2>&1 | grep -F -q "INCORRECT_DATA" && echo 'OK' || echo 'FAIL' echo "JSONCompactEachRowWithNamesAndTypes" diff --git a/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh b/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh index 08d380bf559..4f38d662590 100755 --- a/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh +++ 
b/tests/queries/0_stateless/02155_csv_with_strings_with_slash.sh @@ -10,13 +10,13 @@ ${CLICKHOUSE_CLIENT} --query="create table test_02155_csv (A Int64, S String, D echo "input_format_null_as_default = 1" -cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv SETTINGS input_format_null_as_default = 1 FORMAT CSV" +cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv SETTINGS input_format_null_as_default = 1, format_csv_allow_single_quotes=1 FORMAT CSV" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test_02155_csv" ${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE test_02155_csv" echo "input_format_null_as_default = 0" -cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv SETTINGS input_format_null_as_default = 0 FORMAT CSV" +cat $CUR_DIR/data_csv/csv_with_slash.csv | ${CLICKHOUSE_CLIENT} -q "INSERT INTO test_02155_csv SETTINGS format_csv_allow_single_quotes = 1, input_format_null_as_default = 0 FORMAT CSV" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM test_02155_csv" diff --git a/tests/queries/0_stateless/02226_s3_with_cache.sql b/tests/queries/0_stateless/02226_s3_with_cache.sql index 0d0653d4089..3bd4b93155d 100644 --- a/tests/queries/0_stateless/02226_s3_with_cache.sql +++ b/tests/queries/0_stateless/02226_s3_with_cache.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-fasttest, long +-- Tags: no-parallel, no-fasttest, long, no-random-settings SET max_memory_usage='20G'; SET enable_filesystem_cache_on_write_operations = 0; diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql index 60a8eba8f3e..00a1f5cf5f4 100644 --- a/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-fasttest, no-s3-storage +-- Tags: no-parallel, no-fasttest, no-s3-storage, no-random-settings -- { echo } diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql b/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql index 871f9305c55..d49096cb9b2 100644 --- a/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_cache_log.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-fasttest, no-s3-storage +-- Tags: no-parallel, no-fasttest, no-s3-storage, no-random-settings -- { echo } diff --git a/tests/queries/0_stateless/02240_system_remote_filesystem_query_cache.sql b/tests/queries/0_stateless/02240_system_remote_filesystem_query_cache.sql index 3fa3fdb5926..2a4f4ae219c 100644 --- a/tests/queries/0_stateless/02240_system_remote_filesystem_query_cache.sql +++ b/tests/queries/0_stateless/02240_system_remote_filesystem_query_cache.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel, no-fasttest, no-s3-storage +-- Tags: no-parallel, no-fasttest, no-s3-storage, no-random-settings -- { echo } diff --git a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh index 6589765f739..e8aa5914912 100755 --- a/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh +++ b/tests/queries/0_stateless/02246_tsv_csv_best_effort_schema_inference.sh @@ -158,8 +158,10 @@ echo "CSV" echo -e "42,Some string,'[1, 2, 3, 4]','[(1, 2, 3)]' 42\,abcd,'[]','[(4, 5, 6)]'" > $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc 
file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +CLIENT_CMD="$CLICKHOUSE_CLIENT --format_csv_allow_single_quotes=1" + +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" echo -e "\"[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'key2' : NULL}, [NULL], NULL)]\" '[]' @@ -168,8 +170,8 @@ echo -e "\"[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'k \"[({}, ['String3'], NULL)]\" \"[({'key3': NULL}, []), NULL]\""> $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV') settings input_format_csv_use_best_effort_in_schema_inference=false" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV') settings input_format_csv_use_best_effort_in_schema_inference=false" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV') settings input_format_csv_use_best_effort_in_schema_inference=false" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV') settings input_format_csv_use_best_effort_in_schema_inference=false" echo -e "\"[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'key2' : NULL}, [NULL], NULL)]\" '[]' @@ -178,43 +180,43 @@ echo -e "\"[({'key' : 42.42}, ['String', 'String2'], 42.42), ({}, [], -42), ({'k \"[({}, ['String3'], NULL)]\" \"[({'key3': NULL}, [], NULL)]\""> $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" echo -e "true false \N" > $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" echo -e "'[true, NULL]' '[]' '[NULL]' '[false]'" > $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" echo -e "'(1, 2, 3)'"> $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" -echo -e "'123.123'"> $DATA_FILE +echo -e '"123.123"'> $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" echo -e "'[(1, 2, 3)]'"> $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" echo -e "\"[(1, 2, 3)]\""> $DATA_FILE -$CLICKHOUSE_CLIENT -q "desc file('$FILE_NAME', 'CSV')" -$CLICKHOUSE_CLIENT -q "select * from file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "desc file('$FILE_NAME', 'CSV')" +$CLIENT_CMD -q "select * from file('$FILE_NAME', 'CSV')" diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.sql b/tests/queries/0_stateless/02286_drop_filesystem_cache.sql index 8610299d08a..c0ba0cb3051 100644 --- a/tests/queries/0_stateless/02286_drop_filesystem_cache.sql +++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.sql @@ -1,4 +1,4 @@ --- Tags: 
no-parallel, no-fasttest, no-s3-storage +-- Tags: no-parallel, no-fasttest, no-s3-storage, no-random-settings -- { echo } diff --git a/tests/queries/0_stateless/02313_group_by_modifiers_with_non-default_types.reference b/tests/queries/0_stateless/02313_group_by_modifiers_with_non_default_types.reference similarity index 100% rename from tests/queries/0_stateless/02313_group_by_modifiers_with_non-default_types.reference rename to tests/queries/0_stateless/02313_group_by_modifiers_with_non_default_types.reference diff --git a/tests/queries/0_stateless/02313_group_by_modifiers_with_non-default_types.sql b/tests/queries/0_stateless/02313_group_by_modifiers_with_non_default_types.sql similarity index 100% rename from tests/queries/0_stateless/02313_group_by_modifiers_with_non-default_types.sql rename to tests/queries/0_stateless/02313_group_by_modifiers_with_non_default_types.sql diff --git a/tests/queries/0_stateless/02313_test_fpc_codec.sql b/tests/queries/0_stateless/02313_test_fpc_codec.sql index 3b1127350f0..4fe54b87c9c 100644 --- a/tests/queries/0_stateless/02313_test_fpc_codec.sql +++ b/tests/queries/0_stateless/02313_test_fpc_codec.sql @@ -1,5 +1,7 @@ DROP TABLE IF EXISTS codecTest; +SET cross_to_inner_join_rewrite = 1; + CREATE TABLE codecTest ( key UInt64, name String, diff --git a/tests/queries/0_stateless/02324_compatibility_setting.reference b/tests/queries/0_stateless/02324_compatibility_setting.reference new file mode 100644 index 00000000000..e3a9ed7a73e --- /dev/null +++ b/tests/queries/0_stateless/02324_compatibility_setting.reference @@ -0,0 +1,19 @@ +allow_settings_after_format_in_insert +22.3 +1 +1 +22.4 +0 +22.5 +0 +async_socket_for_remote +21.2 +1 +21.3 +0 +21.4 +0 +21.5 +1 +21.6 +1 diff --git a/tests/queries/0_stateless/02324_compatibility_setting.sh b/tests/queries/0_stateless/02324_compatibility_setting.sh new file mode 100755 index 00000000000..043f045d9be --- /dev/null +++ b/tests/queries/0_stateless/02324_compatibility_setting.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +echo "allow_settings_after_format_in_insert" +echo "22.3" +$CLICKHOUSE_CLIENT --compatibility=22.3 -q "select value from system.settings where name='allow_settings_after_format_in_insert'" +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&compatibility=22.3" -d "select value from system.settings where name='allow_settings_after_format_in_insert'" +echo "22.4" +$CLICKHOUSE_CLIENT --compatibility=22.4 -q "select value from system.settings where name='allow_settings_after_format_in_insert'" +echo "22.5" +$CLICKHOUSE_CLIENT --compatibility=22.5 -q "select value from system.settings where name='allow_settings_after_format_in_insert'" + + +echo "async_socket_for_remote" +echo "21.2" +$CLICKHOUSE_CLIENT --compatibility=21.2 -q "select value from system.settings where name='async_socket_for_remote'" +echo "21.3" +$CLICKHOUSE_CLIENT --compatibility=21.3 -q "select value from system.settings where name='async_socket_for_remote'" +echo "21.4" +$CLICKHOUSE_CLIENT --compatibility=21.4 -q "select value from system.settings where name='async_socket_for_remote'" +echo "21.5" +$CLICKHOUSE_CLIENT --compatibility=21.5 -q "select value from system.settings where name='async_socket_for_remote'" +echo "21.6" +$CLICKHOUSE_CLIENT --compatibility=21.6 -q "select value from system.settings where name='async_socket_for_remote'" + diff --git a/tests/queries/0_stateless/02325_compatibility_setting_2.reference b/tests/queries/0_stateless/02325_compatibility_setting_2.reference new file mode 100644 index 00000000000..9eed1825cc8 --- /dev/null +++ b/tests/queries/0_stateless/02325_compatibility_setting_2.reference @@ -0,0 +1,8 @@ +0 +1 +0 +1 +0 +1 +1 +1 diff --git a/tests/queries/0_stateless/02325_compatibility_setting_2.sql b/tests/queries/0_stateless/02325_compatibility_setting_2.sql new file mode 100644 index 00000000000..5ce0bf1ef8b --- /dev/null +++ b/tests/queries/0_stateless/02325_compatibility_setting_2.sql @@ -0,0 +1,13 @@ +select value from system.settings where name='allow_settings_after_format_in_insert'; +select value from system.settings where name='allow_settings_after_format_in_insert' settings compatibility='22.3'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.3'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.4'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set allow_settings_after_format_in_insert=1; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.4'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.3'; +select value from system.settings where name='allow_settings_after_format_in_insert'; diff --git a/tests/queries/0_stateless/02326_settings_changes_system_table.reference b/tests/queries/0_stateless/02326_settings_changes_system_table.reference new file mode 100644 index 00000000000..c4a3c71edfd --- /dev/null +++ b/tests/queries/0_stateless/02326_settings_changes_system_table.reference @@ -0,0 +1,3 @@ +version String +changes Array(Tuple(name String, previous_value String, new_value String, reason String)) +22.5 [('memory_overcommit_ratio_denominator','0','1073741824','Enable memory overcommit feature by default'),('memory_overcommit_ratio_denominator_for_user','0','1073741824','Enable memory overcommit feature by default')] diff --git 
a/tests/queries/0_stateless/02326_settings_changes_system_table.sql b/tests/queries/0_stateless/02326_settings_changes_system_table.sql new file mode 100644 index 00000000000..e56cd62ce55 --- /dev/null +++ b/tests/queries/0_stateless/02326_settings_changes_system_table.sql @@ -0,0 +1,2 @@ +DESC system.settings_changes; +SELECT * FROM system.settings_changes WHERE version = '22.5' diff --git a/tests/queries/0_stateless/02343_group_by_use_nulls.reference b/tests/queries/0_stateless/02343_group_by_use_nulls.reference new file mode 100644 index 00000000000..24b7bb5277c --- /dev/null +++ b/tests/queries/0_stateless/02343_group_by_use_nulls.reference @@ -0,0 +1,215 @@ +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; +0 0 0 +0 \N 0 +1 1 1 +1 \N 1 +2 0 2 +2 \N 2 +3 1 3 +3 \N 3 +4 0 4 +4 \N 4 +5 1 5 +5 \N 5 +6 0 6 +6 \N 6 +7 1 7 +7 \N 7 +8 0 8 +8 \N 8 +9 1 9 +9 \N 9 +\N \N 45 +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; +0 0 0 +0 0 0 +0 0 45 +1 0 1 +1 1 1 +2 0 2 +2 0 2 +3 0 3 +3 1 3 +4 0 4 +4 0 4 +5 0 5 +5 1 5 +6 0 6 +6 0 6 +7 0 7 +7 1 7 +8 0 8 +8 0 8 +9 0 9 +9 1 9 +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; +0 0 0 +0 \N 0 +1 1 1 +1 \N 1 +2 0 2 +2 \N 2 +3 1 3 +3 \N 3 +4 0 4 +4 \N 4 +5 1 5 +5 \N 5 +6 0 6 +6 \N 6 +7 1 7 +7 \N 7 +8 0 8 +8 \N 8 +9 1 9 +9 \N 9 +\N 0 20 +\N 1 25 +\N \N 45 +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; +0 0 0 +0 0 0 +0 0 20 +0 0 45 +0 1 25 +1 0 1 +1 1 1 +2 0 2 +2 0 2 +3 0 3 +3 1 3 +4 0 4 +4 0 4 +5 0 5 +5 1 5 +6 0 6 +6 0 6 +7 0 7 +7 1 7 +8 0 8 +8 0 8 +9 0 9 +9 1 9 +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 1; +0 \N 0 +1 \N 1 +2 \N 2 +3 \N 3 +4 \N 4 +5 \N 5 +6 \N 6 +7 \N 7 +8 \N 8 +9 \N 9 +\N 0 20 +\N 1 25 +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; +0 0 0 +0 0 20 +0 1 25 +1 0 1 +2 0 2 +3 0 3 +4 0 4 +5 0 5 +6 0 6 +7 0 7 +8 0 8 +9 0 9 +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; +0 0 0 +0 \N 0 +1 1 1 +1 \N 1 +2 0 2 +2 \N 2 +3 1 3 +3 \N 3 +4 0 4 +4 \N 4 +5 1 5 +5 \N 5 +6 0 6 +6 \N 6 +7 1 7 +7 \N 7 +8 0 8 +8 \N 8 +9 1 9 +9 \N 9 +\N \N 45 + +0 0 45 +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; +0 0 0 +0 \N 0 +1 1 1 +1 \N 1 +2 0 2 +2 \N 2 +3 1 3 +3 \N 3 +4 0 4 +4 \N 4 +5 1 5 +5 \N 5 +6 0 6 +6 \N 6 +7 1 7 +7 \N 7 +8 0 8 +8 \N 8 +9 1 9 +9 \N 9 +\N 0 20 +\N 1 25 +\N \N 45 + +0 0 45 diff --git a/tests/queries/0_stateless/02343_group_by_use_nulls.sql b/tests/queries/0_stateless/02343_group_by_use_nulls.sql new file mode 100644 index 00000000000..a14db824013 --- /dev/null +++ 
b/tests/queries/0_stateless/02343_group_by_use_nulls.sql @@ -0,0 +1,62 @@ +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 1; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; diff --git a/tests/queries/0_stateless/02343_group_by_use_nulls_distributed.reference b/tests/queries/0_stateless/02343_group_by_use_nulls_distributed.reference new file mode 100644 index 00000000000..7a9263e883c --- /dev/null +++ b/tests/queries/0_stateless/02343_group_by_use_nulls_distributed.reference @@ -0,0 +1,157 @@ +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; +0 0 0 +0 \N 0 +1 1 2 +1 \N 2 +2 0 4 +2 \N 4 +3 1 6 +3 \N 6 +4 0 8 +4 \N 8 +5 1 10 +5 \N 10 +6 0 12 +6 \N 12 +7 1 14 +7 \N 14 +8 0 16 +8 \N 16 +9 1 18 +9 \N 18 +\N \N 90 +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; +0 0 0 +0 0 0 +0 0 90 +1 0 2 +1 1 2 +2 0 4 +2 0 4 +3 0 6 +3 1 6 +4 0 8 +4 0 8 +5 0 10 +5 1 10 +6 0 12 +6 0 12 +7 0 14 +7 1 14 +8 0 16 +8 0 16 +9 0 18 +9 1 18 +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; +0 0 0 +0 \N 0 +1 1 2 +1 \N 2 +2 0 4 +2 \N 4 +3 1 6 +3 \N 6 +4 0 8 +4 \N 8 +5 1 10 +5 \N 10 +6 0 12 +6 \N 12 +7 1 14 +7 \N 14 +8 0 16 +8 \N 16 +9 1 18 +9 \N 18 +\N 0 40 +\N 1 50 +\N \N 90 +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; +0 0 0 +0 0 0 +0 0 40 +0 0 90 +0 1 50 +1 0 2 +1 1 2 +2 0 4 +2 0 4 +3 0 6 +3 1 6 +4 0 8 +4 0 8 +5 0 10 +5 1 10 +6 0 12 +6 0 12 +7 0 14 +7 1 14 +8 0 16 +8 0 16 +9 0 18 +9 1 18 +SELECT + number, + number % 2, + sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS 
group_by_use_nulls = 1; +0 \N 0 +1 \N 2 +2 \N 4 +3 \N 6 +4 \N 8 +5 \N 10 +6 \N 12 +7 \N 14 +8 \N 16 +9 \N 18 +\N 0 40 +\N 1 50 +SELECT + number, + number % 2, + sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; +0 0 0 +0 0 40 +0 1 50 +1 0 2 +2 0 4 +3 0 6 +4 0 8 +5 0 10 +6 0 12 +7 0 14 +8 0 16 +9 0 18 diff --git a/tests/queries/0_stateless/02343_group_by_use_nulls_distributed.sql b/tests/queries/0_stateless/02343_group_by_use_nulls_distributed.sql new file mode 100644 index 00000000000..15ac1127de7 --- /dev/null +++ b/tests/queries/0_stateless/02343_group_by_use_nulls_distributed.sql @@ -0,0 +1,51 @@ +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT + number, + number % 2, + sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 1; + +SELECT + number, + number % 2, + sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; + diff --git a/tests/queries/0_stateless/02345_implicit_transaction.sql b/tests/queries/0_stateless/02345_implicit_transaction.sql index 677affeec39..e3f9cca37d1 100644 --- a/tests/queries/0_stateless/02345_implicit_transaction.sql +++ b/tests/queries/0_stateless/02345_implicit_transaction.sql @@ -1,3 +1,5 @@ +-- Tags: no-ordinary-database + CREATE TABLE landing (n Int64) engine=MergeTree order by n; CREATE TABLE target (n Int64) engine=MergeTree order by n; CREATE MATERIALIZED VIEW landing_to_target TO target AS diff --git a/tests/queries/0_stateless/02346_additional_filters.reference b/tests/queries/0_stateless/02346_additional_filters.reference new file mode 100644 index 00000000000..22d53173e71 --- /dev/null +++ b/tests/queries/0_stateless/02346_additional_filters.reference @@ -0,0 +1,263 @@ +-- { echoOn } + +select * from table_1; +1 a +2 bb +3 ccc +4 dddd +select * from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +1 a +3 ccc +4 dddd +select * from table_1 settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; +1 a +4 dddd +select x from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +3 +4 +select y from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +a +ccc +dddd +select * from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +1 a +4 dddd +select * from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +1 a +3 ccc +select * from table_1 prewhere x != 4 where x != 3 settings 
additional_table_filters={'table_1' : 'x != 2'}; +1 a +select x from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +4 +select x from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +3 +select x from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +select y from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +a +dddd +select y from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +a +ccc +select y from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +a +select x from table_1 where x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +3 +4 +select x from table_1 prewhere x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +3 +4 +select x from table_1 prewhere x != 2 where x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +1 +3 +4 +select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy = 0'}; +0 +0 +select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy != 0'}; +select * from system.numbers limit 5; +0 +1 +2 +3 +4 +select * from system.numbers as t limit 5 settings additional_table_filters={'t' : 'number % 2 != 0'}; +1 +3 +5 +7 +9 +select * from system.numbers limit 5 settings additional_table_filters={'system.numbers' : 'number != 3'}; +0 +1 +2 +4 +5 +select * from system.numbers limit 5 settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; +0 +1 +2 +4 +5 +select * from (select number from system.numbers limit 5 union all select x from table_1) order by number settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; +0 +1 +1 +2 +3 +4 +4 +5 +select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; +0 0 +1 1 a +2 0 +4 4 dddd +5 0 +select b + 1 as c from (select a + 1 as b from (select x + 1 as a from table_1)) settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; +4 +7 +-- { echoOn } +select * from v_numbers; +1 +2 +3 +4 +5 +select * from v_numbers settings additional_table_filters={'system.numbers' : 'number != 3'}; +1 +2 +3 +5 +6 +select * from v_numbers settings additional_table_filters={'v_numbers' : 'x != 3'}; +1 +2 +4 +5 +select * from v_numbers settings additional_table_filters={'system.numbers' : 'number != 3', 'v_numbers' : 'x != 3'}; +1 +2 +5 +6 +-- additional filter for inner tables for Materialized View does not work because it does not create internal interpreter +-- probably it is expected +-- { echoOn } +select * from mv_table; +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from mv_table settings additional_table_filters={'mv_table' : 'x != 5'}; +4 dddd +6 ffffff +7 ggggggg +select * from mv_table settings additional_table_filters={'table_1' : 'x != 5'}; +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from mv_table settings additional_table_filters={'table_2' : 'x != 5'}; +4 dddd +5 eeeee +6 ffffff +7 ggggggg +-- additional filter for inner tables for Merge does not work because it does not create internal interpreter +-- probably it is expected +-- { echoOn } +select * from m_table order by x; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg 
+select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 2'}; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'table_2' : 'x != 5'}; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 2', 'table_2' : 'x != 5'}; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 4'}; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'table_2' : 'x != 4'}; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 4', 'table_2' : 'x != 4'}; +1 a +2 bb +3 ccc +4 dddd +4 dddd +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'m_table' : 'x != 4'}; +1 a +2 bb +3 ccc +5 eeeee +6 ffffff +7 ggggggg +select * from m_table order by x settings additional_table_filters={'m_table' : 'x != 4', 'table_1' : 'x != 2', 'table_2' : 'x != 5'}; +1 a +2 bb +3 ccc +5 eeeee +6 ffffff +7 ggggggg +-- additional_result_filter + +select * from table_1 settings additional_result_filter='x != 2'; +1 a +3 ccc +4 dddd +select *, x != 2 from table_1 settings additional_result_filter='x != 2'; +1 a 1 +3 ccc 1 +4 dddd 1 +select * from table_1 where x != 1 settings additional_result_filter='x != 2'; +3 ccc +4 dddd +select * from table_1 where x != 1 settings additional_result_filter='x != 2 and x != 3'; +4 dddd +select * from table_1 prewhere x != 3 where x != 1 settings additional_result_filter='x != 2'; +4 dddd +select * from table_1 limit 3 settings additional_result_filter='x != 2'; +1 a +3 ccc +select x + 1 from table_1 settings additional_result_filter='`plus(x, 1)` != 2'; +3 +4 +5 +select * from (select x + 1 as a, y from table_1 union all select x as a, y from table_1) order by a, y settings additional_result_filter='a = 3'; +3 bb +3 ccc +select * from (select x + 1 as a, y from table_1 union all select x as a, y from table_1) order by a, y settings additional_result_filter='a != 3'; +1 a +2 a +2 bb +4 ccc +4 dddd +5 dddd diff --git a/tests/queries/0_stateless/02346_additional_filters.sql b/tests/queries/0_stateless/02346_additional_filters.sql new file mode 100644 index 00000000000..9e0bee4549b --- /dev/null +++ b/tests/queries/0_stateless/02346_additional_filters.sql @@ -0,0 +1,95 @@ +drop table if exists table_1; +drop table if exists table_2; +drop table if exists v_numbers; +drop table if exists mv_table; + +create table table_1 (x UInt32, y String) engine = MergeTree order by x; +insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); + +-- { echoOn } + +select * from table_1; +select * from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; +select x from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 prewhere x != 4 where x != 3 settings 
additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 where x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 2 where x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; + +select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy = 0'}; +select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy != 0'}; + +select * from system.numbers limit 5; +select * from system.numbers as t limit 5 settings additional_table_filters={'t' : 'number % 2 != 0'}; +select * from system.numbers limit 5 settings additional_table_filters={'system.numbers' : 'number != 3'}; +select * from system.numbers limit 5 settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; +select * from (select number from system.numbers limit 5 union all select x from table_1) order by number settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; +select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; +select b + 1 as c from (select a + 1 as b from (select x + 1 as a from table_1)) settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; + +-- { echoOff } + +create view v_numbers as select number + 1 as x from system.numbers limit 5; + +-- { echoOn } +select * from v_numbers; +select * from v_numbers settings additional_table_filters={'system.numbers' : 'number != 3'}; +select * from v_numbers settings additional_table_filters={'v_numbers' : 'x != 3'}; +select * from v_numbers settings additional_table_filters={'system.numbers' : 'number != 3', 'v_numbers' : 'x != 3'}; + +-- { echoOff } + +create table table_2 (x UInt32, y String) engine = MergeTree order by x; +insert into table_2 values (4, 'dddd'), (5, 'eeeee'), (6, 'ffffff'), (7, 'ggggggg'); + +create materialized view mv_table to table_2 (x UInt32, y String) as select * from table_1; + +-- additional filter for inner tables for Materialized View does not work because it does not create internal interpreter +-- probably it is expected +-- { echoOn } +select * from mv_table; +select * from mv_table settings additional_table_filters={'mv_table' : 'x != 5'}; +select * from mv_table settings additional_table_filters={'table_1' : 'x != 5'}; +select * from mv_table settings additional_table_filters={'table_2' : 'x != 5'}; + +-- { echoOff } + +create table m_table (x UInt32, y String) engine = Merge(currentDatabase(), '^table_'); + +-- additional filter for inner tables for Merge does not work because it does not create internal interpreter +-- probably it is expected +-- { echoOn } 
+select * from m_table order by x; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 2'}; +select * from m_table order by x settings additional_table_filters={'table_2' : 'x != 5'}; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 2', 'table_2' : 'x != 5'}; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'table_2' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 4', 'table_2' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'m_table' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'m_table' : 'x != 4', 'table_1' : 'x != 2', 'table_2' : 'x != 5'}; + +-- additional_result_filter + +select * from table_1 settings additional_result_filter='x != 2'; +select *, x != 2 from table_1 settings additional_result_filter='x != 2'; +select * from table_1 where x != 1 settings additional_result_filter='x != 2'; +select * from table_1 where x != 1 settings additional_result_filter='x != 2 and x != 3'; +select * from table_1 prewhere x != 3 where x != 1 settings additional_result_filter='x != 2'; + +select * from table_1 limit 3 settings additional_result_filter='x != 2'; + +select x + 1 from table_1 settings additional_result_filter='`plus(x, 1)` != 2'; + +select * from (select x + 1 as a, y from table_1 union all select x as a, y from table_1) order by a, y settings additional_result_filter='a = 3'; +select * from (select x + 1 as a, y from table_1 union all select x as a, y from table_1) order by a, y settings additional_result_filter='a != 3'; diff --git a/tests/queries/0_stateless/02353_explain_ast_optimize.reference b/tests/queries/0_stateless/02353_explain_ast_optimize.reference new file mode 100644 index 00000000000..f4e0de5ca98 --- /dev/null +++ b/tests/queries/0_stateless/02353_explain_ast_optimize.reference @@ -0,0 +1,53 @@ +-- { echoOn } +EXPLAIN AST optimize=0 SELECT * FROM numbers(0); +SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 2) + ExpressionList (children 1) + Asterisk + TablesInSelectQuery (children 1) + TablesInSelectQueryElement (children 1) + TableExpression (children 1) + Function numbers (children 1) + ExpressionList (children 1) + Literal UInt64_0 +EXPLAIN AST optimize=1 SELECT * FROM numbers(0); +SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 2) + ExpressionList (children 1) + Identifier number + TablesInSelectQuery (children 1) + TablesInSelectQueryElement (children 1) + TableExpression (children 1) + Function numbers (children 1) + ExpressionList (children 1) + Literal UInt64_0 +EXPLAIN AST optimize=0 SELECT countDistinct(number) FROM numbers(0); +SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 2) + ExpressionList (children 1) + Function countDistinct (children 1) + ExpressionList (children 1) + Identifier number + TablesInSelectQuery (children 1) + TablesInSelectQueryElement (children 1) + TableExpression (children 1) + Function numbers (children 1) + ExpressionList (children 1) + Literal UInt64_0 +EXPLAIN AST optimize=1 SELECT countDistinct(number) FROM numbers(0); +SelectWithUnionQuery (children 1) + ExpressionList (children 1) + SelectQuery (children 2) + ExpressionList (children 1) + Function uniqExact (children 1) + ExpressionList (children 1) + 
Identifier number + TablesInSelectQuery (children 1) + TablesInSelectQueryElement (children 1) + TableExpression (children 1) + Function numbers (children 1) + ExpressionList (children 1) + Literal UInt64_0 diff --git a/tests/queries/0_stateless/02353_explain_ast_optimize.sql b/tests/queries/0_stateless/02353_explain_ast_optimize.sql new file mode 100644 index 00000000000..a46a47a2e64 --- /dev/null +++ b/tests/queries/0_stateless/02353_explain_ast_optimize.sql @@ -0,0 +1,6 @@ +-- { echoOn } +EXPLAIN AST optimize=0 SELECT * FROM numbers(0); +EXPLAIN AST optimize=1 SELECT * FROM numbers(0); +EXPLAIN AST optimize=0 SELECT countDistinct(number) FROM numbers(0); +EXPLAIN AST optimize=1 SELECT countDistinct(number) FROM numbers(0); +-- { echoOff } diff --git a/tests/queries/0_stateless/02353_explain_ast_rewrite.reference b/tests/queries/0_stateless/02353_explain_ast_rewrite.reference deleted file mode 100644 index 5ee3e0d126c..00000000000 --- a/tests/queries/0_stateless/02353_explain_ast_rewrite.reference +++ /dev/null @@ -1,25 +0,0 @@ --- { echoOn } -EXPLAIN AST rewrite=0 SELECT * FROM numbers(0); -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_0 -EXPLAIN AST rewrite=1 SELECT * FROM numbers(0); -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_0 diff --git a/tests/queries/0_stateless/02353_explain_ast_rewrite.sql b/tests/queries/0_stateless/02353_explain_ast_rewrite.sql deleted file mode 100644 index 7310aa62704..00000000000 --- a/tests/queries/0_stateless/02353_explain_ast_rewrite.sql +++ /dev/null @@ -1,4 +0,0 @@ --- { echoOn } -EXPLAIN AST rewrite=0 SELECT * FROM numbers(0); -EXPLAIN AST rewrite=1 SELECT * FROM numbers(0); --- { echoOff } diff --git a/tests/queries/0_stateless/02353_isnullable.reference b/tests/queries/0_stateless/02353_isnullable.reference index 74240c4b196..d99961f5c6a 100644 --- a/tests/queries/0_stateless/02353_isnullable.reference +++ b/tests/queries/0_stateless/02353_isnullable.reference @@ -2,3 +2,7 @@ 1 1 1 +0 +1 +0 +1 diff --git a/tests/queries/0_stateless/02353_isnullable.sql b/tests/queries/0_stateless/02353_isnullable.sql index 567e294d498..279eea2520a 100644 --- a/tests/queries/0_stateless/02353_isnullable.sql +++ b/tests/queries/0_stateless/02353_isnullable.sql @@ -3,3 +3,9 @@ SELECT isNullable(toNullable(3)); SELECT isNullable(NULL); SELECT isNullable(materialize(NULL)); + +SELECT isNullable(toLowCardinality(1)); +SELECT isNullable(toNullable(toLowCardinality(1))); + +SELECT isNullable(toLowCardinality(materialize(1))); +SELECT isNullable(toNullable(toLowCardinality(materialize(1)))); diff --git a/tests/queries/0_stateless/02353_partition_prune_nullable_key.reference b/tests/queries/0_stateless/02353_partition_prune_nullable_key.reference new file mode 100644 index 00000000000..dec7d2fabd2 --- /dev/null +++ b/tests/queries/0_stateless/02353_partition_prune_nullable_key.reference @@ -0,0 +1 @@ +\N diff --git a/tests/queries/0_stateless/02353_partition_prune_nullable_key.sql 
b/tests/queries/0_stateless/02353_partition_prune_nullable_key.sql new file mode 100644 index 00000000000..5a5109c3140 --- /dev/null +++ b/tests/queries/0_stateless/02353_partition_prune_nullable_key.sql @@ -0,0 +1,9 @@ +drop table if exists n; + +create table n(nc Nullable(int)) engine = MergeTree order by (tuple()) partition by (nc) settings allow_nullable_key = 1; + +insert into n values (null); + +select * from n where nc is null; + +drop table n; diff --git a/tests/queries/0_stateless/02353_simdjson_buffer_overflow.reference b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql new file mode 100644 index 00000000000..0572536e19e --- /dev/null +++ b/tests/queries/0_stateless/02353_simdjson_buffer_overflow.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest, no-msan +-- Tag: no-msan: fuzzer can make this query very memory hungry, and under MSan, the MemoryTracker cannot account for the additional memory used by sanitizer, and OOM happens. + +SET max_execution_time = 3; +SET timeout_overflow_mode = 'break'; + +SELECT count() FROM system.numbers_mt WHERE NOT ignore(JSONExtract('{' || repeat('"a":"b",', rand() % 10) || '"c":"d"}', 'a', 'String')) FORMAT Null; diff --git a/tests/queries/0_stateless/02354_parse_timedelta.reference b/tests/queries/0_stateless/02354_parse_timedelta.reference new file mode 100644 index 00000000000..f9dd7879057 --- /dev/null +++ b/tests/queries/0_stateless/02354_parse_timedelta.reference @@ -0,0 +1,11 @@ +95 +11.23 +41103.1 +0.00123 +36806400 +1331 +40273293 +1.001001001 +1.001001001 +1.001001001 +1.11111111111 diff --git a/tests/queries/0_stateless/02354_parse_timedelta.sql b/tests/queries/0_stateless/02354_parse_timedelta.sql new file mode 100644 index 00000000000..29f2bf9fdfc --- /dev/null +++ b/tests/queries/0_stateless/02354_parse_timedelta.sql @@ -0,0 +1,25 @@ +SELECT parseTimeDelta('1 min 35 sec'); +SELECT parseTimeDelta('0m;11.23s.'); +SELECT parseTimeDelta('11hr 25min 3.1s'); +SELECT parseTimeDelta('0.00123 seconds'); +SELECT parseTimeDelta('1yr2mo'); +SELECT parseTimeDelta('11s+22min'); +SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds'); +SELECT parseTimeDelta('1s1ms1us1ns'); +SELECT parseTimeDelta('1s1ms1μs1ns'); +SELECT parseTimeDelta('1s - 1ms : 1μs ; 1ns'); +SELECT parseTimeDelta('1.11s1.11ms1.11us1.11ns'); + +-- invalid expressions +SELECT parseTimeDelta(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT parseTimeDelta('1yr', 1); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT parseTimeDelta(1); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT parseTimeDelta(' '); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('-1yr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1yr-'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('yr2mo'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1.yr2mo'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1-yr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1 1yr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1yyr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ;. 
33 seconds'); -- {serverError BAD_ARGUMENTS} diff --git a/tests/queries/0_stateless/02354_read_in_order_prewhere.reference b/tests/queries/0_stateless/02354_read_in_order_prewhere.reference new file mode 100644 index 00000000000..7d5543bf9cc --- /dev/null +++ b/tests/queries/0_stateless/02354_read_in_order_prewhere.reference @@ -0,0 +1,10 @@ +1 +1 +1 +2001 +2001 +1 +1 +1 +2001 +2001 diff --git a/tests/queries/0_stateless/02354_read_in_order_prewhere.sql b/tests/queries/0_stateless/02354_read_in_order_prewhere.sql new file mode 100644 index 00000000000..c5abd5945f3 --- /dev/null +++ b/tests/queries/0_stateless/02354_read_in_order_prewhere.sql @@ -0,0 +1,30 @@ +drop table if exists order; + +CREATE TABLE order +( + ID Int64, + Type Int64, + Num UInt64, + t DateTime +) +ENGINE = MergeTree() +PARTITION BY toYYYYMMDD(t) +ORDER BY (ID, Type, Num); + +system stop merges order; + +insert into order select number%2000, 1, number, (1656700561 - intDiv(intHash32(number), 1000)) from numbers(100000); +insert into order select number%2000, 1, number, (1656700561 - intDiv(intHash32(number), 1000)) from numbers(100000); +insert into order select number%2000, 1, number, (1656700561 - intDiv(intHash32(number), 1000)) from numbers(100000); + +SELECT Num +FROM order +WHERE Type = 1 AND ID = 1 +ORDER BY Num ASC limit 5; + +SELECT Num +FROM order +PREWHERE Type = 1 +WHERE ID = 1 +ORDER BY Num ASC limit 5; + diff --git a/tests/queries/0_stateless/02354_tuple_element_with_default.reference b/tests/queries/0_stateless/02354_tuple_element_with_default.reference new file mode 100644 index 00000000000..d5dfff17ef1 --- /dev/null +++ b/tests/queries/0_stateless/02354_tuple_element_with_default.reference @@ -0,0 +1,26 @@ +z +SELECT tupleElement(t1, \'z\', \'z\') +FROM t_tuple_element_default +0 +SELECT tupleElement(t1, \'z\', 0) +FROM t_tuple_element_default +z +SELECT tupleElement(t2, \'z\', \'z\') +FROM t_tuple_element_default +-------------------- +[(3,4)] +SELECT tupleElement([(1, 2)], \'a\', [(3, 4)]) +-------------------- +SELECT tupleElement(t1, \'a\', [tuple(1)]) +FROM t_tuple_element_default +-------------------- +[(0)] +SELECT tupleElement(t1, \'a\', [tuple(0)]) +FROM t_tuple_element_default +[0] +SELECT tupleElement(t1, \'a\', [0]) +FROM t_tuple_element_default +[0] +[0] +SELECT tupleElement(t1, \'a\', [0]) +FROM t_tuple_element_default diff --git a/tests/queries/0_stateless/02354_tuple_element_with_default.sql b/tests/queries/0_stateless/02354_tuple_element_with_default.sql new file mode 100644 index 00000000000..908a869885b --- /dev/null +++ b/tests/queries/0_stateless/02354_tuple_element_with_default.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS t_tuple_element_default; + +CREATE TABLE t_tuple_element_default(t1 Tuple(a UInt32, s String), t2 Tuple(UInt32, String)) ENGINE = Memory; +INSERT INTO t_tuple_element_default VALUES ((1, 'a'), (2, 'b')); + +SELECT tupleElement(t1, 'z', 'z') FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'z', 'z') FROM t_tuple_element_default; +SELECT tupleElement(t1, 'z', 0) FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'z', 0) FROM t_tuple_element_default; +SELECT tupleElement(t2, 'z', 'z') FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t2, 'z', 'z') FROM t_tuple_element_default; + +SELECT tupleElement(t1, 3, 'z') FROM t_tuple_element_default; -- { serverError 127 } +SELECT tupleElement(t1, 0, 'z') FROM t_tuple_element_default; -- { serverError 127 } + +DROP TABLE t_tuple_element_default; + +SELECT 
'--------------------'; + +SELECT tupleElement(array(tuple(1, 2)), 'a', 0); -- { serverError 645 } +SELECT tupleElement(array(tuple(1, 2)), 'a', array(tuple(1, 2), tuple(3, 4))); -- { serverError 190 } +SELECT tupleElement(array(array(tuple(1))), 'a', array(array(1, 2, 3))); -- { serverError 190 } + +SELECT tupleElement(array(tuple(1, 2)), 'a', array(tuple(3, 4))); +EXPLAIN SYNTAX SELECT tupleElement(array(tuple(1, 2)), 'a', array(tuple(3, 4))); + +SELECT '--------------------'; + +CREATE TABLE t_tuple_element_default(t1 Array(Tuple(UInt32)), t2 UInt32) ENGINE = Memory; + +SELECT tupleElement(t1, 'a', array(tuple(1))) FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'a', array(tuple(1))) FROM t_tuple_element_default; + +SELECT '--------------------'; + +INSERT INTO t_tuple_element_default VALUES ([(1)], 100); + +SELECT tupleElement(t1, 'a', array(tuple(0))) FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'a', array(tuple(0))) FROM t_tuple_element_default; + +SELECT tupleElement(t1, 'a', array(0)) FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'a', array(0)) FROM t_tuple_element_default; + +INSERT INTO t_tuple_element_default VALUES ([(2)], 200); + +SELECT tupleElement(t1, 'a', array(0)) FROM t_tuple_element_default; +EXPLAIN SYNTAX SELECT tupleElement(t1, 'a', array(0)) FROM t_tuple_element_default; + +DROP TABLE t_tuple_element_default; + diff --git a/tests/queries/0_stateless/02356_insert_query_log_metrics.reference b/tests/queries/0_stateless/02356_insert_query_log_metrics.reference new file mode 100644 index 00000000000..0d707252cc7 --- /dev/null +++ b/tests/queries/0_stateless/02356_insert_query_log_metrics.reference @@ -0,0 +1 @@ +1,1,1,1 diff --git a/tests/queries/0_stateless/02356_insert_query_log_metrics.sql b/tests/queries/0_stateless/02356_insert_query_log_metrics.sql new file mode 100644 index 00000000000..dabb898093e --- /dev/null +++ b/tests/queries/0_stateless/02356_insert_query_log_metrics.sql @@ -0,0 +1,5 @@ +CREATE TABLE 02356_destination (a Int64, b String) ENGINE = Memory; + +INSERT INTO 02356_destination (a, b) SELECT * FROM generateRandom('a Int64, b String') LIMIT 100 SETTINGS max_threads=1, max_block_size=100; +SYSTEM FLUSH LOGS; +SELECT read_rows = written_rows, read_rows = result_rows, read_bytes = written_bytes, read_bytes = result_bytes FROM system.query_log where normalized_query_hash = 1214411238725380014 and type='QueryFinish' and current_database = currentDatabase() FORMAT CSV; \ No newline at end of file diff --git a/tests/queries/0_stateless/02360_send_logs_level_colors.reference b/tests/queries/0_stateless/02360_send_logs_level_colors.reference new file mode 100644 index 00000000000..fe2824243c4 --- /dev/null +++ b/tests/queries/0_stateless/02360_send_logs_level_colors.reference @@ -0,0 +1,3 @@ +ASCII text +ASCII text +ASCII text diff --git a/tests/queries/0_stateless/02360_send_logs_level_colors.sh b/tests/queries/0_stateless/02360_send_logs_level_colors.sh new file mode 100755 index 00000000000..4e5ce057702 --- /dev/null +++ b/tests/queries/0_stateless/02360_send_logs_level_colors.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +file_name="${CLICKHOUSE_TMP}/res_${CLICKHOUSE_DATABASE}.log" +CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=trace/g') + +# Run query via expect to make isatty() return true +function run() +{ + command=$1 + expect << EOF +log_user 0 +set timeout 3 +match_max 100000 + +spawn bash -c "$command" +expect 1 +EOF + + file "$file_name" | grep -o "ASCII text" + file "$file_name" | grep -o "with escape sequences" +} + +run "$CLICKHOUSE_CLIENT -q 'SELECT 1' 2>$file_name" +run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=$file_name" +run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name" + +rm -f "$file_name" diff --git a/tests/queries/0_stateless/02364_dictionary_datetime_64_attribute_crash.reference b/tests/queries/0_stateless/02364_dictionary_datetime_64_attribute_crash.reference new file mode 100644 index 00000000000..cd97db4debd --- /dev/null +++ b/tests/queries/0_stateless/02364_dictionary_datetime_64_attribute_crash.reference @@ -0,0 +1,2 @@ +2022-01-24 02:30:00.008122000 +1 diff --git a/tests/queries/0_stateless/02364_dictionary_datetime_64_attribute_crash.sql b/tests/queries/0_stateless/02364_dictionary_datetime_64_attribute_crash.sql new file mode 100644 index 00000000000..77fc9e1183b --- /dev/null +++ b/tests/queries/0_stateless/02364_dictionary_datetime_64_attribute_crash.sql @@ -0,0 +1,15 @@ +create table dat (blockNum Decimal(10,0), eventTimestamp DateTime64(9)) Engine=MergeTree() primary key eventTimestamp; +insert into dat values (1, '2022-01-24 02:30:00.008122000'); + +CREATE DICTIONARY datDictionary +( + `blockNum` Decimal(10, 0), + `eventTimestamp` DateTime64(9) +) +PRIMARY KEY blockNum +SOURCE(CLICKHOUSE(TABLE 'dat')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(FLAT()); + +select (select eventTimestamp from datDictionary); +select count(*) from dat where eventTimestamp >= (select eventTimestamp from datDictionary); diff --git a/tests/queries/0_stateless/02364_multiSearch_function_family.reference b/tests/queries/0_stateless/02364_multiSearch_function_family.reference new file mode 100644 index 00000000000..eb93a2509b6 --- /dev/null +++ b/tests/queries/0_stateless/02364_multiSearch_function_family.reference @@ -0,0 +1,12874 @@ +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02364_multiSearch_function_family.sql b/tests/queries/0_stateless/02364_multiSearch_function_family.sql new file mode 100644 index 00000000000..4ad1a68eeb7 --- /dev/null +++ b/tests/queries/0_stateless/02364_multiSearch_function_family.sql @@ -0,0 +1,536 @@ +SET send_logs_level = 'fatal'; + +select 0 = multiSearchAny('\0', CAST([], 'Array(String)')); +select 0 = multiSearchAnyCaseInsensitive('\0', CAST([], 'Array(String)')); +select 0 = multiSearchAnyCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchAnyUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndex('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndexCaseInsensitive('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndexUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPosition('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPositionCaseInsensitive('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPositionCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPositionUTF8('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositions('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositionsCaseInsensitive('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositionsCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositionsUTF8('\0', CAST([], 'Array(String)')); + +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']); +select [2] = 
multiSearchAllPositions(materialize('abcdefgh'), ['bcd']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']); + +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']); + +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']); + +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']); + +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']); +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']); +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']); +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']); + +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']); +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']); +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']); + +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']); +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']); + +select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']); + +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 10; + +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 10; +select [1] = 
multiSearchAllPositions(materialize('abcdefgh'), ['abcde']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']) from system.numbers limit 10; + +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 10; + +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']) from system.numbers limit 10; + +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']) from system.numbers limit 10; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']) from system.numbers limit 10; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']) from system.numbers limit 10; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']) from system.numbers limit 10; + +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']) from system.numbers limit 10; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']) from system.numbers limit 10; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']) from system.numbers limit 10; + +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']) from system.numbers limit 10; +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']) from system.numbers limit 10; + +select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']) from system.numbers limit 10; + +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 129; + +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), 
['abcdefg']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']) from system.numbers limit 129; + +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 129; + +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']) from system.numbers limit 129; + +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']) from system.numbers limit 129; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']) from system.numbers limit 129; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']) from system.numbers limit 129; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']) from system.numbers limit 129; + +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']) from system.numbers limit 129; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']) from system.numbers limit 129; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']) from system.numbers limit 129; + +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']) from system.numbers limit 129; +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']) from system.numbers limit 129; + +select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']) from system.numbers limit 129; + +select [2] = multiSearchAllPositions(materialize('abc'), ['b']); +select [2] = multiSearchAllPositions(materialize('abc'), ['bc']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcde']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdef']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefg']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefgh']); + +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdefg']); +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdef']); +select [0] = multiSearchAllPositions(materialize('abc'), ['abcde']); +select [0] = multiSearchAllPositions(materialize('abc'), ['abcd']); +select [1] = 
multiSearchAllPositions(materialize('abc'), ['abc']); +select [1] = multiSearchAllPositions(materialize('abc'), ['ab']); +select [1] = multiSearchAllPositions(materialize('abc'), ['a']); + +select [3] = multiSearchAllPositions(materialize('abcd'), ['c']); +select [3] = multiSearchAllPositions(materialize('abcd'), ['cd']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cde']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdef']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefg']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefgh']); + +select [0] = multiSearchAllPositions(materialize('abc'), ['defgh']); +select [0] = multiSearchAllPositions(materialize('abc'), ['defg']); +select [0] = multiSearchAllPositions(materialize('abc'), ['def']); +select [0] = multiSearchAllPositions(materialize('abc'), ['de']); +select [0] = multiSearchAllPositions(materialize('abc'), ['d']); + + +select [2] = multiSearchAllPositions(materialize('abc'), ['b']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abc'), ['bc']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcde']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdef']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; + + +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdefg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdef']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['abcde']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['abcd']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['abc']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['ab']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['a']) from system.numbers limit 10; + +select [3] = multiSearchAllPositions(materialize('abcd'), ['c']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcd'), ['cd']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cde']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdef']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; + +select [0] = multiSearchAllPositions(materialize('abc'), ['defgh']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['defg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['def']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['de']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['d']) from system.numbers limit 10; + +select [1] = multiSearchAllPositions(materialize('abc'), ['']); +select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['']) from 
system.numbers limit 100; +select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 1000; + +select [1] = multiSearchAllPositions(materialize('abab'), ['ab']); +select [1] = multiSearchAllPositions(materialize('abababababababababababab'), ['abab']); +select [1] = multiSearchAllPositions(materialize('abababababababababababab'), ['abababababababababa']); + +select [1] = multiSearchAllPositions(materialize('abc'), materialize([''])); +select [1] = multiSearchAllPositions(materialize('abc'), materialize([''])) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abab'), materialize(['ab'])); +select [2] = multiSearchAllPositions(materialize('abab'), materialize(['ba'])); +select [1] = multiSearchAllPositionsCaseInsensitive(materialize('aBaB'), materialize(['abab'])); +select [3] = multiSearchAllPositionsUTF8(materialize('ab€ab'), materialize(['€'])); +select [3] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ab€AB'), materialize(['€ab'])); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['b']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['a']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['c']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['def']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['de']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['d']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['e']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['f']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['g']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['h']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['b']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; 
+select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['a']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['c']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['def']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['de']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['d']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['e']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['f']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['g']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['h']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['b']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), 
['bcdefg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['a']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['c']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['def']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['de']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['d']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['e']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['f']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['g']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['h']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abc'), ['b']); +select 1 = multiSearchAny(materialize('abc'), ['bc']); +select 0 = multiSearchAny(materialize('abc'), ['bcde']); +select 0 = multiSearchAny(materialize('abc'), ['bcdef']); +select 0 = multiSearchAny(materialize('abc'), ['bcdefg']); +select 0 = multiSearchAny(materialize('abc'), ['bcdefgh']); + +select 0 = multiSearchAny(materialize('abc'), ['abcdefg']); +select 0 = multiSearchAny(materialize('abc'), ['abcdef']); +select 0 = multiSearchAny(materialize('abc'), ['abcde']); +select 0 = multiSearchAny(materialize('abc'), ['abcd']); +select 1 = multiSearchAny(materialize('abc'), ['abc']); +select 1 = multiSearchAny(materialize('abc'), ['ab']); +select 1 = 
multiSearchAny(materialize('abc'), ['a']); + +select 1 = multiSearchAny(materialize('abcd'), ['c']); +select 1 = multiSearchAny(materialize('abcd'), ['cd']); +select 0 = multiSearchAny(materialize('abcd'), ['cde']); +select 0 = multiSearchAny(materialize('abcd'), ['cdef']); +select 0 = multiSearchAny(materialize('abcd'), ['cdefg']); +select 0 = multiSearchAny(materialize('abcd'), ['cdefgh']); + +select 0 = multiSearchAny(materialize('abc'), ['defgh']); +select 0 = multiSearchAny(materialize('abc'), ['defg']); +select 0 = multiSearchAny(materialize('abc'), ['def']); +select 0 = multiSearchAny(materialize('abc'), ['de']); +select 0 = multiSearchAny(materialize('abc'), ['d']); + + +select 1 = multiSearchAny(materialize('abc'), ['b']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['bc']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcde']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcdef']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcdefg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; + + +select 0 = multiSearchAny(materialize('abc'), ['abcdefg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['abcdef']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['abcde']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['abcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['abc']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['ab']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['a']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcd'), ['c']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcd'), ['cd']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cde']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cdef']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cdefg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; + +select 0 = multiSearchAny(materialize('abc'), ['defgh']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['defg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['def']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['de']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['d']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abc'), ['']); +select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 100; +select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 1000; + +select 1 = multiSearchAny(materialize('abab'), ['ab']); +select 1 = multiSearchAny(materialize('abababababababababababab'), ['abab']); +select 1 = multiSearchAny(materialize('abababababababababababab'), ['abababababababababa']); + + +select 0 = multiSearchFirstPosition(materialize('abcdefgh'), ['z', 'pq']) from system.numbers limit 10; +select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['a', 'b', 'c', 'd']) from system.numbers limit 10; +select 1 
= multiSearchFirstPosition(materialize('abcdefgh'), ['defgh', 'bcd', 'abcd', 'c']) from system.numbers limit 10; +select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 2 = multiSearchFirstPosition(materialize('abcdefgh'), ['something', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 6 = multiSearchFirstPosition(materialize('abcdefgh'), ['something', 'bcdz', 'fgh', 'f']) from system.numbers limit 10; + +select 0 = multiSearchFirstPositionCaseInsensitive(materialize('abcdefgh'), ['z', 'pq']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitive(materialize('aBcdefgh'), ['A', 'b', 'c', 'd']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitive(materialize('abCDefgh'), ['defgh', 'bcd', 'aBCd', 'c']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitive(materialize('abCdeFgH'), ['', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 2 = multiSearchFirstPositionCaseInsensitive(materialize('ABCDEFGH'), ['something', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 6 = multiSearchFirstPositionCaseInsensitive(materialize('abcdefgh'), ['sOmEthIng', 'bcdZ', 'fGh', 'F']) from system.numbers limit 10; + +select 0 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['л', 'ъ']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['а', 'б', 'в', 'г']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['гдежз', 'бвг', 'абвг', 'вг']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['', 'бвг', 'бвг', 'в']) from system.numbers limit 10; +select 2 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['что', 'в', 'гдз', 'бвг']) from system.numbers limit 10; +select 6 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['з', 'бвгя', 'ежз', 'з']) from system.numbers limit 10; + +select 0 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['Л', 'Ъ']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['А', 'б', 'в', 'г']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['гДеЖз', 'бВг', 'АБВг', 'вг']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['', 'бвг', 'Бвг', 'в']) from system.numbers limit 10; +select 2 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['что', 'в', 'гдз', 'бвг']) from system.numbers limit 10; +select 6 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежЗ'), ['З', 'бвгЯ', 'ЕЖз', 'з']) from system.numbers limit 10; + +select +[ +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +] = +multiSearchAllPositions(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + +select 254 = multiSearchFirstIndex(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + + +select +[ +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +] = 
+multiSearchAllPositions(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + +select 255 = multiSearchFirstIndex(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + +select multiSearchAllPositions(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError 42 } + +select multiSearchFirstIndex(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError 42 } diff --git a/tests/queries/0_stateless/02364_setting_cross_to_inner_rewrite.reference b/tests/queries/0_stateless/02364_setting_cross_to_inner_rewrite.reference new file mode 100644 index 00000000000..fcb49fa9945 --- /dev/null +++ b/tests/queries/0_stateless/02364_setting_cross_to_inner_rewrite.reference @@ -0,0 +1,7 @@ +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02364_setting_cross_to_inner_rewrite.sql b/tests/queries/0_stateless/02364_setting_cross_to_inner_rewrite.sql new file mode 100644 index 00000000000..cdbac93937e --- /dev/null +++ b/tests/queries/0_stateless/02364_setting_cross_to_inner_rewrite.sql @@ -0,0 +1,22 @@ + + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 ( x Int ) Engine = Memory; +INSERT INTO t1 VALUES ( 1 ), ( 2 ), ( 3 ); + +CREATE TABLE t2 ( x Int ) Engine = Memory; +INSERT INTO t2 VALUES ( 2 ), ( 3 ), ( 4 ); + +SET cross_to_inner_join_rewrite = 1; +SELECT count() = 1 FROM t1, t2 WHERE t1.x > t2.x; +SELECT count() = 2 FROM t1, t2 WHERE t1.x = t2.x; +SELECT count() = 2 FROM t1 CROSS JOIN t2 WHERE t1.x = t2.x; +SELECT count() = 1 FROM t1 CROSS JOIN t2 WHERE t1.x > t2.x; + +SET cross_to_inner_join_rewrite = 2; +SELECT count() = 1 FROM t1, t2 WHERE t1.x > t2.x; -- { serverError INCORRECT_QUERY } +SELECT count() = 2 FROM t1, t2 WHERE t1.x = t2.x; +SELECT count() = 2 FROM t1 CROSS JOIN t2 WHERE t1.x = t2.x; +SELECT count() = 1 FROM t1 CROSS JOIN t2 WHERE t1.x > t2.x; -- do not force rewrite explicit CROSS diff --git a/tests/queries/0_stateless/02364_window_case.reference b/tests/queries/0_stateless/02364_window_case.reference new file mode 100644 index 00000000000..f00c965d830 --- /dev/null +++ 
b/tests/queries/0_stateless/02364_window_case.reference @@ -0,0 +1,10 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 diff --git a/tests/queries/0_stateless/02364_window_case.sql b/tests/queries/0_stateless/02364_window_case.sql new file mode 100644 index 00000000000..b34686c3c9a --- /dev/null +++ b/tests/queries/0_stateless/02364_window_case.sql @@ -0,0 +1,4 @@ +SELECT CASE + WHEN sum(number) over () > 0 THEN number + 1 + ELSE 0 END +FROM numbers(10) diff --git a/tests/queries/0_stateless/02365_multisearch_random_tests.reference b/tests/queries/0_stateless/02365_multisearch_random_tests.reference new file mode 100644 index 00000000000..394c420ae8c --- /dev/null +++ b/tests/queries/0_stateless/02365_multisearch_random_tests.reference @@ -0,0 +1,3641 @@ +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/tests/queries/0_stateless/02365_multisearch_random_tests.sql b/tests/queries/0_stateless/02365_multisearch_random_tests.sql new file mode 100644 index 00000000000..3243dd47689 --- /dev/null +++ b/tests/queries/0_stateless/02365_multisearch_random_tests.sql @@ -0,0 +1,379 @@ +SET send_logs_level = 'fatal'; + +select [4, 1, 1, 2, 6, 1, 1, 0, 4, 1, 14, 0, 10, 0, 16, 6] = multiSearchAllPositions(materialize('jmdqwjbrxlbatqeixknricfk'), ['qwjbrxlba', 'jmd', '', 'mdqwjbrxlbatqe', 'jbrxlbatqeixknric', 'jmdqwjbrxlbatqeixknri', '', 'fdtmnwtts', 'qwjbrxlba', '', 'qeixknricfk', 'hzjjgrnoilfkvzxaemzhf', 'lb', 'kamz', 'ixknr', 'jbrxlbatq']) from system.numbers limit 10; +select [0, 0, 0, 2, 3, 0, 1, 0, 5, 0, 0, 0, 11, 10, 6, 7] = multiSearchAllPositions(materialize('coxcctuehmzkbrsmodfvx'), ['bkhnp', 'nlypjvriuk', 'rkslxwfqjjivcwdexrdtvjdtvuu', 'oxcctuehm', 'xcctuehmzkbrsm', 'kfrieuocovykjmkwxbdlkgwctwvcuh', 'coxc', 'lbwvetgxyndxjqqwthtkgasbafii', 'ctuehmzkbrsmodfvx', 'obzldxjldxowk', 'ngfikgigeyll', 'wdaejjukowgvzijnw', 'zkbr', 'mzkb', 'tuehm', 'ue']) from system.numbers limit 10; +select [1, 1, 0, 0, 0, 1, 1, 1, 4, 0, 6, 6, 0, 10, 1, 5] = multiSearchAllPositions(materialize('mpswgtljbbrmivkcglamemayfn'), ['', 'm', 'saejhpnfgfq', 'rzanrkdssmmkanqjpfi', 'oputeneprgoowg', 'mp', '', '', 'wgtljbbrmivkcglamemay', 'cbpthtrgrmgfypizi', 'tl', 'tlj', 'xuhs', 'brmivkcglamemayfn', '', 'gtljb']) from system.numbers limit 10; +select [1, 0, 0, 8, 6, 0, 7, 1, 3, 0, 0, 0, 0, 12] = multiSearchAllPositions(materialize('arbphzbbecypbzsqsljurtddve'), ['arbphzb', 'mnrboimjfijnti', 'cikcrd', 'becypbz', 'z', 'uocmqgnczhdcrvtqrnaxdxjjlhakoszuwc', 'bbe', '', 'bp', 'yhltnexlpdijkdzt', 'jkwjmrckvgmccmmrolqvy', 'vdxmicjmfbtsbqqmqcgtnrvdgaucsgspwg', 'witlfqwvhmmyjrnrzttrikhhsrd', 'pbzsqsljurt']) from system.numbers limit 10; +select [7, 0, 0, 8, 0, 2, 0, 0, 6, 0, 2, 0, 3, 1] = multiSearchAllPositions(materialize('aizovxqpzcbbxuhwtiaaqhdqjdei'), ['qpzcbbxuhw', 'jugrpglqbm', 'dspwhzpyjohhtizegrnswhjfpdz', 'pzcbbxuh', 'vayzeszlycke', 'i', 'gvrontcpqavsjxtjwzgwxugiyhkhmhq', 'gyzmeroxztgaurmrqwtmsxcqnxaezuoapatvu', 'xqpzc', 'mjiswsvlvlpqrhhptqq', 'iz', 'hmzjxxfjsvcvdpqwtrdrp', 'zovxqpzcbbxuhwtia', 'ai']) from system.numbers limit 10; +select [0, 0, 0, 19, 14, 22, 10, 0, 0, 13, 0, 8] = multiSearchAllPositions(materialize('ydfgiluhyxwqdfiwtzobwzscyxhuov'), ['srsoubrgghleyheujsbwwwykerzlqphgejpxvog', 'axchkyleddjwkvbuyhmekpbbbztxdlm', 'zqodzvlkmfe', 'obwz', 'fi', 'zsc', 'xwq', 'pvmurvrd', 'uulcdtexckmrsokmgdpkstlkoavyrmxeaacvydxf', 
'dfi', 'mxcngttujzgtlssrmluaflmjuv', 'hyxwqdfiwtzobwzscyxhu']) from system.numbers limit 10; +select [6, 1, 1, 0, 0, 5, 1, 0, 8, 0, 5, 0, 2, 12, 0, 15, 0, 0] = multiSearchAllPositions(materialize('pyepgwainvmwekwhhqxxvzdjw'), ['w', '', '', 'gvvkllofjnxvcu', 'kmwwhboplctvzazcyfpxhwtaddfnhekei', 'gwainv', 'pyepgwain', 'ekpnogkzzmbpfynsunwqp', 'invmwe', 'hrxpiplfplqjsstuybksuteoz', 'gwa', 'akfpyduqrwosxcbdemtxrxvundrgse', 'yepgwainvmw', 'wekwhhqxxvzdjw', 'fyimzvedmyriubgoznmcav', 'whhq', 'ozxowbwdqfisuupyzaqynoprgsjhkwlum', 'vpoufrofekajksdp']) from system.numbers limit 10; +select [0, 0, 5, 1, 1, 0, 15, 1, 5, 10, 4, 0, 1, 0, 3, 0, 0, 0] = multiSearchAllPositions(materialize('lqwahffxurkbhhzytequotkfk'), ['rwjqudpuaiufle', 'livwgbnflvy', 'hffxurkbhh', '', '', 'xcajwbqbttzfzfowjubmmgnmssat', 'zytequ', 'lq', 'h', 'rkbhh', 'a', 'immejthwgdr', '', 'llhhnlhcvnxxorzzjt', 'w', 'cvjynqxcivmmmvc', 'wexjomdcmursppjtsweybheyxzleuz', 'fzronsnddfxwlkkzidiknhpjipyrcrzel']) from system.numbers limit 10; +select [0, 1, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1] = multiSearchAllPositions(materialize('nkddriylnakicwgdwrfxpodqea'), ['izwdpgrgpmjlwkanjrffgela', '', 'kicw', 'hltmfymgmrjckdiylkzjlvvyuleksikdjrg', 'yigveskrbidknjxigwilmkgyizewikh', 'xyvzhsnqmuec', 'odcgzlavzrwesjks', 'oilvfgliktoujukpgzvhmokdgkssqgqot', 'llsfsurvimbahwqtbqbp', 'nxj', 'pimydixeobdxmdkvhcyzcgnbhzsydx', 'couzmvxedobuohibgxwoxvmpote', 'driylnakicwgdwrf', 'nkddr']) from system.numbers limit 10; +select [0, 0, 0, 3, 0, 15, 0, 0, 12, 7, 0, 0, 0, 0, 5, 0] = multiSearchAllPositions(materialize('jnckhtjqwycyihuejibqmddrdxe'), ['tajzx', 'vuddoylclxatcjvinusdwt', 'spxkhxvzsljkmnzpeubszjnhqczavgtqopxn', 'ckhtjqwycyi', 'xlbfzdxspldoes', 'u', 'czosfebeznt', 'gzhabdsuyreisxvyfrfrkq', 'yihuejibqmd', 'jqwycyihuejibqm', 'cfbvprgzx', 'hxu', 'vxbhrfpzacgd', 'afoaij', 'htjqwycyihu', 'httzbskqd']) from system.numbers limit 10; +select [0, 0, 12, 4, 4, 0, 13, 23, 0, 1, 0, 2, 0, 0, 0, 3, 0, 0] = multiSearchAllPositions(materialize('dzejajvpoojdkqbnayahygidyrjmb'), ['khwxxvtnqhobbvwgwkpusjlhlzifiuclycml', 'nzvuhtwdaivo', 'dkqbnayahygidyr', 'jajvpoo', 'j', 'wdtbvwmeqgyvetu', 'kqbn', 'idyrjmb', 'tsnxuxevsxrxpgpfdgrkhwqpkse', '', 'efsdgzuefhdzkmquxu', 'zejajvpoojdkqbnayahyg', 'ugwfuighbygrxyctop', 'fcbxzbdugc', 'dxmzzrcplob', 'ejaj', 'wmmupyxrylvawsyfccluiiene', 'ohzmsqhpzbafvbzqwzftbvftei']) from system.numbers limit 10; +select [6, 8, 1, 4, 0, 10, 0, 1, 14, 0, 1, 0, 5, 0, 0, 0, 0, 15, 0, 1] = multiSearchAllPositions(materialize('ffaujlverosspbzaqefjzql'), ['lvero', 'erossp', 'f', 'ujlverosspbz', 'btfimgklzzxlbkbuqyrmnud', 'osspb', 'muqexvtjuaar', 'f', 'bzaq', 'lprihswhwkdhqciqhfaowarn', 'ffaujlve', 'uhbbjrqjb', 'jlver', 'umucyhbbu', 'pjthtzmgxhvpbdphesnnztuu', 'xfqhfdfsbbazactpastzvzqudgk', 'lvovjfoatc', 'z', 'givejzhoqsd', '']) from system.numbers limit 10; +select [5, 7, 0, 1, 6, 0, 0, 1, 1, 2, 0, 1, 4, 2, 0, 6, 0, 0] = multiSearchAllPositions(materialize('hzftozkvquknsahhxefzg'), ['ozkvquknsahhxefzg', 'kv', 'lkdhmafrec', '', 'zkvquknsahh', 'xmjuizyconipirigdmhqclox', 'dqqwolnkkwbyyjicsoshidbay', '', '', 'zf', 'sonvmkapcjcakgpejvn', 'hzftoz', 't', 'zftozkvqukns', 'dyuqohvehxsvdzdlqzl', 'zkvquknsahhx', 'vueohmytvmglqwptfbhxffspf', 'ilkdurxg']) from system.numbers limit 10; +select [1, 7, 6, 4, 0, 1, 0, 0, 0, 9, 7, 1, 1, 0, 0, 0] = multiSearchAllPositions(materialize('aapdygjzrhskntrphianzjob'), ['', 'jz', 'gjzrh', 'dygjzrhskntrphia', 'qcnahphlxmdru', '', 'rnwvzdn', 'isbekwuivytqggsxniqojrvpwjdr', 'sstwvgyavbwxvjojrpg', 'rhskn', 'jzrhskntrp', '', '', 
'toilvppgjizaxtidizgbgygubmob', 'vjwzwpvsklkxqgeqqmtssnhlmw', 'znvpjjlydvzhkt']) from system.numbers limit 10; +select [0, 1, 0, 1, 0, 0, 10, 0, 0, 0, 11, 0, 5, 0] = multiSearchAllPositions(materialize('blwpfdjjkxettfetdoxvxbyk'), ['wgylnwqcrojacofrcanjme', 'bl', 'qqcunzpvgi', '', 'ijemdmmdxkakrawwdqrjtrttig', 'qwkaifalc', 'xe', 'zqocnfuvzowuqkmwrfxw', 'xpaayeljvly', 'wvphqqhulpepjjjnxjfudfcomajc', 'ettfetdoxvx', 'ikablovwhnbohibbuhwjshhdemidgreqf', 'fdjjkxett', 'kiairehwbxveqkcfqhgopztgpatljgqp']) from system.numbers limit 10; +select [0, 0, 6, 1, 1, 0, 0, 1, 2, 0, 0, 0, 0, 0] = multiSearchAllPositions(materialize('vghzgedqpnqtvaoonwsz'), ['mfyndhucfpzjxzaezny', 'niejb', 'edqpnqt', '', 'v', 'kivdvealqadzdatziujdnvymmia', 'lvznmgwtlwevcxyfbkqc', 'vghzge', 'gh', 'tbzle', 'vjiqponbvgvguuhqdijbdeu', 'mshlyabasgukboknbqgmmmj', 'kjk', 'abkeftpnpvdkfyrxbrihyfxcfxablv']) from system.numbers limit 10; +select [0, 0, 0, 0, 9, 0, 7, 0, 9, 8, 0, 0] = multiSearchAllPositions(materialize('oaghnutqsqcnwvmzrnxgacsovxiko'), ['upien', 'moqszigvduvvwvmpemupvmmzctbrbtqggrk', 'igeiaccvxejtfvifrmimwpewllcggji', 'wnwjorpzgsqiociw', 'sq', 'rkysegpoej', 'tqsqcnwvmzrnxgacsovxiko', 'ioykypvfjufbicpyrpfuhugk', 's', 'qsqcnwvmzrnxgacsov', 'hhbeisvmpnkwmimgyfmybtljiu', 'kfozjowd']) from system.numbers limit 10; +select [0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 20, 5, 0, 0, 14, 1, 1, 0, 0] = multiSearchAllPositions(materialize('wbjfsevqspsvbwlzrkhcfuhxddbq'), ['ltgjbz', 's', 'qdfnmggupdfxjfnmvwyrqopxtxf', 'sazlkmaikcltojbzbmdfddu', 'yzanifqxufyfwrxzkhngoxkrrph', 'iwskc', 'xkykshryphyfnwcnmjfqjrixykmzmwm', 'wwpenztbhkdbwidfkypqlxivsjs', 'rlkevy', 'qigywtkezwd', '', 'c', 'sevqspsvbwlzrk', 'gwg', 'iduhrjsrtodxdkjykjoghtjtvplrscitxnvt', 'wlzrkhcfuhxddb', '', 'wbjfsev', 'zytusrcvqbazb', 'tec']) from system.numbers limit 10; +select [0, 1, 5, 0, 6, 8, 0, 3, 2, 0, 0, 9, 0, 4, 0, 0] = multiSearchAllPositions(materialize('mxiifpzlovgfozpgirtio'), ['srullnscuzenzhp', '', 'f', 'apetxezid', 'pzlovgf', 'lo', 'ecbmso', 'i', 'xiifpzlovgfozpgir', 'bnefwypvctubvslsesnctqspdyctq', 'tdncmgbikboss', 'o', 'zmgobcarxlxaho', 'ifpzlovgfozpg', 'dwmjqyylvsxzfr', 'pxhrecconce']) from system.numbers limit 10; +select [0, 0, 0, 2, 0, 0, 2, 0, 8, 0, 0, 0, 7, 0, 0, 0, 21, 3, 1, 8] = multiSearchAllPositions(materialize('jtvnrdpdevgnzexqdrrxqgiujexhm'), ['ibkvzoqmiyfgfztupug', 'iqzeixfykxcghlbgsicxiywlurrgjsywwk', 'vzdffjzlqxgzdcrkgoro', 'tvnrdpdevgnzexqdr', 'nqywueahcmoojtyjlhfpysk', 'iqalixciiidvrtmpzozfb', 'tv', 'rxkfeasoff', 'devgnzexqdrrxqgiuj', 'kvvuvyplboowjrestyvdfrxdjjujvkxy', 'shkhpneekuyyqtxfxutvz', 'yy', 'pdevgnz', 'nplpydxiwnbvlhoorcmqkycqisi', 'jlkxplbftfkxqgnqnaw', 'qdggpjenbrwbjtorbi', 'qgiuje', 'vnrdpd', '', 'dev']) from system.numbers limit 10; +select [14, 0, 0, 7, 20, 6, 0, 13, 0, 0, 20, 0, 20, 2, 0, 8, 2, 11, 2, 0] = multiSearchAllPositions(materialize('asjwxabjrwgcdviokfaoqvqiafz'), ['v', 'zqngytligwwpzxhatyayvdnbbj', 'gjicovfzgbyagiirn', 'bjrwgcdviok', 'oqvqiafz', 'abjrwgc', 'wulrpfzh', 'dviokfao', 'esnchjuiufjadqmdtrpcd', 'tkodqzsjchpaftk', 'oqvq', 'eyoshlrlvmnqjmtmloryvg', 'oqv', 'sjwx', 'uokueelyytnoidplwmmox', 'jrwgcdviokfaoqvqiaf', 'sjwxabjrwgcdviokfaoqvqi', 'gcdviokfa', 'sjwxab', 'zneabsnfucjcwauxmudyxibnmxzfx']) from system.numbers limit 10; +select [0, 16, 8, 0, 10, 0, 0, 0, 0, 1, 0, 6, 0, 1, 0, 4, 0, 6, 0, 0] = multiSearchAllPositions(materialize('soxfqagiuhkaylzootfjy'), ['eveprzxphyenbrnnznpctvxn', 'oo', 'iuhka', 'ikutjhrnvzfb', 'h', 'duyvvjizristnkczgwj', 'ihfrp', 'afpyrlj', 'uonp', 'soxfqagiuhkaylzootfjy', 
'qeckxkoxldpzzpmkbvcex', 'agiuhkaylzo', 'tckcumkbsgrgqjvtlijack', '', 'fnfweqlldcdnwfaohqohp', 'fqagiuhkayl', 'pqnvwprxwwrcjqvfsbfimwye', 'agi', 'ta', 'r']) from system.numbers limit 10; +select [3, 7, 1, 6, 0, 1, 0, 11, 0, 9, 17, 1, 18, 12] = multiSearchAllPositions(materialize('ladbcypcbcxahmujwezkvweud'), ['db', 'pcbcxahm', 'lad', 'ypcb', 'atevkzyyxhphtuekymhh', 'lad', 'mltjrwaibetrtwpfa', 'xahmujwezkvweud', 'dg', 'bcxahmujw', 'we', '', 'e', 'ahmujwezkvw']) from system.numbers limit 10; +select [6, 0, 11, 0, 7, 0, 0, 0, 6, 1, 0, 3, 0, 0, 0, 0] = multiSearchAllPositions(materialize('hhkscgmqzmuwltmrhtxnnzsxl'), ['gmqzmuwltmrh', 'qtescwjubeqhurqoqfjauwxdoc', 'uwltmrh', 'qlhyfuspwdtecdbrmrqcnxghhlnbmzs', 'm', 'kcsuocwokvohnqonnfzmeiqtomehksehwc', 'hoxocyilgrxxoek', 'nisnlmbdczjsiw', 'gmqz', '', 'cqzz', 'k', 'utxctwtzelxmtioyqshxedecih', 'ifsmsljxzkyuigdtunwk', 'ojxvxwdosaqjhrnjwisss', 'dz']) from system.numbers limit 10; +select [0, 0, 19, 7, 0, 0, 1, 0, 0, 12, 0, 0, 1, 0, 1, 1, 5, 0, 23, 8] = multiSearchAllPositions(materialize('raxgcqizulxfwivauupqnofbijxfr'), ['sxvhaxlrpviwuinrcebtfepxxkhxxgqu', 'cuodfevkpszuimhymxypktdvicmyxm', 'pqnof', 'i', 'ufpljiniflkctwkwcrsbdhvrvkizticpqkgvq', 'osojyhejhrlhjvqrtobwthjgw', '', 'anzlevtxre', 'ufnpkjvgidirrnpvbsndfnovebdily', 'fwivauupqnofbi', 'rywyadwcvk', 'ltnlhftdfefmkenadahcpxw', '', 'xryluzlhnsqk', 'r', '', 'cqizulxfwivauupqnofb', 'y', 'fb', 'zulxfwivauupqnofbijxf']) from system.numbers limit 10; +select [4, 0, 0, 0, 0, 24, 1, 2, 0, 2, 0, 0, 8, 0] = multiSearchAllPositions(materialize('cwcqyjjodlepauupgobsgrzdvii'), ['q', 'yjppewylsqbnjwnhokzqtauggsjhhhkkkqsy', 'uutltzhjtc', 'pkmuptmzzeqhichaikwbggronli', 'erzgcuxnec', 'dvii', '', 'w', 'fkmpha', 'wcqyjjodlepauupgobsgrz', 'cbnmwirigaf', 'fcumlot', 'odlepauu', 'lthautlklktfukpt']) from system.numbers limit 10; +select [1, 1, 1, 1, 22, 0, 0, 8, 18, 15] = multiSearchAllPositions(materialize('vpscxxibyhvtmrdzrocvdngpb'), ['', '', '', '', 'n', 'agrahemfuhmftacvpnaxkx', 'dqqwvfsrqv', 'byhvtmrdzrocv', 'ocvdn', 'dzrocvdngpb']) from system.numbers limit 10; +select [1, 1, 1, 15, 10, 0, 0, 0, 0, 2] = multiSearchAllPositions(materialize('nfoievsrpvheprosjdsoiz'), ['', 'nfo', '', 'osjd', 'vheprosjdsoiz', 'az', 'blhvdycvjnxaipvxybs', 'umgxmpkvuvuvdaczkz', 'gfspmnzidixcjgjw', 'f']) from system.numbers limit 10; +select [0, 0, 2, 2, 0, 0, 0, 11, 10, 4, 9, 1, 6, 4, 0, 0] = multiSearchAllPositions(materialize('bdmfwdisdlgbcidshnhautsye'), ['uxdceftnmnqpveljer', 'xdnh', 'dmf', 'dmfwdisdlgbc', 'cpwnaijpkpyjgaq', 'doquvlrzhusjbxyqcqxvwr', 'llppnnmtqggyfoxtawnngsiiunvjjxxsufh', 'gbcidshnhau', 'lgbcids', 'f', 'dlgbc', 'bdmfwdisdlgbcids', 'disdlgbcidshnhautsy', 'fwdisdlgbcidshn', 'zfpbfc', 'triqajlyfmxlredivqiambigmge']) from system.numbers limit 10; +select [0, 0, 16, 0, 0, 0, 14, 6, 2, 1, 0, 0, 1, 0, 10, 12, 0, 0, 0, 0] = multiSearchAllPositions(materialize('absimumlxdlxuzpyrunivcb'), ['jglfzroni', 'wzfmtbjlcdxlbpialqjafjwz', 'yrun', 'fgmljkkp', 'nniob', 'fdektoyhxrumiycvkwekphypgti', 'zp', 'um', 'bsimu', '', 'yslsnfisaebuujltpgcskhhqcucdhb', 'xlaphsqgqsfykhilddctrawerneqoigb', '', 'pdvcfxdlurmegspidojt', 'd', 'xu', 'fdp', 'xjrqmybmccjbjtvyvdh', 'nvhdfatqi', 'neubuiykajzcrzdbvpwjhlpdmd']) from system.numbers limit 10; +select [0, 0, 0, 9, 0, 0, 1, 1, 1, 1] = multiSearchAllPositions(materialize('lvyenvktdnylszlypuwqecohy'), ['ihlsiynj', 'ctcnhbkumvbgfdclwjhsswpqyfrx', 'rpgqwkydwlfclcuupoynwrfffogxesvmbj', 'dnyl', 'coeqgdtbemkhgplprfxgwpl', 'dkbshktectbduxlcaptlzspq', 'l', 'lvyenvktdnylszlypuw', 'lvyenvk', '']) from 
system.numbers limit 10; +select [1, 0, 0, 0, 0, 1, 2, 22, 8, 17, 1, 13, 0, 0, 0, 0, 0, 5] = multiSearchAllPositions(materialize('wphcobonpgaqwgfenotzadgqezx'), ['', 'qeuycfhkfjwokxgrkaodqioaotkepzlhnrv', 'taehtytq', 'gejlcipocalc', 'poyvvvntrvqazixkwigtairjvxkgouiuva', '', 'phc', 'dg', 'npgaqwg', 'notzadgqe', '', 'wgfe', 'smipuxgvntys', 'qhrfdytbfeujzievelffzrv', 'cfmzw', 'hcywnyguzjredwjbqtwyuhtewuhzkc', 'tssfeinoykdauderpjyxtmb', 'obonpgaqwgfen']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 6, 6, 0, 0, 2, 0, 5, 2, 0, 6, 3] = multiSearchAllPositions(materialize('qvslufpsddtfudzrzlvrzdra'), ['jxsgyzgnjwyd', 'hqhxzhskwivpuqkjheywwfhthm', 'kbwlwadilqhgwlcpxkadkamsnzngms', 'fxunda', 'nlltydufobnfxjyhch', 'fpsddtfudzrzl', 'fp', 'ykhxjyqtvjbykskbejpnmbxpumknqucu', 'iyecekjcbkowdothxc', 'vslufpsddtfu', 'mjgtofkjeknlikrugkfhxlioicevil', 'uf', 'vslufpsdd', 'cxizdzygyu', 'fpsddtfudzrz', 'slufp']) from system.numbers limit 10; +select [12, 0, 0, 0, 0, 1, 6, 0, 1, 2] = multiSearchAllPositions(materialize('ydsbycnifbcforymknzfi'), ['forymkn', 'vgxtcdkfmjhc', 'ymugjvtmtzvghmifolzdihutqoisl', 'fzooddrlhi', 'bdefmxxdepcqi', '', 'cnif', 'ilzbhegpcnkdkooopaguljlie', '', 'dsbycnifbcforym']) from system.numbers limit 10; +select [0, 2, 4, 1, 1, 3, 0, 0, 0, 7] = multiSearchAllPositions(materialize('sksoirfwdhpdyxrkklhc'), ['vuixtegnp', 'ks', 'oirfwdhpd', 'sksoirf', 'skso', 'soi', 'eoxpa', 'vpfmzovgatllf', 'txsezmqvduxbmwu', 'fw']) from system.numbers limit 10; +select [2, 21, 8, 10, 6, 0, 1, 11, 0, 0, 21, 4, 29, 0] = multiSearchAllPositions(materialize('wlkublfclrvgixpbvgliylzbuuoyai'), ['l', 'ylzbuu', 'clr', 'rvgi', 'lf', 'bqtzaqjdfhvgddyaywaiybk', '', 'vgixpbv', 'ponnohwdvrq', 'dqioxovlbvobwkgeghlqxtwre', 'y', 'ublfclrvgix', 'a', 'eoxxbkaawwsdgzfweci']) from system.numbers limit 10; +select [0, 0, 2, 1, 1, 9, 1, 0, 0, 1] = multiSearchAllPositions(materialize('llpbsbgmfiadwvvsciak'), ['knyjtntotuldifbndcpxzsdwdduv', 'lfhofdxavpsiporpdyfziqzcni', 'lpbsbgmf', 'llpbsbgmfi', 'llpbsbgmfiadwvv', 'fia', '', 'uomksovcuhfmztuqwzwchmwvonk', 'ujbasmokvghmredszgwe', '']) from system.numbers limit 10; +select [3, 0, 0, 0, 6, 1, 7, 0, 2, 1, 1, 0, 7, 0, 1, 0, 1, 1, 5, 11] = multiSearchAllPositions(materialize('hnmrouevovxrzrejesigfukkmbiid'), ['m', 'apqlvipphjbui', 'wkepvtnpu', 'amjvdpudkdsddjgsmzhzovnwjrzjirdoxk', 'ue', '', 'evov', 'qoplzddxjejvbmthnplyha', 'nmrouevovxrz', '', 'hnmrouev', 'hnzevrvlmxnjmvhitgdhgd', 'evovxrzrejesig', 'yvlxrjaqdaizishkftgcuikt', '', 'buyrmbkvqukochjteumqchrhxgtmuorsdgzlfn', '', 'hnmrouevov', 'ouevovx', 'xr']) from system.numbers limit 10; +select [0, 13, 0, 0, 0, 0, 0, 14, 0, 0, 1, 12, 0, 1] = multiSearchAllPositions(materialize('uwfgpemgdjimotxuxrxxoynxoaw'), ['uzcevfdfy', 'otxuxrxxoynxoa', 'xeduvwhrogxccwhnzkiolksry', 'pxdszcyzxlrvkymhomz', 'vhsacxoaymycvcevuujpvozsqklahstmvgt', 'zydsajykft', 'vdvqynfhlhoilkhjjkcehnpmwgdtfkspk', 'txuxrx', 'slcaryelankprkeyzaucfhe', 'iocwevqwpkbrbqvddaob', 'uwfg', 'motxuxrxx', 'kpzbg', '']) from system.numbers limit 10; +select [1, 1, 0, 6, 6, 0, 0, 0, 8, 0, 8, 14, 1, 5, 6, 0, 0, 1] = multiSearchAllPositions(materialize('epudevopgooprmhqzjdvjvqm'), ['ep', 'epudevopg', 'tlyinfnhputxggivtyxgtupzs', 'vopgoop', 'v', 'hjfcoemfk', 'zjyhmybeuzxkuwaxtcut', 'txrxzndoxyzgnzepjzagc', 'pgooprmhqzj', 'wmtqcbsofbe', 'pgo', 'm', '', 'evopgooprmhqzjdv', 'vopgooprmhqzjdv', 'gmvqubpsnvrabixk', 'wjevqrrywloomnpsjbuybhkhzdeamj', '']) from system.numbers limit 10; +select [15, 4, 4, 0, 0, 1, 1, 0, 0, 0, 0, 20, 0, 10, 1, 1, 0, 2, 4, 3] = 
multiSearchAllPositions(materialize('uogsfbdefogwnekfoeobtkrgiceksz'), ['kfoeobtkrgice', 'sfbd', 'sfbdefogwn', 'zwtenhiqavmqoolkvjiqjfb', 'vnjkshyvpwhrauackplqllakcjyamvsuokrxbfv', 'uog', '', 'qtzuhdcdymytgtscvzlzswdlrqidreuuuqk', 'vlridmjlbxyiljpgxsctzygzyawqqysf', 'xsnkwyrmjaaaryvrdgtoshdxpvgsjjrov', 'fanchgljgwosfamgscuuriwospheze', 'btkrgicek', 'ohsclekvizgfoatxybxbjoxpsd', 'ogwnekfoeobtkr', '', '', 'vtzcobbhadfwubkcd', 'og', 's', 'gs']) from system.numbers limit 10; +select [0, 0, 5, 1, 0, 5, 1, 6, 0, 1, 9, 0, 1, 1] = multiSearchAllPositions(materialize('aoiqztelubikzmxchloa'), ['blc', 'p', 'ztelubikzmxchlo', 'aoiqztelubi', 'uckqledkyfboolq', 'ztelubikzmxch', 'a', 'telubikzm', 'powokpdraslpadpwvrqpbb', 'aoiqztelu', 'u', 'kishbitagsxnhyyswn', '', '']) from system.numbers limit 10; +select [5, 11, 0, 0, 0, 5, 0, 0, 0, 1, 16, 0, 0, 0, 0, 0] = multiSearchAllPositions(materialize('egxmimubhidowgnfziwgnlqiw'), ['imubhidowgnfzi', 'dowgnf', 'yqpcpfvnfpxetozraxbmzxxcvtzm', 'xkbaqvzlqjyjoiqourezbzwaqkfyekcfie', 'jjctusdmxr', 'imubhi', 'zawnslbfrtqohnztmnssxscymonlhkitq', 'oxcitennfpuoptwrlmc', 'ac', 'egxmi', 'fziwgn', 'rt', 'fuxfuctdmawmhxxxg', 'suulqkrsfgynruygjckrmizsksjcfwath', 'slgsq', 'zcbqjpehilwyztumebmdrsl']) from system.numbers limit 10; +select [20, 0, 9, 0, 0, 14, 0, 5, 8, 3, 0, 0, 0, 4] = multiSearchAllPositions(materialize('zczprzdcvcqzqdnhubyoblg'), ['obl', 'lzrjyezgqqoiydn', 'vc', 'nbvwfpmqlziedob', 'pnezljnnujjbyviqsdpaqkkrlogeht', 'dn', 'irvgeaq', 'rzdcvcqzqdnh', 'cvcqzqdnh', 'zprzdcv', 'wvvgoexuevmqjeqavsianoviubfixdpe', 'aeavhqipsvfkcynyrtlxwpegwqmnd', 'blckyiacwgfaoarfkptwcei', 'prz']) from system.numbers limit 10; +select [2, 1, 1, 9, 10, 5, 0, 0, 0, 2, 9, 7, 9, 0, 1, 9, 7, 0] = multiSearchAllPositions(materialize('mvovpvuhjwdzjwojcxxrbxy'), ['vo', '', '', 'jwdz', 'wdzj', 'pvu', 'ocxprubxhjnji', 'phzfbtacrg', 'jguuqhhxbrwbo', 'vovpvuhjwd', 'jw', 'u', 'jwdzjwojcx', 'nlwfvolaklizslylbvcgicbjw', '', 'jwd', 'uhjwdz', 'bbcsuvtru']) from system.numbers limit 10; +select [2, 0, 21, 0, 0, 0, 3, 0, 0, 0, 0, 10, 1, 18] = multiSearchAllPositions(materialize('nmdkwvafhcbipwoqtsrzitwxsnabwf'), ['m', 'ohlfouwyucostahqlwlbkjgmdhdyagnihtmlt', 'itwx', 'jjkyhungzqqyzxrq', 'abkqvxxpu', 'lvzgnaxzctaarxuqowcski', 'dkwvafhcb', 'xuxjexmeeqvyjmpznpdmcn', 'vklvpoaakfnhtkprnijihxdbbhbllnz', 'fpcdgmcrwmdbflnijjmljlhtkszkocnafzaubtxp', 'hmysdmmhnebmhpjrrqpjdqsgeuutsj', 'cbipwoqtsrzitwxsna', 'nm', 'srzitwx']) from system.numbers limit 10; +select [17, 5, 0, 13, 0, 0, 10, 1, 0, 19, 10, 8, 0, 4] = multiSearchAllPositions(materialize('gfvndbztroigxfujasvcdgfbh'), ['asvcdgf', 'dbztroigxfujas', 'pr', 'xfujas', 'nxwdmqsobxgm', 'wdvoepclqfhy', 'oigxfu', '', 'flgcghcfeiqvhvqiriciywbkhrxraxvneu', 'vcd', 'oigxfu', 'troigxfuj', 'gbnyvjhptuehkefhwjo', 'ndbz']) from system.numbers limit 10; +select [0, 14, 1, 0, 0, 1, 1, 11, 0, 8, 6, 0, 3, 19, 7, 0] = multiSearchAllPositions(materialize('nofwsbvvzgijgskbqjwyjmtfdogzzo'), ['kthjocfzvys', 'skbqjwyjmtfdo', 'nof', 'mfapvffuhueofutby', 'vqmkgjldhqohipgecie', 'nofwsbv', '', 'ijgs', 'telzjcbsloysamquwsoaso', 'vzgijgskbqjwyjmt', 'bvvzgijgskbqjwyjmtfd', 'hdlvuoylcmoicsejofcgnvddx', 'fwsbvvzgijgskb', 'wyjm', 'vvzgijg', 'fwzysuvkjtdiufetvlfwf']) from system.numbers limit 10; +select [10, 2, 13, 0, 0, 0, 2, 0, 9, 2, 4, 1, 1, 0, 1, 6] = multiSearchAllPositions(materialize('litdbgdtgtbkyflsvpjbqwsg'), ['tbky', 'itdbgdtgtb', 'yflsvpjb', 'ikbylslpoqxeqoqurbdehlroympy', 'hxejlgsbthvjalqjybc', 'sontq', 'itdbgd', 'ozqwgcjqmqqlkiaqppitsvjztwkh', 'gtbkyf', 'itdbgdtgtbkyfls', 'dbg', 
'litdb', '', 'qesbakrnkbtfvwu', 'litd', 'g']) from system.numbers limit 10; +select [0, 0, 1, 1, 5, 0, 8, 12, 0, 2, 0, 7, 0, 6] = multiSearchAllPositions(materialize('ijzojxumpvcxwgekqimrkomvuzl'), ['xirqhjqibnirldvbfsb', 'htckarpuctrasdxoosutyxqioizsnzi', '', '', 'jxu', 'dskssv', 'mpvcxwgekqi', 'xwgek', 'qsuexmzfcxlrhkvlzwceqxfkyzogpoku', 'jzojx', 'carjpqihtpjniqz', 'umpvcxwgekq', 'krpkzzrxxtvfhdopjpqcyxfnbas', 'xumpvcxwg']) from system.numbers limit 10; +select [0, 0, 0, 6, 0, 8, 0, 2, 0, 0, 0, 0, 14, 0, 0, 1, 1, 0, 0, 0] = multiSearchAllPositions(materialize('zpplelzzxsjwktedrrtqhfmoufv'), ['jzzlntsokwlm', 'cb', 'wuxotyiegupflu', 'lzzxsjwkte', 'owbxgndpcmfuizpcduvucnntgryn', 'zxsjwktedrrtqhf', 'kystlupelnmormqmqclgjakfwnyt', 'pple', 'lishqmxa', 'mulwlrbizkmtbved', 'uchtfzizjiooetgjfydhmzbtmqsyhayd', 'hrzgjifkinwyxnazokuhicvloaygeinpd', 'tedrrt', 'shntwxsuxux', 'evrjehtdzzoxkismtfnqp', 'z', '', 'nxtybut', 'vfdchgqclhxpqpmitppysbvxepzhxv', 'wxmvmvjlrrehwylgqhpehzotgrzkgi']) from system.numbers limit 10; + +select [15, 19, 0, 0, 15, 0, 0, 1, 2, 6] = multiSearchAllPositionsUTF8(materialize('зжерхмчсйирдчрришкраоддцфгх'), ['ришкра', 'раоддц', 'фттиалусгоцжлтщзвумрдчи', 'влййи', 'ришкра', 'цгфжуцгивй', 'ккгжхрггчфглх', 'з', 'жерхмчсйи', 'мчсйирдчрришкраоддц']) from system.numbers limit 10; +select [0, 0, 0, 1, 4, 0, 14, 0, 1, 8, 8, 9, 0, 0, 4, 0] = multiSearchAllPositionsUTF8(materialize('етвхйчдобкчукхпщлмжпфайтфдоизщ'), ['амфшужперосрфщфлижйййжжжй', 'ххкбщшзлмщггтшцпсдйкдшйвхскемц', 'ергйплгпнглккшкарещимгапхг', '', 'хйчдо', 'вввбжовшзйбгуоиждепйабаххеквщижтйиухос', 'хпщл', 'жфуомщуххнедзхищнгхрквлпмзауеегз', 'етвхй', 'о', 'о', 'бк', 'цфецккифж', 'аизлокл', 'х', 'слщгеивлевбчнчбтшгфмжрфка']) from system.numbers limit 10; +select [0, 0, 1, 2, 0, 0, 14, 0, 3, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('йбемооабурнирйофшдгпснж'), ['гпфцл', 'нчбперпмцкввдчсщвзйрдфнф', '', 'бем', 'ч', 'жгш', 'йофшдгпснж', 'шасгафчг', 'емооабур', 'пиохцжццгппщчопзйлмуотз', 'рпдомнфвопхкшешйишумбацтл', 'нисиийфррбдоц']) from system.numbers limit 10; +select [1, 18, 12, 0, 0, 1, 1, 3, 7, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('гсщнфийтфзжцйпфбйалущ'), ['', 'алущ', 'цйпфбйал', 'цвбфцйвсвлицсчнргпцнр', 'х', 'гс', '', 'щн', 'й', 'дгйрвцщтп', 'уитвквоффвцхфишрлерйцувф', 'кфтййлпнзжчижвглзкижн']) from system.numbers limit 10; +select [14, 0, 5, 5, 0, 6, 0, 16, 0, 0] = multiSearchAllPositionsUTF8(materialize('ефщнйнуйебнснлрцгкеитбг'), ['лрцгкеитб', 'епклжфцпнфопе', 'йнуйебн', 'й', 'тлт', 'нуйебнснлрцгкеит', 'глечршгвотумкимтлм', 'цгк', 'щгйчой', 'звкцкчк']) from system.numbers limit 10; +select [0, 1, 18, 6, 0, 3, 0, 0, 25, 0, 0, 1, 16, 5, 1, 7, 0, 0] = multiSearchAllPositionsUTF8(materialize('пумгмцшмжштсшлачсжарерфиозиг'), ['чсуубфийемквмоотванухмбрфхжоест', '', 'жар', 'цшмжш', 'жртещтинтвпочнкдткцза', 'м', 'адзгтбаскщгдшжл', 'штфжшллезпджигщфлезфгзчайанхктицштйй', 'о', 'етадаарйсцейдошшцечхзлшлрртсрггцртспд', 'зтвшалрпфлщбцд', 'пу', 'ч', 'мцшмжштсшлачсж', '', 'шмжшт', 'ещтжшйтчзчаноемрбц', 'тевбусешйрйчшзо']) from system.numbers limit 10; +select [7, 10, 0, 0, 0, 0, 1, 12, 9, 2, 0, 0, 0, 4, 1, 1, 0, 6] = multiSearchAllPositionsUTF8(materialize('дупгвндвйжмаузнллнзл'), ['двйжмаузн', 'жмаузнлл', 'емйжркоблновцгпезрдавкбелцщста', 'щзкгм', 'лебрпцрсутшриащгайвц', 'лзнмл', 'д', 'ауз', 'йжмау', 'упгвндвйж', 'жщсббфвихг', 'всигсеигцбгаелтчкирлнзшзцжещнс', 'рмшиеиесрлщципщхкхтоцщчйоо', 'гвн', '', '', 'йадеоцлпшпвщзещзкхйрейопмажбб', 'ндв']) from system.numbers limit 10; +select [0, 0, 0, 8, 3, 10, 22, 0, 
13, 11, 0, 1, 18, 0, 1, 0] = multiSearchAllPositionsUTF8(materialize('жшзфппавввслфцлнщшопкдшку'), ['саоткнхфодзаа', 'кйхванкзаисйбврщве', 'бчоуучватхфукчф', 'вввслфц', 'з', 'вслфцлнщшопк', 'дшк', 'из', 'фцл', 'с', 'зртмцтпощпщхк', 'жшзфппавввслфц', 'шопк', 'збтхрсдтатхпрзлхдооощифачхчфн', '', 'жщшийугз']) from system.numbers limit 10; +select [2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 21, 0, 6, 0] = multiSearchAllPositionsUTF8(materialize('пчботухвгдчекмжндбоожш'), ['чботухвгдчекмжндб', 'от', 'гвсжжйлбтщчучнхсмдйни', 'жцжзмшлибшефуоуомпацбщщу', 'онхфлуцйлхтбмц', 'йтепжу', 'хтдрпвкщрли', 'аддайф', 'нхегщккбфедндоацкиз', 'йгкцзртфжгв', 'буелрщмхйохгибжндфшщвшрлдччрмфмс', 'цщцтзфнщ', 'уч', 'пчб', 'жш', 'пнфббтшйгхйрочнлксщпгвжтч', 'ухвг', 'лсцппузазщрйхймщбзоршощбзленхп']) from system.numbers limit 10; +select [0, 0, 4, 11, 0, 0, 0, 0, 0, 11, 2, 4, 6, 0, 0, 1, 2, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('тжрмчпваухрхуфбгнифгбопфт'), ['дпмгкекщлнемссаицщпащтиуцхкфчихтц', 'акйиуоатунтчф', 'мчпва', 'рхуфбгнифгб', 'кнаишж', 'пчвотенеафкухжцешбцхг', 'опеа', 'ушчадфтчхечеуркбтел', 'ашшптаударчжчмвалтдхкимищпф', 'рхуфбгниф', 'ж', 'мчпваухрхуфбгнифг', 'пваухрху', 'зргачбтцдахвймсбсврбндзтнущхвп', 'асбфцавбгуолг', 'тж', 'жрмчпваухрх', 'мрвзцгоб', 'чрцснчсдхтзжвнздзфцвхеилишдбж', 'кчт']) from system.numbers limit 10; +select [0, 2, 4, 0, 6, 0, 0, 0, 0, 19, 7, 1, 0, 1, 0, 0, 2, 10, 0, 1] = multiSearchAllPositionsUTF8(materialize('опрурпгабеарушиойцрхвбнсщ'), ['йошуоесдщеж', 'пр', 'урпгабеарушиой', 'хщиаршблашфажщметчзи', 'пгабеарушиойцрхвб', 'щцбдвц', 'еечрззвкожзсдурйщувмцйшихдц', 'офхачгсзашфзозрлба', 'айдфжджшжлрргмабапткбцпиизигдтс', 'рх', 'габ', '', 'цнкдбфчщшмчулврбцчакщвзхлазфа', '', 'екбтфпфилсаванхфкмчнпумехиищди', 'епвщхаклшомвцжбф', 'прурпгабе', 'еарушиойцрхв', 'црвтгрзтитц', 'опрурпг']) from system.numbers limit 10; +select [0, 10, 1, 0, 0, 0, 0, 0, 10, 0, 15, 2] = multiSearchAllPositionsUTF8(materialize('угпщлзчжшбзвууцшатпщцр'), ['цоуарцжсз', 'бз', '', 'пщфтзрч', 'лфуипмсдмнхнгйнтк', 'айжунцйбйцасчфдхй', 'щдфщлцптплсачв', 'грв', 'бзвууц', 'бумййшдшфашцгзфвчвзвтсувнжс', 'цшатпщ', 'гпщлзчжшб']) from system.numbers limit 10; +select [0, 15, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 0, 0] = multiSearchAllPositionsUTF8(materialize('цнлеодлмдцдйснитвдчтхжизв'), ['ивкчсзшугоцжчохщцабл', 'итвдчт', 'кнх', '', 'одлм', 'ктшфзбщзцуймагсоукщщудвуфо', 'ххеаефудгчхр', 'одлмдцдйснитвдчт', 'умцлпкв', 'зщсокйтцзачщафвбповжгнлавсгйг', 'бкибм', '', 'охсоихнцчцшевчеележтука', 'фаийхгжнсгищгщц']) from system.numbers limit 10; +select [0, 0, 0, 2, 0, 0, 0, 0, 3, 2, 3, 6, 0, 0, 0, 12, 4, 1] = multiSearchAllPositionsUTF8(materialize('бгдбувдужщвоошлтчрбй'), ['щвбаиф', 'итчднесжкчжвпжйвл', 'мм', 'г', 'хктзгтзазфгщшфгбеулцмдмдбдпчзх', 'сфуак', 'злйфцощегзекщб', 'фшлдтолрщфзжчмих', 'дбувдужщ', 'гдб', 'дбувдужщ', 'в', 'лчищкечнжщисцичбнзшмулпмлп', 'чжцсгмгфвлиецахзнрбмщин', 'обпжвй', 'о', 'бувдужщвоош', '']) from system.numbers limit 10; +select [0, 2, 5, 3, 2, 0, 1, 0, 0, 4, 2, 0, 0, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('шсушлорзфжзудбсейенм'), ['чнзпбновтршеумбвщчлх', 'су', 'лорзфж', 'ушлорзфжзудб', 'сушлорзфжзудбсейенм', 'ткдрхфнб', '', 'пщд', 'чбдцмщ', 'шлорзфж', 'су', 'сккигркедчожжемгнайвйчтдмхлтти', 'мц', 'пхнхрхйцйсйбхчлктз', 'иафжстлйфцр', 'алщщлангнбнйхлшлфшйонщек']) from system.numbers limit 10; +select [12, 1, 0, 5, 0, 10, 1, 0, 7, 4, 0, 1, 12, 1, 1, 1, 0, 1, 15, 0] = multiSearchAllPositionsUTF8(materialize('ощзллчубоггцвжриуардрулащйпу'), ['цвжр', '', 'нмзкаиудзтиффззшзканжвулт', 
'лчубоггцвжриуардрулащйпу', 'чтцлзшуижолибаоххвшихбфжйхетивп', 'ггцвжри', '', 'йдгнвс', 'у', 'л', 'зпщнжуойдлдвхокцжнзйсйзе', '', 'цв', '', '', '', 'ехлцзгвф', '', 'риу', 'уйжгтжноомонгщ']) from system.numbers limit 10; +select [0, 12, 13, 20, 0, 1, 0, 0, 3, 4] = multiSearchAllPositionsUTF8(materialize('цбкифйтшузажопнжщарбштвдерзтдш'), ['щлмлижтншчсмксгтнсврро', 'жопнжщарбштвд', 'опнжщарб', 'бштвдерзтд', 'пуфслейщбкжмпнш', 'ц', 'маве', 'кмйхойрдлшцхишдтищвйбцкщуигваещгтнхйц', 'кифй', 'и']) from system.numbers limit 10; +select [0, 6, 0, 0, 0, 8, 0, 3, 6, 0] = multiSearchAllPositionsUTF8(materialize('еачачгбмомоххкгнвштггпчудл'), ['ндзчфчвжтцщпхщуккбеф', 'г', 'рткнфвчтфннхлжфцкгштймгмейжй', 'йчннбщфкщф', 'лсртщиндшшкичзррущвдйвнаркмешерв', 'момоххк', 'рфафчмсизлрхзуа', 'ч', 'гбмомоххкгнвштг', 'валжпошзбгзлвевчнтз']) from system.numbers limit 10; +select [0, 0, 10, 0, 8, 13, 0, 0, 19, 15, 3, 1] = multiSearchAllPositionsUTF8(materialize('зокимчгхухшкшмтшцчффвззкалпва'), ['цалфжажщщширнрвтпвмщжннрагвойм', 'оукзрдцсадешжмз', 'хшкшмтшцч', 'ауилтсаомуркпаркбцркугм', 'хухшкшмтшцчффв', 'шмтшцч', 'зщгшпцхзгцншднпеусмтжбцшч', 'щлраащсйлщрд', 'ффвзз', 'тшцчффвззкалпв', 'кимчгхухшкш', '']) from system.numbers limit 10; +select [0, 0, 1, 0, 6, 0, 6, 0, 5, 0, 13, 0, 0, 6] = multiSearchAllPositionsUTF8(materialize('йдйндиибщекгтчбфйдредпхв'), ['тдршвтцихцичощнцницшдхйбогбчубие', 'акппакуцйсхцдххнотлгирввоу', '', 'улщвзхохблтксчтб', 'и', 'ибейзчшклепзриж', 'иибщекгт', 'шидббеухчпшусцнрз', 'диибщекгтчбфйд', 'дейуонечзйзлдкшщрцйбйклччсцуй', 'тч', 'лшицлшме', 'чйнжчоейасмрщегтхвйвеевбма', 'ии']) from system.numbers limit 10; +select [15, 3, 3, 2, 0, 11, 0, 0, 0, 2, 0, 4, 0, 1, 1, 3, 0, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('нхгбфчшджсвхлкхфвтдтлж'), ['хфвтдтлж', 'гбфчшд', 'гбфчш', 'х', 'ачдгбккжра', 'вхлк', 'мщчвещлвшдщпдиимлшрвнщнфсзгщм', 'жчоббгшзщлгеепщжкчецумегпйчт', 'жжд', 'хг', 'мтсааролшгмоуйфйгщгтрв', 'бфчшд', 'чейрбтофпшишгуасоодлакчдф', 'н', 'нхгбфч', 'гбф', 'гдежсх', 'йифжацзгжбклх', 'ещпзущпбаолплвевфиаибшйубйцсзгт', 'жезгчжатзтучжб']) from system.numbers limit 10; +select [0, 10, 1, 0, 0, 0, 4, 0, 13, 1, 12, 1, 0, 6] = multiSearchAllPositionsUTF8(materialize('акбдестрдшерунпвойзв'), ['нркчх', 'шерунп', '', 'зжвахслфббтоиоцрзаззасгнфчх', 'шлжмдг', 'тлйайвцжчсфтцйрчосмижт', 'дестрдшерунп', 'мвамйшцбдщпчлрщд', 'у', 'акбдестрд', 'рунпвойз', '', 'айздцоилсйшцфнчтхбн', 'с']) from system.numbers limit 10; +select [1, 0, 0, 3, 2, 1, 0, 0, 1, 10, 7, 0, 5, 0, 8, 4, 1, 0, 8, 1] = multiSearchAllPositionsUTF8(materialize('кйхпукаеуддтйччхлнпсуклрф'), ['кйхпукаеуддтйччхл', 'йатлрйкстлхфхз', 'фгихслшкж', 'хпу', 'йхпукаеу', '', 'сруакбфоа', 'оажуз', 'кйхпукаеуддтйччх', 'ддтйччхлн', 'аеуддтйччхл', 'тмажиойщтпуцглхфишеиф', 'укаеуддтйччхлнпс', 'ретифе', 'еуддтйччхлнпсуклр', 'пукаеуд', 'кйхпу', 'таппфггвджлцпжшпишбпциуохсцх', 'еуд', '']) from system.numbers limit 10; +select [2, 3, 3, 16, 5, 13, 0, 0, 0, 18, 0, 6, 0, 16, 0, 10, 3, 0] = multiSearchAllPositionsUTF8(materialize('плврйщовкзнбзлбжнсатрцщщучтйач'), ['лврйщовкзнбзлбж', 'врйщовкзнбзлбжнса', 'врйщовкзнбз', 'жнсатрцщщучтйач', 'йщовкзнбзлбжнсатрцщщуч', 'злбжнсатрцщ', 'ввтбрдт', 'нжйапойг', 'ннцппгперхйвдхоеожупйебочуежбвб', 'сатрцщщу', 'деваийтна', 'щ', 'вкжйгкужжгтевлцм', 'жнс', 'датг', 'знбзлбжнсатрцщщучтйа', 'врйщовк', 'оашмкгчдзщефм']) from system.numbers limit 10; +select [3, 1, 19, 1, 0, 0, 0, 0, 11, 3, 0, 0] = multiSearchAllPositionsUTF8(materialize('фчдеахвщжхутхрккхасвсхепщ'), ['деах', '', 'свсхепщ', '', 'анчнсржйоарвтщмрж', 
'нечбтшщвркгд', 'вштчцгшж', 'з', 'у', 'деахвщ', 'ххкцжрвзкжзжчугнфцшуиаклтмц', 'фцкжшо']) from system.numbers limit 10; +select [16, 0, 0, 1, 8, 14, 0, 12, 12, 5, 0, 0, 16, 0, 11, 0] = multiSearchAllPositionsUTF8(materialize('щмнжчввбжцчммчшсрхйшбктш'), ['срхйшбк', 'йлзцнржчууочвселцхоучмщфчмнфос', 'еижлафатшхщгшейххжтубзвшпгзмзцод', '', 'бжцчммчшсрхй', 'чшсрхй', 'влемчммйтителщвзган', 'ммч', 'ммчшсрх', 'чввбж', 'нобзжучшошмбщешлхжфгдхлпнгпопип', 'цгт', 'срхйш', 'лкклмйжтеа', 'чммчшсрхйшбктш', 'йежффзнфтнжхфедгбоахпг']) from system.numbers limit 10; +select [1, 12, 9, 5, 1, 0, 6, 3, 0, 1] = multiSearchAllPositionsUTF8(materialize('кжнщсашдзитдмщцхуоебтфжл'), ['', 'дмщцхуоебт', 'зитдмщцхуоебт', 'сашдзитдмщцхуое', 'кжнщ', 'тхкйтшебчигбтмглшеужззоббдилмдм', 'ашдзитдмщцхуоебтф', 'нщсашдз', 'аузщшр', 'кжнщсашдз']) from system.numbers limit 10; +select [2, 0, 0, 0, 1, 0, 2, 0, 0, 17, 0, 8, 7, 14, 0, 0, 0, 7, 9, 23] = multiSearchAllPositionsUTF8(materialize('закуфгхчтшивзчжаппбжнтслщввущ'), ['а', 'днойвхфрммтж', 'внтлжрхзрпчбтуркшдатннглечг', 'ахиеушжтфкгцщтзхмжнрхдшт', '', 'тцчгрззржмдшйщфдцрбшжеичч', 'а', 'ктиечцпршнфнбчуолипацчдсосцнлфаццм', 'аусрлхдцегферуо', 'ппбжнт', 'жкццуосгвп', 'чтшивзчжаппб', 'хчтшивзчжаппб', 'чжаппбжнтслщ', 'ччрлфдмлу', 'щзршффбфчзо', 'ущуймшддннрхзийлваежщухч', 'хчтши', 'тшивзчжаппбжнтсл', 'слщв']) from system.numbers limit 10; +select [1, 1, 9, 2, 0, 3, 7, 0, 0, 19, 2, 2, 0, 8] = multiSearchAllPositionsUTF8(materialize('мвкзккупнокченйнзкшбдрай'), ['м', '', 'н', 'вкз', 'гдпертшйбртотунур', 'к', 'упнокченйнзкшбдр', 'нфшрг', 'нмждрйббдцлйемжпулдвкещхтжч', 'ш', 'вкзккупнокченйнзкшбдр', 'вкзккупнокченйнзкшбдрай', 'адииксвеавогтйторчтцвемвойшпгбнз', 'пнокченй']) from system.numbers limit 10; +select [15, 0, 0, 1, 12, 1, 0, 0, 1, 11, 0, 4, 0, 2] = multiSearchAllPositionsUTF8(materialize('отарлшпсабждфалпшножид'), ['лпшно', 'вт', 'лпжшосндутхорлиифжаакш', 'отарлшпсабждфалпшнож', 'дфал', '', 'бкцжучншжбгзжхщпзхирртнбийбтж', 'уцвцкшдзревпршурбсвйнемоетчс', '', 'ждфал', 'тлскхрнпмойчбцпфущфгф', 'рлшпсабж', 'нхнмк', 'тарлшпса']) from system.numbers limit 10; +select [0, 2, 0, 20, 0, 17, 18, 0, 1, 1, 21, 1, 0, 1, 6, 26] = multiSearchAllPositionsUTF8(materialize('ачйвцштвобижнзжнчбппйеабтцнйн'), ['сзхшзпетншйисщкшрвйшжуогцвбл', 'чйвцштво', 'евз', 'пй', 'хуждапрахитйажрищуллйзвчт', 'чбппйе', 'бппйеабтцнйн', 'схш', 'а', 'ачйвцштвобижнзжнчбпп', 'йеабтцнй', '', 'ег', '', 'штвобижнзжнчбпп', 'цн']) from system.numbers limit 10; +select [1, 0, 0, 3, 4, 12, 0, 9, 0, 12, 0, 0, 8, 0, 10, 3, 4, 1, 1, 9] = multiSearchAllPositionsUTF8(materialize('жмхоужежйуфцзеусеоднчкечфмемба'), ['', 'идосйксзнщйервосогф', 'тхмсйлвкул', 'хоужежйуф', 'оужежйуфцзеусеоднчкечфм', 'цзеусеоднчкеч', 'бецвдиубххвхйкажуурщщшщфбзххт', 'йуфцзеусеодн', 'мглкфтуеайсржисстнпкгебфцпа', 'цзеусео', 'уехцфучецчгшйиржтсмгхакчшввохочжпухс', 'дчвмсбткзталшбу', 'жйуфцзеусеоднчке', 'ччшщтдбпвчд', 'уфцзеусеоднчкечфмем', 'хоужежйуфцзеусеоднчкечф', 'оуже', '', 'жмхоужежйуфцзеу', 'й']) from system.numbers limit 10; +select [0, 0, 0, 3, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 0, 0, 0, 6] = multiSearchAllPositionsUTF8(materialize('лшпцхкмтресзпзйвцфрз'), ['енрнцепацлщлблкццжсч', 'ецжужлуфаееоггрчохпчн', 'зхзнгасхебнаейбддсфб', 'пцхкмтресзпзйв', 'фчетгеодщтавиииухцундпнхлчте', 'шшгсдошкфлгдвкурбуохзчзучбжйк', 'мцщщцп', 'рх', '', 'зйошвщцгхбж', '', 'ввлпнамуцвлпзеух', '', 'шпцхкмтре', 'маабтруздрфйпзшлсжшгож', 'фдчптишмштссщшдшгх', 'оллохфпкаем', 'кмтресзпз']) from system.numbers limit 10; +select [2, 5, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 1, 12, 0, 
0, 0, 4, 8] = multiSearchAllPositionsUTF8(materialize('есипзсвшемлхчзмйрсфз'), ['с', 'з', 'пщчсмаиахппферзжбпвиибаачй', 'гтщкзоиежав', 'свшемлхчзм', 'шийанбке', 'зхе', 'авркудфаусзквкфффйцпзжщввенттб', 'ножцваушапиж', 'иизкежлщиафицкчщмалнпсащсднкс', 'вчмв', 'кщеурмуужжлшррце', '', '', 'х', 'алзебзпчеложихашжвхмйхрицн', 'тпзмумчшдпицпдшиаог', 'сулксфчоштаййзбзшкджббщшсей', 'пзсвшемлхчзм', 'ш']) from system.numbers limit 10; +select [0, 1, 2, 4, 0, 0, 14, 1, 13, 4, 0, 0, 1, 1] = multiSearchAllPositionsUTF8(materialize('сзиимонзффичвфжоеулсадону'), ['зфтшебтршхддмеесчд', '', 'зиимонзф', 'имон', 'езбдйшжичценлгршщшаумайаицй', 'птпщемтбмднацлг', 'фжоеулса', '', 'вфжоеулсадону', 'имонзфф', 'йщвдфдиркважгйджгжашарчучйххйднпт', 'дй', '', '']) from system.numbers limit 10; +select [12, 0, 24, 0, 9, 0, 1, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('ижсщщрзжфнгццпзкфбвезгбохлж'), ['ццпзкфбвез', 'ацррвхоптаоснулнжкщжел', 'охлж', 'тнсхбпшщнб', 'фнг', 'урйвг', '', 'цохс', 'щбйрйкжчмйзачуефч', 'афа']) from system.numbers limit 10; +select [9, 0, 0, 0, 1, 0, 7, 7, 0, 0, 1, 0, 7, 0, 0, 8, 0, 3, 0, 0] = multiSearchAllPositionsUTF8(materialize('рерфвирачйнашхрмцебфдйааеммд'), ['чйнашхрмцебфдйааеммд', 'сжщзснвкущлжплцзлизаомдизцнжлмййбохрцч', 'еппбжджмримфчйеаолидпцруоовх', 'едтжкоийггснехшсчйлвфбкцжжрчтш', '', 'пжахфднхсотй', 'ра', 'рач', 'вчримуцнхбкуйжрвфиугзфсзг', 'кщфехрххциаашщсифвашгйцвхевцщнйахтбпжщ', '', 'ртщиобчжстовйчфабалзц', 'рачйнашхрмцебфдйаае', 'ощгжосччфкуг', 'гехвжнщжссидмрфчйтнепдсртбажм', 'а', 'ицжлсрсиатевбвнжрдмзцувввтзцфтвгвш', 'рф', 'прсмлча', 'ндлхшцааурмзфгверуфниац']) from system.numbers limit 10; +select [2, 14, 10, 0, 6, 15, 1, 0, 0, 4, 5, 17, 0, 0, 3, 0, 3, 0, 9, 0] = multiSearchAllPositionsUTF8(materialize('влфощсшкщумчллфшшвбшинфппкчуи'), ['лфощ', 'лфшшвбшинфпп', 'умчллфшшвбшинф', 'слмтнг', 'сшкщумчллфшшвбшинф', 'фшшвб', '', 'рчфбчййсффнодцтнтнбцмолф', 'щфнщокхжккшкудлцжрлжкнп', 'ощ', 'щсшкщумчлл', 'швбшинфппкч', 'септзкщотишсехийлоцчапщжшжсфмщхсацг', 'нт', 'фощсшкщумчллфшшвбшинфп', 'нщпдш', 'фощс', 'мивсмча', 'щумч', 'щчйнткжпмгавфтйтибпхх']) from system.numbers limit 10; +select [0, 10, 0, 0, 0, 0, 0, 3, 0, 0, 0, 2, 0, 11, 0, 0] = multiSearchAllPositionsUTF8(materialize('еаиалмзхцгфунфеагшчцд'), ['йнш', 'гфун', 'жлйудмхнсвфхсуедспщбтутс', 'елмуийгдйучшфлтхцппамфклйг', 'евйдецц', 'пчтфцоучфбсйщпвдацмчриуцжлтжк', 'нстмпумчспцвцмахб', 'иалмз', 'зифчп', 'чогфщимоопт', 'фдйблзеп', 'аиа', 'щугмзужзлйдктш', 'фунфеагшч', 'нйхшмсгцфжчхжвхгдхцуппдц', 'асмвмтнрейшгардллмсрзгзфйи']) from system.numbers limit 10; +select [23, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, 5, 7, 1, 9, 4] = multiSearchAllPositionsUTF8(materialize('зузйфзлхходфрхгтбпржшрктпйхеоп'), ['ктпйхео', 'лжитуддикчсмкглдфнзцроцбзтсугпвмхзллжж', 'х', 'меуфтено', 'фтдшбшрпоцедктсийка', 'кхтоомтбчвеонксабшйптаихжбтирпзшймчемжим', 'чиаущлрдкухцрдумсвивпафгмр', 'фрнпродв', 'тдгтишхйсашвмдгкчбмшн', 'йфзлхходфрхгтбпржшр', 'бежшлрйврзмумеуооплкицхлйажвцчнчсеакм', 'ф', 'лхходфрхгтб', '', 'ходфрхгтбпржшр', 'й']) from system.numbers limit 10; +select [0, 0, 0, 1, 0, 1, 22, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 1] = multiSearchAllPositionsUTF8(materialize('чфгвчхчпщазтгмбнплдгщикойчднж'), ['мштцгтмблаезочкхзвхгрбпкбмзмтбе', 'канбжгсшхшз', 'кзинвщйччажацзйнсанкнщ', 'чфгвчхчпщазтгмбнп', 'етйцгтбнщзнржнйхж', '', 'ик', '', 'еизщвпрохдгхир', 'псумйгшфбвгщдмхжтц', 'слмжопинйхнштх', 'йшралцицммбщлквмгхцввизопнт', 'л', 'чфгвчхчпщазтгмбнплдгщ', 'пбзмхжнпгикиищжтшботкцеолчцгхпбвхи', 'хзкцгрмшгхпхуоцгоудойнжлсоййосссмрткцес', 
'ажуофйпщратдйцбржжлжнжащцикжиа', '']) from system.numbers limit 10; +select [6, 0, 2, 5, 2, 9, 10, 0, 0, 4, 0, 6, 3, 2] = multiSearchAllPositionsUTF8(materialize('ишогпсисжашфшлйичлба'), ['сисжашфшлй', 'пднещбгзпмшепкфосовбеге', 'шогп', 'пс', 'шогпси', 'жаш', 'аш', 'деисмжатуклдшфлщчубфс', 'грмквкщзур', 'гпсис', 'кйпкбцмисчхдмшбу', 'сисжашф', 'о', 'шо']) from system.numbers limit 10; +select [8, 15, 13, 0, 1, 2, 5, 2, 9, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('нсчщчвсанпрлисблснокзагансхм'), ['анпрлисблснокзагансхм', 'блснокз', 'исб', 'дрмгвснпл', '', 'счщчвса', 'чвсанпрлисблснокзагансх', 'счщчвсанпрлис', 'нпрли', 'пциишуецнймуодасмжсойглретиефо', 'фхимщвкехшлг', 'слщмаимшжчфхзпрцмхшуниврлуйлжмфжц']) from system.numbers limit 10; +select [0, 5, 0, 0, 14, 0, 12, 0, 2, 3, 0, 3, 21, 5] = multiSearchAllPositionsUTF8(materialize('хажуижанндвблищдтлорпзчфзк'), ['щуфхл', 'и', 'фцежлакчннуувпаму', 'щесщжрчиктфсмтжнхекзфс', 'ищдтлорпзчф', 'дееичч', 'блищ', 'гиефгйзбдвишхбкбнфпкддмбтзиутч', 'ажуижа', 'жуижанндвблищдтлорпзчфзк', 'чщщдзетвщтччмудвзчгг', 'ж', 'пзчфз', 'ижанн']) from system.numbers limit 10; +select [0, 0, 0, 9, 15, 0, 0, 0, 1, 3, 0, 0, 1, 0, 10, 0, 4, 0, 0, 7] = multiSearchAllPositionsUTF8(materialize('россроапцмцагвиигнозхзчотус'), ['ошажбчвхсншсвйршсашкм', 'пфдчпдчдмауцгкйдажрйефапввшжлшгд', 'иеаочутввжмемчушлуч', 'цмцагвиигно', 'ииг', 'ммпжщожфйкакбущчирзоммагеиучнщмтвгихк', 'укррхбпезбжууеипрзжсло', 'ншопзжфзббилйбувгпшшиохврнфчч', '', 'ссроап', 'лийщфшдн', 'йчкбцциснгначдцйчпа', 'россроапцмцагвииг', 'кштндцтсшорввжсфщчмщчжфжквзралнивчзт', 'мца', 'нбтзетфтздцао', 'сроа', 'мщсфие', 'дткодбошенищйтрподублжскенлдик', 'апцмцагвиигноз']) from system.numbers limit 10; +select [16, 0, 0, 2, 1, 1, 0, 1, 9, 0, 0, 3] = multiSearchAllPositionsUTF8(materialize('тйсдйилфзчфплсджбарйиолцус'), ['жбарйиолцу', 'цназщжждефлбрджктеглщпунйжддгпммк', 'хгжоашцшсзкеазуцесудифчнощр', 'йс', '', 'тйсдйилфзчфп', 'ивфсплшвслфмлтххжчсстзл', '', 'зчфплсдж', 'йртопзлодбехрфижчдцйс', 'цлащцкенмшеоерееиуноп', 'с']) from system.numbers limit 10; +select [3, 2, 1, 1, 0, 0, 0, 14, 6, 0] = multiSearchAllPositionsUTF8(materialize('нсцннйрмщфбшщховвццбдеишиохл'), ['цннйр', 'сцннйрм', 'н', 'нс', 'двтфхйзгеиеиауимбчхмщрцутф', 'пчтмшйцзсфщзшгнхщсутфжтлпаввфгххв', 'лшмусе', 'ховвццбд', 'йрмщфбшщховвццбдеи', 'гндруущрфзсфжикшзцжбил']) from system.numbers limit 10; +select [0, 18, 0, 1, 2, 0, 0, 0, 1, 7, 10, 0, 1, 0, 2, 0, 0, 18] = multiSearchAllPositionsUTF8(materialize('щидмфрсготсгхбомлмущлаф'), ['тлтфхпмфдлуоцгчскусфжчкфцхдухм', 'мущла', 'емлвзузхгндгафги', '', 'идмфрсготсгхбомлмущла', 'зфаргзлщолисцфдщсеайапибд', 'кдхоорхзжтсйимкггйлжни', 'лчгупсзждплаблаеклсвчвгвдмхклщк', 'щидмфр', 'сготсгхбомлму', 'тсгхбомлмущла', 'хсзафйлкчлди', '', 'й', 'ид', 'щлйпмздйхфзайсщсасейлфцгхфк', 'шдщчбшжбмййзеормнрноейй', 'мущ']) from system.numbers limit 10; +select [0, 13, 0, 0, 1, 0, 7, 7, 8, 0, 2, 0, 3, 0, 0, 13] = multiSearchAllPositionsUTF8(materialize('трцмлщввадлжввзчфипп'), ['хшзйийфжмдпуигсбтглй', 'ввзчфи', 'нсцчцгзегммтсшбатщзузпкшрг', 'гувйддежзфилйтш', '', 'хгзечиа', 'ввадлжввз', 'ввадлжввзчфи', 'ва', 'щтшсамклегш', 'рцмлщ', 'учзмиерфбтцучйдглбщсз', 'цмлщввадлжввзчфи', 'орйжччцнаррбоабцжзйлл', 'квпжматпцсхзузхвмйч', 'ввзчфип']) from system.numbers limit 10; +select [0, 1, 1, 0, 11, 4, 1, 2, 0, 0] = multiSearchAllPositionsUTF8(materialize('инкщблбвнскцдндбмсщщш'), ['жхрбсусахрфкафоилмецчебржкписуз', 'инкщблбвнс', '', 'зисгжфлашймлджинаоджруй', 'кцднд', 'щблбвнскцдндбмсщщ', 'инкщблбвнс', 'н', 'зб', 
'фчпупшйфшбдфенгитатхч']) from system.numbers limit 10; +select [6, 0, 4, 20, 1, 0, 5, 0, 1, 0] = multiSearchAllPositionsUTF8(materialize('рзтецуйхлоорйхдбжашнларнцт'), ['у', 'бпгййекцчглпдвсцсещщкакцзтцбччввл', 'ецуйхлоо', 'нлар', 'рз', 'ккнжзшекфирфгсгбрнвжчл', 'цуйхлоорйхдбжашн', 'йнучгрчдлйвводт', 'рзте', 'нткрввтубчлщк']) from system.numbers limit 10; + +select [1, 1, 0, 0, 1, 0, 0, 3, 3, 3, 1, 0, 8, 0, 8, 1, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('OTMMDcziXMLglehgkklbcGeAZkkdh'), ['', 'OTmmDCZiX', 'SfwUmhcGTvdYgxlzsBJpikOxVrg', 'ngqLQNIkqwguAHyqA', '', 'VVZPhzGizPnKJAkRPbosoNGJTeO', 'YHpLYTVkHnhTxMODfABor', 'mMdcZi', 'MmdCZI', 'MMdCZixmlg', '', 'hgaQHHHkIQRpPjv', 'ixMLgLeHgkkL', 'uKozJxZBorYWjrx', 'i', '', 'WSOYdEKatHkWiCtlwsCbKRnXuKcLggbkBxoq', '']) from system.numbers limit 10; +select [4, 15, 0, 0, 0, 0, 5, 0, 5, 1, 0, 1, 13, 0, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('VcrBhHvWSFXnSEdYCYpU'), ['bhhVwSfXnSEd', 'DycyP', 'kEbKocUxLxmIAFQDiUNoAmJd', 'bsOjljbyCEcedqL', 'uJZxIXwICFBPDlUPRyDHMmTxv', 'BCIPfyArrdtv', 'hHv', 'eEMkLteHsuwsxkJKG', 'hHVWsFxNseDy', '', 'HsFlleAQfyVVCoOSLQqTNTaA', '', 'sEDY', 'UMCKQJY', 'j', 'rBhHvw']) from system.numbers limit 10; +select [1, 1, 0, 0, 1, 0, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('wZyCLyiWnNNdNAPWeGSQZcdqk'), ['w', '', 'vlgiXgFTplwqRbnwBumAjHvQuM', 'QoIRVKDHMlapLNiIZXvwYxluUivjY', 'WZY', 'gAFpUfPDAwgzARCIMrtbZUsNcR', 'egkLWqqdNiETeETsMG', 'dzSlJaoHKlQmENIboow', 'vPNBhcaIfsgLH', 'mlWPTCBDVTdKHxlvIUVcJXBrmTcJokAls']) from system.numbers limit 10; +select [0, 10, 0, 1, 7, 1, 6, 1, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pqliUxqpRcOOKMjtrZSEsdW'), ['YhskuppNFdWaTaZo', 'Coo', 'mTEADzHXPeSMCQaYbKpikXBqcfIGKs', 'PQLiUxq', 'qpRCoOK', 'PQLIu', 'XQPrcoOK', '', 'pR', 'cTmgRtcSdRIklNQVcGZthwfarLtAYh']) from system.numbers limit 10; +select [16, 1, 1, 1, 1, 4, 17, 0, 0, 0, 1, 0, 0, 0, 20, 0] = multiSearchAllPositionsCaseInsensitive(materialize('kJyseeDFCeUWoqMfubYqJqWA'), ['fub', 'kJY', '', '', 'Kj', 's', 'uBYQJq', 'sUqCmHUZIBtZPswObXSrYCwrdxdznM', 'mtZDCJENYuikJnCcJfRcSCDYDPXU', 'IDXjRjHhmjqXmCOlQ', '', 'jiEwAxIsJDu', 'YXqcEKbHxlgUliIALorSKDMlGGWeCO', 'OstKrLpYuASEUrIlIuHIRdwLr', 'qJq', 'tnmvMTFvjsW']) from system.numbers limit 10; +select [11, 3, 1, 0, 9, 0, 0, 0, 0, 8, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('EBSPtFpDaCIydASuyreS'), ['iyD', 'sptfpdAciyDAsuyR', 'EbS', 'IJlqfAcPMTUsTFXkvmtsma', 'AcIYda', 'fbWuKoCaCpRMddUr', 'srlRzZKeOQGGLtTLOwylLNpVM', 'ZeIgfTFxUyNwDkbnpeiPxQumD', 'j', 'daciydA', 'sp', 'dyGFtyfnngIIbcCRQzphoqIgIMt']) from system.numbers limit 10; +select [6, 0, 0, 0, 10, 0, 1, 4, 0, 15, 0, 2, 2, 6] = multiSearchAllPositionsCaseInsensitive(materialize('QvlLEEsgpydemRZAZcYbqPZHx'), ['eSgpYDEMRzAzcyBQPzH', 'NUabuIKDlDxoPXoZOKbUMdioqwQjQAiArv', 'pRFrIAGTrggEOBBxFmnZKRPtsUHEMUEg', 'CDvyjef', 'YdEMrzaZc', 'BO', '', 'leEsgPyDEmRzaZCYBqPz', 'EzcTkEbqVXaVKXNuoxqNWHM', 'Z', 'cuuHNcHCcLGb', 'V', 'vllEes', 'eS']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 5, 7, 5, 0, 11, 1] = multiSearchAllPositionsCaseInsensitive(materialize('eiCZvPdGJSmwxMIrZvEzfYFOFJmV'), ['lSydrmJDeXDYHGFFiFOOJGyCbCCDbLzbSbub', 'ewsAVflvcTBQFtvWBwuZOJKkrUArIg', 'fpEkBWaBkRWypFWtMz', 'YatSURyNtcSuerWWlTBSdBNClO', 'YO', 'CZvpdg', 'uoH', 'gtGwQSVqSJDVROmsBIxjuVNfrQnxDhWGXLBH', 'IKNs', 'HElLuRMlsRgINaNp', 'V', 'DGjsMW', 'vPDgJSmW', 'SGCwNiAmNfHSwLGZkRYEqrxBTaDRAWcyHZYzn', 'mWXMiRZvezfYf', '']) from system.numbers limit 10; +select [23, 1, 0, 17, 0, 0, 9, 3, 
0, 2] = multiSearchAllPositionsCaseInsensitive(materialize('BizUwoENfLxIIYVDflhOaxyPJw'), ['yPJ', '', 'gExRSJWtZwOptFTkNlBGuxyQrAu', 'FLH', 'hCqo', 'oVGcArersxMUCNewhTMmjpyZYAIU', 'FlXIiYVdflHoAX', 'ZuWOe', 'bhfAfNdgEAtGdHylxkjgvU', 'IZUWo']) from system.numbers limit 10; +select [0, 9, 0, 0, 0, 0, 1, 0, 0, 1, 3, 0, 13, 0, 3, 5] = multiSearchAllPositionsCaseInsensitive(materialize('loKxfFSIAjbRcguvSnCdTdyk'), ['UWLIDIermdFaQVqEsdpPpAJ', 'ajBrcg', 'xmDmuYoRpGu', 'wlNjlKhVzpC', 'MxIjTspHAQCDbGrIdepFmLHgQzfO', 'FybQUvFFJwMxpVQRrsKSNHfKyyf', '', 'vBWzlOChNgEf', 'DiCssjczvdDYZVXdCfdSDrWaxmgpPXDiD', '', 'kxFFSIAjBRCGUVSNcD', 'LrPRUqeehMZapsyNJdu', 'cGuVSNcdTdy', 'NmZpHGkBIHVSoOcj', 'KxffSIAjBr', 'ffsIaJB']) from system.numbers limit 10; +select [14, 0, 11, 0, 10, 0, 0, 0, 13, 1, 2, 11, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('uijOrdZfWXamCseueEbq'), ['sE', 'VV', 'AmcsEu', 'fUNjxmUKgnDLHbbezdTOzyLaknQ', 'XAmCsE', 'HqprIpxIcOTkDIKcVK', 'NbmirQlNsTHnAVKlF', 'VVDNOxFKSnQGKPsTqgtwLhZnIPkL', 'c', '', 'IJ', 'aM', 'rDzF', 'YFwP']) from system.numbers limit 10; +select [0, 8, 17, 0, 1, 0, 0, 0, 0, 0, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('PzIxktujxHZsaDlwSGQPgvA'), ['zrYlZdnUxlPrVJJeZEASwdCHlNEm', 'jxhZS', 'sGQPgV', 'MZMChmRBgsxhdgspUhALoxmrkZVp', 'pzIxktuJxHzsADlw', 'xavwOAibQuoKg', 'vuuETOrWLBNLhrMeWLgGQpeFPdcWmWu', 'TZrAgmdorqZIdudhyCMypHYKFO', 'ztcCyGxRKrcUTv', 'OUvwdMZrcZuwGtjuEBeGU', 'k', 'rFTpnfGIOCfwktWnyOMeXQZelkYwqZ']) from system.numbers limit 10; +select [3, 1, 4, 1, 0, 17, 13, 0, 0, 0, 0, 0, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pUOaQLUvgmqvxaMsfJpud'), ['OaqLUvGm', '', 'aQ', '', 'VajqJSlkmQTOYcedjiwZwqNH', 'f', 'xaMsfj', 'CirvGMezpiIoacBGAGQhTJyr', 'vucKngiFjTlzltKHexFVFuUlVbey', 'ppalHtIYycBCEjsgsXbFeecpkQMNr', 'nEgIYVoGkhTsFgBUSHJvIcYCYbuOBP', 'efjBVRVzknGrikGHxExlFEtYf', 'v', 'QgRBCaGlwNYWRslDylOrfPxZxAOF']) from system.numbers limit 10; +select [14, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 20, 5, 0, 4, 0] = multiSearchAllPositionsCaseInsensitive(materialize('WZNWOCjFkCAAzIptkUtyPCyC'), ['iPTkuT', 'BngeNlFbKymzMYmNPfV', 'XKEjbLtADFMqS', 'dbRQKJGSFhzljAiZV', 'wZnwoCjFKCAAzIPTKuTYpc', 'yBaUvSSGOEL', 'iEYopROOYKxBwPdCgbPNPAsMwVksHgagnO', 'TljXPJVebHqrnhSiTGwpMaNeKy', 'wzNWocjF', 'bLxLrZnOCeIfxkfZEOcqDteUvc', 'CtHYpAZDANEv', '', 'XMAMpGYMiOb', 'y', 'o', 'floswnnFjXDTxantSvDYPSnaORL', 'WOcjFkcAaZIp', 'buqBHbZsLDnCUDhLdgd']) from system.numbers limit 10; +select [0, 20, 14, 0, 2, 0, 1, 14, 0, 0, 0, 1, 0, 26, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('XJMggEHaxfddDadtwKMCcPsMlSFVJ'), ['NzbUAZvCsnRnuzTglTsoT', 'ccP', 'ADTwKmc', 'JaUzcvWHMotuEMUtjsTfJzrsXqKf', 'jMGgEHaXfdddAdTWKMCcpsM', 'SMnb', '', 'AdTWkMccPSMlsfv', 'fVjPVafkp', 'goqsYAFqhhnCkGwhg', 'CNHNPZHZreFwhRMr', '', 'vcimNhmdbtoiCgVzNuvdgZG', 'sfvJ', 'AqKmroxmRMSFAKjfhwrzxmNSSjMHxKow', 'Xhub']) from system.numbers limit 10; +select [0, 0, 7, 0, 1, 1, 0, 0, 13, 0, 1, 1, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('VQuEWycGbGcTcCCvWkujgdoWjKgVYy'), ['UevGaXmEAtBdWsPhBfqp', 'aQOrNMPmoVGSu', 'c', 'TMhzvbNJCaxtGNUgRBmTFEqgNBIBpSJ', '', 'vq', 'pVNUTCqXr', 'QSvkansbdPbvVmQpcQXDk', 'cCCvwkUjgdOWjKgVYy', 'EtCGaEzsSbJ', 'V', '', 'WycgBgCTCcCvwkujgdoWJKgv', 'xPBJqKrZbZHJawYvPxgqrgxPN']) from system.numbers limit 10; +select [4, 1, 0, 0, 0, 0, 0, 0, 0, 18] = multiSearchAllPositionsCaseInsensitive(materialize('LODBfQsqxfeNuoGtzvrUMRVWNKUKKs'), ['Bf', 'lOdbfQs', 'ZDSDfKXABsFiZRwsebyU', 'DT', 'GEUukPEwWZ', 'GNSbrGYqEDWNNCFRYokZbZEzGzc', 'kYCF', 'Kh', 
'jRMxqdmGYpTkePeReXJNdnxagceitMJlmbbro', 'VrumrvWnKU']) from system.numbers limit 10; +select [1, 1, 3, 1, 10, 0, 9, 2, 2, 0, 0, 0, 0, 0, 8, 0, 1, 11, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('lStPVtsQypFlZQoQhCuP'), ['', '', 'tpV', 'L', 'PF', 'pGPggwbkQMZandXugTpUorlPOubk', 'yPFlz', 'sTPVTsQyPfLzQOqhCU', 'StPVtSq', 'cbCxBjAfJXYgueqMFNIoSguFm', 'AosIZKMPduRfumDZ', 'AGcNTHObH', 'oPaGpsQ', 'kwQCczyY', 'q', 'HHUYdzGAzVJyn', '', 'fLZQoqHcUp', 'q', 'SSonzfqLVwIGzdHtj']) from system.numbers limit 10; +select [0, 1, 2, 0, 0, 0, 13, 1, 27, 1, 0, 1, 3, 1, 0, 1, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('NhKJtvBUddKWpseWwRiMyBsTWmlk'), ['toBjODDZoRAjFeppAdsne', '', 'HKjTvBu', 'QpFOZJzUHHQAExAqkdoBpSbXzPnTzuPd', 'gE', 'hLmXhcEOwCkatUrLGuEIJRkjATPlqBjKPOV', 'Ps', 'NH', 'l', '', 'aSZiWpmNKfglqAbMZpEwZKmIVNjyJTtDianY', 'NhKJTvBUDDkwpS', 'KJtvbUDDKWPSewwrimYbstwm', 'NHKJTvbudDKwpSEwwR', 'hmMeWEpksVAaXd', 'NHkJTvBUDd', 'kjTvbudd', 'kmwUzfEpWSIWkEylDeRPpJDGb']) from system.numbers limit 10; +select [0, 5, 0, 0, 0, 1, 1, 15, 2, 3, 4, 5] = multiSearchAllPositionsCaseInsensitive(materialize('NAfMyPcNINKcgsShJMascJunjJva'), ['ftHhHaJoHcALmFYVvNaazowvQlgxwqdTBkIF', 'yp', 'zDEdjPPkAdtkBqgLpBfCtsepRZScuQKbyxeYP', 'yPPTvdFcwNsUSeqdAUGySOGVIhxsJhMkZRGI', 'JQEqJOlnSSam', 'nAFmy', '', 'sHJmaScjUnJj', 'afmY', 'FmYpcnINKCg', 'MYPCniNkcgSS', 'YPCNiNkCgSsHjmasCJuNjJ']) from system.numbers limit 10; +select [0, 0, 6, 3, 2, 0, 8, 2, 2, 10, 0, 0, 14, 0, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('hgpZVERvggiLOpjMJhgUhpBKaN'), ['Nr', 'jMcd', 'e', 'PZVeRvggiLOPjmjh', 'GpZVe', 'cVbWQeTQGhYcWEANtAiihYzVGUoHKH', 'VGgilOPj', 'GPZVervgGiLopjmjHGuHp', 'GP', 'gil', 'fzwDPTewvwuCvpxNZDi', 'gLLycXDitSXUZTgwyeQgMSyC', 'PJmjh', 'bTQdrFiMiBtYBcEnYbKlqpTvGLmo', 'ggHxiDatVcGTiMogkIWDxmNnKyVDJth', 'pzv']) from system.numbers limit 10; +select [7, 1, 9, 3, 0, 0, 2, 0, 1, 11] = multiSearchAllPositionsCaseInsensitive(materialize('xUHVawrEvgeYyUZGmGZejClfinvNS'), ['RevGeYyuz', 'XUHvAWrev', 'Vg', 'hvawR', 'eRQbWyincvqjohEcYHMwmDbjU', 'nuQCxaoxEdadhptAhZMxkZl', 'UhVAwREvGEy', 'lHtwTFqlcQcoOAkujHSaj', '', 'eYYUzgMgzEjCLfIn']) from system.numbers limit 10; +select [0, 0, 8, 5, 9, 1, 0, 4, 12, 6, 4, 0, 0, 12] = multiSearchAllPositionsCaseInsensitive(materialize('DbtStWzfvScJMGVPQEGkGFoS'), ['CSjYiEgihaqQDxZsOiSDCWXPrBdiVg', 'aQukOYRCSLiildgifpuUXvepbXuAXnYMyk', 'fvsCjmgv', 'TWZFV', 'VscjMgVpQ', 'dBtSTwZfVsCjmGVP', 'wqpMklzJiEvqRFnZYMfd', 'StwZfVScJ', 'j', 'wzfVsCjmGV', 'STWZfVS', 'kdrDcqSnKFvKGAcsjcAPEwUUGWxh', 'UtrcmrgonvUlLnzWXvZI', 'jMgvP']) from system.numbers limit 10; +select [0, 0, 0, 0, 7, 3, 0, 11, 1, 10, 0, 0, 7, 1, 4, 0, 17, 3, 15, 0] = multiSearchAllPositionsCaseInsensitive(materialize('YSBdcQkWhYJMtqdEXFoLfDmSFeQrf'), ['TnclcrBJjLBtkdVtecaZQTUZjkXBC', 'SPwzygXYMrxKzdmBRTbppBQSvDADMUIWSEpVI', 'QnMXyFwUouXBoCGLtbBPDSxyaLTcjLcf', 'dOwcYyLWtJEhlXxiQLRYQBcU', 'KWhYjMtqdEXFo', 'BD', 'nnPsgvdYUIhjaMRVcbpPGWOgVjJxoUsliZi', 'j', '', 'YjmtQdeXF', 'peeOAjH', 'agVscUvPQNDwxyFfXpuUVPJZOjpSBv', 'kWh', '', 'dcQKWHYjmTQD', 'qjWSZOgiTCJyEvXYqaPFqbwvrwadJsGVTOhD', 'xfoL', 'b', 'DeXf', 'HyBR']) from system.numbers limit 10; +select [4, 0, 0, 13, 1, 0, 3, 13, 16, 1, 0, 1, 16, 1, 12, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('SoVPMQNqmaTGuzYxDvZvapSuPiaP'), ['pMqNQMAtGuzYxDVz', 'TEJtgLhyredMnIpoZfmWvNwpkxnm', 'XRWmsfWVOCHhk', 'u', '', 'HvkXtxFdhVIyccpzFFSL', 'VPM', 'uZyXDVzvAPsUpIaP', 'xDvzV', 'sovpmqNQmATguZYx', 'wEG', 'soVPmQnQ', 'XDVzV', '', 'GUZyXdvzva', 'FetUahWwGtwEpVdlJCJntL', 
'B', 'lSCUttZM']) from system.numbers limit 10; +select [1, 0, 1, 2, 15, 0, 0, 0, 1, 0] = multiSearchAllPositionsCaseInsensitive(materialize('zFWmqRMtsDjSeWBSFoqvWsrV'), ['', 'GItrPyYRBwNUqwSaUBpbHJ', '', 'f', 'BsfOQvWsR', 'JgvsMUZzWaddD', 'wxRECkgoCBPjSMRorZpBwuOQL', 'xHKLLxUoWexAM', '', 'YlckoSedfStmFOumjm']) from system.numbers limit 10; +select [11, 1, 1, 1, 0, 0, 1, 0, 4, 0, 0, 0, 1, 0, 5, 8] = multiSearchAllPositionsCaseInsensitive(materialize('THBuPkHbMokPQgchYfBFFXme'), ['KpqGchyfBF', '', '', 'TH', 'NjnC', 'ssbzgYTybNDbtuwJnvCCM', 'tHbupKHBMOkPQgcHy', 'RpOBhT', 'uPKHbMoKpq', 'oNQLkpSKwocBuPglKvciSjttK', 'TaCqLisKvOjznOxnTuZe', 'HmQJhFyZrcfeWbXVXsnqpcgRlg', 'tHB', 'gkFGbYje', 'pkhbMokPq', 'Bm']) from system.numbers limit 10; +select [7, 10, 0, 0, 9, 0, 0, 3, 0, 10] = multiSearchAllPositionsCaseInsensitive(materialize('ESKeuHuVsDbiNtvxUrfPFjxblv'), ['uvsDBiNtV', 'DbInTvxu', 'YcLzbvwQghvrCtCGTWVuosE', 'cGMNo', 'SDb', 'nFIRTLImfrLpxsVFMBJKHBKdSeBy', 'EUSiPjqCXVOFOJkGnKYdrpuxzlbKizCURgQ', 'KeUHU', 'gStFdxQlrDcUEbOlhLjdtQlddJ', 'DBInTVx']) from system.numbers limit 10; +select [1, 0, 2, 18, 1, 3, 15, 8, 0, 0, 1, 3, 0, 23, 2, 0, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('TzczIDSFtrkjCmDQyHxSlvYTNVKjMT'), ['', 'AmIFsYdYFaIYObkyiXtxgvnwMVZxLNlmytkSqAyb', 'ZcZI', 'HXsLVYTnvKjm', '', 'CZiDsFtRKJ', 'DQYhxSl', 'fTRKjCmdqYHxsLvYtNvk', 'hxVpKFQojYDnGjPaTNPhGkRFzkNhnMUeDLKnd', 'RBVNIxIvzjGYmQBNFhubBMOMvInMQMqXQnjnzyw', '', 'c', 'vcvyskDmNYOobeNSfmlWcpfpXHfdAdgZNXzNm', 'ytnvKJM', 'ZcZidsFtRKjcmdqy', 'IRNETsfz', 'fTR', 'POwVxuBifnvZmtBICqOWhbOmrcU']) from system.numbers limit 10; +select [14, 16, 10, 2, 6, 1, 0, 8, 0, 0, 12, 1, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('tejdZOLhjpFLkGBWTGPfmk'), ['GBWtgPF', 'Wt', 'PflkgBWTgpFmK', 'ejdZOLhJPFlKgb', 'o', 'TejDZ', 'HlQfCP', 'hJP', 'ydiyWEfPGyRwcKGfGVdYxAXmkY', 'QsOyrgkTGMpVUAmLjtnWEIW', 'LKGBw', 'tejDzolHJpFLKgbWT', 'IK', '', 'WrzLpcmudcIJEBapkToDbYSazKTwilW', 'DmEWOxoieDsQHYsLNelMc']) from system.numbers limit 10; +select [9, 0, 1, 4, 13, 0, 0, 1, 3, 7, 9, 0, 1, 1, 0, 7] = multiSearchAllPositionsCaseInsensitive(materialize('ZWHpzwUiXxltWPAIGGxIcJB'), ['XxLTWpA', 'YOv', '', 'pzwUIXXl', 'wp', 'lpMMLDAuflLnWMFrETXRethzCUZOWfQ', 'la', '', 'HPZ', 'UixxlTw', 'xXLTWP', 'YlfpbSBqkbddrVwTEmXxgymedH', '', '', 'QZWlplahlCRTMjmNBeoSlcBoKBTnNZAS', 'UiXxlTwPAiGG']) from system.numbers limit 10; +select [0, 9, 6, 0, 4, 0, 3, 0, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('NytxaLUvmiojEepjuCzwUYPoWL'), ['LcOnnmjbZSifx', 'm', 'lUvMIOjeE', 'vuZsNMSsutiLCDbClPUSsrziohmoZaQeXtKG', 'XaLuvm', 'hlUevDfTSEGOjvLNdRTYjJQvMvwrMpwy', 'TXALuVmioJeePjUczw', 'pKaQKZg', 'PAdX', 'FKLMfNAwNqeZeWplTLjd', 'DODpbzUmMCzfGZwfkjH', 'HMcEGRHLspYdJIiJXqwjDUBp']) from system.numbers limit 10; +select [2, 1, 0, 16, 8, 1, 6, 0, 0, 1, 8, 0, 7, 0, 9, 1, 1, 0, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('WGVvkXuhsbzkLqiIEOuyiRfomy'), ['GVv', '', 'VbldWXHWzdziNcJKqIkDWrO', 'iEOUyIRFomy', 'hsBZklqiieOuy', '', 'X', 'emXjmIqLvXsNz', 'rxhVkujX', 'wgvvK', 'HsBzKLQiie', 'wVzJBMSdKOqjiNrXrfLEjjXozolCgYv', 'UHsbzklQiiEouyirf', 'UOvUsiKtUnwIt', 'SBZKLqiIEoUYIrfom', 'wg', '', 'BefhETEirL', 'WyTCSmbKLbkQ', '']) from system.numbers limit 10; +select [8, 1, 2, 8, 1, 0, 5, 0, 0, 4, 0, 1, 14, 0, 0, 7, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('uyWhVSwxUFitYoVQqUaCVlsZN'), ['XufitYOVqqUACVlszn', '', 'ywH', 'XUFIT', 'uywHvSWXuFIt', 'dGhpjGRnQlrZhzGeInmOj', 'vswXuFitYovqQuA', 'dHCfJRAAQJUZeMJNXLqrqYCygdozjAC', 
'rojpIwYfNLECl', 'hVswxufiTYov', 'bgJdgRoye', '', 'ovQ', 'AdVrJlq', 'krJFOKilvBTGZ', 'WxuFITYOV', 'AsskQjNPViwyTF', 'u']) from system.numbers limit 10; +select [0, 2, 0, 0, 0, 6, 0, 5, 0, 15, 0, 0, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BEKRRKLkptaZQvBxKoBL'), ['HTwmOxzMykTOkDVKjSbOqaAbg', 'eKrRKl', 'UrLKPVVwK', 'TyuqYmTlQDMXJUfbiTCr', 'fyHrUaoMGdq', 'KLkPtaZq', 'cPUJp', 'RKLk', 'yMnNgUOpDdP', 'BX', 'tXZScAuxcwYEfSKXzyfioYPWsrpuZz', 'dsiqhlAKbCXkyTjBbXGxOENd', 'k', 'juPjORNFlAoEeMAUVH']) from system.numbers limit 10; +select [9, 0, 0, 0, 1, 4, 2, 0, 0, 0, 0, 8, 0, 2, 0, 3, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('PFkLcrbouhBTisTkuUcO'), ['UhBtistKU', 'ioQunYMFWHD', 'VgYHTKZazRtfgRtvywtIgVoBqNBwVn', 'ijSNLKch', 'pFKlcrBOuhbtIsTku', 'lCRboUHBtI', 'fKLCRBOu', 'XTeBYUCBQVFwqRkElrvDOpZiZYmh', 'KzXfBUupnT', 'OgIjgQO', 'icmYVdmekJlUGSmPLXHc', 'OuH', 'BWDGzBZFhTKQErIRCbtUDIIjzw', 'F', 'LuWyPfSdNHIAOYwRMFhP', 'kL', 'PQmvXDCkEhrlFBkUmRqqWBxYi', 'kLcrbo']) from system.numbers limit 10; +select [0, 1, 1, 6, 14, 3, 0, 1, 9, 1, 9, 0, 1, 10, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pfynpJvgIjSqXWlZzqSGPTTW'), ['ZzeqsJPmHmpoYyTnKcWJGReOSUCITAX', '', 'P', 'jvGIj', 'wLZzQsgP', 'YnPjVGij', 'DmpcmWsyilwHwAFcKpLhkiV', '', 'I', 'pFy', 'IjsqxwLZzqSgpT', 'pKpe', 'PfynpJvgiJSqXwlzZ', 'jsQXwLZZqs', 'onQyQzglEOJwMCO', 'GV']) from system.numbers limit 10; +select [1, 17, 1, 20, 0, 0, 5, 0, 0, 0, 24, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BLNRADHLMQstZkAlKJVylmBUDHqEVa'), ['bLnRaDhLm', 'kJVYlmbuD', 'bLnr', 'yLMbU', 'eAZtcqAMoqPEgwtcrHTgooQcOOCmn', 'jPmVwqZfp', 'aDHlmqS', 'fmaauDbUAQsTeijxJFhpRFjkbYPX', 'aqIXStybzbcMjyDKRUFBrhfRcNjauljlqolfDX', 'WPIuzORuNbTGTNb', 'uDhqeVa', 'fQRglSARIviYABcjGeLK']) from system.numbers limit 10; +select [2, 0, 4, 5, 1, 15, 1, 9, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BEwjMzphoTMoGikbrjTVyqDq'), ['E', 'sClgniMsZoGTEuLO', 'jmzphotmoGIKBRjtv', 'MZPhOtmo', '', 'Kb', '', 'otm', 'tVpxYRttoVpRLencV', 'SJAhAuMttGaeMsalRjeelAGG']) from system.numbers limit 10; +select [1, 0, 0, 0, 0, 0, 4, 0, 0, 19, 0, 7] = multiSearchAllPositionsCaseInsensitive(materialize('yNnYRQfcyemQdxUEPOiwRn'), ['', 'SJteoGNeIAMPWWBltkNKMrWDiVfR', 'kKnnKQhIPiekpnqTXJuyHfvWL', 'GPDUQEMWKzEEpvjLaIRYiuNfpzxsnSBX', 'oPrngRKwruyH', 'ukTSzFePSeVoeZeLQlAaOUe', 'yRqfcyemQDXUepo', 'CwmxidvpPHIbkJnVfSpbiZY', 'FUxmQdFVISApa', 'iwr', 'ciGHzDpMGNQbytsKpRP', 'Fcy']) from system.numbers limit 10; +select [0, 1, 0, 11, 2, 0, 1, 3, 0, 0, 0, 21] = multiSearchAllPositionsCaseInsensitive(materialize('EgGWQFaRsjTzAzejYhVrboju'), ['DVnaLFtCeuFJsFMLsfk', '', 'thaqudWdT', 'Tzazejy', 'GGW', 'RolbbeLLHOJpzmUgCN', '', 'gwqfarsjtzaZeJYHvR', 'KkaoIcijmfILoe', 'UofWvICTEbwVgISstVjIzkdrrGryxNB', 'UJEvDeESWShjvsJeioXMddXDkaWkOiCV', 'B']) from system.numbers limit 10; +select [0, 5, 2, 0, 0, 7, 0, 0, 0, 11, 0, 12, 22, 10, 0, 12] = multiSearchAllPositionsCaseInsensitive(materialize('ONgpDBjfRUCmkAOabDkgHXICkKuuL'), ['XiMhnzJKAulYUCAUkHa', 'dbj', 'nGpDbJFRU', 'xwbyFAiJjkohARSeXmaU', 'QgsJHnGqKZOsFCfxXEBexQHrNpewEBFgme', 'JFruCM', 'DLiobjNSVmQk', 'vx', 'HYQYzwiCArqkVOwnjoVNZxhbjFaMK', 'Cm', 'ckHlrEXBPMrVIlyD', 'M', 'xI', 'UcmkAOabdKg', 'jursqSsWYOLbXMLQAEhvnuHclcrNcKqB', 'mKaoaBdKghxiCkkUUL']) from system.numbers limit 10; +select [0, 1, 0, 1, 0, 0, 0, 0, 7, 21] = multiSearchAllPositionsCaseInsensitive(materialize('WhdlibCbKUmdiGbJRshgdOWe'), ['kDPiHmzbHUZB', '', 'CukBhVOzElTdbEBHyrspj', '', 'QOmMle', 'wiRqgNwjpdfgyQabxzksjg', 'RgilTJqakLrXnlWMn', 
'bSPXSjkbypwqyazFLQ', 'CBkuMDiGbJRShGdOWe', 'dow']) from system.numbers limit 10; +select [0, 8, 0, 1, 1, 0, 1, 7, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('oOccAoDDoPzHUyRqdWhJxNmATEqtE'), ['LFuvoQkVx', 'DoPzh', 'YaBSTdWvmUzlgRloppaShkRmLC', 'oO', '', 'eeEpOSLSXbyaOxTscOPoaTcKcchPmSGThk', '', 'dDO', 'oFXmyIJtmcSnebywDlKruvPUgmPFzEnMvA', 'vCs', 'MsxHLTgQcaQYZdPWJshIMWbk', 'yqrjIzvrxd']) from system.numbers limit 10; +select [0, 16, 0, 0, 0, 0, 7, 1, 0, 0, 1, 2, 1, 4, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('FtjOSBIjcnZecmFEoECoep'), ['FQQwzxsyauVUBufEBdLTKKSdxSxoMFpL', 'EOecoEP', 'HGWzNTDfHxLtKrIODGnDehl', 'ZxirLbookpoHaxvASAMfiZUhYlfuJJN', 'mKh', 'GZaxbwVOEEsApJgkLFBRXvmrymSp', 'Ij', '', 'X', 'AnCEVAe', 'fTj', 'tjOSbIjcNZECMfeoEC', '', 'OsBIjcN', 'LtdJpFximOmwYmawvlAIadIstt', 'JOsBiJCNzEc']) from system.numbers limit 10; +select [0, 2, 0, 0, 19, 0, 0, 12, 1, 0, 3, 1, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('ugpnWWncvqSLsYUCVXRZk'), ['yOWnQmZuhppRVZamgmRIXXMDQdeUich', 'gPNww', 'jlyFSbvmjaYPsMe', 'fQUeGVxgQdmPbVH', 'rZk', 'ariCX', 'grAffMPlefMQvugtAzN', 'LsYuCVX', '', 'jZFoQdWEWJFfSmNDqxIyNjvxnZJ', 'P', 'UgPN', 'JmKMsbegxNvusaiGGAZKglq', 'qArXLxzdYvabPv']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 0, 8, 0, 0, 1, 1, 15, 0, 1, 7, 0] = multiSearchAllPositionsCaseInsensitive(materialize('nxwotjpplUAXvoQaHgQzr'), ['ABiEhaADbBLzPwhSfhu', 'TbIqtlkCnFdPgvXAYpUuLjqnnDjDD', 'oPszWpzxuhcyuWxiOyfMBi', 'fLkacEEeHXCYuGYQXbDHKTBntqCQOnD', 'GHGZkWVqyooxtKtFTh', 'CvHcLTbMOQBKNCizyEXIZSgFxJY', 'PlUAxVoQah', 'zrhYwNUzoYjUSswEFEQKvkI', 'c', 'NXWOt', '', 'qAhG', 'JNqCpsMJfOcDxWLVhSSqyNauaRxC', '', 'PpLuaxV', 'DLITYGE']) from system.numbers limit 10; +select [2, 0, 0, 1, 0, 0, 28, 1, 16, 1] = multiSearchAllPositionsCaseInsensitive(materialize('undxzJRxBhUkJpInxxJZvcUkINlya'), ['ndxzjRxbhuKjP', 'QdJVLzIyWazIfRcXU', 'oiXcYEsTIKdDZSyQ', 'U', 'dRLPRY', 'jTQRHyW', 'Y', '', 'nxxJZVcU', '']) from system.numbers limit 10; +select [1, 4, 1, 0, 4, 1, 0, 1, 16, 1, 0, 0, 0, 8, 12, 14, 0, 2] = multiSearchAllPositionsCaseInsensitive(materialize('lrDgweYHmpzOASVeiFcrDQUsv'), ['', 'gwEYhMP', 'LrDGwEyHmPzOaSVEifC', 'oMN', 'gwEYhMpZO', 'lrdGWEy', 'pOKrxN', 'lrDgwEyhmpZoaSv', 'eifcrdqU', 'LrDgw', 'dUvarZ', 'giYIvswNbNaBWprMd', 'pPPqKPhVaBhNdmZqrBmb', 'hmPzoASVEiF', 'O', 'SVEi', 'gIGLmHnctIkFsDFfeJWahtjDzjPXwY', 'rDGweyHmP']) from system.numbers limit 10; +select [0, 0, 11, 1, 1, 1, 0, 16, 0, 1, 5, 0, 0, 0, 2, 0, 2, 0] = multiSearchAllPositionsCaseInsensitive(materialize('XAtDvcDVPxZSQsnmVSXMvHcKVab'), ['bFLmyGwEdXiyNfnzjKxUlhweubGMeuHxaL', 'IhXOeTDqcamcAHzSh', 'ZSQsNMvsxmVHcK', '', '', '', 'dbrLiMzYMQotrvgwjh', 'MvsxMV', 'zMp', 'XaTDvCdvpXzsqSNMVSxm', 'v', 'LkUkcjfrhyFmgPXPmXNkuDjGYlSfzPi', 'ULpAlGowytswrAqYdaufOyWybVOhWMQrvxqMs', 'wGdptUwQtNaS', 'ATdVcdVPXzSqsnmVSXMvHcKVab', 'JnhhGhONmMlUvrKGjQcsWbQGgDCYSDOlor', 'atdvCdvpXzsqSnMVSxMVhCkvAb', 'ybNczkKjdlMoOavqBaouwI']) from system.numbers limit 10; +select [8, 0, 0, 0, 4, 0, 0, 5, 5, 2] = multiSearchAllPositionsCaseInsensitive(materialize('XPquCTjqgYymRuwolcgmcIqS'), ['qgyYMruW', 'tPWiStuETZYRkfjfqBeTfYlhmsjRjMVLJZ', 'PkTdqDkRpPpQAMksmkRNXydKBmrlOAzIKe', 'wDUMtn', 'UcTJQgYYMRuWoLCgMcI', 'PieFD', 'kCBaCC', 'Ct', 'C', 'pQuctjqgyymRuwOLCgmc']) from system.numbers limit 10; + +select [1, 0, 7, 1, 0, 24, 17, 0, 0, 0, 2, 0, 1, 7, 4, 1, 12, 8] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('гГБаДнФбпнЩврЩшЩЩМщЕБшЩПЖПчдт'), ['', 'таОХхрзИДжЛСдЖКЧжБВЩжЛкКХУКждАКРеаЗТгч', 'Ф', '', 'ЙЩИФМфАГщХзКЩЧТЙжмуГшСЛ', 
'ПЖпчдТ', 'ЩМщЕбшЩПжПч', 'ФгА', 'гУД', 'зУцкжРоППЖчиШйЗЕшаНаЧаЦх', 'гбаДНФбПНЩВРЩШЩщМЩеБшЩпжПЧд', 'РДЧЖАбрФЦ', 'гГ', 'ФбпНщвр', 'адНфБПнщвРщШщщМщЕбШщ', 'ггб', 'ВРЩ', 'бПНщврЩш']) from system.numbers limit 10; +select [0, 12, 8, 0, 12, 0, 0, 10, 0, 8, 4, 6] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('айРВбЧБжКВИхБкчФЖЖНВнпФйФБДфЗ'), ['ЛрЦфуУДВК', 'хБкчфЖжНвнпфйфБдФ', 'жКВИХБкчФЖжНвнПф', 'кЖчвУцВСфЗБТИфбСжТИдРкшгзХвщ', 'хбк', 'штДезйААУЛчнЖофМисНЗо', 'нлнШЧВЙхОПежкцевчлКрайдХНчНб', 'вИХбкчфжжНВН', 'ЩдзЦТуоЛДСеШГфЦ', 'ЖКВихбКЧфжЖ', 'вбЧбЖкВихБкЧфЖжНВ', 'Чб']) from system.numbers limit 10; +select [18, 15, 0, 0, 0, 0, 5, 0, 14, 1, 0, 0, 0, 0, 0, 15] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('пМИОкоЗжГйНТПЙацччЧАЩгЕВБбЕ'), ['ЧЧАЩгЕВБ', 'а', 'ФбРВщшййпХдфаЗЖлЛСЗПРШПпАОинЧКзЩхждН', 'ЛфРКДЙВСУСЙОчтнИкРЗбСГфкЩреИхЛлчХчШСч', 'ШйвБПАдФдФепЗТкНУрААйеЧПВйТоЧмБГДгс', 'ФтЙлЖЕсИАХИФЗаЕМшсшуцлцАМФМгбО', 'КО', 'лиШБнлпОХИнБаФЩдмцпжЗИЛнвсЩЙ', 'йацччЧАщгевбБЕ', 'ПмИоКозжГйНТП', 'ИГНннСчКАИСБщцП', 'ПнжмЙЛвШтЩейХЛутОРЩжифбЗчгМУЛруГпх', 'ХжЗПлГЖЛйсбпрЩОТИеБвулДСиГзлЛНГ', 'учклЦНЕгжмщлжАшщжМд', 'ЩеПОЙтЖзСифОУ', 'АЦЧ']) from system.numbers limit 10; +select [10, 0, 1, 1, 6, 1, 7, 6, 0, 0, 0, 2, 12, 0, 6, 0, 4, 8, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('квхБнцхйзЕпйИмтЙхфзвгдФ'), ['еПйИМт', 'хгкиМжСБИТНщенЩИщНСкй', '', 'Квхб', 'цхЙЗЕПйИмТйХФЗ', 'к', 'хйЗЕПЙИмтй', 'Цх', 'нКлШбМЖГйШкРзадрЛ', 'ДштШвБШТг', 'СЦКйЕамЦщПглдСзМлоНШарУтМднЕтв', 'ВхБнцхйЗЕПйимТ', 'йимтЙХФЗВГД', 'жчссунЙаРцМкЖУЦщнцОЕхнРж', 'цХЙЗЕП', 'ОгНФдМЛПТИдшцмХИеКйРЛД', 'бнЦхЙ', 'ЙЗе', 'згЩищШ', 'фХлФчлХ']) from system.numbers limit 10; +select [0, 0, 0, 12, 0, 0, 27, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('хНпсРТХВдтоЦчдлеФПвнЛгЗКлПйнМВ'), ['ШиБфЗШПДЧхОЩшхфщЗЩ', 'иГйСЧЗтШЛуч', 'АЗХЦхедхОуРАСВЙС', 'цчдЛЕфП', 'СДбйГйВЕРмЙЩЛщнжен', 'НДлцСфТшАщижгфмуЖицжчзегЕСЕНп', 'й', '', 'йлчМкРИЙиМКЙжссЦТцРГзщнхТмОР', 'ПРцГувЧкйУХггОгЖНРРсшГДрлЧНжГМчрХЗфЧЕ']) from system.numbers limit 10; +select [0, 0, 2, 0, 10, 7, 1, 1, 0, 9, 0, 2, 0, 17, 0, 0, 0, 6, 5, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЙзЗжпжДЕСУхчйдттСЙзоЗо'), ['щОЙУшееЧщкхГККреБкВ', 'жВ', 'ззЖпждЕсУХчЙДТТсЙ', 'ЙЦШЦЙЖзХШРвнкЕд', 'УхчйДтТсйЗОз', 'дЕСу', '', '', 'дсцеррищндЗдНкжаНЦ', 'сУхчЙдттсйзОзО', 'ЦЖРжмц', 'ЗЗ', 'СгЛГАГЕЖНгщОеЖЦДмБССцЩафзЗ', 'Сйзоз', 'ЦГХТЕвЕЗБМА', 'пмВоиеХГжВшдфАЖАшТйуСщШчИДРЙБнФц', 'Оа', 'ждЕ', 'ПжДесу', 'ЗзЖПждЕСУ']) from system.numbers limit 10; +select [0, 0, 0, 0, 5, 1, 0, 6, 0, 1, 17, 15, 1, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('уФШЙбШТоХВбзЦцЖОЕКТлщхнЖГ'), ['цЛ', 'ууМ', 'ТИгЙолМФсибтЕМнетквЦИЩИБккйн', 'оФОаМогсХЧЦооДТПхб', 'бШтОХВбЗцЦЖоЕКтЛ', 'уфШйбШтоХ', 'фдтщрФОЦсшигдПУхЛцнХрЦл', 'ШтО', 'НИкИТрбФБГИДКфшзЕмЙнДЖОсЙпЩцщкеЖхкР', 'уфШЙБш', 'екТлщ', 'ЖоекТл', 'уфШйБшТоХвбз', 'ТуОхдЗмгФеТаафЙм']) from system.numbers limit 10; +select [0, 1, 6, 1, 0, 1, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чМЩБЛЛПРлщкВУбПефХВФлАЗШао'), ['гаТкЛВнрвдПМоеКПОйр', 'ч', 'ЛпрЛЩКвуБпе', 'ЧмЩб', 'ц', '', 'жгаччЖйГЧацмдсИИВЩЩжВЛо', 'йГеЙнБзгнкЦЛБКдОЕЧ', 'ПоЦРвпЕЗСАШж', 'ЙОНЦОбиееО']) from system.numbers limit 10; +select [2, 0, 17, 1, 0, 0, 0, 5, 0, 4, 0, 0, 0, 0, 0, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЕаЩичщМЦЖиЗБЛЧжуНМЧК'), ['АЩиЧЩ', 'ИлУсшДБнжщаатуРТтраПОЙКЩйТГ', 'НМЧк', 'Еа', 'зАВФЛЩбФрМВШбПФГгВЕвЖббИТйе', 'РЗНРБЩ', 'ЦдЙНГпефзЛчпУ', 'ч', 'НШШчПЗР', 'ИчЩмЦжИЗБлЧЖУНМч', 'аннвГДлмОнТЖЗЙ', 'ШдчЩшЕБвхПУсШпг', 'гФИШНфЖПжймРчхАБШкЖ', 
'ЖзгЖАБлШЗДпд', 'Д', 'ащиЧ']) from system.numbers limit 10; +select [4, 1, 0, 7, 0, 7, 1, 1, 0, 3, 7, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('иОцХКЙвувМИжШдУМУЕйНсБ'), ['ХкйвуВмИжШдУм', '', 'звМАОМЩщЙПшкиТчЩдгТЦмфзеИ', 'вуВМиж', 'КДщчшЙВЕ', 'в', '', 'ИоЦхКЙВувМижШ', 'ЕвТАРи', 'цхКЙвувмИЖШДумуе', 'вУвМи', 'зПШИХчУщШХУвврХйсуЙЗеВЧКНмКШ']) from system.numbers limit 10; +select [0, 5, 0, 0, 0, 0, 0, 12, 0, 11] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЦОфбчУФсвТймЦчдщгЩжИАБ'), ['йлрк', 'ЧуФсвтйМцчдЩгщ', 'МНлЕжорв', 'иНзТЖМсмх', 'шЕМЖжпИчсБжмтЧЙчщФХб', 'жШХДнФКАЩГсОЩвЕаам', 'НпКЦХулЛвФчШЕЗкхХо', 'мЦчДЩгЩжиАб', 'мпцгВАЕ', 'Й']) from system.numbers limit 10; +select [1, 0, 0, 0, 8, 0, 2, 0, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чТХЙНщФфцИНБаеЖкОвлиУДР'), ['', 'рВХмжКцНцИЙраштМппсодЛнЧАКуЩ', 'ИХфХЖЧХВкзЩВЙхчфМрчдтКздиОфЙжУ', 'Гзлр', 'фЦи', 'абПф', 'тХЙНщффЦИн', 'нссГбВеЖх', 'амлЗщрсУ', 'фФ']) from system.numbers limit 10; +select [0, 9, 11, 0, 11, 1, 0, 0, 0, 1, 6, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зДЗпщАцвТгРдврщхЩфЖл'), ['йХЛ', 'Т', 'рд', 'АИЦщгниДфВОе', 'Р', 'здзпщ', 'вКТвВШмгч', 'ввирАйбЗЕЕНПс', 'тХиХоОтхПК', '', 'аЦВТгРДврщ', '', 'уЗЗЖвУЕйтчудноЕКМЖцВРаНТЙЗСОиЕ', 'оЕфПхЕДжАаНхЕцЖжжофЦхкШоБЙр']) from system.numbers limit 10; +select [1, 1, 0, 0, 1, 7, 0, 0, 0, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('йЛПЛшмЦШНЖРрЧрМцкЖзЕНжЧДелФжАн'), ['', 'йЛПлшМЦшНЖррч', 'ПНКдфтДейуиШзЗХАРУХизВ', 'ПценмщЧОФУСЙЖв', '', 'ЦшнжрРчрМЦКЖЗе', 'МрПзЕАгжРбТЧ', 'ЕДФмаФНвТЦгКТЧЦжцЛбещЛ', 'УтПУвЛкТасдЦкеИмОещНИАоИжЖдЛРгБЩнвЖКЛЕП', 'Л']) from system.numbers limit 10; +select [1, 5, 1, 1, 0, 0, 1, 1, 0, 2, 19, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('сйДпмжнДРщКБгфцЖОчтГНБ'), ['', 'МЖнДРщ', 'Сй', '', 'пУщ', 'йгВИАЦнозаемТиХВвожКАПТдкПИаж', 'Сйд', 'СЙДпмжНдРщ', 'ФПщБцАпетаЙФГ', 'ЙдпМжНдрЩКбГфЦжОЧТГНб', 'т', 'гллрБВМнвУБгНаЙцМцТйЙФпзЧОЙЛвчЙ', 'йДПМжндРЩкБ', 'ЗмфОмГСНПщшЧкиССдГБУсчМ']) from system.numbers limit 10; +select [0, 18, 10, 5, 0, 2, 8, 1, 4, 11] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ХпИРддХрмВНйфчвгШиЧМКП'), ['хЗФДлДУБЙаЦтжРБЗсуйнЦпш', 'иЧмК', 'внЙ', 'д', 'зиМУЩГиГ', 'ПИр', 'РМвнЙфчвгШич', '', 'РдДхРМ', 'нЙфчВГШИ']) from system.numbers limit 10; +select [18, 0, 0, 1, 0, 0, 6, 0, 0, 9] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('нГгФкдуФШуИТбшпХфтаГт'), ['Таг', 'рРпшУйчГд', 'гК', '', 'лаВНбездпШШ', 'ЕБРйаНрОБожкКИсв', 'ДУфШУитБ', 'ГРиГШфШтйфЖлРФзфбащМЗ', 'мхЩжЛнК', 'ШуИтБШ']) from system.numbers limit 10; +select [13, 0, 0, 7, 0, 15, 0, 0, 15, 0, 0, 5, 6, 0, 18, 21, 11, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('рлобшдПЦИхжФуХщжгПФукшзт'), ['УхщжГ', 'ТВщЦфФсчЩГ', 'ЕжФШойжуЛРМчУвк', 'пцИХжфуХЩж', 'бР', 'щЖГПфуКШЗТ', 'йжРГгЛуШКдлил', 'ТщЖГкбШНИщЩеЩлаАГхрАфЙНцЦгВкб', 'щжГПфУ', 'бкаДБЛХ', 'АЗ', 'шДПЦихжфух', 'дП', 'вфнЙобСцвЩмКОбЦсИббФКзЩ', 'пФУкшзТ', 'К', 'жфу', '']) from system.numbers limit 10; +select [12, 19, 8, 1, 0, 0, 0, 15, 0, 0, 12, 2, 0, 4, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЦкЛЗепкЕХЩГлКФрБдТрлвйАхдООШ'), ['лК', 'рЛв', 'Ехщ', '', 'еаПКБгЦЩАоЗВонйТЗгМхццСАаодМЕЩГ', 'ишОНиеБидфбФБЖриУЩЩ', 'дуж', 'РбДТ', 'пЗсГХКсгРущкЙРФкАНЩОржФвбЦнЩНЖЩ', 'щрОУАГФащзхффКвЕйизцсйВТШКбнБПеОГ', 'лкФрБдТРлвЙа', 'КЛзеп', 'УЛФЗРшкРщзеФуМвгПасШЧЛАЦр', 'зеПКеХщглкфР', 'ЦЖЗдХеМЕ', 'зЖжрт', 'уЩФрйрЖдЦз', 'МфцУГЩтвПАЦжтМТоеищЕфнЖй']) from system.numbers limit 10; +select [0, 0, 1, 0, 1, 0, 0, 7, 0, 5, 1, 6, 1, 1, 
1, 5, 6, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РННЕШвжМКФтшДЙлфЛИзЙ'), ['ГаМРош', 'Дтфс', '', 'еБбиаКщГхххШвхМЖКзЛАезФУчХо', 'РНн', 'сВбТМ', 'ЖЗЦПБчиСйе', 'жМкфтШДЙл', 'нЖХуеДзтЧтулиСХпТпеМлИа', 'ШВжМкФТШдЙлфл', '', 'вЖМКфТ', '', '', '', 'швЖМКфтШДЙЛфлИЗй', 'вЖмКФТ', 'еМ']) from system.numbers limit 10; +select [0, 0, 15, 1, 0, 0, 8, 1, 0, 0, 0, 4, 8, 10] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РиучГийдХутДЕЙДпфиуд'), ['ЩмгцлЖрц', 'ЕСжСлЩЧИЖгЗЛлф', 'дП', '', 'щГЦаБтПШВзЦСрриСЙбД', 'тдРгОЛТШ', 'д', '', 'КЕбЗКСХЦТщЦДЖХпфаЧйоХАл', 'мТвзелНКрЖЧЦПпЕЙвдШтеШйБ', 'ЙОТКрБСШпШд', 'ЧГ', 'ДХУТДЕЙд', 'УТд']) from system.numbers limit 10; +select [0, 0, 0, 0, 15, 0, 0, 0, 11, 0, 0, 5, 1, 1, 0, 2, 3, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('МшазшргхОПивОлбмДоебАшцН'), ['ЦИшштН', 'еМСЗкФЕКДйОНМ', 'ЛСчГрбеРЕбЩМПМЗЦИп', 'ХнИПЧжЗдзФщЗ', 'бмдоЕ', 'гМОдйсбТСЦЩбФВЗШзшщбчегаЕмЕБаХаРР', 'фщнР', 'щмТчФчсМАОгчБщшг', 'иВ', 'УщцГОшТзпУХКоКЖБеМШ', 'мйаАЛцАегСмПОаСТИСфбЧДБКоИВчбЦЙ', 'шРгхоп', '', '', 'еИпАЩпнЛцФжЩХИрЧаИИТЛвшиСНЩ', 'шаЗ', 'АЗ', 'ФгдтфвКЩБреногуир', 'ДБжШгщШБЩпЖИЛК', 'ЧдРЩрбфЛзЙклхдМСФУЙЛн']) from system.numbers limit 10; +select [5, 0, 0, 18, 13, 0, 2, 7, 0, 0, 1, 15, 1, 0, 0, 0, 3, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('хщеКЗПчуНЙтрЧЩгфСбоКЕАДТййАрр'), ['зп', 'хчПЦшпДбзСфНВЧзНжЕМФОП', 'ЧЖхЕУк', 'БОКеАдтЙЙа', 'чЩГфС', 'шллддЩщеМжШйкЩн', 'щЕкзпЧуНЙТ', 'ЧунйтРЧщгФс', 'ввНздЙуоТЖРаВЙчМИчхРвфЛЖБН', 'ЗХМХПщПкктцАзщЙкдпжф', '', 'ГФСбОкеАДтйЙа', '', 'МБХВЕчпБМчуххРбнИМЛТшЩИщЙгаДцзЛАМвйаО', 'ЛкОзц', 'ЕцпАДЗСРрсЕвтВщДвцбЗузУннТИгХжхрцПДРДПм', 'екЗПЧунЙТРчщгФсбоК', 'шпИфЕчгШжцГВСйм', 'ЛхйЧбЧД', 'ВзЗоМцкЩНХГж']) from system.numbers limit 10; +select [0, 0, 6, 20, 0, 10, 0, 0, 0, 9, 10, 3, 23, 1, 0, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('лцапШиХчЛДшдксСНИбшгикзчЙанми'), ['ХууатТдтбодМГЧгщЧнклШтЗПНчкЦОаЙг', 'МЦЧчпИхКЛаФхщХдРУДщжУчфлжахц', 'иХЧлдшдкСсНИбШГикзЧЙ', 'гикЗчйА', 'ГсТзЛОфИББлекЩАсЛвмБ', 'Д', 'ЦХрТЖощНрУШфнужзжецсНХВфЩБбДУоМШШиГйж', 'йуВдЕзоггПВДЖб', 'ЙфБГйХМбжоакЖЛфБаГИаБФСнБЖсТшбмЗЙТГОДКИ', 'ЛДШдКССНИБшГикзч', 'ДШдКССниБ', 'аПШИХчЛДШДКсс', 'з', '', 'ФоохПЩОГЖоУШлКшзЙДоуп', 'хАДХЩхлвУИсшчрбРШУдФА', 'ЦА', 'гвптУФлчУуРхпрмЖКИрБеЩКчН']) from system.numbers limit 10; +select [0, 4, 5, 7, 15, 3, 3, 17, 7, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зЗАЩлЕЕЕПИохЧчШвКЧйрсКХдд'), ['пКРбуШОНТЙБГНзИРвЖБсхрЛщчИрлЧУ', 'ЩЛЕЕЕПиоХЧ', 'ЛеЕеп', 'Еепио', 'швкЧйрС', 'ащЛеееПИох', 'АЩлеЕЕпиОхЧЧШвкЧЙРсК', 'КчйРскхД', 'ЕЕПИохччшВКчй', 'у']) from system.numbers limit 10; +select [1, 12, 0, 8, 1, 1, 0, 1, 5, 0, 1, 0, 0, 0, 0, 3, 1, 0, 4, 5] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ПмКСйСКЖККмШеоигЙчПфжТ'), ['', 'Шео', 'РчвлдЙЙлПщуКмтН', 'жкКмшЕоИГЙЧ', '', '', 'йРмМЖнПиЙ', '', 'йс', 'тфФРСцл', '', 'щлЩХиКсС', 'кпнТЖпФЩиЙЛ', 'абкКптбИВгмЧкцфЦртЛДЦФФВоУхЗБн', 'чНшоВСГДМйДлтвфмхХВВуеЩЦВтЖтв', 'кС', '', 'фидБлйеЙЧШРЗЗОулщеЕЩщЙсЙшА', 'СЙс', 'йсКжкКМшЕо']) from system.numbers limit 10; +select [0, 0, 1, 0, 2, 2, 1, 2, 7, 0, 1, 2, 1, 0, 6, 8] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('УгЖЕугАЩХйчидаррлжНпфФГшр'), ['утвШ', 'кЕвФч', 'угжеУг', 'тШлТвЕШЗчЖеЛНджЦазЩХцж', 'гЖеугаЩхй', 'ГжЕугаЩХйЧидАР', 'УгжЕУГаЩХЙЧИда', 'гЖеу', 'ащхЙчИ', 'мЧлщгкЛдмЙЩРЧДИу', '', 'ГжеугАщХйЧиДаРРЛЖНП', '', 'зЕМвИКбУГКЩФшоГЧГ', 'ГАЩХйчИДАррлЖНпФфг', 'ЩХЙчИдАррЛЖНпфФгш']) from system.numbers limit 10; +select [1, 0, 0, 7, 0, 6, 0, 11, 0, 0, 0, 2, 0, 0, 0, 0] = 
multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЗЕГЛЩПцГНтзЕЦШЧхНКГТХЙЙФШ'), ['', 'шзкиЗсаИщАБмаз', 'Ж', 'ц', 'гШуЕжЛСПодРнхе', 'пцГНтЗЕЦ', 'щРкЩАеНржЙПМАизшщКвЗщглТкКИф', 'ЗеЦшчхнКГтхЙЙ', 'пелгЩКкцвтфнжЖУуКосЙлкЛ', 'рф', 'хНШчНрАХМШщфЧкЩБНзХУкилЙмП', 'ЕгЛЩПЦгнтзецШЧ', 'ЩУчБчРнЖугабУоиХоИККтО', 'СГмЦШтФШЛмЙЩ', 'ауТПЛШВадоХМПиБу', 'ЩЩйр']) from system.numbers limit 10; +select [2, 2, 1, 0, 0, 0, 0, 0, 1, 0, 7, 9, 0, 15, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('гЙЧЙФХнЖБвомгАШГбОВГксИйцз'), ['ЙЧйфхНЖбвО', 'Й', 'гЙЧйфхнЖбв', 'хсЩмШЙЙММВЦмУБТчгзУЛР', 'зктшп', 'дЕоиЖлгШж', 'хКкаНЛБ', 'ЗКйСчсоЗшскГЩбИта', '', 'у', 'НжбВОмгашГ', 'БВо', 'ещфРШлчСчмаЖШПЧфоК', 'шгбо', 'ЙСтШШДЩшзМмдпЧдЙЖевТвоУСЕп', 'Л']) from system.numbers limit 10; +select [0, 9, 0, 0, 18, 13, 13, 11, 0, 0, 4, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЙЛмоЦСдТаоФчШКЖЦСНРаРЦзоС'), ['ДфгЗАасВфаМмшхчлмР', 'аоФчШкЖцСнРАРЦзОС', 'зЩзнйтФРТЙжУлхФВт', 'чЦкШВчЕщДУМкхЛУЩФшА', 'н', 'Шк', 'шКЖцсНРаРцЗос', 'фчшкЖцснрАРЦз', 'лку', 'пЧШМЦквоемЕщ', 'о', 'йЛМоцСДТАофЧшкжЦСнРаРЦзос']) from system.numbers limit 10; +select [21, 0, 0, 17, 1, 11, 0, 2, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кЧЖнЕбМЛпШЗХиЙжиМщлнСФрПЧЖВН'), ['сФ', 'гцХаШЛсаШЛкшфЧОКЛцзешХСиЩоаЕОш', 'Г', 'МщЛНСФРпч', '', 'зХ', 'ОАДепНпСГшгФАЦмлуНуШШЗфдЧРШфрБЛчРМ', 'чЖне', 'СфЕАбФн', 'М']) from system.numbers limit 10; +select [4, 0, 1, 1, 0, 2, 4, 16, 3, 6, 5, 0, 0, 6, 1, 0, 5, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кдАпЩСШИСцРхтеСиФЖЧСсОоц'), ['пщСшиСцрХТЕсифЖчССоОц', 'рхнкикДТКДВШчиЖЦнВм', '', '', 'жПЛСнЦцн', 'дА', 'ПщсШИсцрХтЕс', 'иФжЧсСоОЦ', 'ап', 'с', 'щсШИ', 'МАзашДРПЩПзРТЛАсБцкСШнЕРЙцИЩлТЛеУ', 'ичцпДбАК', 'сшИСЦрхтЕсифжчСсООц', 'КдАПЩСшИСЦРХТЕсИфЖЧСсо', 'ЛнБсИПоМЩвЛпиЩЗЖСд', 'щс', 'шщДНБаСщЗАхкизжнЛАХЙ']) from system.numbers limit 10; +select [0, 13, 0, 2, 16, 1, 3, 0, 9, 0, 2, 0, 1, 4, 0, 0, 0, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('иНхеЕкхЩщмгзМГхсгРБсхОКцУгуНБ'), ['ДиоУлФЖЛисУСЕтсЕалщн', 'МгХсгрБСХО', 'ЖХНцршПшгйО', 'нХЕЕкхЩ', 'сГРбсхОКцУг', '', 'х', 'Ж', 'щМгЗмгхСг', 'СрпхДГОУ', 'НхеЕкХщ', 'ПМтБцЦЙЖАЙКВБпФ', 'ИнхеЕ', 'еЕКхЩ', 'мМГлРзш', 'гтдоЙБСВещкЩАЩЦйТВИгоАЦлчКнНРНПДЖшСЧа', 'ЖшеН', '']) from system.numbers limit 10; +select [1, 5, 0, 0, 3, 0, 2, 0, 14, 14, 1, 0, 17, 13, 3, 25] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('айлзсЗБоГйтГжЙРККФхКшлНРОрЦкфо'), ['', 'с', 'Д', 'шиБраНИЦЧуИжп', 'Лз', 'ДРБСУфКСшцГДц', 'йЛЗСЗбОгЙтГЖйРК', 'ЕЙЦсвРЕШшщЕЗб', 'ЙркКфхкшЛнРОР', 'ЙРкКФхкШ', 'а', 'ГдоДКшСудНл', 'КФхКшлНРоР', 'ж', 'лзСзБогйТГЖйрККф', 'оР']) from system.numbers limit 10; +select [6, 0, 8, 10, 1, 0, 1, 13, 0, 0, 0, 2, 2, 0, 4, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РучУлрХчЗУпИчДТЕфщИЙщрНлн'), ['РХЧ', 'оДсГСЛЙшйиЧРСКзчХВоХарцНШ', 'ЧЗУпИ', 'УПичдТе', 'Р', 'ВЙЩхжАутПСНЦфхКщеЩИуЧдчусцАесзМпмУв', '', 'ЧдТ', 'ООсШИ', 'ФШсВжХтБУШз', 'ЕЩуДдшкМУРЕБшщпДОСАцйауи', 'УЧ', 'УЧУЛрХчзуПИчдТеФщий', 'йнЦцДСхйШВЛнШКМСфмдЩВйлнеЖуВдС', 'улрхчзупиЧдтефщИ', 'СХТЧШшГТВвлЕИчНОВи']) from system.numbers limit 10; +select [0, 0, 0, 2, 1, 1, 0, 1, 19, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('УецжлЦЦщМшРГгЩЩдБмхЖЗЧзШЙб'), ['НзИуАузуРЗРуКфоТМмлПкрсмЕЕЕнТ', 'ЕЩГХхЧш', 'ХоЙпООчфЖввИжЙшЖжЕФОтБхлВен', 'ЕЦЖЛЦцщ', '', '', 'ухогСИФвемдпаШЗуЛтлизОЧ', 'УецЖ', 'ХЖзЧЗ', 'П', 'мБкзХ', 'уБуОБхШ']) from system.numbers limit 10; +select [6, 1, 15, 5, 0, 0, 0, 3, 2, 4, 0, 12, 0, 2, 0, 3, 1, 6, 0, 0] = 
multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ГЖФеачМаКчПСпкВкхсПтг'), ['чмАкЧ', '', 'ВкХс', 'ачМА', 'КлтжУлОЛршБЕблФЩ', 'тцуМфж', 'л', 'фе', 'Жф', 'ЕАЧМак', 'лЖЕРТнФбЧЙТййвзШМСплИхбЙЛЖзДпм', 'СпкВК', 'ЩзчжИш', 'жФеАчМ', 'КбЦбйЕШмКтЩЕКдуЩтмпИЕВТЖл', 'ФЕаЧмАКчПСПквкхспТ', 'гжФеАЧмаКчпСп', 'ЧмАК', 'дцкДННМБцйЕгайхшжПГх', 'ТЩбвЦЖАНшрАШФДчОщй']) from system.numbers limit 10; +select [1, 6, 0, 1, 0, 0, 3, 1, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('МФННЧйОнцЛИЧЕПШПЧйоГФО'), ['', 'йОн', 'шУлгИЛЛРЙАсфЗоИЙЗРхуПбОЙсшдхо', 'МФННчЙоНц', 'лзВжбЦзфкзтуОйзуЗ', 'ЖГДщшЦзсжщцЦЖеЧвРфНИНОСАОщг', 'ННчйОНЦлИчЕПШ', '', 'Ф', 'ЩрИдНСлЙуАНЗвЕчмчАКмФУипндиП']) from system.numbers limit 10; +select [5, 0, 8, 13, 0, 0, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зВйймХЩМзЦГЕкЕКфоСтхПблуКМхц'), ['МХщмз', 'НАНрШоНДмурМлО', 'мзцгЕкек', 'кеКфоСтХПбЛУК', 'СУУксО', 'ЦоШжЧфйШЦаГЧйбЛШГЙггцРРчт', 'НбтвВбМ', '', 'тЩФкСтоСЧЦЦЙаСДЩСГЙГРИФЗОЗфбТДЙИб', 'ВГж']) from system.numbers limit 10; +select [0, 0, 0, 8, 19, 0, 3, 12, 1, 4] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ДпбЙЖНЗбПнЛбахБаХТуабШ'), ['цИаЩвгеИР', 'Ф', 'РЖиА', 'БпнЛб', 'У', 'Тфн', 'Б', 'БА', '', 'ЙЖНзБПнлбАхбаХ']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 1, 0, 17, 1, 0, 1, 1, 1, 11, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ТЦмЩОинХзоДДпПНЩигрРщОзКц'), ['ЕжЙВпПл', 'ВКфКТ', 'ШкДсЖхшфоПИадУбхФЩБчОАкпУеБхи', 'НТЕЙОШЦЖоЩбзВзшс', 'учГгуКФзлУдНУУуПУлкаЦЕ', '', 'фАПМКуЧйБЧзСоЗргШДб', 'ИГРрщОзк', '', 'йупОМшУйзВиВрЛЩЕеЩмп', '', '', '', 'дДППнщИгРР', 'ШФвИЧакеЦвШ', 'ТцМЩоинхЗОДдппнЩ', 'мрОгЩшЩеЧ', 'еЖРиркуаОТсАолЩДББВАМБфРфпШшРРРм']) from system.numbers limit 10; +select [3, 0, 0, 0, 0, 0, 1, 0, 0, 14, 0, 1, 0, 1, 1, 1, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('аОкиЛгКйхаОГОУзЦЛрбцш'), ['кИЛГкйхАогоУЗЦл', 'щЧДпХИхбпсГвфДФХкчХ', 'ШвАмБЗлДОИПткжхФТФН', 'щфсхФмЦсЛеувЙО', 'лВУЖц', 'еИщРшозЖАдцтКииДУлДОУФв', 'а', 'ХгЦРШ', 'ФзрЖкРЗЩЧИеЧцКФИфЧЧжаооИФк', 'уЗ', 'фЦФдцжжМчЗЖлиСЧзлщжжЦт', '', 'МдхжизИХфвбМААрйФНХдЕжп', 'аОкиЛг', 'АОКИЛгкйХАОГОУЗЦ', '', 'МбЖйрсумщиеОЩк', 'КйХАоГоУЗцлРБЦШ']) from system.numbers limit 10; +select [0, 0, 2, 1, 0, 0, 12, 0, 17, 0, 0, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('КУчЛХФчЛХшвбМЦинРвНрФМРкмиеЕп'), ['ТБЩБзхАмщПщЧПИФПашгЕТиКЦМБМпСЩСуЩМчтшеш', 'йлВЕЙшфшаШЗШЩВХЦчЛБс', 'УЧл', '', 'ЛДсЖщмНЦсКуфЗуГиука', 'РТТОТфГЕлЩЕгЛтДфлВЖШГзЦЖвнЗ', 'БМцИНРвнРф', 'ОЕИЕдИсАНаифТПмузЧчЖфШЕуеЩсслСШМоЖуЩЛМп', 'рвНРфМркМи', 'ЦзБМСиКчУжКУЩИИПУДвлбдБИОЙКТЛвтз', 'злСГе', 'ВдтцвОИРМЕжХО', 'учЛХфЧл', 'БшччШбУзЕТзфКпиШжнезвоеК']) from system.numbers limit 10; +select [0, 7, 0, 0, 0, 0, 7, 6, 0, 16, 12, 12, 15, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('оЖиогсфклШМСДрбхРбМбрЕщНЙЗйод'), ['иПмДКейууОклНХГЗсбаЙдШ', 'ФКлШмсДрБХРбМбрещНЙЗЙОд', 'арчжтСТнк', 'чбТНЛЕжооЗшзОУ', 'ощАЩучРСУгауДхГКлмОхЙцЕо', 'аЛбкиЦаКМбКхБМДнмФМкйРвРр', 'ФКлШмСДрбХРбм', 'СфклШ', 'еДйилкУлиИчХЙШтхцЗБУ', 'хрБ', 'СДрбХрбМБР', 'СдрбхРБ', 'бхрБМБРЕщНйз', 'КИб']) from system.numbers limit 10; +select [22, 1, 8, 0, 0, 1, 0, 3, 0, 6, 20, 0, 0, 0, 4, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЕЖДФбКужЙЦЦмсЖГГжБзеЙнПйЙри'), ['НПййР', '', 'Жй', 'Щ', 'ФхУО', 'ЕЖДфБКУЖйЦЦмСжГГ', 'НФЙзщЩГЧпфсфЦШОМЕЗгцрс', 'д', 'ЦтщДДЖтбвкгКонСк', 'кУЖЙЦЦм', 'ЕйНПййРИ', 'РчеЙйичФбдЦОтпчлТЖИлДучЙПгЗр', 'внчзшЗзОнФфхДгфзХТеНПШРшфБТЖДйф', 'кНснгмулМуГНурщЕББСузВмбнЧаХ', 'фбКУЖйЦцМсЖГгЖб', 'ЩСЕ']) from system.numbers limit 10; +select [0, 0, 0, 1, 10, 
4, 0, 0, 5, 0, 1, 0, 7, 0, 3, 7, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чБхлжгКЖХлЙнкКЦфжЕгЖАндЧ'), ['ПдмРрЖАтВнСдСБШпПЗГгшИ', 'цшцг', 'тчАЙЧОеЕАвГпЗцЖЧгдХуЛСЛНрвАЖщ', '', 'Лй', 'Л', 'ОйррцУжчуЦБАжтшл', 'вХУКк', 'жгКжхЛЙН', 'уцбЕЕОЧГКУПуШХВЕчГБнт', '', 'ПсАжБИКштЕаН', 'КжхлЙН', 'ЩгШухЦПАТКежхгХксгокбщФЙПсдТНШФЦ', 'Х', 'кЖХЛйНккЦФжЕГЖ', 'ЙзРДСПднаСтбЧЖхощ', 'пАПОУЧмИпслБЗПфУ']) from system.numbers limit 10; +select [0, 0, 0, 5, 2, 16, 4, 4, 11, 0, 0, 3, 3, 0, 0, 6] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кпМаоуГГфвощолЦЩщЧПРОКепеА'), ['ЗзуФжНшщПТнЧЦКВОиАУсЧХОШбк', 'тмПкАпеайзуХсурШй', 'АЕЦавбШиСДвВДумВкиИУБШЕ', 'о', 'ПМаОУггФВощоЛЦЩЩЧПрокЕПеа', 'щЩ', 'аоУг', 'аОуГгФВ', 'оЩоЛЦЩщчПРОК', 'виХЛшчБсщ', 'УчАМаЦкйДЦфКСмГУЧт', 'мАоУ', 'МАО', 'щФФА', 'Н', 'У']) from system.numbers limit 10; +select [0, 3, 10, 8, 3, 0, 4, 0, 9, 4, 1, 9] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('щЙЧРпшИцхпргЦНуДййусЧЧнЖ'), ['ДлУцтееЖБКХгМзСВжА', 'чРпШИЦ', 'пргЦнУДЙЙУ', 'Ц', 'ЧРПш', 'нЩрЕвмрМеРйхтшЩче', 'РпШИЦхПРГцнУд', 'ПНоЙтПкоаОКгПОМЦпДЛФЩДНКПбСгЗНЗ', 'ХПРГцНудЙЙ', 'рПши', '', 'ХПРГ']) from system.numbers limit 10; +select [11, 4, 1, 0, 1, 0, 0, 0, 0, 12, 0, 9, 5, 0, 16, 0, 12, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('пкзщщЛНОНбфЦноИЧфхбФ'), ['ф', 'щщл', 'ПКзЩщЛНОн', 'ЩшФйЧБНДОИзМхеЖНЦцеЛлУЧ', '', 'сЗоЙТклйДШкДИЗгЖ', 'орЛФХПвБбУхНс', 'доЗмЩВу', 'ШиЕ', 'ЦНО', 'ндЩдРУЖШМпнзНссЖШДЦФвпТмуМЙйцН', 'НбФЦнОИч', 'ЩлНонБФ', 'ЛдРжКММЙм', 'чфх', 'ЦматДйиСфЦфааЦо', 'ЦНОИчФх', 'иржЦщн']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 3, 0, 5] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чЖажцВбшЛттзДааАугШщАйПгщП'), ['ШгУТсчГОВЦЦеЛАСфдЗоЗЦВЛйлТДзчвЛва', 'УшЕшищЖткрвРСйиФЗйТФТЛЗаЗ', 'ВдикЙббщузоФХщХХГтЗоДпхбЕкМщц', 'срйеХ', 'рАшуПсЙоДнхчВкПЖ', '', 'гНЗбКРНСБВрАВФлнДШг', 'фХЧгмКнлПШлЩР', 'мкйЗбИФрЗахжгАдвЕ', 'чжаЖцВБШлТ', 'лХЕСрлПрОс', '', 'ЗЧПтчЙОцвОФУФО', 'ажцвБшЛТт', 'уНчЖШчМЕА', 'ц']) from system.numbers limit 10; +select [7, 1, 0, 7, 1, 19, 8, 6, 3, 0, 2, 13, 6, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('НТКПпмБжДцбАКПНСЖоиТФД'), ['б', '', 'аУщЛМХЖбвИтНчГБМГдДнч', 'Б', 'НТкппм', 'и', 'Жд', 'МБждЦбАкП', 'кппмБждцБа', 'ПЕрнЦпМЦВгЧЧгГ', 'ткПпМБЖДцбаКпнСжО', 'кПнСЖоИ', 'МБжДцБакпН', 'гхОХжГуОвШШАкфКМщсшФДШеИжоАйг']) from system.numbers limit 10; + +select 0 = multiSearchAny(materialize('mpnsguhwsitzvuleiwebwjfitmsg'), ['wbirxqoabpblrnvvmjizj', 'cfcxhuvrexyzyjsh', 'oldhtubemyuqlqbwvwwkwin', 'bumoozxdkjglzu', 'intxlfohlxmajjomw', 'dxkeghohv', 'arsvmwwkjeopnlwnan', 'ouugllgowpqtaxslcopkytbfhifaxbgt', 'hkedmjlbcrzvryaopjqdjjc', 'tbqkljywstuahzh', 'o', 'wowoclosyfcuwotmvjygzuzhrery', 'vpefjiffkhlggntcu', 'ytdixvasrorhripzfhjdmlhqksmctyycwp']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('qjjzqexjpgkglgxpzrbqbnskq'), ['vaiatcjacmlffdzsejpdareqzy', 'xspcfzdufkmecud', 'bcvtbuqtctq', 'nkcopwbfytgemkqcfnnno', 'dylxnzuyhq', 'tno', 'scukuhufly', 'cdyquzuqlptv', 'ohluyfeksyxepezdhqmtfmgkvzsyph', 'ualzwtahvqvtijwp', 'jg', 'gwbawqlngzcknzgtmlj', 'qimvjcgbkkp', 'eaedbcgyrdvv', 'qcwrncjoewwedyyewcdkh', 'uqcvhngoqngmitjfxpznqomertqnqcveoqk', 'ydrgjiankgygpm', 'axepgap']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('fdkmtqmxnegwvnjhghjq'), ['vynkybvdmhgeezybbdqfrukibisj', 'knazzamgjjpavwhvdkwigykh', 'peumnifrmdhhmrqqnemw', 'lmsnyvqoisinlaqobxojlwfbi', 'oqwfzs', 'dymudxxeodwjpgbibnkvr', 'vomtfsnizkplgzktqyoiw', 'yoyfuhlpgrzds', 'cefao', 'gi', 'srpgxfjwl', 
'etsjusdeiwbfe', 'ikvtzdopxo', 'ljfkavrau', 'soqdhxtenfrkmeic', 'ktprjwfcelzbup', 'pcvuoddqwsaurcqdtjfnczekwni', 'agkqkqxkfbkfgyqliahsljim']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['', 'lpc', 'rxpkrfybb', 'crxp', '', 'pkr', 'jxzxlpcrxpkrf', '', 'xzxlpcr', 'xpk', 'fyb', 'xzxlpcrxpkrfybbfk', 'k', 'lpcrxp', 'ljxzxlpcr', 'r', 'pkr', 'fk']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('rbrizgjbigvzfnpgmpkqxoqxvdj'), ['ee', 'cohqnb', 'msol', 'yhlujcvhklnhuomy', 'ietn', 'vgmnlkcsybtokrepzrm', 'wspiryefojxysgrzsxyrluykxfnnbzdstcel', 'mxisnsivndbefqxwznimwgazuulupbaihavg', 'vpzdjvqqeizascxmzdhuq', 'pgvncohlxcqjhfkm', 'mbaypcnfapltsegquurahlsruqvipfhrhq', 'ioxjbcyyqujfveujfhnfdfokfcrlsincjbdt', 'cnvlujyowompdrqjwjx', 'wobwed', 'kdfhaoxiuifotmptcmdbk', 'leoamsnorcvtlmokdomkzuo', 'jjw', 'ogugysetxuqmvggneosbsfbonszepsatq']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('uymwxzyjbfegbhgswiqhinf'), ['lizxzbzlwljkr', 'ukxygktlpzuyijcqeqktxenlaqi', 'onperabgbdiafsxwbvpjtyt', 'xfqgoqvhqph', 'aflmcwabtwgmajmmqelxwkaolyyhmdlc', 'yfz', 'meffuiaicvwed', 'hhzvgmifzamgftkifaeowayjrnnzw', 'nwewybtajv', 'ectiye', 'epjeiljegmqqjncubj', 'zsjgftqjrn', 'pssng', 'raqoarfhdoeujulvqmdo']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('omgghgnzjmecpzqmtcvw'), ['fjhlzbszodmzavzg', 'gfofrnwrxprkfiokv', 'jmjiiqpgznlmyrxwewzqzbe', 'pkyrsqkltlmxr', 'crqgkgqkkyujcyoc', 'endagbcxwqhueczuasykmajfsvtcmh', 'xytmxtrnkdysuwltqomehddp', 'etmdxyyfotfyifwvbykghijvwv', 'mwqtgrncyhkfhjdg', 'iuvymofrqpp', 'pgllsdanlhzqhkstwsmzzftp', 'disjylcceufxtjdvhy']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('mznihnmshftvnmmhnrulizzpslq'), ['nrul', 'mshftvnmmhnr', 'z', 'mhnrulizzps', 'hftvnmmhnrul', 'ihnmshftvnmmhnrulizzp', 'izz', '', 'uli', 'nihnmshftvnmmhnru', 'hnrulizzp', 'nrulizz']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('ruqmqrsxrbftvruvahonradau'), ['uqmqrsxrbft', 'ftv', 'tvruvahonrad', 'mqrsxrbftvruvahon', 'rbftvruvah', 'qrsxrbftvru', 'o', 'ahonradau', 'a', 'ft', '', 'u', 'rsxrbftvruvahonradau', 'ruvahon', 'bftvruvahonradau', 'qrsxrbftvru', 't', 'vahonrada', 'vruvahonradau', 'onra']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('gpsevxtcoeexrltyzduyidmtzxf'), ['exrltyzduyid', 'vxtcoeexrltyz', 'xr', 'ltyzduyidmt', 'yzduy', 'exr', 'coeexrltyzduy', 'coeexrltyzduy', 'rlty', 'rltyzduyidm', 'exrltyz', 'xtcoeexrlty', 'vxtcoeexrltyzduyidm', '', 'coeexrl', 'sevxtcoeexrltyzdu', 'dmt', '']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('dyhycfhzyewaikgursyxfkuv'), ['sktnofpugrmyxmbizzrivmhn', 'fhlgadpoqcvktbfzncxbllvwutdawmw', 'eewzjpcgzrqmltbgmhafwlwqb', 'tpogbkyj', 'rtllntxjgkzs', 'mirbvsqexscnzglogigbujgdwjvcv', 'iktwpgjsakemewmahgqza', 'xgfvzkvqgiuoihjjnxwwpznxhz', 'nxaumpaknreklbwynvxdsmatjekdlxvklh', 'zadzwqhgfxqllihuudozxeixyokhny', 'tdqpgfpzexlkslodps', 'slztannufxaabqfcjyfquafgfhfb', 'xvjldhfuwurvkb', 'aecv', 'uycfsughpikqsbcmwvqygdyexkcykhbnau', 'jr']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('vbcsettndwuntnruiyclvvwoo'), ['dwuntnru', '', 'ttndwuntnruiyclvv', 'ntnr', 'nruiyclvvw', 'wo', '', 'bcsettndwuntnruiycl', 'yc', 'untnruiyclvvw', 'csettndwuntnr', 'ntnruiyclvvwo']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('pqqnugshlczcuxhpjxjbcnro'), ['dpeedqy', 'rtsc', 'jdgla', 'qkgudqjiyzvlvsj', 'xmfxawhijgxxtydbd', 'ebgzazqthb', 'wyrjhvhwzhmpybnylirrn', 
'iviqbyuclayqketooztwegtkgwnsezfl', 'bhvidy', 'hijctxxweboq', 't', 'osnzfbziidteiaifgaanm']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('loqchlxspwuvvccucskuytr'), ['', 'k', 'qchlxspwu', 'u', 'hlxspwuvv', 'wuvvccucsku', 'vcc', 'uyt', 'uvv', 'spwu', 'ytr', 'wuvvccucs', 'xspwuv', 'lxspwuvvccuc', 'spwuvvccu', 'oqchlxspwuvvccucskuy']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('pjjyzupzwllshlnatiujmwvaofr'), ['lnatiujmwvao', '', 'zupzwllsh', 'nati', 'wllshl', 'hlnatiujmwv', 'mwvao', 'shlnat', 'ati', 'wllshlnatiujmwvao', 'wllshlnatiujmwvaofr', 'nat']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('iketunkleyaqaxdlocci'), ['nkleyaqaxd', 'etunkleyaq', 'yaqaxdlocci', 'tunkleyaq', 'eyaqaxdlocc', 'leyaq', 'nkleyaqaxdl', 'tunkleya', 'kleyaqa', 'etunkleya', 'leyaqa', 'dlo', 'yaqa', 'leyaqaxd', 'etunkleyaq', '']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('drqianqtangmgbdwruvblkqd'), ['wusajejyucamkyl', 'wsgibljugzrpkniliy', 'lhwqqiuafwffyersqjgjvvvfurx', 'jfokpzzxfdonelorqu', 'ccwkpcgac', 'jmyulqpndkmzbfztobwtm', 'rwrgfkccgxht', 'ggldjecrgbngkonphtcxrkcviujihidjx', 'spwweavbiokizv', 'lv', 'krb', 'vstnhvkbwlqbconaxgbfobqky', 'pvxwdc', 'thrl', 'ahsblffdveamceonqwrbeyxzccmux', 'yozji', 'oejtaxwmeovtqtz', 'zsnzznvqpxdvdxhznxrjn', 'hse', 'kcmkrccxmljzizracxwmpoaggywhdfpxkq']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('yasnpckniistxcejowfijjsvkdajz'), ['slkpxhtsmrtvtm', 'crsbq', 'rdeshtxbfrlfwpsqojassxmvlfbzefldavmgme', 'ipetilcbpsfroefkjirquciwtxhrimbmwnlyv', 'knjpwkmdwbvdbapuyqbtsw', 'horueidziztxovqhsicnklmharuxhtgrsr', 'ofohrgpz', 'oneqnwyevbaqsonrcpmxcynflojmsnix', 'shg', 'nglqzczevgevwawdfperpeytuodjlf']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('ueptpscfgxhplwsueckkxs'), ['ohhygchclbpcdwmftperprn', 'dvpjdqmqckekndvcerqrpkxen', 'lohhvarnmyi', 'zppd', 'qmqxgfewitsunbuhffozcpjtc', 'hsjbioisycsrawktqssjovkmltxodjgv', 'dbzuunwbkrtosyvctdujqtvaawfnvuq', 'gupbvpqthqxae', 'abjdmijaaiasnccgxttmqdsz', 'uccyumqoyqe', 'kxxliepyzlc', 'wbqcqtbyyjbqcgdbpkmzugksmcxhvr', 'piedxm', 'uncpphzoif', 'exkdankwck', 'qeitzozdrqopsergzr', 'hesgrhaftgesnzflrrtjdobxhbepjoas', 'wfpexx']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('ldrzgttlqaphekkkdukgngl'), ['gttlqaphekkkdukgn', 'ekkkd', 'gttlqaphe', 'qaphek', 'h', 'kdu', 'he', 'phek', '', 'drzgttlqaphekkkd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('ololo'), ['ololo', 'ololo', 'ololo']); + +select 1 = multiSearchAnyUTF8(materialize('иечбпрхгебилцмпфвжцс'), ['лцмпфвж', 'ечбпрхгебилц', 'фвж', 'мпфвж', 'вжцс', 'пфвжцс', 'ц', 'чбпрхгебил', 'илцмп', 'фвж', 'ечбпрхгеби', '', 'б', 'хгеб', '', '', 'ил', 'ебилцмпфвжцс']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('змейдмоодкшуищвеишчддуцпх'), ['здсщесгдкзмчбжчщчиоо', 'чфззцмудщхтфрмсзрвшйщ', 'рлунбнзрфубуббдочтвлзмпгскузохк', 'ктзлебцам', 'вчспмж', 'нгкк', 'гпзйа', 'щпйкччнабакцтлапсбваихншхфридб', 'афсузжнайхфи', 'йрздеучфдбсвпжохрз', 'ошбечпзлг', 'полшхидфр']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('лшнуухевгплвйужчошгнкнгбпщф'), ['гбпщф', 'б', 'ф', 'чошгнкнг', 'йужчо', 'гплвйужчошгнкн', 'бпщф', 'плвйужч', 'шгнкнг', 'хевгплвй', 'плвйужчошгн', 'вй', 'лвйужчошгнкнгбпщф', 'лвйужчошгнкн']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('кцпгуоойвщталпобщафибирад'), ['ойвщталпобща', 'щта', 'пгуоойвщтал', 'ф', 'общ', 'цпгуоойвщталпобща', 'побщ', 
'ф', 'цпгуоойвщталпобщафиб', 'побщаф', 'лпобщафи', 'цпгуоойвщталпобщафи', 'пгуоойвщталпобщаф', 'талпоб', 'уоойвщталпо', 'гуоойвщтал', 'уоойвщталп', 'щ', '', 'цпгуоойвщталпобщафибирад']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('фвгйсеккзбщвфтмблщходео'), ['еккзбщвфтмблщходе', 'йсеккзбщвфтм', 'вфтмблщходео', 'вгйсеккзбщ', '', 'йсеккзбщвфт', 'бщвфтмблщход', 'ккзбщвфтмблщход', 'ккзбщвфтм', 'еккзбщвфтмблщходе', 'еккзбщвфтмблщх', 'вгйсеккзбщвф', 'оде', 'оде', '', 'бщвфтмблщх', 'б', 'йсеккзбщвфтмблщходео', 'вфтмблщ', 'кзбщ']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('хбаипфшнкнлтбшрскшщдувчтг'), ['хгшгднфуркшщвфгдглххс', 'цогчщки', 'тдмщшйзйхиквмб', 'етелфмшвмтзгеурнтбгчнщпмйпйжжциш', 'чсбк', 'ибащлшздеуревжйфуепфхкузбзао', 'дкмбщдсбжййсвгкхбхпшноншлщ', 'щхбеехнцегрфжжу', 'збфлпгсмащр', 'скчдигцнсзфрещйлвзнбнл', 'освзелагррдоортлрз', 'утхрч', 'йкбрвруенчччпшрнгмхобщимантешищщбж', 'жгивтеншхкцаргдасгирфанебкзаспбдшж', 'ййекжшщцщ', 'ефдсфбунйчдбуй', 'бвжцирзшмзщ', 'випжцщйзхнгахчсцвфгщзкдтвчйцемшлй', 'лдрфгвднеиопннтчсйффвлхемввег', 'бмтцжжеоебщупфчазпгхггцегнрутр']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('фбуоойпцщишщлбхчрсллзвг'), ['уччхщ', 'вщчсарфмйшгшпйфгмжугмщжкцщгйжзфл', 'кклл', 'лпнжирпсиуо', 'нчипзфщхнтштхйхщрпзитко', 'вйпсдергвцзсцсгмхпбз', 'чфщдфоилгцевпц', 'чааиае', 'чгингршжтчпу', 'щетбнгутшйсгмвмучдхстнбрптничихб']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('лйвзжфснтлгбгцерлзсжфещ'), ['зсжф', '', 'бгц', 'зжфснтлгбгц', 'л', 'цер', 'жфснтлгбгц', 'тлгбг', 'це', 'гбгцерл', 'нтлгбгцерлзсж', 'жфещ', 'взжфснтлг', 'фснтлгбгцерлзсжфещ', 'нтлгбгцерлзсж', 'зжфснтлгбг', 'взжфснтлгбгцерлз', 'взжфснтлгбгце']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('нфдцжбхуучеинивсжуеблмйрзцршз'), ['чеинивсжуеблм', 'жуебл', 'блмйрзцрш', 'цр', 'м', 'фдцжбхуучеинивсжуеблмйрзцр', 'нивсж', 'ивсжуеблмй', 'й', 'всжуеблмйрзцршз']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('всщромуцйсхрпчщрхгбцмхшуиоб'), ['муцйсхрп', '', 'уцйсхрп', 'сщромуцйсхрпчщ', 'схрпчщр', 'сщромуцйсхрп', '', 'уцйсхрпчщрхгбцмх', '', 'цмхшуиоб', 'гбц', 'пчщр', 'цйсхрпчщр', 'омуцйсхрпч', 'схрпчщрхгбцм', 'йсхрпчщрхгбцм', '', 'пчщрхгбцм', 'уцйсхрпчщрхгбцмх', 'омуцйсхрпчщ']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('уузшсржоцчтсачтедебозцвчвс'), ['бомбсзхйхкх', 'отвгстзихфойукарацуздшгбщеховпзкй', 'мфнев', 'вйийшшггилцохнзбхрлхи', 'втинбтпсщрбевзуокб', 'оиойвулхкзлифкзиххт', 'зацччзвибшицщрзиптвицзхщхкбйгшфи', 'кнузршшднмвтощрцвтрулхцх', 'рчбкагчкпзжвтбажиабиркдсройцл', 'щргчкзожийтпдзфч', 'щбошгщзсжтнжцтлкщитеееигзцлцсмч', 'сцкк']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('щчбслгзвйдйжрнщчвфшй'), ['пдашзбалйнзвузкдвймц', 'щхтшйоч', 'фднвфигозржаз', 'рйфопхкшщвщдвл', 'цдкйхтусожпешпджпатфуиткп', 'щпбчсслгщййлвскшц', 'жпснс', 'уиицуувешвмчмиеднлекшснчлйц', 'пххаедштхмчщчбч', 'ичтмжз', 'лсбкчу', 'бгфдвпзрл', 'йицц', 'цфйвфлнвопкмщк', 'бгщцвбелхефв', 'мймсвзаелхнжйчохомлизенфш', 'трйднхндшсщмпвщомашчнгхд', 'жфцнифлгдзйе', 'зспкшщщенбцжгл', 'рщтб']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('шщпееасбтхогвгвцниуевисгшгбч'), ['гпа', 'стимсркзебхрвфпиемзчзу', 'нзгофухвекудблкадбшшусбеулрлмгфнйгиух', 'кфиашфобакщворувгвкчавфзшх', 'гфпгщгедкмтгрдодфпуйддхзчждихгрчтб', 'тцтжр', 'рцйна', 'йцбпбдрреаолг', 'житсфосшлтгсщдцидгсгфтвлз', 'жвтнжедцфцтхжчщч']) from system.numbers limit 10; 
+select 0 = multiSearchAnyUTF8(materialize('вхкшгфпфмнщаохтмизпврйопцуйзмк'), ['дтчбкхащаткифружжейабфйкйтрскбощиеч', 'фтоуабхмдааиснрбраттклмйонлфна', 'цадзиднщймшкщолттпгщбх', 'кштбчжтждпкцнтщвмухнлби', 'микудпдпумцдцгфахгб', 'ирик', 'емлжухвмк', 'чгуросфйдцшигцхжрухжпшдкфгдклмдцнмодкп', 'ттбнллквдувтфжвчттжщажзчлнбждчщцонцлуж', 'елцофйамкхзегхклйгглаувфтуувее', 'двкзчсифвтекб', 'шсус']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('йхцглкцвзтшщочпзмнчтуеао'), ['йечдай', 'дащжщзлосмй', 'афуккгугаазшрчпцнхщцтмлфф', 'чфтфскрфйщк', 'жлччкцшнфижтехппафхвщфс', 'бзжчв', 'щкщймнкщлпедидсу', 'оцбажцзшзйпптгщтфекртдпдзшодвойвох', 'йжддбссерхичгнчлкидвгбдзуфембрц', 'ктщвшкрщмдшчогхфхусдотсщтцхтищ', 'пшстккамнбнардпзчлшечхундргтоегцзр', 'нсрнфузгжррчнжначучиелебрб', 'шгжмквршжтккднгаткзтпвкгзхшйр', 'змквцефтулхфохбнхбакдичудфмйчп']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('шждйрчйавщбйфвмнжоржмвдфжх'), ['ор', '', 'йрчйавщбйфвмнжо', 'вщбйфвмнжорж', 'ждйрчйавщбйфвмнжорж', 'йавщбйф', 'дф', 'вщбйф', 'бйфвмнжорж', 'мнж']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('кдшнсйршгвлицбенйбцфрсаччетфм'), ['асмун', 'йогкдчодиф', 'лштйбжнзфкикмпбитжшгкбоослщгзнщо', 'улштжцисцажзчштгжтфффабйлофедуфме', 'дрпгкчджихшзммймиамзфнуиорлищзгйвху', 'йиоршнйоввквбдвдзасма', 'члмвасмфрхжсхрбцро', 'лшкизщушборшчшастйсцкжцбонсшейрщ', 'масдфкршлупасвйфщфважсуфсейшзлащхрж', 'дгхшщферодщцнйна', 'цзфзждбавкжрткст', 'рфбожзееаце', 'кошомвгпрщсдквазчавожпечдиуйлщадфкгфи', 'бшпхнхсгшикеавааизцсйажсдийаачбхч']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('хтиелйтарквурйлжпеегфш'), ['зпмйвзуднцпвжкбмйрпушдуавднвцх', 'фбссчгчвжакуагдвижйтщтшоабпхабжш', 'щхшибаскрщбшрндххщт', 'сммрсцзмптисвим', 'цсргщфж', 'восжбшйштезвлкммвдхд', 'вбсапкефецщжквплуо', 'даеуфчвеби', 'бтптлжпин', 'шчддтнсйкщйщ', 'фжхщецпзчбйкц', 'цсвфпздхрщхцбуцвтег']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('апрчвзфжмбутццрйщкар'), ['индхжз', 'жилцовщччгстби', 'ажс', 'фктйамйтаг', 'шммнзачггоннксцушпчн', 'чдлйтзтоцдгзццисц', 'пнбтувщцдсчнщмсакрлгфмгрй', 'овмсйнщзушвщгуитщрхвйодф', 'бзлштезвлаижхбмигйзалчолшеунлц', 'фкжпеввгшгащз', 'тменбщжмсхщсогттршгек', 'чап', 'х', 'шкомегурлнйпшбщглав']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('двхопооллаеийтпцчфжштнргкк'), ['йймчнздешхбццбжибопгктрнркевпиз', 'фйрохсамщцнмф', 'ййхфдпецжзгнуорвбплоахрфиле', 'пкллкацнвдббогг', 'йщдезамтжйзихщжмцлх', 'гдзувмщиеулиддердшпитвд', 'фхтунйшзхтщжтзхгцорошднпбс', 'фнситбеелцдкйщойлатиуухгффдвищсше', 'нзщщщндцрнищпхйвтбвмцтнуадцбву', 'вбщкапшнв', 'зйлмуимчскщнивтшлчмуузщепшйр', 'шжбвйдр', 'гддждбкначдттфшжшхпфиклртпгм', 'еншащцфафчнгбнщххнзочбтпушщорегшцзб', 'уунеущкззоетбучкц', 'щасифзоажребийещ', 'пщбххсдгйтт', 'хшсчуотрт', 'жкднйрозбцшужчшбкккагрщчхат', 'шачефцгч']) from system.numbers limit 10; + +select 0 = multiSearchAnyCaseInsensitive(materialize('QWyWngrQGrDmZxgRnlOMYHBtuMW'), ['ZnvckNbkeVHnIBwAwpPZIr', 'NCzFhWQmOqIGQzMORw', 'tDYaxfQXWpKNLsawBUUOmik', 'IMveCViyAvmoTEQqmbcTbdfjULnnl', 'NRvsdotmmfwumsDpDtZU', 'mnqVnwWOvMiD', 'HXpHrMvGQpbuhVgnUkfFPqjpoRdhXBrFB', 'awtr', 'IMIdOmMHZccbOZHhWOKcKjkwwgkJSfxHDCzR', 'jPLISbIwWJEKPwgvajTxVLws', 'HBfRrzEC', 'VXsysGnAsFbqNOvIaR', 'upCaeaIOK', 'GUDFkrzBiqrbZVnS', 'MoCOePXRlVqCQpSCaIKpEXkH', 'rfF', 'fjhMEpySIpevBVWLOpqi', 'KdeskLSktU', 'vjUuNUlBEGkQyRuojZLyrmf', 'SvSxotkTKCeVzNICcSZLsScKsf']) from system.numbers limit 10; +select 0 = 
multiSearchAnyCaseInsensitive(materialize('gcDqqBCNqhQgVVgsxMXkevYIAxNl'), ['BHnoKRqOoKgmOVkjtehGSsInDvavDWOhkKAUL', 'nYqpmKPTWGdnyMcg', 'TIplHzsSXUz', 'SiQwpQgEdZ', 'YoJTWBJgsbJvq', 'CwyazvXERUFMCJWhTjvltxFBkkvMwAysRLe', 'tXUxqmPbYFeLUlNrNlvKFKAwLhCXg', 'vUbNusJGlwsOyAqxPS', 'ME', 'ASUzpELipnYwAknh', 'VtTdMpsQALpibryKQfPBzDFNLz', 'KmujbORrULAYfSBDyYvA', 'BaLGNBliWdgmqnzUx', 'IzwKIbbSUiwhFQrujMgRcigX', 'pnS', 'UKSZbRGwGtFyLMSxcinKvBvaX']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('HCPOPUUEVVsuZDbyRnbowGuOMhQ'), ['UzDbYrNBoWgUo', '', 'pUUEVVsUzdByrNB', 'nBO', 'SUZdbYrNbOWgUoMH', 'pOpuUevVSUZDbYRnb', 'bowGUoMh', 'VsUZDbyrNbo', 'suzdBYrN', 'uueVvsUZDBYRnBoW', 'gUom', 'eVvsuzDBYRNBoWgUOM']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('RIDPJWYYSGBFWyXikHofbTcZAnj'), ['aFxQyVe', 'OcnZBgPsA', 'iBQaH', 'oesSvsWtgQprSSIPaDHdW', 'EfytiMfW', 'qHiFjeUvQRm', 'LfQkfmhTMUfoTOmGJUnJpevIoPpfpzMuKKjv', 'scYbCYNzJhEMMg', 'yTLwClSbqklywqDiSKmEdyfU', 'HYlGFMM', 'TMQhjOMTImXbCv', 'AVtzpxurFkmpVkddQANedlyVlQsCXWcRjEr']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('GEsmYgXgMWWYsdhZaVvikXZiN'), ['wySd', 'smYgxGMWWYsDHZ', 'vIk', 'smyGxgmwWysDHzAvvikxZi', 'WYsdHZAvVI', 'YGxGmwWYSDhzavvI', 'XzI', 'ySDhZAvvIK', '', 'myGXgmwWySdHz', 'MYGxgmwWySdHZaVvik', 'wYsDhzAvvikXz', 'wwYsdHzav', 'Z']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('XKCeCpxYeaYOWzIDcreyPWJWdrck'), ['tTRLUYJTkSWOabLJlIBshARIkwVRKemt', 'jQgn', 'wdNRsKIVunGlvwqkwn', 'BsbKGBJlkWQDBwqqeIjENvtkQue', 'yLuUru', 'zoLGzThznNmsitmJFIjQ', 'WFKnfdrnoxOWcXBqxkvqrFbahQx', 'QHbgRXcfuESPcMkwGJuDN', 'NPqfqLS', 'bi', 'HnccYFPObXjeGYtrmAEHDZQiXTvbNcOiesqRPS', 'KobVCJewfUsjBXDfgSnPxzeJhz', 'AqYNUPOYDZjwXx', 'xbZydBGZFFYFsFHwm']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('AnIhBNnXKYQwRSuSqrDCnI'), ['', 'HBNNxkyqWRS', 'xKyqwrSUSQR', 'yQwr', 'ihbnnxKYQWrsUS', 'bnnXkYqwrSuS', 'qWRs', 'nXKyqWRSUS', 'qrdcN', 'NiHBnNXkYQWrS', 'NnXkYQwRSUsqRDCn', 'rSusqRd']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('OySHBUpomaqcWHcHgyufm'), ['lihJlyBiOyyqzeveErImIJuJlfl', 'WyfAXSwZPcxOEDtiCGBJvkCHNnYfA', 'hZ', 'fDQzngAutwHSVeoGVihUyvHXmAE', 'aCpcZqWKdNqTdLwBnQENgQptIyRuOT', 'PFQVrlctEwb', 'ggpNUNnWqoubvmAFdjhLXzohmT', 'VFsfaLwcwNME', 'nHuIzNMciJjmK', 'OryyjtFfIaxViPXRyzKiMu', 'XufDMKXzqKjYynmmZzZHcDm', 'xWbDgq', 'ArElRZqdLQmN', 'obzvBzKQuJXZHMVmEBgFdnnQvtZSV', 'ZEHSnSmlbfsjc', 'gjmWPiLylEkYMTFCOVFB']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('NwMuwbdjhSYlzKoAZIceDx'), ['ZKOaZ', 'wBDJhsYlZKo', 'hSy', 'MUwbDjHsyl', 'sYlzK', 'ylZKOAZ', 'y', 'lZKoaZICEdX', 'azIce', 'djHSylZkoAzice', 'djHsYLZKoAzi', 'dJHSYlZK', 'muWbDJHsYLzKOaziC', 'zi']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('gtBXzVqRbepHJVsMocOxn'), ['DidFXiqhRVBCHBVklLHudA', 'yEhumIpaYXlj', 'iaEmViTRLPM', 'vTwKBlbpaJZGYGdMifOVd', 'zvgfzWeLsMQNLutdAdCeuAgEBhy', 'Ca', 'iHabiaRoIeiJgSx', 'EBfgrJnzHbuinysDBKc', 'kT', 'SGIT', 'BTRuKgHDuXMzxwwEgvE', 'OWJIeTLqLfaPT', 'BQM', 'yMimBqutKovoBIvMBok', 'zIBCYVNYAwu', 'EFDEFWGqvuxygsLszSwSiWYEqJu', 'QJDIXvPOYtvhPyfIKqebhTfL', 'ssALaXRxjguUIVKMCdWRPkivww']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('MowjvqBkjnVTelCcXpoSuUowuzF'), ['Su', 'vqBkJNvTelC', 'Elccxp', 'vtElc', 'JVqBkJnVTELCcxpOsU', 'OsUuOWUz', 'ElccxPoSU', 'wJVQbkJNVtElCC', 
'xpOSUUo', 'VQbkJnvTELCCXp', '', 'TeLcCxPOsuuO']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('VfVQmlYIDdGBpRyfoeuLffUUpMordC'), ['vqMLyIddgBPrYFoEulFFu', 'lyIDdgBPrYFOeul', 'dGBPRYFOeUlffUupmOrD', 'OEulffU', 'pMordc', 'FVqmlyiDdgBpRyFoeUlFfuUpMOrD', 'PmO', 'o', 'YiDDgbPRYFOe', 'DGBPryfoeU', 'yIDdgbpRyFOeULfFU', 'lyIddgBPryfoeulfFuU', 'gbPrYfOeUlFfuupmO', 'yFoeULF']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('CdnrzjzmwtMMPLjgcXWsbtrBs'), ['RfgIUeerlPIozKpRQR', 'QRoYzjZlgngJxX', 'mEbqlBIzTQH', 'UmrfJxKyTllktPfyHA', 'ukoZeOPA', 'pbbRaUcJijcxt', 'Rg', 'lSBG', 'HvuwuiqVy', 'Fo', 'aGpUVjaFCrOwFCvjc', 'zKhfkgymcWmXdsSrqAHBnxJhvcpplgUecg', 'ioTdwUnrJBGUEESnxKuaRM', 'QciYRCjRDUxPkafN']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('miTQkQcxbKMwGOyzzRJpfXLyGx'), ['yMwgQQJkeshUugm', 'wGVe', 'XncShWqjp', 'KWjGQCOsfMKWRcgCfebkXZwZ', 'SFWbU', 'WdFDMIcfWeApTteNfcDsHIjEB', 'XRuUJznPOCQbK', 'tibBMGZHiIKVAKuUAIwuRAAfG', 'VVCqVGGObZLQsuqUjrXrsBSQJKChGpZxb', 'bWYAOLuwMcwWYeECkpVYLGeWHRrIp', 'SLzCgfkRWmZQQcQzP', 'VvfOhFBhfiVezUSPdIbr']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('KXoTIgVktxiXoEwfoLCENiEhz'), ['oLCENie', 'xix', 'en', 'IgvktxIXoEWFOLCEnieHz', 'xOEWFoL', 'LC', 'ktxIxoEwfolCenie', 'ce', 'oTIGvktXIXOE', 'eW', 'otigVKTXIXOEwFolC', 'E', 'CEni', 'gVKtxIxoEwfOLCENieh']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('DXKzSivrdLuBdCrEYfMEgPhOZ'), ['', 'sIVRDlUBdcr', 'luBDcrE', 'rDLUbDCreY', 'KzSiVRdLuBDCr', 'dcREYFme', 'lUbdCReyFMEgph', 'sivrDlubdCr', 'BdcreYfMEgP', 'ZSiVrdluBDCrEYfmegpHOZ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('lTvINMXVojkokvNBXPZOm'), ['ZQOJMEJfrjm', 'vIpmXnGlmWze', 'wbdDKcjrrIzBHypzJU', 'omotHOYbZjWfyVNeNtyOsfXPALJG', 'SXxu', 'yZPDFsZq', 'OVYVWUjQDSQTKRgKoHSovXbROLRQ', 'RnXWZfZwHipewOJimTeRoNRYIdcZGzv', 'sizoEJibbfzwqFb', 'vgFmePQYlajiqSyBpvaKdmMYZohM', 'ENsFoFCxDQofsBSkLZRtOcJNU', 'nG']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('LsTqxiGRdvQClVNBCGMOUHOAmOqPEC'), ['NdFuUQEUWaxS', 'fdOHzUzineBDnWJJvhPNZgB', 'rYAWGIBPxOLrjuquqGjLLoIHrHqSFmjh', 'IVgYBJARY', 'ToivVgUJAxRJoCIFo', 'yQXGrRjhIqFtC', 'PNYdEPsWVqjZOhanGNAq', 'nrQIDDOfETr', 'usJcPtiHKhgKtYO', 'vPKqumGhPbmAJGAoiyZHJvNBd', 'eXINlP', 'WQeESQJcJJV']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('gRzzQYOwLNiDcMFjXzSFleV'), ['XZSfLe', 'wLnIdcMFjxZSf', 'F', 'm', 'Le', 'qYoWLNidcMFjXzsf', 'zqyoWlNIdcMFj', '', 'oWlnIDCMfJxzsfL', 'wlNIdCmfjXzS']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('cYnMXJMJCdibMXoUQHEw'), ['BFrGFZRgzwHGkUVbBiZMe', 'piORdVIWHMBsBDeJRLbGZAHGBrzNg', 'bmDePbTPnFQiCFfBJUxAEYNSbgrOoM', 'gtzeAGwqjFrasTQUgAscfcangexE', 'okLG', 'l', 'EBkkGYNZZURgFgJPlb', 'HDQVngp', 'vEHhtBqWhZHCOrqEKO', 'fgqdFc', 'COig', 'VftTpSXAmTmvnShHJqJTdEFcyKPUN', 'WDI', 'knBm']) from system.numbers limit 10; + +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('мтдчЛВЖАгвзщущвкфИКмТбжВ'), ['щУщвкФИкМ', 'чЛвжАГвЗЩуЩвКФикм', 'ДчлвЖАГвзЩУЩвКфИКМтБЖВ', 'ЖагвзщуЩВКФикМТБжВ', 'ВжагВзЩУ', 'гВЗщущвкфИКмТБж', 'ГвЗщ', 'щВкФикМТБЖВ', 'вЖАГВзщущ', 'взЩуЩвКФИкМТ', 'ЧЛВЖагвЗщуЩВк', 'тДчлвЖагвзЩуЩвкфИк', 'ТДЧлвжаГВзЩущВ', 'тДчлВжАГВЗЩУ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('дтрцФхИнпиОШфдАгзктвбУвсб'), ['чТрВиУРФсРпДЩОащчзЦНцхИДА', 
'ЗжмПВтмиойУГхАЦПиДУЦноНуййЩХаФТофшЩ', 'уБшлОЙцМПгетЖЧетШжу', 'ЧзИАУХобФрачТеХОШбМщЖСамиМВАКРщАЦ', 'ВйвТзхЙФЧоАЖвщиушАз', 'ЦшИфххкжиФйСЛЛНЛчВоЙВПпхиИ', 'ОатЕтщкЦпбСБйцОшГШРОшхБцщЙЧиУЩЕеФлщ', 'цСПпЧА', 'ШЧНфПмФсКМКДВЦАоФчОУеТЦИзЦ', 'зАбдЛНДГИ', 'фхЩлЗДНСсКЖИФлУАбЛеТФЕпЖлпПхЙиТЕ', 'иВшкНслТКМШЗиДПйфвйНкМЛхеФДзИм', 'лпушПБванпцев', 'ЧОшЧЧмшЦЛЙйГСДФйЛАв']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('нщМаНдЧЛмиВврПокПШмКБичкхвРГ'), ['АЙбаЙйШЛЙРЦмЗчВеИЕощсЦ', 'щЦФдВжчТСЩВКЦСпачЙсумщАтЩувеиниХПДоМС', 'иоАкДРршуойиЩищпрфВаЦПж', 'еЖПйШкГжЧтоГЙМВ', 'ЩПалиБ', 'ТвВлт', 'оХжйЛФеКчхЗВвЕ', 'ерцЩ', 'ШХЖОАрзеп', 'ККМрфктКГишпГЩхаллхДиВИИЛЗДеКйХмжШ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('вШЙчоМгОттЧАЕнЧаВеЦщчЧошМУ'), ['ЧОмГотТчАЕН', 'ОмГотТчАЕнчАвецЩчч', 'ЧАВецЩч', 'ТЧАеНЧаВ', 'ттчаЕнча', 'ТчАЕ', 'мготтЧАенчавЕЦЩ', 'НЧаВец', 'тТЧаенчАвецщчЧошм', 'Ав', 'ТЧаЕнчавецщчЧоШму', 'аЕнЧав', 'АеНЧав', 'шйЧомГОТТчаЕнчАВЕ', 'шйчоМгОтТЧаЕНчаВеЦщЧчош', 'МУ', 'ошМ', 'гОТтЧаеНЧА']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('фйадзЧмщЖШйЖЛшцГигцШ'), ['НТХеМРшДНУЗгадцуЧИ', 'жпСИКЩМлНлиоктлЦИвНЛ', 'КхшКРчХ', 'кгТЗаШИарХЧЛЖмСЖм', 'ОмиЛй', 'жЕРбФЩНуЕКЕАВоБМОнАЕнКщшзйПкОЗ', 'гиЗдадкбжХМЗслшВИШай', 'двтЗйЙНгПуТзД', 'ТНкмаВЕФ', 'Шеа']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШЕшхмеЦХеАСКощеКИфлсТЧИЗЛ'), ['КифЛсТ', 'ХеаСКощЕк', 'КифлсТЧ', 'шХМеЦхЕаскОЩеКИ', 'ЕшхмЕцХеаСК', 'ХЕасКоЩ', 'чИ', 'ЕцхеАсКОЩек', 'ЩЕкИфлс', 'асКощЕкифЛсТ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('шоКнВЕрОЖЛпУйХзСугКПВжиРсЙпо'), ['игВербфНахчжЙггч', 'лтимрдфЕг', 'нкеаЖАшНБвйСдКИВГДшАГиАТнФШ', 'МжсТЙМГОииУКВГнцткДнцсоАд', 'ХтпгУСдБдцАЖЛАННоЕцзЕшштккз', 'ншУЦгФСЖшмс', 'нЩшМ', 'гоЖхМшаЕмаДРЧБЛИТпмЗОоД', 'фГКШхчФбЕГЛйкчПИЙххуМГНШзхг', 'ХпХщПЦАзщтг']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('кЧбоЗХфвБХхусмШгНаШШаГзш'), ['Ури', 'лММшткфНзцЦСВАдЩПМШфйОМшефигЖлуЕП', 'сМтЕдчЦафйСТЖЗфлРЙПЦдипжШскцВКХЦЖ', 'АУкжИФцшЛБЦЧм', 'ФПлнАаДСХзфоХПСБоСгМТОкЗЧйЛ', 'ЦшСГЛрцДмнНнХщивППттжв', 'жзЕгнциФ', 'МШЛсЙЧтЛАГжд', 'уИиЕжцоРНх', 'ЧбйГуХтшОНкрЧИеПД', 'ЦдЩЕкКвРЦжщЧциекЗРйхрббЖуЧ', 'иВжен', 'ГчОржвБГсжштРЕБ', 'ШоЖдуЙфчсЧегумщс', 'йчЙГ', 'РДедвТ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ткРНбЩаРкгГчХшецИкНЕнСЖкйзАуУЖ'), ['ХлЖхУИллрРННйЗйсРуШЧвМбЧЧщфФЦц', 'СЛчКБцСФДшлфщаФлЙСзШабмбхуБжТСТ', 'УКУиввЗЩуВМцпчбпнДГбпЕЖрПбИДркМРОеЧмЧдГ', 'ПчщвШЩвГсЛмММГБ', 'хКЦЧсчжХЩИЖХеНнтоФЦлнмЛЧРФКпмСшгСЧДБ', 'удсЗйУДНЧУнтЕйЦЗЖзВСх', 'хПЖЙИрцхмУкКоСмГсвПаДОаЦНЖПп', 'сВОей', 'ЩЦжщоабнСгдчрХнЩиМХзжЩмФцррвД', 'ЦИсйнЦДоЕДглЕЦД', 'жзйПфБфУФоцзмКЩГПЧХхщщПТпдодмап', 'ДНХГНипжШлСхХхСнШЩЛИснУйЧЩЖДССФфиС', 'ОйЩНнйЕшцФчБГЛвхЖ', 'КЧРВшИуШйВфрпБНМсУмнСЦРпхЗАщЗУСвЧйБХтшХЧ', 'зЛбНу', 'ЗСрзпшЕйРржПСсФсШиМдйМЦГхдйтРКЩКНцкбмгС', 'СУццБуКнчОищГ', 'уЕГЧлЗБНпУисЕЛ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ВЦХсЖЗЧЙБЗНбРитщстеМНжвВ'), ['итщст', 'ЧйБЗНбрИтщстЕМнЖ', 'ХСЖЗЧйбзНБриТщ', 'Темнж', 'сЖзЧЙБзнб', 'хСжЗчйБзнБрИтЩстЕм', 'БзнБРиТщ', 'ЗчЙбзНбрИТщ', 'чйбЗНбри', 'зЧйбзНБРИ', 'нБРитщсТе', 'зНб', 'цхСжзчйБЗнБРИТЩСтЕм', 'жЗЧЙБЗнбрит']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ХцМШКАБАОххЕижгГХЩГиНциД'), ['ОРАБЕРВомЛфГНМИКупбхЛаАкЗдМзтш', 'лЗУЩнлбмиЛАфсгМРкцВтлснййишИНАС', 'ТлжлУоУгжукФжЖва', 
'жоСШПоУНЩшРМГшОЛзЦБЛиЛдТхПДнфжн', 'чнСУЗбДаГогжДфвШКеЙПБПутрРпсалцоБ', 'ЙозоПщчакщаАлРХбЦгац', 'иаИСсчЙЧБШорлгЧТнчцйзоВБХбхЙФтоЩ', 'ПСзсБЗЕщурфДЛХйГИеПНрмииаРнвСФч', 'ЦйЖЕуТфЖбхЩМтйсЙОгЛбхгтКЕЩСАЩ', 'гтЗуЩлужДУцФВПЛмрБТсСНпА', 'тГвлбчЗМасМЖхдЕгхмЩксоЩдрквук', 'ВРаг']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('тУйВЖдНнщцЗЖфКгфжГфиХСБЕЩ'), ['КгФЖГФи', 'сБе', 'ЖФ', 'гфжгФИхсбе', 'ВЖДНнщЦзжфКГфЖгфИхсбещ', 'ВЖДНнЩЦзжфкГ', 'вЖДННЩЦзжФКГфЖгФ', 'ф', 'НщЦЗж', 'нщЦЗЖФк', 'Их', 'дННщцзЖФКгф', '', 'нщцзжФкг']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШкКРаоПеЗалРсТОиовРжгЙЧМКЛШ'), ['рчсажЕК', 'пЧТМфУрУММждЛйжзУрбкмам', 'бАШеНмВШзлзтушШШсхОсцрчЙПКИБнКжфЧЕХ', 'ЖМЛшбсУМкшфзочщАЖцМбмШСбВб', 'гтРХсщхАИОащчлИЧуйиСпСДФПбРл', 'ЧуОРУаоойГбУппМйЩФДКПВ', 'уУпугйРЕетвцБес', 'ЙЖЦТбСЖж', 'ИБКЛ', 'ТДтвОШСХГКУИПСмФМтНМзвбЦрднлхвДРсРФ', 'вВгНЙХИрвйЕЗпчРГЩ', 'ПчмТуивШб']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('РлчгхзуВШежХЦуМмнВЙщдцО'), ['ХшвМЦДШпЩОСшЦПдруа', 'ФИЦчУвРкпнПшИЕСЧАувиХд', 'фшвбЦОИЗфпИУМщзОЧЗфВцЙПнмтаТгг', 'мЖЩйавтнМСЛ', 'НВбШ', 'ааФДДрВвЙТдПд', 'ЗнчЧущшхЙС', 'рзуСзнеДфЩПуХЙЕл', 'ШСЩсАгдЦбНиШмшКрКс', 'ггнЕфБГзрОнАГЙзЧеИП', 'вшТИпЧдЖРкМНшзпиоиЩчзДмлШКТдпЦчж', 'фЦТЙц', 'ОтУшмбптТКЗеПлЧцЛОкЩБпккфгИн', 'ЩпвхпЗлШБЦ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЙбйнхНщЧЖщчГОАпчФнЛШФбгЛа'), ['щчг', '', 'апЧфНЛШфб', 'ЙнхНЩЧЖщчгОАПЧф', 'ХНщЧжЩЧгоАпч', 'ХНщЧжщчГо', 'нщЧжщчГОа', 'чЖЩЧГоапЧФНл', 'оапчФ', 'щЧГОАпЧФНлшФ', 'ЩЧГОАпЧФНЛшфБг', 'БЙНхнщчЖщчГоаПЧФНЛШФБгЛ', 'ОапЧфн', 'ф', 'БглА', 'ш', 'шфбГ', 'ХнЩЧЖщчГоА', 'ХНщчжщЧгоапч', 'хНЩчжщЧГоапчфнлшФбгЛ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('кдЙДТЩеВЕфйКЩЦДиКБМф'), ['щфЛ', 'фЧЩЩичрКйЦКхеИИАпоБВЙЗбДАФио', 'мИтиЦРоВЙсБбСлНзиЛЧОфФевТмижщК', 'тЙгнКШфНТЕБЛцтГШЦхШхБ', 'уаабРГрМЙпМаБуЗпБЙчНивЦеДК', 'мпВЛНДеКПУгРЛЛинзуЕщиВШ', 'ЩжКйШшпгллщУ', 'пршЙПцхХЗжБС', 'нбЗНЙШБш', 'йцхИщиоцаМРсвнНфКБекзЛкчТ', 'хсмЦмнТрЩкДТЖиХщцкЦМх', 'ГмЛАбМщЗцЦйаОНвзуЗмЕКПБЙмАЕЛГ', 'ОЦХРЗРмкжмРИЖИЙ', 'з', 'лЕТкпкдЗчЗшжНфо', 'ИТПфйгЖЛзУТсЩ', 'ОфрбЛпГА', 'МЖооШпЦмсуГцАвМЕ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЩГТРШКИОРБРеНЖПКиуМОкхЛугИе'), ['брЕнЖ', 'РбрЕНЖпКиУМокХЛу', 'ГТрШКИорБРеНЖпКиУМ', 'рШКиоРбрЕнЖпкИУМОК', 'ИорбрЕнЖПК', 'Окхл', 'шкИоРБРеНЖПк', 'ТРШкИоРБрЕнжПКИУМОкхл', 'КИОРБРЕнжпкиУм', 'Н', 'КиОРбРЕнЖпкИУмоКхл', 'к', 'ГтРшКИоРБРЕнЖпк', 'гтрШкиорбрЕНЖпк']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШНвпкфЗвгДжУЙГлрТШаШЛгНЗг'), ['нЗБенВшщрЛАрблцщшБАдзччммсцКЖ', 'бЗЩхзЗЗбФЕйМоазщугБбмМ', 'рЙсВжВсхдйлЩгБтХлчсщФ', 'пиБшКРнбВБгЕуЖ', 'жПшнхпШзУБрУЛРНЩДиаГШщКдЕвшоуПС', 'чЕщкЗмДуузуСдддзгКлИнгРмЙщВКТчхзЗЛ', 'кЖУЗЖС', 'щххОВМшуажвН', 'фбцЖМ', 'ДШитЧЩДсйНбдШеООУдг', 'ЛХПфБВХЦТИаФПЕвгкпкпщлхмЙхГбц', 'чЦсщЗщрМ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ФРХгаСлчЧОцкШгзмКЗшФфББвЧ'), ['кзШфФб', 'ГАслЧЧОцкшг', 'ФфббВЧ', 'ЦкШ', '', 'АслчЧОЦКШгзМкЗШффбБвч', 'РХгаслЧчОЦКШГз', 'РхгаслчЧОцКШгзМкзшФфБбВ', 'Шг', 'Ф', 'ХГАслчЧоцКШГзМкзш', 'ШгЗмКЗшфФб']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЧдйШкхОлалщНйбССХКаФзОМрКЕЙР'), ['бссХкафзОм', 'ХОЛаЛщнйБссХкаФз', 'лаЛщнйБсСХ', 'ЩнЙбСсхКаФЗО', 'йБСсХКАФЗОмР', 'йшкХолаЛЩНйбсСхК', 'С', '', 'ЙшкхОлалщНЙБсСхКаФзом', 'Йр', 'щнЙБссхКАфзоМрК', 'рКе']) from 
system.numbers limit 10; + +select 1 = multiSearchFirstIndex(materialize('alhpvldsiwsydwhfdasqju'), ['sydwh', 'dwh', 'dwhfdasqj', 'w', 'briozrtpq', 'fdasq', 'lnuvpuxdhhuxjbolw', 'vldsiws', 'dasqju', 'uancllygwoifwnnp', 'wfxputfnen', 'hzaclvjumecnmweungz']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('kcwchxxyujbhrxkxgnomg'), ['jmvqipszutxfnhdfaxqwoxcz', 'nrgzkbsakdtdiiyphozjoauyughyvlz', 'qbszx', 'sllthykcnttqecpequommemygee', 'bvsbdiufrrrjxaxzxgbd', 'hdkpcmpdyjildw', 'frxkyukiywngfcxfzwkcun', 'dmvxf', 'esamivybor', 'eoggdynqwlnlxr']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('zggbeilrfpkleafjjldgyfgn'), ['rpypxkpgvljhqthneremvabcd', 'qchzlnsctuwkdxqcrjgihvtfxhqxfqsxm', 'vtozkivjyqcqetmqenuihq', 'fixcvjyzbzejmwdivjf', 'lydoolvnuuamwlnzbyuuwpqqjaxf', 'elkodwthxqpcybwezm', 'wpiju', 'wdzuuwumlqfvga', 'iokphkai', 'wkbwdstplhivjyk', 'wxfbhfturuqoymwklohawgwltptytc', 'jehprkzofqvurepbvuwdqj']) from system.numbers limit 10; +select 9 = multiSearchFirstIndex(materialize('bwhfigqufrbwsrnnkjdzjhplfck'), ['v', 'ovusuizkdn', 'ttnsliwvxbvck', 'uh', 'lfourtjqblwdtvbgtbejkygkdurerqqdwm', 'snmtctvqmyyqiz', 'ckpixecvternrg', 'gluetlfyforxcygqnj', 'igqufrbwsr', 'om', 'huwazltjsnohsrcbfttzwyvcrobdixsuerkle', 'gqufrbwsrnnkjdzj', 'hfigqufrbwsrn', 'lhhyosbtznyeqzsddnqkfxayiyyajggxb', 'igqufrbwsrnnkjdzjhplf', 'pl', 'jtbqaqakbkesnazbvlaaojppxlbxccs', 'gqufrbwsrnnkjdz']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('yevfiumtjatfdnqixatbprvzwqlfgu'), ['ozghvskaixje', 'vmdrvdjhwxdvajmkpcxigsjzmtuhdxgllhzrpqd', 'qfhnxpcmtzpociajidwlcvobjfyxfcugsxy', 'pgamvhedjibcghinjrnowqzkfzibmfmh', 'bcmrdzpcczhquy', 'czosacvwfsbdvwwyirpvbve', 'qu', 'fdkobwlnmxbpvjkapextlbcrny', 'bqutjqobkyobhtpevjvewyksnoqyjunnnmtocr', 'kjlgff', 'oitltmhdburybwfxrjtxdiry', 'kiokuquyllpeagxygqugfmtm', 'wlbkl', 'khubpmstqjzzjzmsvfmrbmknykszqvue', 'lqrbmyndsztyrkcgqxcsnsanqjigimaxce', 'nitnyonuzedorrtkxhhgedohqcojbvtvjx']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('wmvuoeqphsycrvtxghrcozortmdnh'), ['hv', 'ugcmpebvlzgdtcmgkbgzyfel', 'qvmofayljsvybupvvnbhhibsz', 'zvlihxmyxlxwbffwjzjrfjgimmltftqqre', 'mwassqvxptav', 'jrumvqzkiaewngoufhrleakcfrsaxhpxyg', 'sxlxwhvkpavgfhxrxcbnqbstyrejtosxwe', 'psnlqakyfhcupryqatrmwqlswwjylpaiqammx', 'ivozojwldsgtnxpvsi', 'epyzjs', 'legi', 'sdqxxahfbddhacqrglgdcmlslraxfaahhfyodon']) from system.numbers limit 10; +select 12 = multiSearchFirstIndex(materialize('lebwdwxfdzwquhqhbvmte'), ['mwhruilzxvlyrgxivavxbbsq', 'ubuiizuasp', 'xpkzcsf', 'qpeqitoqqqeivohajzhmjbo', 'kbftixqmqgonemmbfpazcvf', 'iyhluioqs', 'hws', 'tupfdksgc', 'ows', 'pngzkoedabstewcdtdc', 'zdmyczldeftgdlwedcjfcoqycjcivf', '', 'xt', 'syuojejhbblohzwvjzzedzgmwc']) from system.numbers limit 10; +select 7 = multiSearchFirstIndex(materialize('wcrqaoecjwkhnskrbahqxfqgf'), ['qegldkdmyaznlmlhzvxfgoukngzbatnuq', 'khgcvgrifwtc', 'hkwcpogbbdqulizrycmneqmqynvj', 'zkqjf', 'xfduxyy', 'ructdekcoywfxsvpumfefoglljptsuwd', 'wkhnskrbahq', 'crqaoecjwkh', 'ikmpbunpguleinptzfelysiqc', 'lhldcci', 'nooepfypkoxxbriztycqam', 'uxeroptbiqrjartlnxzhhnlvjp']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('psgkkcwttitgrjsobiofheyohadu'), ['achfrepey', 'minlzeiwgjfvvmhnevisky', 'oxfghfdthtyczzveppcoxrued', 'ydhaupodnezvxhcqahfkwtpvxnymriixf', 'slxsbxidylxyurq', 'socyyabwbjdabnuqswrtjtqogirctqsk', 'lvbnacirctyxxspjmispi', 'oj', 'ihmmuuqlosorrwhfxvpygfrzsqpmilcvjodmcz', 'idmtmemqfyrlbwhxz', 'hsqfsfdzvslwbtlwrfavez', 'gszl', 'ei', 
'pnywjnezncpjtyazuudpaxulyv', 'iqgavdjfqmxufapuziwwzkdmovdprlhfpl', 'yigk', 'mjidozklrpedutllijluv', 'vixwko']) from system.numbers limit 10; +select 3 = multiSearchFirstIndex(materialize('xtjxvytsseiqrpkbspwipjns'), ['bwmoghrdbaeybrmsnucbd', 'zoslqabihtlcqatlczbf', 'sseiqrpkbspwipjn', 'mdnbzcvtayycqfbycwum', 'npueimpsprhfdfnbtyzcogqsb', 'ytsseiqrpkbspwipj', 'fzvhcobygkwqohwutfyauwocwid', 'naacyhhkirpqlywrrpforhkcjrjsnz', 'vezbzderculzpmsehxqrkoihfoziaxhghh', 'mvvdfqzskcyomjbaxjfrtmbduvm', 'pwipjns', 'tsseiqrpkbspwipjn', 'sseiqrpkbspwip', 'qgrtbcdqcbybzevizw', 'isjouwql', 'rlbeidykltcyopzsfstukduxabothywwbq']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('zxmeusmehplcgbqabjof'), ['hqxgrw', 'fydjyrr', 'cocwtbazwjrswygttvrna', 'wpkvowuq', 'mwnzdxihrxihzhqtl', 'ljkjtmrfbonhqkioyzotyeegrw', 'ofxo', 'rjubwtpbweratrelqlrqotl', 'wvxkcil', 'qvolxxgqs', 'afqlhjnlvxowtnuuzywxuob', 'slwbmq']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('tjcmtoisgbilkygushkpuxklis'), ['bkdohwx', 'dfohgzhcjqirlbrokwy', 'zaemgqgxltznvkccyumhgsftnfigbol', 'otgcaybejwe', 'qn', 'gvfzcyhvmsnbgkulsqrzeekmjkc', 'cajuyauvmhkrriehgwfmtqbkupysudle', 'pmcupysyllzpstolkfpdvieffxaupqtjty', 'elhlzvescbfpayngnnalzixxgunqdhx', 'cvxpgdnqcxeesk', 'etlewyipypeiiowuoewulkpalvcfe', 'ordhwrkwqq', 'wnroixlkrqnydblfrtlbywc', 'xshujuttvcdxzbetuvifiqi', 'meqqxqhntkvzwoptnwskdgsxsgjdawe', 'dnmicrfshqnzosxhnrftxxeifoqlnfdhheg']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('iepqqbvekaflprupsmnpoijrld'), ['kqomoeysekwcplpegdwcdoeh', 'mwdvr', 'aobviioktzwzmpilblbdwstndhimabfgct', 'vqustluciruiyfkoontehnwylnauwpol', 'utcqnitztcgr', 'ityszrqmlwzspnrwdcvdhtziob', 'hmll', 'ilfzvuxbkyppwejtp', 'euxdzqcqutnfeiivw', 'rbcjlmjniiznzaktsuawnfjzqjri', 'fzyxlzzretsshklrkwru', 'jrujmdevqqojloz']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('cufztqffwjhtlkysekklpaywemm'), ['cpawuauqodogaitybtvplknjrsb', 'ynsocxfnxshzwnhlrfilynvz', 'ylrpytgcvtiumdckm', 'mvgrkueaslpgnjvvhzairgldtl', 'iliorsjypskmxfuuplfagktoycywb', 'drvwngp', 'zviuhcxaspwmqqz', 'qfgmrmhycskus', 'szj', 'rooivliiqufztcqlhrqyqvp', 'tufdmsmwue', 'cssowtldgwksbzlqyfereodcpuedighwd', 'odcjdffchhabtaxjvnr', 'o']) from system.numbers limit 10; +select 7 = multiSearchFirstIndex(materialize('zqwvlarwmhhtjjgwrivwfpsjkvx'), ['zcwhagxehtswbdkey', 'okezglmrjoim', 'ilwdviqimijzgoopmxdswouh', 'aqztpsntwjqpluygrvwdyz', 'uzxhjuhiwpz', 'akgc', 'larwmhhtjjgwrivwfpsj', 'isqghxsmcrwlgyloslmlyeboywtttgejdyma', 'arwmhhtjjgwri', 'rwmhhtjj']) from system.numbers limit 10; +select 9 = multiSearchFirstIndex(materialize('fuddujwwcewlhthgwsrn'), ['shtzrrtukxmdovtixf', 'rkcnzzzojqvvysm', 'jlamctgphjqcxlvmpzyxtghnoaq', 'pthrwvbheydmrot', 'kpniaqbcrgtxdyxxdxonbbltbdo', 'igulngxgtauumhckvbdt', 'khgrmskijoxruzzzaigjxonsc', 'rxzeykfxwssltw', 'hthg', '']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('jtgvvkggpkqhbxptjgoy'), ['nplzawmacgtqfxsp', 'oosw', 'akw', 'hnsenqoqwiydiufozomkyirgjepeqw', 'fpafgahvfdxukzvskbuy', 'tqimmsqffiqfoni', 'rrxkjklmkdhxqwcpfyutqzxu', 'esfqeujcbqxwnvodkwwdbsyozptaf', 'rqnyguyz', 'fftl', 'ccfyavxtxrpi', 'wftpsblszgovfgf']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('steccxkwnptybaddcuau'), ['qagxfznhjaxtyclxdsi', 'rtxwptfyzgthkwrx', 'rmcoxxs', 'vlubx', 'siecygstzivz', 'tksiagm', 'kq', 'dgsqrobxegmdbjkanb', 'lxokyvhveklvdakrxyiqokr', 'tgpmehwdrirpfjonqzhqshbo', 'cqmkargvsfjoxrguymtzsfwkg', 'avkmufhoywprjw', 
'xzywtvlpoozmgkrcavevwebv', 'hfiuwslapamiceaouznxm', 'tmfjhqddafhhjbybfphlbwu', 'mrigvhmjvdpny']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('ccbgxzoivbqtmyzqyooyepnmwufizz'), ['lcclseplkhxbrrzlnani', 'xggxivwqlpxmpypzovprdkmhrcgjkro', 'dbbmiegotfxjxybs', 'hqtcowpupsyqfx', 'znatfzjbeevbaqbmpofhywbyfxn', 'mnditiygex', 'lazqapwjswhkuimwmjoyseyucllnrfxrwnzj', 'jg', 'dmqwnuvsufgffuubhqeugwcanvflseorrydyyxvr', 'wpjfcfwfgjiybncrw', 'joucnvxxcyjyqlwhrzwnstyj', 'babtxkzasyaffxzd', 'wgcfdyhwxjoytbxffdxbdfinolbltnhqkvyzybc', 'yhrgwbdwopznltjtyidxawqg', 'bvrrt', 'bcwmsys', 'ijdjojhhzaiyjyai', 'eevxwppogogdbmqpbeqtembiqxeiwf']) from system.numbers limit 10; +select 2 = multiSearchFirstIndex(materialize('xrwjeznohtbdvijwsbdksf'), ['hwdfufmoemohatqafdrcvdk', 'tbdvijwsbdks', 'xzwjczbuteujfjifzkbxvezs', 'bdvijwsbd', 'eznohtbdvijwsbdks', 'xadezwhbbmlqz', 'b', 'socrdjxsibkb', 'dk', 'eznohtbdvijws', 'pavsosnncajr', 'jixlmxxmxnnbpebjhitvtsaiwzmtqq', 'yuxmmnrqz', 'mpzytweuycabvu', 'tbdvi', 'ip']) from system.numbers limit 10; + +select 0 = multiSearchFirstIndexUTF8(materialize('црвтгмсрооацволепкщкпнгшкамщ'), ['гйцбсханрейщжнфбхтщбйала', 'дирдфнжпнччхаоцшрийнйнечллтгцбфедгсш', 'жфйндбффаилбндмлточиирасдзйлжбдзег', 'жвоуйфсйойфцвгзшцитсчпкч', 'ршонтбгщжооилчхрзшгсдцпзчесххцп', 'пйучихссгнхщлутвменлмм', 'хишгешегдефесо', 'знупгж', 'щчфу', 'знвтжифбнщсибеноожжметачаохфхсжосдзйуп', 'ггтоцйпгхчсбохлрчлваисивжбшбохдурввагш', 'щлийбчштбсч']) from system.numbers limit 10; +select 5 = multiSearchFirstIndexUTF8(materialize('опднхссгртрхтотлпагхжипхпитраб'), ['шфршсцешушклудефцугщцмйщлошечедзг', 'нйумйхфщцгщклдожхвосочжжислцрц', 'згтпвзцбхйптцбагсвцгтнф', 'пшичси', 'ссгртрхтотлпа', 'апзазогвсбежзрйгщоитмдкн', 'непгайтзкгштглхифмзданоихц', 'пднхссгртрхтотлпагхжипхпитр', 'ждднфлрзалшптсбтущвошрйтхкцнегшхрсв', 'брп', 'сгртрхтотлпагхжипх', 'нхссгртрхтотлпагхжипхп', 'пагхж', 'мфкжм']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('овччцнтчайомсйййоуйуучщххиффсб'), ['жжрддцпнехйр', 'шзбвуооинпаххесйкпкошжмцзгхе', 'ррсннилщлщжгцтйрпхабкехахззнтщемагдйшпсч', 'пуфугнказепщ', 'гддхтплвд', 'сщсчи', 'бйрсахедщфкхиевкетнс', 'йфжцжшпхлййхачзхнфоц', 'цтмтжлщдщофисзрвтбо', 'кщсевбоуйб', 'щгаапзкн', 'осймщовшчозцййизм', 'фкмаат', 'бкзцсдонфгттнфтаглпрцтбхбсок', 'жлмичлйнйсжбгсейбсиезщдмутационжгмзп', 'нбищижнлпмтморлхцхвеибщщлкйкндлтпбд']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('фдситчщдвхмфйтхшдтуцтщжрочщо'), ['ейшфдннтйечгк', 'фуйщгбйшдцирзб', 'ехйцмчщрсртнк', 'увтцмдорщжфгцгзущпувтщкнрфсйбщрзй', 'хчщпхвуарнббпзсцшчщуносйгщпсбтх', 'жтдчрхфмхцххккзппзбнуббс', 'тчохнмбаваошернеймгготлузвсбрщезднеил', 'стссчкшрчррйбхдуефвеепщшзмербгц', 'жбезжпещ', 'вйтсрхптлкшвавдаакгохжцоощд', 'искеубочвчмдхе', 'щмлочпзбунщнхлрдлщтбеощчшчхцелшоп', 'екуийтсйукцн', 'дочахгжошвшйжцпчзвжйкис', 'лтеенешпсболгчиожпжобка', 'букзппщрчбпшвпопвйцач']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('гопвмрутфпфбхмидшлуб'), ['цнхшдойгщн', 'дкаежщрапщпщеа', 'фмогимдничрфтхмсцмчпдфтиофнтйц', 'фчмсщисхщуп', 'ощмвдчефозйжбеесбмещочевцчд', 'апкбцйщжщабвппофм', 'мтйоддлфцгдуммптднпщшрн', 'икхнсмжчбхнфхнссгл', 'ущмунинлбпрман', 'ллкнечрезп', 'ажтнвбиччджсзтйешйффдгдрувер', 'йрщ', 'чигдкйшфщжужзлвщулквдфщхубги', 'иккшсмаеодейнкмгхбдлоижххдан']) from system.numbers limit 10; +select 12 = multiSearchFirstIndexUTF8(materialize('срлцчуийдлрзтейоцгиз'), ['жщлнвбубжпф', 'оклвцедмиср', 'нлзхмчдзрззегщ', 'хоу', 'шайиуд', 
'ерслщтзцфзвмйтжвфеблщдурстмйжо', 'жмгуйузнчгтт', 'стеглмрдмирйрумилвшнзззр', 'втедлчрчайвщнллнцдмурутш', 'цимхргмрвмщиогврнпиччубцйе', 'ктчтцбснзцйцймридвш', 'ейоц']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('лрицжленфилзсжпжйнцжжупупдфз'), ['чпбрмлрнцмвеуфу', 'рмпизмпжчшбхдудчшохтжш', 'гргцжчпгщищннусв', 'ийщтщвзчшпдзитщубакусхавслрсбткб', 'бйбакижцтибгбгхжцвйчжжщжсжкзф', 'чгрп', 'чуносжусжфчмфжхрщзлщрдвбашажаанча', 'чекршбш', 'лбцкхйсооцц', 'сгвнлегвфмпчтййлрмд', 'наатущркхйимхщщг', 'щпзоеимфощулбзхафпц', 'дцабцхлврк', 'умидмчуегтхпу', 'дщнаойрмчсуффиббдйопдииуефосжхнлржрйлз', 'щзжетезвндхптпфлк', 'бгчемкццдбжп', 'иихуеоцедгрсеужрииомкбззцнгфифоаневц']) from system.numbers limit 10; +select 3 = multiSearchFirstIndexUTF8(materialize('бхжвчашрощбмсбущлхевозожзуцгбе'), ['амидхмуеийхрнчйейтущлуегрртщрхвг', 'фнисцщггбщйа', 'хжвчашрощбмсбу', 'фщвщцнеспдддцчччекчвеещ', 'ущуджсшежчелмкдмщхашв', 'цкуфбиз', 'евозожз', 'ппт', 'лвцнелшхцш', 'ощбмсбущлхев', 'ефхсзишшвтмцжнвклцуо', 'цржсржмчвмфмнеещхмиркчмцойвйц', 'ашрощбмсбущлхевозожзу', 'гхщншфрщзтнтжкмлщанв', '', 'хевозо', 'ощбмсбущлхевозожзуц', 'возожзуц']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('мзчатйжщгтзлвефчшмлшт'), ['гхшфрунирйдзтеафщгк', 'ймхмфлц', 'звуумивмвштчтнтеобзщесакийгк', 'чщжетзнцишхрммтбцакиббчп', 'блмидикавущщдпгпчхйаатйанд', 'цмщшбклгцгмчредмущаофпткеф', 'бнетввйцзпдерхщ', 'ицйнцрввемсвтштчфрпжнатаихцклкц', 'дзлщсштофвздтмчвсефишс', 'пбзртдцвгкглцфесидлвваисщр', 'ммеилбзфнчищч', 'жш', 'лздиззтпемкх', 'байлужднфугмкшгвгулффмщзхомпав', 'рсзнббедсчзущафббзбйоелид', 'цфшйкцксйгуйо']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('жжмзмащйфжщлрффбпврзнидииейщ'), ['ржфзнлйщсздйткаоцруйцгцт', 'илинксщмгщшещееифвпданмйлж', 'кг', 'гпааймцщпмсочтеиффосицхйпруйшнццвс', 'кнзфгжйирблщлл', 'ищуушфчорзлкбцппидчннцвхщщжййнкфтлрдчм', 'тбтдчлвцилргоргжсфбоо', 'ехаех', 'нехщмдлйджждмрцпйкбрнщсифхфщ', 'тцжпснйофцжфивзфбхзузщтмдкцжплавозмше']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('биаризлрвххжкпщтккучфизуршткпн'), ['йбручвндбщвссаеха', 'ол', 'еузкмпогщзгзафшдшоплбфнфдккх', 'ибзихщйфбтаз', 'ибрчиейш', 'нафрпбснзрузнтмнйиомтечтшзбкпзутдилтф', 'тщтбапцчдий', 'щкнггмфцжрзщцзжвлкчбммхтхтуж', 'ваам', 'цкфиушзигбжтацнчдлжжзфшщммтнлж', 'туфовжтнкзщсщщизмрйкхкпц', 'пирзксзикфтшодожшчцг', 'жфчфцфвлйбмеглжйдазгптзщгж', 'тутириждкзчвтсоажп', 'мотзусбхту', 'слщкгхжщфщоцкцтрлгп', 'бругтбфесвсшцхнтулк', 'восур', 'ссежгнггщдтишхйнн', 'вгзосзгоукмтубахжнзгшн']) from system.numbers limit 10; +select 8 = multiSearchFirstIndexUTF8(materialize('мчслвбжвманджййсикнврцдчмш'), ['рлбмй', 'иб', 'жажлцсзхйфдцудппефвжфк', 'огггхзгтцфслхацбщ', 'дзтцкогаибевсйещпг', 'зпцтйзфмвгщшуоилл', 'етщзгцпдйчзмфнхпфцен', 'нджййсик', 'сикнврцдчмш', 'жййсикн', 'икнврцдч', 'паокаочввеулщв', '', '', 'кечзсшип', 'вбжвманджййсикнвр']) from system.numbers limit 10; +select 2 = multiSearchFirstIndexUTF8(materialize('нвррммппогдйншбшнехнвлхм'), ['нфошцншблеооту', 'лх', 'цртд', 'огдйншбшн', 'уулддйдщицчпшбоиоцшй', '', 'дрдужзжпцкслетгвп', 'й', 'мппогдйншбшнех', 'дйншб', 'лжвофчзвдд', 'рммппогдйншб', 'ехнв', 'втущсщзбчсжцмаанчлнасп']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('удехбкабиацхпгзнхжелшц'), ['фмнбтйезсфоахофофдблкжщжфмгхтзс', 'тщтамзафозхлз', 'цшжфсбл', 'йзгзилупшллвипучхавшнмщафзмнк', 'лу', 'гтебпднцчвмктщсзи', 'лпщлмцийгуеджекшд', 'пцдхфоецфрунзм', 'зис', 'хпж', 'цтцплхцжишфнплуеохн', 'впх', 
'чцчдацлуецрчцжижфиквтйийкез', 'гчшмекотд', 'пйгкцчафеавзихзтххтсмкал', 'сжфхпцгдфицжслрдчлдхлсувчнрогнву']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('щщвфчгамтжашнуошбзшуйчтшх'), ['дийу', 'жеомлсжщймемрсччошдфажцтдп', 'нгопнцквбф', 'хопб', 'ив', 'чвфвшфрдфелрдбтатшвейтг', 'вхкцадмупдчбаушшлдксйв', 'жтжбсвмшшсйеуфдпбдлкквдиовж', 'гтсдолснхесйцкйкмищгсзедх', 'ошплп', 'ифпуррикбопйгиччи', 'чдфймудаибвфчжтзглс', 'зпцмвпнлтунвйж', 'еждрйитхччещлцч', 'вмофсужхгрнзехкх', 'щжгквкрфжмжжсефпахст']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('рфгигуужжцфмоаешщечувщгонт'), ['слащченщлуоцргврбаб', 'тцизут', 'лйрсцолзклжбчрзгббммоищщ', 'уицмлоилзф', 'зпхмшвфйккфщщп', 'ймижрпдщмшв', 'пуощжлрмжлщхмкйгщшщивдпчпжчл', 'ойахшафнж', 'гксомбвцрсбжепхкхжхнсббци', 'панлраптщмцмйфебцщемйахенг', 'сохлгожштлднчсзпгтифсйгфмфп', 'аждчвзну', 'дхшуфд', 'борзизцхнийбщгхепрнзшй', 'фщшздруггрке', 'оевупрйщктнолшбкунзжху']) from system.numbers limit 10; +select 8 = multiSearchFirstIndexUTF8(materialize('кщзпапйднучлктхжслмищ'), ['апмдйлсафхугшдезксш', 'кйрм', 'цйивайчшуалгащсхйш', 'злорнмхекг', 'сгщврурфопжнлхкбилдч', 'бнлпщшнвубддрлижпайм', 'нукдонццнрмовфнбгзщсшщшдичежффе', 'йднучлктхжс', 'зпапйднучлктхж', 'затйотдсмпбевлжаиутсуг']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('жцажссефррршнфмнупщаоафгкщваа'), ['жфпщкгзкрмтщчцтжйчпйдошбшоцд', 'бхгйлйдробптвущшппзуиидежнлтпбжащткцф', 'хлещазйцепдханпажчизнхгншйуазщхй', 'ашцк', 'фрбммхдднчзшс', 'нжцанилзжаречвучозрущцдщаон', 'длмчзцрмжщбневрхуонпйейм', 'шкбщттврлпреабпоиожнууупшмкере', 'вуцпщдиифпеоурчвибойбпкпбкйбшхдбхнаббж', 'нртжвкдйтнлншцанцпугтогщгчигзтоищпм', 'цкплнкщлкшемощмстздхпацефогтск', 'цвждйбсмпгацфн', 'шсжшрзрардтпщлгчфздумупд', 'цйииткглчжйвуейеиииинврщу', 'унлодтулшпймашоквббчйнибтвалалрвбцж', 'нбнфнвйишйжлзхкахчмнлшзуеенк', 'бшлпсщжквпцахигчдтибкййб', 'фчакпзовтрлкншзцулшщмпзж']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('иматеччдфлггшпучумджпфпзмвх'), ['дахахпчлцлаачгцгтфпнжлшчйуцбшсг', 'атжйувхец', 'грдсбвиднницдвшпйршгмегцаоопнжгй', 'чзлхречмктфащмтеечуиагоуб', 'савбхлпилийщтихутйчдгфсойй', 'вбгочбзистзщшденусцофит', 'мар', 'дфшажхдсри', 'тжлмщшж', 'птсрсщгшммв', 'ре', 'зратамкткфкинййй', 'гуцмсизулвазужфдмхнелфнжббдтрудчтнфцр', 'нйчинеучкхнпчгнйвчвсвлгминуцахгщввжц', 'ечагчнуулфббгбел', 'йшжуговрйкащцофдокфчушжктнптйеззушфо']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexUTF8(materialize('азтммйтшхцхлгдрнтхфжбдрлцхщ'), ['нпучщфвспндщшспзмшочгсщжчйгжбжзжжтн', 'хккдйшабисдузфртнллщпбоуооврайцз', 'йпхрфжждгпнйаспйппвхбргшйвжччт', 'ффеее', 'кежцновв', 'еххрчштарзмкпйззсйлмплхбчбулзибвчбщ', 'шфжйдотрщттфхобббг', 'ожоцжущопгоцимсфчйщцддзнфи', 'цуимеимймкфччц', 'прммщмтбт', 'хцхлгдрнтхфж', 'лгд', 'цжбдаичхпщзцасбиршшикджцунйохдлхй', 'пидхцмхйнспйокнттмййвчщпхап', 'йтйзмеаизкшйошзвфучйирг', 'хцхлгдр']) from system.numbers limit 10; + +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('gyhTlBTDPlwbsznFtODVUzGJtq'), ['seSqNDSccPGLUJjb', 'xHvtZaHNEwtPVTRHuTPZDFERaTsDoSdX', 'QCeZOYqoYDU', 'bsybOMriWGxpwvJhbPfYR', 'FFHhlxfSLzMYwLPPz', 'tvDAJjaLNCCsLPbN', 'kOykGaSibakfHcr', 'mWAZaefkrIuYafkCDegF', 'ILrFDapnEDGCZWEQxSDHjWnjJmeMJlcMXh', 'zHvaaTgspUDUx', 'tss', 'laUe', 'euUKFLSUqGCjgj', 'Kd', 'MxyBG', 'qRXMsQbNsmFKbYSfEKieYGOxfVvSOuQZw', 'PdBrNIsprvTHfTuLgObTt', 'kMekbxI']) from system.numbers limit 10; +select 0 = 
multiSearchFirstIndexCaseInsensitive(materialize('ZxTznPEbfoBfLElYOrRiHrDLMmTpIh'), ['bJhYwKLeeLvLmXwWvQHWFkDQp', 'dLyZmUicTZmUfjfsFjxxgOiMJn', 'UCYbbGcY', 'kpPiwfWHEuh', 'jviwmHeiTQGxlTKGVEnse', 'cVnEyLFjKXiLebXjjVxvVeNzPPhizhAWnfCFr', 'gkcoAlFFA', 'ahZFvTJLErKpnnqesNYueUzI', 'VIJXPlFhp', 'rxWeMpmRFMZYwHnUP', 'iFwXBONeEUkQTxczRgm', 'ZnbOGKnoWh', 'SokGzZpkdaMe', 'EfKstISJNTmwrJAsxJoAqAzmZgGCzVRoC', 'HTmHWsY', 'CpRDbhLIroWakVkTQujcAJgrHHxc']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('VELfidaBvVtAghxjkrdZnG'), ['fvEFyRHvixuAYbuXygKeD', 'zFNHINreSOFksEGssBI', 'hcdWEcKDGWvfu', 'KczaFjvN', 'nZLTZAYSbfqcNWzWuGatDPUBYaRzuMBO', 'UdOdfdyPWPlUVeBzLRPMnqKLSuHvHgKX', 'DgVLuvxPhqRdSHVRSeoJwWeJQKQnKqFM', 'NNfgQylawNsoRJNpmFJVjAtoYy', 'tWFyALHEAyladtnPaTsmFJQfafkFjL', 'lYIXNiApypgtQuziDNKYfjwAqT', 'QjbTezRorweORubheFFrj', 'htIjVIFzLlMJDsPnBPF', 'ltDTemMQEgITf', 'fprmapUHaSQNLkRLWAfhOZNy', 'dOJMvPoNCUjEk', 'm', 'vEEXwfF', 'aVIsuUeKGAcmBcxOHubKuk']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('kOzLaInSCOFHikwfkXaBfkyjdQ'), ['t', 'emHGfAiZSkZaVTSfplxRiPoDZUTT', 'YHnGJDTzxsboDsLPGHChMHwrHHICBIs', 'gbcbVHSlVeVDOeILWtSLkKfVVjG', 'fPaJjbnNthEwWZyg', 'qS', 'PCQxoLaSdQOjioMKPglmoWR', 'KLMNszm', 'TCErEFyxOvqnHs', 'dRbGzEJqvIGAcilZoHlXtZpjmLLZfsYueKqo', 'iKHmNSbGgaJYJEdMkbobXTdlFgAGEJMQ', 'mUGB']) from system.numbers limit 10; +select 1 = multiSearchFirstIndexCaseInsensitive(materialize('JGcICnWOGwFmJzHjtGJM'), ['fmJzHj', 'LhGTreYju', 'yCELHyNLiAJENFOLKOeuvEPxDPUQj', 'kWqx', 'OBnNMuaeQWmZqjWvQI', 'ektduDXTNNeelv', 'J', 'iCNwoGwfMJzhjtGJ', 'uiIipgCRWeKm', 'bNIWEfWyZlLd']) from system.numbers limit 10; +select 7 = multiSearchFirstIndexCaseInsensitive(materialize('fsoSePRpplvNyBVQYjRFHHIh'), ['ZqGBzyQJYuhTupkOLLqgXdtIkhZx', 'pouH', 'mzCauXdgBdEpuzzFkfJ', 'uOrjMmsHkPpGAhjJwVOFw', 'KbKrrCJrTtiuu', 'jxbLtHIrwYXDERFHfMzVJxgUAofwUrB', 'PLvNyBVQYjRfhhi', 'wTPkeRGqqYiIxwExFu', 'PplvNybvqyJ', 'qOWuzwzvWrvzamVTPUZPMmZkIESq', 'ZDGM', 'nLyiGwqGIcr', 'GdaWtNcVvIYClQBiomWUrBNNKWV', 'QQxsPMoliytEtQ', 'TVarlkYnCsDWm', 'BvqYJr', 'YJr', 'sePrPPLVNYbvqYJRFhh', 'ybvq', 'VQYjrFHh']) from system.numbers limit 10; +select 3 = multiSearchFirstIndexCaseInsensitive(materialize('aliAsDgMSDPISdriLduBFnuWaaRej'), ['gWOFTxMrQGQaLrpJamvRhgeHwk', 'iWsBLzLycWvbJXBNlBazmJqxNlaPX', 'Ri', 'FPLRURSsjvsySncekcxaWQFGKn', 'wgXSTVzddtSGJQWxucYorRjnQQlJcd', 'wOLJWZcjHEatZWYfIwGIqnuzdcHKSFqfARfNLky', 'eEECZMNmWcoEnVeSrDNJxcOKDz', 'duBF', 'EhfLOjeEOQ', 'dUbFNUWA']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('EUzxPFYxMsJaTDzAKRXgZIVSFXU'), ['TDKAgICICjzBKHRqgFAuPCSODemldGGd', 'LvMluSJTIlgL', 'srbRhQKjPIchsipVHsjxwhK', 'vdurVsYkUWiFQVaDOnoNIJEX', 'UzZsZqAUNjMvWJaTqSWMHpzlDhVOaLzHPZfV', 'XcnnPXXEJJv', 'JSwFBNnYzNbIRZdeMfYiAfxzWfnCQFqoTUjns', 'HBMeqdLkrhebQeYfPzfJKAZgtuWHl', 'cMfSOnWgJvGhFPjgZdMBncnqdX', 'orDafpQXkrADEikyLVTHYmbVxtD', 'Vz', 'bfYwQkUC', 'q', 'YqomKpmYpHGv']) from system.numbers limit 10; +select 4 = multiSearchFirstIndexCaseInsensitive(materialize('mDFzyOuNsuOCSzyjWXxePRRIAHi'), ['TfejIlXcxqqoVmNHsOocEogH', 'clyblaTFmyY', 'JQfxMAWVnQDucIQ', 'jw', 'fGetlRA', 'uWwCOCd', 'rInhyxSIFiogdCCdTPqJNrqVaKIPWvLFI', 'mimSJjfCWI', 'jqnJvNZXMEPorpIxpWkhCoiGzlcfqRGyWxQL', 'bxCJeVlWhqGHoakarZcK', 'unsUOcSZyjwxxe', 'E', 'PR', 'nsUoCSZyjwxXEPr', 'sfotzRPMmalUSjHkZDDOzjens', 'zYJwxx', 'DFzyouNsUocsZ', 'QBaQfeznthSEMIPFwuvtolRzrXjjhpUY', 'sQPVBaoeYlUyZRHtapfGM', 'lPiZLi']) from system.numbers limit 10; +select 0 = 
multiSearchFirstIndexCaseInsensitive(materialize('VOAJfSkbDvNWAZNLIwqUgvBOddX'), ['pHrGGgJ', 'VohjtPdQZSNeKAlChDCnRTelroghFbZXVpnD', 'rnWebvdsmiFypMKL', 'NtKRiJOfAkWyKvubXrkOODgmZxvfOohsnHJEO', 'nxsDisKarasSZwESIInCJnYREUcoRUTXHBUH', 'mXYYr', 'jujScxeTBWujKhKyAswXPRszFcOKMSbk', 'INEegRWNgEoxqwNaGZV', 'VVyjMXVWVyuaOwiVnEsYN', 'mkLXSmXppxJhFsmH', 'pRVnBrWjqPeUDHvhVuDbzUgy', 'PzchFdPTkOCIVhCKml', 'KXaGWnzqoHBd', 'PhzQVqIOLleqDSYNHLjAceHLKYPhCVq', 'aixxTqAtOAOylYGSYwtMkZbrKGnQLVxnq', 'ruEiaxeRaOOXGggRSPlUOGWSjxh', 'prSULtHvDMw', 'vEpaIIDbGvIePYIHHZVNSPYJl']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('ZHcEinZEFtfmHBLuCHntUhbIgY'), ['GKElMPEtmkLl', 'mkrzzjSRfXThuCQHkbZxRbhcymzTxcn', 'PREwQjxBJkpkiyuYEvtMZNFELgbINWsgf', 'lFEGlPtaDJSyoXzwREiRfpzNpsaBYo', 'tmVTuLPhqhgnFNhHvqpmc', 'NtijVhVfAwpRsvkUTkhwxcHJ', 'O', 'FSweqlUXdDcrlT', 'uljEFtKVjIzAEUBUeKZXzCWmG', 'dBIsjfm', 'CNaZCAQdKGiRUDOGMtUvFigloLEUr', 'yWjizKZ', 'QqPVdyIFXcweHz', 'uPmgGWGjhzt']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('AYMpbVsUQqAfoaMiJcYsulujYoSIx'), ['aXECumHNmAEefHPJy', 'hTosrERBdVCIilCYcMdHwaRh', 'PVDBpwrc', 'uFvQRPePvmzmocOauvEjqoxMhytzOwPSOCjmtm', 'kQqIlSCHDmWXCKN', 'ybAHGYDEDvvOJsF', 'WpkANi', 'cFGuzEcdahZtTdLFNBrRW', 'EBaybUFxO', 'mRlZUzHzMsMAgvtRtATEDLQvXZnZHw', 'uqxckjqpCBHiLgSPRz', 'Lv', 'AJcRfAvBmQVMOjaFfMfNHJt', 'FYsPM', 'pkKXTPgijOHFclqgVq', 'Ck']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexCaseInsensitive(materialize('gmKSXWkNhKckrVNgvwiP'), ['bdJMecfCwQlrsgxkqA', 'NTgcYkMNDnTiQj', 'fmRZvPRkvNFnamMxyseerPoNBa', 'rfcRLxKJIVkLaRiUSTqnKYUrH', 'YSUWAyEvbUHc', 'PridoKqGiaCKp', 'quwOidiRRFT', 'yHmxxUyeVwXKnuAofwYD', 'gichY', 'QlNKUQpsQPxAg', 'knhkCKRVNGvWIp', 'jAuJorWkuxaGcEvpkXpqetHnWToeEp', 'KnHKCKrvNgVW', 'tCvFhhhzqegmltWKea', 'luZUmrtKmmgasVXS', 'mageZacuFgxBOkBfHsfJVBeAFx', 'hKC', 'hkRCMCgJScJusY', 'MKSXWknHkckrVNgv', 'osbRPcYXDxgYjSodlMgV']) from system.numbers limit 10; +select 15 = multiSearchFirstIndexCaseInsensitive(materialize('lcXsRFUrGxroGIcpdeSJGiSseJldX'), ['pBYVjxNcQiyAFfzBvHYHhheAHZpeLcieaTu', 'SQSQp', 'OQePajOcTpkOhSKmoIKCAcUDRGsQFln', 'AYMDhpMbxWpBXytgWYXjq', 'gkUC', 'oWcNKfmSTwoWNxrfXjyMpst', 'fQSqkjRNiBGSfceVgJsxgZLSnUu', 'LRrhUjQstxBlmPWLGFMwbLCaBEkWdNJ', 'cZnaActZVoCZhffIMlkMbvbT', 'Uxg', 'vlKdriGMajSlGdmrwoAEBrdI', 'Fl', 'XzcNdlUJShjddbUQiRtR', 'AqowAuWqVQMppR', 'SRFUrGXrOgiCP', 'k']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('KhwhbOzWvobUwJcteCHguFCn'), ['LkDYrpvDfPL', 'CIaTaShobVIaWjdbsNsCMdZKlGdtWuJmn', 'zYcsxxFyfuGrPdTPgEvGbXoYy', 'vDIeYpJbLMGMuRkIrPkAnqDDkqXPzy', 'Ievib', 'CREiuEsErFgvGEkQzThHtYtPmcL', 'JjRWKyALtSkoGmRxh', 'JxPhpijkDOpncCKyDEyXvKNua', 'jo', 'mKpFscuBEABMAlQO', 'qiFTgJpcnUMRKzTEuKY', 'pXBtITxCPRaXijM', 'guYVLpIbu', 'tSKYIxv', 'oDnWaFAmsXGRdGvRPhbCIvFSFQNlSVYB', 'phdckINUiYL']) from system.numbers limit 10; +select 14 = multiSearchFirstIndexCaseInsensitive(materialize('pXFoUGwVTAItBqgbBaQwAqmeh'), ['LfBevBpGnaSlmGhbeZ', 'NtBYzEksiXvYI', 'jMeRw', 'omtaduY', 'BsWyvNdkfXsTBxf', 'CtoOIvaesuca', 'pgJcRIBVbyaPBgGsNKP', 'bAwdUMnwKvMXfFHQWrtfMeqcORIJH', 'GDxZblrqWSxUJFjEuXArPtfHPdwSNGGL', 'LLxcfp', 'NrLghkFpwCdvHJBfPBgiMatNRaDKjO', 'XCzr', 'cCojPpfLkGZnaWBGpaZvrGMwgHNF', 'BaQWAQmE', 'AQ', 'RtxxEZDfcEZAgURg']) from system.numbers limit 10; +select 5 = multiSearchFirstIndexCaseInsensitive(materialize('KoLaGGWMRbPbKNChdKPGuNCDKZtWRX'), ['FBmf', 'QJxevrlVWhTDAJetlGoEBZWYz', 'tKoWKKXBOATZukMuBEaYYBPHuyncskOZYD', 
'kgjgTpaHXji', '', 'xOJWVRvQoAYNVSN', 'YApQjWJCFuusXpTLfmLPinKNEuqfYAz', 'GXGfZJxhHcChCaoLwNNocnCjtIuw', 'ZLBHIwyivzQDbGsmVNBFDpVaWkIDRqsl', 'Kp', 'EyrNtIFdsoUWqLcVOpuqJBdMQ', 'AggwmRBpbknCHdKPgun', 'xNlnPtyQsdqH', 'hDk']) from system.numbers limit 10; +select 6 = multiSearchFirstIndexCaseInsensitive(materialize('OlyNppgrtlubvhpJfxeWsRHpr'), ['slbiGvzIFnqPgKZbzuh', 'fakuDHZWkYbXycUwNWC', 'HnVViUypZxAsLJocdwFFPgTDIkI', 'bLx', 'fmXVYOINsdIMmTJAQYWbBAuX', 'pjFXews', 'BG', 'vrSQLb', 'ub', 'pREPyIjRhXGKZovTqlDyYIuoYHewBH', 'hnNQpJmOKnGMlVbkSOyJxoQMdbGhTAsQU', 'UwaNyOQuYpkE', 'yHNlFVnuOLUxqHyzAtNgNohLT', 'YJRazuUZkP', 'z', 'lUbVhpjFxEWsRhP']) from system.numbers limit 10; +select 6 = multiSearchFirstIndexCaseInsensitive(materialize('ryHzepjmzFdLkCcYqoFCgnJh'), ['cLwBRJmuspkoOgKwtLXLbKFsj', 'YSgEdzTdYTZAEtaoJpjyfwymbERCVvveR', 'RzdDRzKjPXQzberVJRry', 'HUitVdjGjxYwIaLozmnKcCpFOjotfpAy', 'LWqtEkIiSvufymDiYjwt', 'FDlKCCYqoFCGNj', 'jmZfdlKCcyQOFcGnJ', 'OZCPsxgxYHdhqlnPnfRVGOJRL', 'JfhoyhbUhmDrKtYjZDCDFDcdNs', 'KCCYqo', 'EPJMzFDLKcCYQ', 'zLQb', 'qsqFDGqVnDX', 'MzfdLkCCyQOFc']) from system.numbers limit 10; +select 5 = multiSearchFirstIndexCaseInsensitive(materialize('oQLuuhKsqjdTaZmMiThIJrtwSrFv'), ['MsfVCGMIlgwomkNhkKn', 'fBzcso', 'meOeEdkEbFjgyAaeQeuqZXFFXqIxBkLbYiPk', 'tNV', 'i', 'EwuTkQnYCWktMAIdZEeJkgl', '', 'hUo', 'dtAzmMITHijRtwsrFV', 'vhnipYCl', 'puor', 'TazMmiTh', 'ITHIJRTWSrf', 'luuHksqJDTaz', 'uHkSQjDtazMMiThIjrtwSRFV', 'gpWugfu', 'QjdtazmmIthIjRTWSRFV', 'ZdJpc']) from system.numbers limit 10; + +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ИпрУщйжббКВНИчйацпцоЛП'), ['цШСкЕвеГЕЗЦщруИБтЦсБГАу', 'Хнщта', 'БшА', 'СалШйР', 'ЩфДГРРчшБДММГЧоноЖСчдпВХшшгйН', 'бЕжПШЦддожнЧоЕишчшЕЙфСщиВПФМ', 'ТЗзГФх', 'Чфл', 'КнНкнЖЕкППварНрхдгЙкДешмСКИЛкеО', 'ЖИсЧПСФФМДиТШХЦфмЗУпфрУщСЛщсфмвШ', 'ллЙумпхчОсЦМщУ', 'ГМУНЦФшНУбРжоПвШШщлВФАтоРфИ', 'БХцжеНЗкжЗЗшЦзфгдЖОзЗЖщКМИШАтЦАп', 'мтСкЕнбХШнЛхХГР']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('цмйвГЖруДлдЦавхЖАлоЕхЗКд'), ['ХфБПМДВХЙ', 'саЗваАбднХбЦттмКсМбШбВМУйНКСЖжХЦНц', 'плиЩщШАцЖсхГ', 'ЗнУЕФЗВаНА', 'ЧДйСаЗГЕшойСжбсуЩуЩщбПР', 'ЧЕуЩкФБВвчмабШЦтЖбОрЗп', 'йХбМсрТАФм', 'РЖСЗвЦлНВПЧщГУцЖ', 'ГГлЩрОХКнШРТуДФ', 'шСабРжла', 'ЕчБвгаРЧифаЙщХПпГЦхчШ', 'дайшйцВНЩЧуцйдМХг', 'УнзНКЧххВрцЩМлАнЖСДОДцбИгЛЛР', 'сЛЗзПбиАгзК']) from system.numbers limit 10; +select 2 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('дфЧлзОжММФкЖгиЗЩлоШжФТкцк'), ['ЗРТцИрсФСбПрщГЗ', '', 'ЖГИЗщлОш', 'АДПН', '', 'чЛЗОЖмМфКжг', 'Мфкж', 'ндаовк', 'зГЛРГАНШмСмШМефазшеБкзДвЕШиЖСЗЧПИфо', 'ФЧЛзОЖммфКжгиЗЩ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ИИКДМЛхРчнвЙЕкВЧелТйЛВТ'), ['АчшОЛтНЙуЦЛЙфАКУйуТЗМеЗщОХТМЗеТА', 'НЦУДбчфРТОпЛкОгВпоО', 'неДавнНРеАУфТтфАнДчтнУМЛПШнроАчжш', 'бГржВПЧлЛтСВТтаМЦШШ', 'БщГщРнБхЕЛоЛсмЙцВЕГ', 'цбАжЦРеу', 'ХсЦРаНиН', 'нббДдВЗРС', 'змОПпеЛЖзушлнДЛфчЗлцЙЛфЖрЛКг', 'фШиЖСУоаНПйИВшшаоуЙУА', 'ЛктХиШРП', 'МапщВйцХч', 'жмУТкуГбУ', 'сйпзДЩоНдШЕТбПзФтсрмАФГСз', 'ЛБУвйладЕижрКзШУАгНЩчЕмАа', 'мЧпФлМчРбШРблмтмПМоС']) from system.numbers limit 10; +select 8 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ПоДУЗАтХншЦатИшХвмИЖчГнжчНцух'), ['жЛЧХзкжлиЛцЩбЧСнЛУжЖпКРвиСРН', 'шадмЩеУШБврУдЕБЗИгмЗЕФШчЦБСзПидтАлб', 'йпГмШСз', 'хЖФЙиПГЗЩавиЗЩйПнБЗЦЩмАЧ', 'ХесщтлбСИуЦ', 'вар', 'ЙкМаСхаЩаЗнФЩфКжПщб', 'ОдУзАТХншЦатИШхвМиЖчгнЖч', 'ЗВЗДБпФфцвжУКвНсбухссбЙКЙйккЛиим', 'гХхсГЛшдфЖЛбгчоЕмоЧр']) from system.numbers limit 10; +select 7 = 
multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ихзКЖЩсЧРСЖсЖжЛАшкТхИйТгМБпск'), ['ДРОБм', 'нз', 'тОЛ', 'щРзуЖрТ', 'Мдд', 'АЦГРК', 'Чрсжсжжл', 'чРсжсЖжл', 'ктхИйтГмБ', 'аАзЙддМДЦЩФкРТЧзЧПУойоТхБиЧПлХДв', 'иЙтгМбп', 'РицлПн', 'йДГнЧкЕв', 'ВМЩцАш', 'хКЩнДшуБЕЛТФГВгцБПРихШЙХгГД', 'иЙТГМ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('жггкщцзщшамдбРЗжйТзвхшАпХСбе'), ['лВТвтлРБжиЛЦвРЦкАЦаНБгуОН', 'рШаавцжзМрзВЧДРСузб', 'оемрЗМгФБНмжп', 'ЛбмХбФЧШГЛХИуТСрфхп', 'ЖшТдтЧйчМР', 'ЧнИМбфУпмЙлШЗТрТИкКИЩОЧеМщПЩлдБ', 'ГвРдПжГдБаснилз', 'уТнТчТРЗИЛ', 'ИТЕВ', 'дИСЖпПнПСНОвсЩЩшНтХЧшВ', 'штабтлМнсчРЗтфсТЩублЕЧйцеЦТтХ', 'ХбхгУШвАзкшЖ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('нсЩЙЕМмЧЛСйФцГВМиатГХш'), ['КсОПЧИкВсКшРхнкхБжду', 'мШмпТащжФ', 'ББЖнианЧЦпмрГЩГМаЛКжА', 'арИжзжфГТУДИРРРбцил', 'дфдмшМИщТиЗПруКфОнСЦ', 'Рцч', 'гмДгВДАтсщКЗлхвжЦУеФДАТГЙЦЧОЗвРш', 'чфХЩсДбУбВжАМшРлКРщв', 'нцБйсУ', 'фасДЕчвчДмбтЖХвоД', 'аБЧшЖшЖАКргОИшпШЧзТбтфйвкЕц', 'ЗжжсмкжЛд', 'щщлПзг', 'бП']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('сКиурчоиаЦйхгаУДПфчИтИК'), ['МЧПцУАМрХКЧмАЦннУшмРчкЖКХвху', 'РвДуВиашрРКкмжшЖНШБфлцжБЦР', 'йМУиУчНЧчРшДйБЗфЩЦйПсцгкДС', 'НсмаЛзЧвНЦШФуВРпзБГзйКцп', 'ЖлМЛУХОБллСЗСКвМКМдГчЩ', 'ЩХПШиобЛх', 'аФАЖВтРиЦнжбкСожУЖЙипм', 'аУГжУНуМУВФлж', 'ШБчтЗкЖНЙк', 'ЩоГПГчНП', 'мВЗйЛаХПоЕМХиИйДлшРгзугЙЖлнМппКЦ', 'вчмДФхНеЦйЗсЗйкфпОщПтШпспИМдГйВМх', 'ИЗИжЧжаГЩСуцСЩдкскздмЖЦ', 'дАмфЕбгс', 'ГМттнхчЩжМЧДфщШБкфчтЧ', 'ШЕииФБпщЙИДцРиЖжЩл', 'ОпуОлБ', 'хБ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('риШМбгиПЖннНоЧргзГзеДпЛиНт'), ['икДкбйдройВУсвФзрПСусДнАШо', 'чуУеТкУВФхз', 'ЕГпйчехЗвЛлБблЧПДм', 'зеоЩЧожКЛбШЩдАрКБНйшКВШаЗгПш', 'виФКуЗОтгВмТкБ', 'цДрЙгЗРаЧКаМДдБЕЧзСРщВФзПВЧГвЩрАУшс', 'мБЗИУдчХХжТж', 'ФТНМмгЖилуЛйМ', 'ЗегЩЦнЦщцИк', 'оГОусхФсДЖДЩИЕХЗпсПЩХБТГЕп', 'АУКНзАДНкусВЧХвАж', 'КвКрбсВлНАоЗсфХОйЦхТ', 'вФдеХацЧБкрхМЖЗЧчКшпфВчс', 'йХшиОвХЗжТпДТбвУрпшЕ']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('МойрЙлтЖйБдББЛЕЕЦузЛфпИЕГт'), ['ПОжЦЩа', 'СШзЧФтСЗохЦЗдФтцНТу', 'вЕдТ', 'ечУФаМДнХщЕНУи', 'вмеосТзБАБуроЙУЛгФжДсЧщтчЕзлепгк', 'ИЧтБрцПмРаВрйИвНЛСйпЖжУВдНрурКшоКХП', 'ЕН', 'щКЦЩгФБСХпкпит', 'ей', 'ЕахшеОМРдЕГХуГЖчвКХМЕ', 'Гт', 'НужЛЛЙОАл']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('еззЦАвУаДнзИКЙнЙдртРоП'), ['КгЩбшПЛКвтИРцйчккгЧчЧмтГ', 'кЛппСФщзМмТйВЕтбЩЦлО', 'ШпдзиЖх', 'иИХ', 'пУаАФгсмтофНФХиЦЕтТЗсОШЗЙ', 'фаКАБТцФМиКЖрИКшГБЗБ', 'идЖЙдЦММУнХЦЦфсФМ', 'МиЦечЖЦЙмРВЙОХсБРНнрлйЙшц', 'ТфдСтМгтмимТМАучтхПНЦлуф', 'бейККЛСггУЦБсокЕЙпнРЧ', 'цавУАДНЗИКЙнЙд', 'ЩйЕЖчЧщаПшжФсхХЛЕТчвмЙнуце', 'РТРОП', 'цАВуАДнзИкЙНЙдРтРо', 'аЩПИд', 'ОСчКшОАчВмр', '', 'уЙЛИуЕУвцДшНОгбТбИШв', 'АВУаднзИКЙНйдР', 'жТйоП']) from system.numbers limit 10; +select 12 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('шйМЦУщвфщшбмлТНВохСЖНУ'), ['хшТАпТоШхКНсДпвДЕчДМНбАНччд', 'ХКуПСтфСйРжмБглОШЙлйДкСФВйВ', 'хпмНЦМУШеАД', 'чзмЧВвлбЧкАщПкзТгеуГущб', 'шзжрДд', 'еЗГОЙНйИБЗДщИИНицмсЙЗгФУл', 'кнщЙхооДТООе', 'всзЙнТшжФЗДБДрщВДлбвулДИаз', 'мп', 'уБОйцзнМпИсксхефбдЕЛйгИмГШГЗЩ', 'ОМпзШШщчФФнвУЧгжчиндЧч', 'щВФЩШбмЛТн', 'бм', 'БпфнкнйЗцПдЧЩбВ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('НЗБлОбшмОПктткоччКиКрФсбкШАХ'), ['нффЕББУЖГшЖвГфЦФГЕСщсЩЧлфнАшшктизУ', 'нСмпцхшИои', 'ЧИчЗУтйЦхГезппФРХХШуцЗШВ', 'РИнщН', 
'НЩдВТсЙсОдхРбМФнСпАбОПкудБФСчмб', 'йхглпдКтртгош', 'ибгУРАБцх', 'ИЕиЛрИДафмЗИкТвАуГчШугбЧмЛШщсОЧбБкП', 'ЩСМуХМ', 'АУсмдЗБвКфЩ', 'пгбТНОйц', 'МоИ', 'КОйкзОЕИЗМЩ', 'чщттЛРНнГхЗхХй', 'ЩшцЧРКмШЖЩЦемтЧУЛГкХтВНзОжУХТпН', 'ЕшбБНчрДпЩЧМлераУЖХйфйдчтсчПШ', 'дбФйтИАшДйЩтбФйШуПиРлГмВОШаСлШЧИвфЖщгж', 'ОДжТЦщпщИжфуеЩмн', 'ПЛНЕзжСчВКДттуФРУ', 'БбмеГЩХшжрцОжХНииВКВлдиХБДСмнНфХЛТХ']) from system.numbers limit 10; +select 4 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ЕКаЖСЗЗЕЗгПдШкфцЙТцл'), ['ЙКМИХРОХ', 'НвМУХзфчДбАРЙДу', 'чмщжФшшжсЗТв', 'жСЗзеЗг', 'ЛФсКзВСдЦД', 'АЖсЗЗЕЗГ', 'Пдшкфц', 'усйсКщшрДрвнФЛедуГХ', '', 'цйтЦ', 'Ощс', 'ЕЗГпдшКф', 'ззеЗгп', 'УгЛйхШТтшрЛ', 'ЗзЕЗгП', 'КЛмТЩРтрзБбЩРгФбиОБазУнтУЦ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('чЕжАфАрБпКбДмшАшТШККауЩИхНВО'), ['ЧЙпЗЧЧлйПЙЖЙшККг', 'зйхуМЩАИПГЗА', 'ЙцехноХниИбзБЧ', 'чВомЗОфУроС', 'дбРхХЗрзоДДШщЕДжиФаЙ', 'еЛзТцЩДиДГрдМОНЧУнеТуДЩЧЦпГЕщПОРсйпЧ', 'ФчнпМРЧцПЙЩЩвфДХПнУхцЩСИ', 'цлШеУкМБнжЧлУцСуСЙуотшМфйс', 'лугГлкщКщкзЛйпбдсишргДДшОувр', 'ЗРИаФЛЗФрСзм', 'аЗвжВгхЩоЦ', 'чГКлеБНДнИЖЧеШЧДнИвсГДЖЖфБМНсУЦосВс', 'щЦнПУзЧщнЩЕ', 'рВУв']) from system.numbers limit 10; +select 20 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('анктгЦВВкЧвЖиБпфТйлр'), ['НшДПчтсСЧпкидаХжаЙчаДчЦГшГ', 'ХнцЛШИрХВаРхнЧИЙрОЛЛИТпППфгЖЩФ', 'ФАЛущПупмдМБмтйзУшрВМзцзШжгД', 'ГчЛЧеЛДХеипдшЦЦмаШНаРшУТ', 'фОЕфжО', 'ТНсУАнчшУЛЦкцчЙ', 'ЛйЦКБЗГЦйКЩиОПуТЦкБкБувснЙи', 'Бунф', 'ИтХЛШСУНЦВйРСЙчДчНвйшЗЦй', 'АцСКнзБаЖУДЖегавйБгужШАДЙтжИВк', 'ЦцХщфирДПрСуХзхЖМЕщ', 'кфдБЖКншвУФкЗДКуЙ', 'СкиСЦЗЦРмгЦНпБхфХДЙщЛзХ', 'йУепВЖАПНбАЩуЛжвЧпхМ', 'БпЧшпДочУвибщерйхйтОБАСПнЧМИОЩ', 'чФгНЗщвхавбшсООоВштбЧ', 'уДиЕцнЙХВЕйИАГдЕ', 'тп', 'ЧЕРЖсгВ', 'вЖибПФТЙЛ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ипозйпхЛОЛТлСМХЩдМвМгШИвГиЛп'), ['ФСГзиГррБДНКГЛХбААФхИ', 'гегпАвхДЕ', 'ЦХжзщХИвхп', 'ЗЖ', 'ХОКцКзЩо', 'абИОрГПМТКшБ', 'кмХТмФихСЦсшУдхВбИШМНАНмпмХОЗйПЩч', 'еОжТСкфЕТУУжГ', 'НтщМЕПЧИКЙКйй', 'ежСикИвйЛж', 'ушЩФОтпБзЩЛЗЦЧЙиВгБЧоПХНгОуАДТЙж', 'фМЕРефнутпнцФРнрГЖ', 'хшДЧзнХпфорвЩжмГРЦуХГ', 'ЧЖн', 'вВзгОСхгНумм', 'ЗДоВлСжпфщСКсщХаолЛнЛЗбСхвЩвЩНоЩЩМ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('МрЗтВФуЖРеЕШЧхПФбжжхчД'), ['щжОожЦндцШйТАй', 'йуРСЦУЗФУЦПвРфевСлфдРещЦтИтЩЩТг', 'ЕГЧдмХмРАлнЧ', 'йнкФизГСЗнуКбЙВЙчАТТрСхаЙШтсдгХ', 'ЧПрнРЖЙцХИщ', 'зЕ', 'СжВЩчГзБХбйТиклкдШШИееАлЧЩН', 'МШщГйБХжЙпйЕЗТзКмпе', 'НКбНщОМДзлдЧОс', 'НчзВХОпХХШМОХФумБгсрРЧИчВтгутВЩо']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('упТУЖелФкЧЧУЦРжоБтХсжКщД'), ['щКшуОЖааЖйнЕбДИжМК', 'ЕкнШцХРВтНйШоНбЙйУоЧщУиРпШЧхмКЧжх', 'рвЩЗоЗхшЗвлизкСзебЩКМКжбша', 'ДииБНСШвцЦбаСсИжЕЗмхмВ', 'СЦоБЗПМтмшрУлрДТФГЖиувШЗууШзв', 'ЦЗБЕзВХЙбйВОмЗпхндЗ', 'ЗНизЧВШкГВтпсЖж', 'уШиБПЙЧтРаЕгИ', 'ЙшпПА', 'ЧоММаАйМСфбхуФкефФштгУА']) from system.numbers limit 10; + diff --git a/tests/queries/0_stateless/02370_extractAll_regress.reference b/tests/queries/0_stateless/02370_extractAll_regress.reference new file mode 100644 index 00000000000..aad46128e52 --- /dev/null +++ b/tests/queries/0_stateless/02370_extractAll_regress.reference @@ -0,0 +1 @@ +{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"} ['a','b','c','d','a','b','c','d','a','b','c','d','a','b','c','d'] [':"',':"',':"',':"',':"',':"',':"',':"',':"',':"',':"',':"',':"',':"',':"',':"'] diff --git 
a/tests/queries/0_stateless/02370_extractAll_regress.sql b/tests/queries/0_stateless/02370_extractAll_regress.sql
new file mode 100644
index 00000000000..6d255124948
--- /dev/null
+++ b/tests/queries/0_stateless/02370_extractAll_regress.sql
@@ -0,0 +1,5 @@
+-- Regression for UB (stack-use-after-scope) in extractAll()
+SELECT
+    '{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}' AS json,
+    extractAll(json, '"([^"]*)":') AS keys,
+    extractAll(json, ':"\0[^"]*)"') AS values;
diff --git a/tests/queries/0_stateless/02371_create_temporary_table_as_with_columns_list.reference b/tests/queries/0_stateless/02371_create_temporary_table_as_with_columns_list.reference
new file mode 100644
index 00000000000..6fc56adcb1c
--- /dev/null
+++ b/tests/queries/0_stateless/02371_create_temporary_table_as_with_columns_list.reference
@@ -0,0 +1,2 @@
+Vasya
+Petya
diff --git a/tests/queries/0_stateless/02371_create_temporary_table_as_with_columns_list.sql b/tests/queries/0_stateless/02371_create_temporary_table_as_with_columns_list.sql
new file mode 100644
index 00000000000..7d8f297b505
--- /dev/null
+++ b/tests/queries/0_stateless/02371_create_temporary_table_as_with_columns_list.sql
@@ -0,0 +1,3 @@
+CREATE TEMPORARY TABLE test_02327 (name String) AS SELECT * FROM VALUES(('Vasya'), ('Petya'));
+SELECT * FROM test_02327;
+DROP TABLE test_02327;
diff --git a/tests/queries/1_stateful/00024_random_counters.reference b/tests/queries/1_stateful/00024_random_counters.reference
index f11b66aa5b5..96ce61aeccb 100644
--- a/tests/queries/1_stateful/00024_random_counters.reference
+++ b/tests/queries/1_stateful/00024_random_counters.reference
@@ -998,3 +998,5 @@
 1 1
 1 2
 1 1
+1 1
+1 5
diff --git a/tests/queries/1_stateful/00024_random_counters.sql b/tests/queries/1_stateful/00024_random_counters.sql
index 99ba9cc653b..b44f0731471 100644
--- a/tests/queries/1_stateful/00024_random_counters.sql
+++ b/tests/queries/1_stateful/00024_random_counters.sql
@@ -998,3 +998,12 @@ SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15094099;
 SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6308405;
 SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20762370;
 SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14121177;
+
+SYSTEM DROP UNCOMPRESSED CACHE;
+
+SET local_filesystem_read_method = 'pread_threadpool';
+SET min_bytes_to_use_direct_io = 1;
+SET use_uncompressed_cache = 1;
+
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32745436;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33436573;
diff --git a/tests/queries/1_stateful/00173_group_by_use_nulls.reference b/tests/queries/1_stateful/00173_group_by_use_nulls.reference
new file mode 100644
index 00000000000..02723bf14dd
--- /dev/null
+++ b/tests/queries/1_stateful/00173_group_by_use_nulls.reference
@@ -0,0 +1,10 @@
+1704509 1384
+732797 1336
+598875 1384
+792887 1336
+3807842 1336
+25703952 1336
+716829 1384
+59183 1336
+33010362 1336
+800784 1336
diff --git a/tests/queries/1_stateful/00173_group_by_use_nulls.sql b/tests/queries/1_stateful/00173_group_by_use_nulls.sql
new file mode 100644
index 00000000000..7acacc4e579
--- /dev/null
+++ b/tests/queries/1_stateful/00173_group_by_use_nulls.sql
@@ -0,0 +1,10 @@
+SELECT
+    CounterID AS k,
+    quantileBFloat16(0.5)(ResolutionWidth)
+FROM remote('127.0.0.{1,2}', test, hits)
+GROUP BY k
+ORDER BY
+    count() DESC,
+    CounterID ASC
+LIMIT 10
+SETTINGS group_by_use_nulls = 1;
diff --git a/utils/check-style/check-black b/utils/check-style/check-black
index 45e7820469b..141dcd1b406 100755
--- a/utils/check-style/check-black
+++ b/utils/check-style/check-black
@@ -6,8 +6,14 @@ set -e
 GIT_ROOT=$(git rev-parse --show-cdup)
 GIT_ROOT=${GIT_ROOT:-.}
 tmp=$(mktemp)
-if ! find "$GIT_ROOT" -name '*.py' -not -path "$GIT_ROOT/contrib/*" -exec black --check --diff {} + 1>"$tmp" 2>&1; then
+# Find all *.py files in the repo except the contrib directory
+find_cmd=(find "$GIT_ROOT" -name '*.py' -not -path "$GIT_ROOT/contrib/*")
+if ! "${find_cmd[@]}" -exec black --check --diff {} + 1>"$tmp" 2>&1; then
     # Show the result only if some files need formatting
     cat "$tmp"
+    # Apply formatting
+    "${find_cmd[@]}" -exec black {} + 1>/dev/null 2>&1
+    # Automatically add changed files to stage
+    "${find_cmd[@]}" -exec git add -u {} + 1>/dev/null 2>&1
 fi
 rm "$tmp"
diff --git a/utils/graphite-rollup/graphite-rollup-bench.cpp b/utils/graphite-rollup/graphite-rollup-bench.cpp
index 4c11f90b3ff..49a3d509be6 100644
--- a/utils/graphite-rollup/graphite-rollup-bench.cpp
+++ b/utils/graphite-rollup/graphite-rollup-bench.cpp
@@ -20,9 +20,9 @@ using namespace DB;
 
 static SharedContextHolder shared_context = Context::createShared();
 
-std::vector<StringRef> loadMetrics(const std::string & metrics_file)
+std::vector<std::string_view> loadMetrics(const std::string & metrics_file)
 {
-    std::vector<StringRef> metrics;
+    std::vector<std::string_view> metrics;
 
     FILE * stream;
     char * line = nullptr;
@@ -47,7 +47,7 @@ std::vector<StringRef> loadMetrics(const std::string & metrics_file)
         }
         if (l > 0)
         {
-            metrics.push_back(StringRef(strdup(line), l));
+            metrics.emplace_back(std::string_view(strdup(line), l));
         }
     }
 }
@@ -80,7 +80,7 @@ void bench(const std::string & config_path, const std::string & metrics_file, si
     Graphite::Params params;
     setGraphitePatternsFromConfig(context, "graphite_rollup", params);
 
-    std::vector<StringRef> metrics = loadMetrics(metrics_file);
+    std::vector<std::string_view> metrics = loadMetrics(metrics_file);
     std::vector durations(metrics.size());
 
     size_t j, i;
@@ -99,15 +99,15 @@ void bench(const std::string & config_path, const std::string & metrics_file, si
 
             if (j == 0 && verbose)
             {
-                std::cout << metrics[i].data << ": rule with regexp '" << rule.second->regexp_str << "' found\n";
+                std::cout << metrics[i].data() << ": rule with regexp '" << rule.second->regexp_str << "' found\n";
            }
        }
    }
 
     for (i = 0; i < metrics.size(); i++)
     {
-        std::cout << metrics[i].data << " " << durations[i] / n << " ns\n";
-        free(const_cast<void *>(static_cast<const void *>(metrics[i].data)));
+        std::cout << metrics[i].data() << " " << durations[i] / n << " ns\n";
+        free(const_cast<void *>(static_cast<const void *>(metrics[i].data())));
     }
 }
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index de9e4cfa0af..e4c7aae8b25 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,3 +1,4 @@
+v22.7.1.2484-stable 2022-07-21
 v22.6.3.35-stable 2022-07-06
 v22.6.2.12-stable 2022-06-29
 v22.6.1.1985-stable 2022-06-16
diff --git a/utils/security-generator/SECURITY.md.sh b/utils/security-generator/SECURITY.md.sh
index 381f5b4eaa6..15933da7942 100755
--- a/utils/security-generator/SECURITY.md.sh
+++ b/utils/security-generator/SECURITY.md.sh
@@ -33,7 +33,7 @@ FROM
 FROM
 (
     WITH
-        extractGroups(version, 'v(\\d+).(\\d+)') AS v,
+        extractGroups(version, 'v(\\d+)\\.(\\d+)') AS v,
         v[1]::UInt8 AS y,
         v[2]::UInt8 AS m
     SELECT
diff --git a/utils/self-extracting-executable/compressor.cpp b/utils/self-extracting-executable/compressor.cpp
index f6bbc33aa16..6ce4af2c362 100644
--- a/utils/self-extracting-executable/compressor.cpp
+++ b/utils/self-extracting-executable/compressor.cpp
@@ -9,7 +9,18 @@
 #include
 #include
 #include
+#if defined OS_DARWIN
+
+// dependencies
+#include
+#include
+
+// define 64 bit macros
+#define htole64(x) OSSwapHostToLittleInt64(x)
+
+#else
 #include
+#endif
 
 #include "types.h"
diff --git a/utils/self-extracting-executable/decompressor.cpp b/utils/self-extracting-executable/decompressor.cpp
index 97bcda763e3..0adeb33b3ce 100644
--- a/utils/self-extracting-executable/decompressor.cpp
+++ b/utils/self-extracting-executable/decompressor.cpp
@@ -1,6 +1,6 @@
 #include
 #include
-#if defined __APPLE__
+#if defined OS_DARWIN
 #include
 #else
 #include
@@ -12,7 +12,18 @@
 #include
 #include
 #include
+#if defined OS_DARWIN
+
+// dependencies
+#include
+#include
+
+// define 64 bit macros
+#define le64toh(x) OSSwapLittleToHostInt64(x)
+
+#else
 #include
+#endif
 
 #include "types.h"
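
The last two hunks above work around the fact that macOS ships no <endian.h>: under OS_DARWIN the htole64/le64toh conversions are mapped onto Apple's OSSwap* byte-order primitives, so the self-extracting executable can keep writing and reading its metadata in little-endian form on either platform. Below is a minimal standalone sketch of that pattern; it is not the ClickHouse source, and the Darwin header choice and the main() harness are illustrative assumptions only.

#include <cstdint>
#include <cstdio>

#if defined(__APPLE__)
// macOS has no <endian.h>; assume the OSByteOrder primitives instead.
#include <libkern/OSByteOrder.h>
#define htole64(x) OSSwapHostToLittleInt64(x)
#define le64toh(x) OSSwapLittleToHostInt64(x)
#else
// glibc provides htole64/le64toh here.
#include <endian.h>
#endif

int main()
{
    uint64_t decompressed_size = 0x0102030405060708ULL;

    // Metadata is stored little-endian regardless of the host CPU.
    uint64_t on_disk = htole64(decompressed_size);

    // Reading it back converts to host byte order again.
    uint64_t restored = le64toh(on_disk);

    std::printf("stored=%016llx restored=%016llx\n",
                static_cast<unsigned long long>(on_disk),
                static_cast<unsigned long long>(restored));
    return restored == decompressed_size ? 0 : 1;
}

Defining the shim once near the top of the translation unit lets the rest of the code stay identical on Linux and macOS, which appears to be the intent of the OS_DARWIN branches in the hunks above.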