diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index db170c3e28f..7fb2abebbbb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -12,6 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py - Build/Testing/Packaging Improvement - Documentation (changelog entry is not required) - Bug Fix (user-visible misbehavior in an official stable release) +- CI Fix or Improvement (changelog entry is not required) - Not for changelog (changelog entry is not required) diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 51670087ffe..2a98722414b 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -67,8 +67,6 @@ jobs: test_name: Compatibility check (amd64) runner_type: style-checker data: ${{ needs.RunConfig.outputs.data }} - run_command: | - python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions CompatibilityCheckAarch64: needs: [RunConfig, BuilderDebAarch64] if: ${{ !failure() && !cancelled() }} @@ -77,8 +75,6 @@ jobs: test_name: Compatibility check (aarch64) runner_type: style-checker data: ${{ needs.RunConfig.outputs.data }} - run_command: | - python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### @@ -146,9 +142,6 @@ jobs: test_name: Docker server image runner_type: style-checker data: ${{ needs.RunConfig.outputs.data }} - run_command: | - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-server --image-path docker/server --allow-build-reuse DockerKeeperImage: needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64] if: ${{ !failure() && !cancelled() }} @@ -157,9 +150,6 @@ jobs: test_name: Docker keeper image runner_type: style-checker data: ${{ needs.RunConfig.outputs.data }} - run_command: | - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper --allow-build-reuse ############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ @@ -176,14 +166,8 @@ jobs: uses: ./.github/workflows/reusable_test.yml with: test_name: ClickHouse build check - runner_type: style-checker + runner_type: style-checker-aarch64 data: ${{ needs.RunConfig.outputs.data }} - additional_envs: | - NEEDS_DATA< diff --git a/CHANGELOG.md b/CHANGELOG.md index b3e5dd709ab..9df678d4b9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,165 @@ ### Table of Contents +**[ClickHouse release v24.2, 2024-02-29](#242)**
**[ClickHouse release v24.1, 2024-01-30](#241)**
**[Changelog for 2023](https://clickhouse.com/docs/en/whats-new/changelog/2023/)**
# 2024 Changelog +### ClickHouse release 24.2, 2024-02-29 + +#### Backward Incompatible Change +* Validate suspicious/experimental types in nested types. Previously we didn't validate such types (except JSON) in nested types like Array/Tuple/Map. [#59385](https://github.com/ClickHouse/ClickHouse/pull/59385) ([Kruglov Pavel](https://github.com/Avogar)). +* Add sanity check for number of threads and block sizes. [#60138](https://github.com/ClickHouse/ClickHouse/pull/60138) ([Raúl Marín](https://github.com/Algunenano)). +* Don't infer floats in exponential notation by default. Add a setting `input_format_try_infer_exponent_floats` that will restore previous behaviour (disabled by default). Closes [#59476](https://github.com/ClickHouse/ClickHouse/issues/59476). [#59500](https://github.com/ClickHouse/ClickHouse/pull/59500) ([Kruglov Pavel](https://github.com/Avogar)). +* Allow alter operations to be surrounded by parenthesis. The emission of parentheses can be controlled by the `format_alter_operations_with_parentheses` config. By default, in formatted queries the parentheses are emitted as we store the formatted alter operations in some places as metadata (e.g.: mutations). The new syntax clarifies some of the queries where alter operations end in a list. E.g.: `ALTER TABLE x MODIFY TTL date GROUP BY a, b, DROP COLUMN c` cannot be parsed properly with the old syntax. In the new syntax the query `ALTER TABLE x (MODIFY TTL date GROUP BY a, b), (DROP COLUMN c)` is obvious. Older versions are not able to read the new syntax, therefore using the new syntax might cause issues if newer and older version of ClickHouse are mixed in a single cluster. [#59532](https://github.com/ClickHouse/ClickHouse/pull/59532) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). + +#### New Feature +* Added new syntax which allows to specify definer user in View/Materialized View. This allows to execute selects/inserts from views without explicit grants for underlying tables. So, a View will encapsulate the grants. [#54901](https://github.com/ClickHouse/ClickHouse/pull/54901) [#60439](https://github.com/ClickHouse/ClickHouse/pull/60439) ([pufit](https://github.com/pufit)). +* Try to detect file format automatically during schema inference if it's unknown in `file/s3/hdfs/url/azureBlobStorage` engines. Closes [#50576](https://github.com/ClickHouse/ClickHouse/issues/50576). [#59092](https://github.com/ClickHouse/ClickHouse/pull/59092) ([Kruglov Pavel](https://github.com/Avogar)). +* Implement auto-adjustment for asynchronous insert timeouts. The following settings are introduced: async_insert_poll_timeout_ms, async_insert_use_adaptive_busy_timeout, async_insert_busy_timeout_min_ms, async_insert_busy_timeout_max_ms, async_insert_busy_timeout_increase_rate, async_insert_busy_timeout_decrease_rate. [#58486](https://github.com/ClickHouse/ClickHouse/pull/58486) ([Julia Kartseva](https://github.com/jkartseva)). +* Allow to set up a quota for maximum sequential login failures. [#54737](https://github.com/ClickHouse/ClickHouse/pull/54737) ([Alexey Gerasimchuck](https://github.com/Demilivor)). +* A new aggregate function `groupArrayIntersect`. Follows up: [#49862](https://github.com/ClickHouse/ClickHouse/issues/49862). [#59598](https://github.com/ClickHouse/ClickHouse/pull/59598) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Backup & Restore support for `AzureBlobStorage`. Resolves [#50747](https://github.com/ClickHouse/ClickHouse/issues/50747). 
[#56988](https://github.com/ClickHouse/ClickHouse/pull/56988) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)). +* The user can now specify the template string directly in the query using `format_schema_rows_template` as an alternative to `format_template_row`. Closes [#31363](https://github.com/ClickHouse/ClickHouse/issues/31363). [#59088](https://github.com/ClickHouse/ClickHouse/pull/59088) ([Shaun Struwig](https://github.com/Blargian)). +* Implemented automatic conversion of merge tree tables of different kinds to replicated engine. Create empty `convert_to_replicated` file in table's data directory (`/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/`) and that table will be converted automatically on next server start. [#57798](https://github.com/ClickHouse/ClickHouse/pull/57798) ([Kirill](https://github.com/kirillgarbar)). +* Added query `ALTER TABLE table FORGET PARTITION partition` that removes ZooKeeper nodes, related to an empty partition. [#59507](https://github.com/ClickHouse/ClickHouse/pull/59507) ([Sergei Trifonov](https://github.com/serxa)). This is an expert-level feature. +* Support JWT credentials file for the NATS table engine. [#59543](https://github.com/ClickHouse/ClickHouse/pull/59543) ([Nickolaj Jepsen](https://github.com/nickolaj-jepsen)). +* Implemented `system.dns_cache` table, which can be useful for debugging DNS issues. [#59856](https://github.com/ClickHouse/ClickHouse/pull/59856) ([Kirill Nikiforov](https://github.com/allmazz)). +* The codec `LZ4HC` will accept a new level 2, which is faster than the previous minimum level 3, at the expense of less compression. In previous versions, `LZ4HC(2)` and less was the same as `LZ4HC(3)`. Author: [Cyan4973](https://github.com/Cyan4973). [#60090](https://github.com/ClickHouse/ClickHouse/pull/60090) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Implemented `system.dns_cache` table, which can be useful for debugging DNS issues. New server setting dns_cache_max_size. [#60257](https://github.com/ClickHouse/ClickHouse/pull/60257) ([Kirill Nikiforov](https://github.com/allmazz)). +* Support single-argument version for the `merge` table function, as `merge(['db_name', ] 'tables_regexp')`. [#60372](https://github.com/ClickHouse/ClickHouse/pull/60372) ([豪肥肥](https://github.com/HowePa)). +* Support negative positional arguments. Closes [#57736](https://github.com/ClickHouse/ClickHouse/issues/57736). [#58292](https://github.com/ClickHouse/ClickHouse/pull/58292) ([flynn](https://github.com/ucasfl)). +* Support specifying a set of permitted users for specific S3 settings in config using `user` key. [#60144](https://github.com/ClickHouse/ClickHouse/pull/60144) ([Antonio Andelic](https://github.com/antonio2368)). +* Added table function `mergeTreeIndex`. It represents the contents of index and marks files of `MergeTree` tables. It can be used for introspection. Syntax: `mergeTreeIndex(database, table, [with_marks = true])` where `database.table` is an existing table with `MergeTree` engine. [#58140](https://github.com/ClickHouse/ClickHouse/pull/58140) ([Anton Popov](https://github.com/CurtizJ)). + +#### Experimental Feature +* Added function `seriesOutliersDetectTukey` to detect outliers in series data using Tukey's fences algorithm. [#58632](https://github.com/ClickHouse/ClickHouse/pull/58632) ([Bhavna Jindal](https://github.com/bhavnajindal)). Keep in mind that the behavior will be changed in the next patch release. +* Add function `variantType` that returns Enum with variant type name for each row. 
[#59398](https://github.com/ClickHouse/ClickHouse/pull/59398) ([Kruglov Pavel](https://github.com/Avogar)). +* Support `LEFT JOIN`, `ALL INNER JOIN`, and simple subqueries for parallel replicas (only with analyzer). New setting `parallel_replicas_prefer_local_join` chooses local `JOIN` execution (by default) vs `GLOBAL JOIN`. All tables should exist on every replica from `cluster_for_parallel_replicas`. New settings `min_external_table_block_size_rows` and `min_external_table_block_size_bytes` are used to squash small blocks that are sent for temporary tables (only with analyzer). [#58916](https://github.com/ClickHouse/ClickHouse/pull/58916) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Allow concurrent table creation in the `Replicated` database while adding or recovering a new replica. [#59277](https://github.com/ClickHouse/ClickHouse/pull/59277) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Implement the comparison operator for `Variant` values and proper Field insertion into a `Variant` column. Don't allow creating a `Variant` type with similar variant types by default (allowed under the setting `allow_suspicious_variant_types`). Closes [#59996](https://github.com/ClickHouse/ClickHouse/issues/59996). Closes [#59850](https://github.com/ClickHouse/ClickHouse/issues/59850). [#60198](https://github.com/ClickHouse/ClickHouse/pull/60198) ([Kruglov Pavel](https://github.com/Avogar)). +* Disable parallel replicas JOIN with CTE (not analyzer). [#59239](https://github.com/ClickHouse/ClickHouse/pull/59239) ([Raúl Marín](https://github.com/Algunenano)). + +#### Performance Improvement +* The primary key will use less memory. [#60049](https://github.com/ClickHouse/ClickHouse/pull/60049) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Improve memory usage for the primary key and some other operations. [#60050](https://github.com/ClickHouse/ClickHouse/pull/60050) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* The tables' primary keys will be loaded in memory lazily on first access. This is controlled by the new MergeTree setting `primary_key_lazy_load`, which is on by default. This provides several advantages: - it will not be loaded for tables that are not used; - if there is not enough memory, an exception will be thrown on first use instead of at server startup. This provides several disadvantages: - the latency of loading the primary key will be paid on the first query rather than before accepting connections; this theoretically may introduce a thundering-herd problem. This closes [#11188](https://github.com/ClickHouse/ClickHouse/issues/11188). [#60093](https://github.com/ClickHouse/ClickHouse/pull/60093) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Vectorized distance functions used in vector search. [#58866](https://github.com/ClickHouse/ClickHouse/pull/58866) ([Robert Schulze](https://github.com/rschu1ze)). +* Vectorized function `dotProduct`, which is useful for vector search. [#60202](https://github.com/ClickHouse/ClickHouse/pull/60202) ([Robert Schulze](https://github.com/rschu1ze)). +* Add short-circuit ability for the `dictGetOrDefault` function. Closes [#52098](https://github.com/ClickHouse/ClickHouse/issues/52098). [#57767](https://github.com/ClickHouse/ClickHouse/pull/57767) ([jsc0218](https://github.com/jsc0218)). +* Keeper improvement: cache only a certain amount of logs in memory, controlled by `latest_logs_cache_size_threshold` and `commit_logs_cache_size_threshold`.
[#59460](https://github.com/ClickHouse/ClickHouse/pull/59460) ([Antonio Andelic](https://github.com/antonio2368)). +* Keeper improvement: reduce the size of data nodes even more. [#59592](https://github.com/ClickHouse/ClickHouse/pull/59592) ([Antonio Andelic](https://github.com/antonio2368)). +* Continue optimizing branch misses of the `if` function when the result type is `Float*/Decimal*/*Int*`, a follow-up of https://github.com/ClickHouse/ClickHouse/pull/57885. [#59148](https://github.com/ClickHouse/ClickHouse/pull/59148) ([李扬](https://github.com/taiyang-li)). +* Optimize the `if` function when the input type is `Map`; the speed-up is up to ~10x. [#59413](https://github.com/ClickHouse/ClickHouse/pull/59413) ([李扬](https://github.com/taiyang-li)). +* Improve performance of the `Int8` type by implementing strict aliasing (we already have it for `UInt8` and all other integer types). [#59485](https://github.com/ClickHouse/ClickHouse/pull/59485) ([Raúl Marín](https://github.com/Algunenano)). +* Optimize performance of conditional sum/avg for big integer and big decimal types by reducing branch misses. [#59504](https://github.com/ClickHouse/ClickHouse/pull/59504) ([李扬](https://github.com/taiyang-li)). +* Improve performance of SELECTs with active mutations. [#59531](https://github.com/ClickHouse/ClickHouse/pull/59531) ([Azat Khuzhin](https://github.com/azat)). +* Optimized the function `isNotNull` with AVX2. [#59621](https://github.com/ClickHouse/ClickHouse/pull/59621) ([李扬](https://github.com/taiyang-li)). +* Improve ASOF JOIN performance for sorted or almost sorted data. [#59731](https://github.com/ClickHouse/ClickHouse/pull/59731) ([Maksim Kita](https://github.com/kitaisreal)). +* The previous default value of 1 MB for `async_insert_max_data_size` appeared to be too small. The new one is 10 MiB. [#59536](https://github.com/ClickHouse/ClickHouse/pull/59536) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Use multiple threads while reading the metadata of tables from a backup while executing the RESTORE command. [#60040](https://github.com/ClickHouse/ClickHouse/pull/60040) ([Vitaly Baranov](https://github.com/vitlibar)). +* Now if `StorageBuffer` has more than 1 shard (`num_layers` > 1), background flushes will happen simultaneously for all shards in multiple threads. [#60111](https://github.com/ClickHouse/ClickHouse/pull/60111) ([alesapin](https://github.com/alesapin)). + +#### Improvement +* When the output format is `Pretty` and a block consists of a single numeric value which exceeds one million, a readable number will be printed on the right side of the table. [#60379](https://github.com/ClickHouse/ClickHouse/pull/60379) ([rogeryk](https://github.com/rogeryk)). +* Added settings `split_parts_ranges_into_intersecting_and_non_intersecting_final` and `split_intersecting_parts_ranges_into_layers_final`. These settings are needed to disable optimizations for queries with `FINAL` and are intended for debugging only. [#59705](https://github.com/ClickHouse/ClickHouse/pull/59705) ([Maksim Kita](https://github.com/kitaisreal)). Actually not only for that - they can also lower memory usage at the expense of performance. +* Rename the setting `extract_kvp_max_pairs_per_row` to `extract_key_value_pairs_max_pairs_per_row`. The issue (an unnecessary abbreviation in the setting name) was introduced in https://github.com/ClickHouse/ClickHouse/pull/43606. Fix the documentation of this setting. [#59683](https://github.com/ClickHouse/ClickHouse/pull/59683) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
[#59960](https://github.com/ClickHouse/ClickHouse/pull/59960) ([jsc0218](https://github.com/jsc0218)). +* Running `ALTER COLUMN MATERIALIZE` on a column with `DEFAULT` or `MATERIALIZED` expression now precisely follows the semantics. [#58023](https://github.com/ClickHouse/ClickHouse/pull/58023) ([Duc Canh Le](https://github.com/canhld94)). +* Enabled an exponential backoff logic for errors during mutations. It will reduce the CPU usage, memory usage and log file sizes. [#58036](https://github.com/ClickHouse/ClickHouse/pull/58036) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Add improvement to count the `InitialQuery` Profile Event. [#58195](https://github.com/ClickHouse/ClickHouse/pull/58195) ([Unalian](https://github.com/Unalian)). +* Allow to define `volume_priority` in `storage_configuration`. [#58533](https://github.com/ClickHouse/ClickHouse/pull/58533) ([Andrey Zvonov](https://github.com/zvonand)). +* Add support for the `Date32` type in the `T64` codec. [#58738](https://github.com/ClickHouse/ClickHouse/pull/58738) ([Hongbin Ma](https://github.com/binmahone)). +* Allow trailing commas in types with several items. [#59119](https://github.com/ClickHouse/ClickHouse/pull/59119) ([Aleksandr Musorin](https://github.com/AVMusorin)). +* Settings for the Distributed table engine can now be specified in the server configuration file (similar to MergeTree settings), e.g. ` false `. [#59291](https://github.com/ClickHouse/ClickHouse/pull/59291) ([Azat Khuzhin](https://github.com/azat)). +* Retry disconnects and expired sessions when reading `system.zookeeper`. This is helpful when reading many rows from `system.zookeeper` table especially in the presence of fault-injected disconnects. [#59388](https://github.com/ClickHouse/ClickHouse/pull/59388) ([Alexander Gololobov](https://github.com/davenger)). +* Do not interpret numbers with leading zeroes as octals when `input_format_values_interpret_expressions=0`. [#59403](https://github.com/ClickHouse/ClickHouse/pull/59403) ([Joanna Hulboj](https://github.com/jh0x)). +* At startup and whenever config files are changed, ClickHouse updates the hard memory limits of its total memory tracker. These limits are computed based on various server settings and cgroups limits (on Linux). Previously, setting `/sys/fs/cgroup/memory.max` (for cgroups v2) was hard-coded. As a result, cgroup v2 memory limits configured for nested groups (hierarchies), e.g. `/sys/fs/cgroup/my/nested/group/memory.max` were ignored. This is now fixed. The behavior of v1 memory limits remains unchanged. [#59435](https://github.com/ClickHouse/ClickHouse/pull/59435) ([Robert Schulze](https://github.com/rschu1ze)). +* New profile events added to observe the time spent on calculating PK/projections/secondary indices during `INSERT`-s. [#59436](https://github.com/ClickHouse/ClickHouse/pull/59436) ([Nikita Taranov](https://github.com/nickitat)). +* Allow to define a starting point for S3Queue with Ordered mode at the creation using a setting `s3queue_last_processed_path`. [#59446](https://github.com/ClickHouse/ClickHouse/pull/59446) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Made comments for system tables also available in `system.tables` in `clickhouse-local`. [#59493](https://github.com/ClickHouse/ClickHouse/pull/59493) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* `system.zookeeper` table: previously the whole result was accumulated in memory and returned as one big chunk. 
This change should help to reduce memory consumption when reading many rows from `system.zookeeper`, allow showing intermediate progress (how many rows have been read so far) and avoid hitting the connection timeout when the result set is big. [#59545](https://github.com/ClickHouse/ClickHouse/pull/59545) ([Alexander Gololobov](https://github.com/davenger)). +* Now the dashboard understands both compressed and uncompressed states of the URL's #hash (backward compatibility). Continuation of [#59124](https://github.com/ClickHouse/ClickHouse/issues/59124). [#59548](https://github.com/ClickHouse/ClickHouse/pull/59548) ([Amos Bird](https://github.com/amosbird)). +* Bumped Intel QPL (used by the codec `DEFLATE_QPL`) from v1.3.1 to v1.4.0. Also fixed a bug in the polling timeout mechanism: we observed that in some cases the timeout didn't work properly, and when a timeout happened, IAA and the CPU could process the buffer concurrently. For now, we make sure the IAA codec status is not QPL_STS_BEING_PROCESSED before falling back to the SW codec. [#59551](https://github.com/ClickHouse/ClickHouse/pull/59551) ([jasperzhu](https://github.com/jinjunzh)). +* Do not show a warning about the server version in ClickHouse Cloud because ClickHouse Cloud handles seamless upgrades automatically. [#59657](https://github.com/ClickHouse/ClickHouse/pull/59657) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* After self-extraction, the temporary binary is moved instead of copied. [#59661](https://github.com/ClickHouse/ClickHouse/pull/59661) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix stack unwinding on Apple macOS. This closes [#53653](https://github.com/ClickHouse/ClickHouse/issues/53653). [#59690](https://github.com/ClickHouse/ClickHouse/pull/59690) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Check for stack overflow in parsers even if the user misconfigured the `max_parser_depth` setting to a very high value. This closes [#59622](https://github.com/ClickHouse/ClickHouse/issues/59622). [#59697](https://github.com/ClickHouse/ClickHouse/pull/59697) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#60434](https://github.com/ClickHouse/ClickHouse/pull/60434) +* Unify the behaviour of XML- and SQL-created named collections in Kafka storage. [#59710](https://github.com/ClickHouse/ClickHouse/pull/59710) ([Pervakov Grigorii](https://github.com/GrigoryPervakov)). +* When `merge_max_block_size_bytes` is small enough and tables contain wide rows (strings or tuples), background merges could get stuck in an endless loop. This behaviour is fixed. Follow-up for https://github.com/ClickHouse/ClickHouse/pull/59340. [#59812](https://github.com/ClickHouse/ClickHouse/pull/59812) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Allow a UUID in `replica_path` if `CREATE TABLE` explicitly has it. [#59908](https://github.com/ClickHouse/ClickHouse/pull/59908) ([Azat Khuzhin](https://github.com/azat)). +* Add the column `metadata_version` of ReplicatedMergeTree tables to the `system.tables` system table. [#59942](https://github.com/ClickHouse/ClickHouse/pull/59942) ([Maksim Kita](https://github.com/kitaisreal)). +* Keeper improvement: send only Keeper-related metrics/events for Prometheus. [#59945](https://github.com/ClickHouse/ClickHouse/pull/59945) ([Antonio Andelic](https://github.com/antonio2368)). +* The dashboard will display metrics across different ClickHouse versions even if the structure of system tables has changed after the upgrade.
[#59967](https://github.com/ClickHouse/ClickHouse/pull/59967) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Allow loading AZ info from a file. [#59976](https://github.com/ClickHouse/ClickHouse/pull/59976) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Keeper improvement: add retries on failures for Disk-related operations. [#59980](https://github.com/ClickHouse/ClickHouse/pull/59980) ([Antonio Andelic](https://github.com/antonio2368)). +* Add the new config setting `backups.remove_backup_files_after_failure`: `<backups><remove_backup_files_after_failure>true</remove_backup_files_after_failure></backups>`. [#60002](https://github.com/ClickHouse/ClickHouse/pull/60002) ([Vitaly Baranov](https://github.com/vitlibar)). +* When copying an S3 file, fall back to a buffer copy if GCP returned an `Internal Error` with the `GATEWAY_TIMEOUT` HTTP error code. [#60164](https://github.com/ClickHouse/ClickHouse/pull/60164) ([Maksim Kita](https://github.com/kitaisreal)). +* Short-circuit execution for `ULIDStringToDateTime`. [#60211](https://github.com/ClickHouse/ClickHouse/pull/60211) ([Juan Madurga](https://github.com/jlmadurga)). +* Added a `query_id` column to the tables `system.backups` and `system.backup_log`. Added the error stacktrace to the `error` column. [#60220](https://github.com/ClickHouse/ClickHouse/pull/60220) ([Maksim Kita](https://github.com/kitaisreal)). +* Connections through the MySQL port now automatically run with the setting `prefer_column_name_to_alias = 1` to support QuickSight out-of-the-box. Also, the settings `mysql_map_string_to_text_in_show_columns` and `mysql_map_fixed_string_to_text_in_show_columns` are now enabled by default, also affecting only MySQL connections. This increases compatibility with more BI tools. [#60365](https://github.com/ClickHouse/ClickHouse/pull/60365) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix a race condition in JavaScript code leading to duplicate charts on top of each other. [#60392](https://github.com/ClickHouse/ClickHouse/pull/60392) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Build/Testing/Packaging Improvement +* Added builds and tests with coverage collection with introspection. Continuation of [#56102](https://github.com/ClickHouse/ClickHouse/issues/56102). [#58792](https://github.com/ClickHouse/ClickHouse/pull/58792) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update the Rust toolchain in `corrosion-cmake` when the CMake cross-compilation toolchain variable is set. [#59309](https://github.com/ClickHouse/ClickHouse/pull/59309) ([Aris Tritas](https://github.com/aris-aiven)). +* Add some fuzzing to ASTLiterals. [#59383](https://github.com/ClickHouse/ClickHouse/pull/59383) ([Raúl Marín](https://github.com/Algunenano)). +* If you want to run initdb scripts every time the ClickHouse container starts, set the environment variable `CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS`. [#59808](https://github.com/ClickHouse/ClickHouse/pull/59808) ([Alexander Nikolaev](https://github.com/AlexNik)). +* Remove the ability to disable generic ClickHouse components (like server/client/...), but keep some that require extra libraries (like ODBC or Keeper). [#59857](https://github.com/ClickHouse/ClickHouse/pull/59857) ([Azat Khuzhin](https://github.com/azat)). +* The query fuzzer will fuzz SETTINGS inside queries. [#60087](https://github.com/ClickHouse/ClickHouse/pull/60087) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add support for building ClickHouse with clang-19 (master).
[#60448](https://github.com/ClickHouse/ClickHouse/pull/60448) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fix a "Non-ready set" error in TTL WHERE. [#57430](https://github.com/ClickHouse/ClickHouse/pull/57430) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix a bug in the `quantilesGK` function [#58216](https://github.com/ClickHouse/ClickHouse/pull/58216) ([李扬](https://github.com/taiyang-li)). +* Fix a wrong behavior with `intDiv` for Decimal arguments [#59243](https://github.com/ClickHouse/ClickHouse/pull/59243) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix `translate` with FixedString input [#59356](https://github.com/ClickHouse/ClickHouse/pull/59356) ([Raúl Marín](https://github.com/Algunenano)). +* Fix digest calculation in Keeper [#59439](https://github.com/ClickHouse/ClickHouse/pull/59439) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix stacktraces for binaries without debug symbols [#59444](https://github.com/ClickHouse/ClickHouse/pull/59444) ([Azat Khuzhin](https://github.com/azat)). +* Fix `ASTAlterCommand::formatImpl` in case of column specific settings… [#59445](https://github.com/ClickHouse/ClickHouse/pull/59445) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix `SELECT * FROM [...] ORDER BY ALL` with Analyzer [#59462](https://github.com/ClickHouse/ClickHouse/pull/59462) ([zhongyuankai](https://github.com/zhongyuankai)). +* Fix possible uncaught exception during distributed query cancellation [#59487](https://github.com/ClickHouse/ClickHouse/pull/59487) ([Azat Khuzhin](https://github.com/azat)). +* Make MAX use the same rules as permutation for complex types [#59498](https://github.com/ClickHouse/ClickHouse/pull/59498) ([Raúl Marín](https://github.com/Algunenano)). +* Fix corner case when passing `update_insert_deduplication_token_in_dependent_materialized_views` [#59544](https://github.com/ClickHouse/ClickHouse/pull/59544) ([Jordi Villar](https://github.com/jrdi)). +* Fix incorrect result of arrayElement / map on empty value [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)). +* Fix crash in topK when merging empty states [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)). +* Fix distributed table with a constant sharding key [#59606](https://github.com/ClickHouse/ClickHouse/pull/59606) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix KQL issue found by WingFuzz [#59626](https://github.com/ClickHouse/ClickHouse/pull/59626) ([Yong Wang](https://github.com/kashwy)). +* Fix error "Read beyond last offset" for AsynchronousBoundedReadBuffer [#59630](https://github.com/ClickHouse/ClickHouse/pull/59630) ([Vitaly Baranov](https://github.com/vitlibar)). +* Maintain function alias in RewriteSumFunctionWithSumAndCountVisitor [#59658](https://github.com/ClickHouse/ClickHouse/pull/59658) ([Raúl Marín](https://github.com/Algunenano)). +* Fix query start time on non initial queries [#59662](https://github.com/ClickHouse/ClickHouse/pull/59662) ([Raúl Marín](https://github.com/Algunenano)). +* Validate types of arguments for `minmax` skipping index [#59733](https://github.com/ClickHouse/ClickHouse/pull/59733) ([Anton Popov](https://github.com/CurtizJ)). +* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)). 
+* Fix AST fuzzer issue in function `countMatches` [#59752](https://github.com/ClickHouse/ClickHouse/pull/59752) ([Robert Schulze](https://github.com/rschu1ze)). +* RabbitMQ: fix having neither acked nor nacked messages [#59775](https://github.com/ClickHouse/ClickHouse/pull/59775) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix StorageURL doing some of the query execution in single thread [#59833](https://github.com/ClickHouse/ClickHouse/pull/59833) ([Michael Kolupaev](https://github.com/al13n321)). +* S3Queue: fix uninitialized value [#59897](https://github.com/ClickHouse/ClickHouse/pull/59897) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix parsing of partition expressions surrounded by parens [#59901](https://github.com/ClickHouse/ClickHouse/pull/59901) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix crash in JSONColumnsWithMetadata format over HTTP [#59925](https://github.com/ClickHouse/ClickHouse/pull/59925) ([Kruglov Pavel](https://github.com/Avogar)). +* Do not rewrite sum to count if the return value differs in Analyzer [#59926](https://github.com/ClickHouse/ClickHouse/pull/59926) ([Azat Khuzhin](https://github.com/azat)). +* UniqExactSet read crash fix [#59928](https://github.com/ClickHouse/ClickHouse/pull/59928) ([Maksim Kita](https://github.com/kitaisreal)). +* ReplicatedMergeTree invalid metadata_version fix [#59946](https://github.com/ClickHouse/ClickHouse/pull/59946) ([Maksim Kita](https://github.com/kitaisreal)). +* Fix data race in `StorageDistributed` [#59987](https://github.com/ClickHouse/ClickHouse/pull/59987) ([Nikita Taranov](https://github.com/nickitat)). +* Docker: run init scripts when option is enabled rather than disabled [#59991](https://github.com/ClickHouse/ClickHouse/pull/59991) ([jktng](https://github.com/jktng)). +* Fix INSERT into `SQLite` with single quote (by escaping single quotes with a quote instead of backslash) [#60015](https://github.com/ClickHouse/ClickHouse/pull/60015) ([Azat Khuzhin](https://github.com/azat)). +* Fix several logical errors in `arrayFold` [#60022](https://github.com/ClickHouse/ClickHouse/pull/60022) ([Raúl Marín](https://github.com/Algunenano)). +* Fix optimize_uniq_to_count removing the column alias [#60026](https://github.com/ClickHouse/ClickHouse/pull/60026) ([Raúl Marín](https://github.com/Algunenano)). +* Fix possible exception from S3Queue table on drop [#60036](https://github.com/ClickHouse/ClickHouse/pull/60036) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix formatting of NOT with single literals [#60042](https://github.com/ClickHouse/ClickHouse/pull/60042) ([Raúl Marín](https://github.com/Algunenano)). +* Use max_query_size from context in DDLLogEntry instead of hardcoded 4096 [#60083](https://github.com/ClickHouse/ClickHouse/pull/60083) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix inconsistent formatting of queries containing tables named `table`. Fix wrong formatting of queries with `UNION ALL`, `INTERSECT`, and `EXCEPT` when their structure wasn't linear. This closes #52349. Fix wrong formatting of `SYSTEM` queries, including `SYSTEM ... DROP FILESYSTEM CACHE`, `SYSTEM ... REFRESH/START/STOP/CANCEL/TEST VIEW`, `SYSTEM ENABLE/DISABLE FAILPOINT`. Fix formatting of parameterized DDL queries. Fix the formatting of the `DESCRIBE FILESYSTEM CACHE` query. Fix incorrect formatting of the `SET param_...` (a query setting a parameter). Fix incorrect formatting of `CREATE INDEX` queries. Fix inconsistent formatting of `CREATE USER` and similar queries. 
Fix inconsistent formatting of `CREATE SETTINGS PROFILE`. Fix incorrect formatting of `ALTER ... MODIFY REFRESH`. Fix inconsistent formatting of window functions if frame offsets were expressions. Fix inconsistent formatting of `RESPECT NULLS` and `IGNORE NULLS` if they were used after a function that implements an operator (such as `plus`). Fix idiotic formatting of `SYSTEM SYNC REPLICA ... LIGHTWEIGHT FROM ...`. Fix inconsistent formatting of invalid queries with `GROUP BY GROUPING SETS ... WITH ROLLUP/CUBE/TOTALS`. Fix inconsistent formatting of `GRANT CURRENT GRANTS`. Fix inconsistent formatting of `CREATE TABLE (... COLLATE)`. Additionally, I fixed the incorrect formatting of `EXPLAIN` in subqueries (#60102). Fixed incorrect formatting of lambda functions (#60012). Added a check so there is no way to miss these abominations in the future. [#60095](https://github.com/ClickHouse/ClickHouse/pull/60095) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix inconsistent formatting of explain in subqueries [#60102](https://github.com/ClickHouse/ClickHouse/pull/60102) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix cosineDistance crash with Nullable [#60150](https://github.com/ClickHouse/ClickHouse/pull/60150) ([Raúl Marín](https://github.com/Algunenano)). +* Allow casting of bools in string representation to true bools [#60160](https://github.com/ClickHouse/ClickHouse/pull/60160) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix `system.s3queue_log` [#60166](https://github.com/ClickHouse/ClickHouse/pull/60166) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix arrayReduce with nullable aggregate function name [#60188](https://github.com/ClickHouse/ClickHouse/pull/60188) ([Raúl Marín](https://github.com/Algunenano)). +* Hide sensitive info for `S3Queue` [#60233](https://github.com/ClickHouse/ClickHouse/pull/60233) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix http exception codes. [#60252](https://github.com/ClickHouse/ClickHouse/pull/60252) ([Austin Kothig](https://github.com/kothiga)). +* S3Queue: fix a bug (also fixes flaky test_storage_s3_queue/test.py::test_shards_distributed) [#60282](https://github.com/ClickHouse/ClickHouse/pull/60282) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix use-of-uninitialized-value and invalid result in hashing functions with IPv6 [#60359](https://github.com/ClickHouse/ClickHouse/pull/60359) ([Kruglov Pavel](https://github.com/Avogar)). +* Fix OptimizeDateOrDateTimeConverterWithPreimageVisitor with null arguments [#60453](https://github.com/ClickHouse/ClickHouse/pull/60453) ([Raúl Marín](https://github.com/Algunenano)). +* Fixed a minor bug that prevented distributed table queries sent from either KQL or PRQL dialect clients to be executed on replicas. [#59674](https://github.com/ClickHouse/ClickHouse/issues/59674). [#60470](https://github.com/ClickHouse/ClickHouse/pull/60470) ([Alexey Milovidov](https://github.com/alexey-milovidov)) [#59674](https://github.com/ClickHouse/ClickHouse/pull/59674) ([Austin Kothig](https://github.com/kothiga)). + + ### ClickHouse release 24.1, 2024-01-30 #### Backward Incompatible Change diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 885080a3e38..2929c64ded8 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54483) +SET(VERSION_REVISION 54484) SET(VERSION_MAJOR 24) -SET(VERSION_MINOR 2) +SET(VERSION_MINOR 3) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 5a024dfc0936e062770d0cfaad0805b57c1fba17) -SET(VERSION_DESCRIBE v24.2.1.1-testing) -SET(VERSION_STRING 24.2.1.1) +SET(VERSION_GITHASH 891689a41506d00aa169548f5b4a8774351242c4) +SET(VERSION_DESCRIBE v24.3.1.1-testing) +SET(VERSION_STRING 24.3.1.1) # end of autochange diff --git a/contrib/libarchive-cmake/CMakeLists.txt b/contrib/libarchive-cmake/CMakeLists.txt index cd5658b7086..e89770da5f6 100644 --- a/contrib/libarchive-cmake/CMakeLists.txt +++ b/contrib/libarchive-cmake/CMakeLists.txt @@ -157,7 +157,7 @@ if (TARGET ch_contrib::zlib) endif() if (TARGET ch_contrib::zstd) - target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1) + target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1 HAVE_LIBZSTD_COMPRESSOR=1) target_link_libraries(_libarchive PRIVATE ch_contrib::zstd) endif() diff --git a/contrib/libmetrohash/src/metrohash128.h b/contrib/libmetrohash/src/metrohash128.h index 639a4fa97e3..2dbb6ca5a8a 100644 --- a/contrib/libmetrohash/src/metrohash128.h +++ b/contrib/libmetrohash/src/metrohash128.h @@ -25,21 +25,21 @@ public: static const uint32_t bits = 128; // Constructor initializes the same as Initialize() - MetroHash128(const uint64_t seed=0); - + explicit MetroHash128(const uint64_t seed=0); + // Initializes internal state for new hash with optional seed void Initialize(const uint64_t seed=0); - + // Update the hash state with a string of bytes. If the length // is sufficiently long, the implementation switches to a bulk // hashing algorithm directly on the argument buffer for speed. void Update(const uint8_t * buffer, const uint64_t length); - + // Constructs the final hash and writes it to the argument buffer. // After a hash is finalized, this instance must be Initialized()-ed // again or the behavior of Update() and Finalize() is undefined. void Finalize(uint8_t * const hash); - + // A non-incremental function implementation. This can be significantly // faster than the incremental implementation for some usage patterns. static void Hash(const uint8_t * buffer, const uint64_t length, uint8_t * const hash, const uint64_t seed=0); @@ -57,7 +57,7 @@ private: static const uint64_t k1 = 0x8648DBDB; static const uint64_t k2 = 0x7BDEC03B; static const uint64_t k3 = 0x2F5870A5; - + struct { uint64_t v[4]; } state; struct { uint8_t b[32]; } input; uint64_t bytes; diff --git a/contrib/libmetrohash/src/metrohash64.h b/contrib/libmetrohash/src/metrohash64.h index d58898b117d..911e54e6863 100644 --- a/contrib/libmetrohash/src/metrohash64.h +++ b/contrib/libmetrohash/src/metrohash64.h @@ -25,21 +25,21 @@ public: static const uint32_t bits = 64; // Constructor initializes the same as Initialize() - MetroHash64(const uint64_t seed=0); - + explicit MetroHash64(const uint64_t seed=0); + // Initializes internal state for new hash with optional seed void Initialize(const uint64_t seed=0); - + // Update the hash state with a string of bytes. If the length // is sufficiently long, the implementation switches to a bulk // hashing algorithm directly on the argument buffer for speed. void Update(const uint8_t * buffer, const uint64_t length); - + // Constructs the final hash and writes it to the argument buffer. // After a hash is finalized, this instance must be Initialized()-ed // again or the behavior of Update() and Finalize() is undefined. 
void Finalize(uint8_t * const hash); - + // A non-incremental function implementation. This can be significantly // faster than the incremental implementation for some usage patterns. static void Hash(const uint8_t * buffer, const uint64_t length, uint8_t * const hash, const uint64_t seed=0); @@ -57,7 +57,7 @@ private: static const uint64_t k1 = 0xA2AA033B; static const uint64_t k2 = 0x62992FC1; static const uint64_t k3 = 0x30BC5B29; - + struct { uint64_t v[4]; } state; struct { uint8_t b[32]; } input; uint64_t bytes; diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index aec2add2857..dc181339786 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -57,8 +57,20 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th sudo mv /etc/clickhouse-server/config.d/zookeeper.xml.tmp /etc/clickhouse-server/config.d/zookeeper.xml # it contains some new settings, but we can safely remove it + rm /etc/clickhouse-server/config.d/handlers.yaml rm /etc/clickhouse-server/users.d/s3_cache_new.xml rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml + + function remove_keeper_config() + { + sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \ + | sed "/<$1>$2<\/$1>/d" \ + > /etc/clickhouse-server/config.d/keeper_port.xml.tmp + sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml + } + # commit_logs_cache_size_threshold setting doesn't exist on some older versions + remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+" + remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+" fi # For flaky check we also enable thread fuzzer diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh index a139327e12e..de9ac3b3a69 100644 --- a/docker/test/upgrade/run.sh +++ b/docker/test/upgrade/run.sh @@ -89,6 +89,7 @@ rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xm rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml rm /etc/clickhouse-server/config.d/storage_conf_02963.xml rm /etc/clickhouse-server/config.d/backoff_failed_mutation.xml +rm /etc/clickhouse-server/config.d/handlers.yaml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml rm /etc/clickhouse-server/users.d/s3_cache_new.xml rm /etc/clickhouse-server/users.d/replicated_ddl_entry.xml @@ -136,6 +137,7 @@ rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xm rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml rm /etc/clickhouse-server/config.d/storage_conf_02963.xml rm /etc/clickhouse-server/config.d/backoff_failed_mutation.xml +rm /etc/clickhouse-server/config.d/handlers.yaml rm /etc/clickhouse-server/config.d/block_number.xml rm /etc/clickhouse-server/users.d/nonconst_timezone.xml rm /etc/clickhouse-server/users.d/s3_cache_new.xml diff --git a/docs/changelogs/v23.10.1.1976-stable.md b/docs/changelogs/v23.10.1.1976-stable.md index 0e7e7bcd55a..b08383a859b 100644 --- a/docs/changelogs/v23.10.1.1976-stable.md +++ b/docs/changelogs/v23.10.1.1976-stable.md @@ -403,4 +403,3 @@ sidebar_label: 2023 * Do not remove part if `Too many open files` is thrown [#56238](https://github.com/ClickHouse/ClickHouse/pull/56238) ([Nikolay Degterinsky](https://github.com/evillique)). * Fix ORC commit [#56261](https://github.com/ClickHouse/ClickHouse/pull/56261) ([Raúl Marín](https://github.com/Algunenano)). 
* Fix typo in largestTriangleThreeBuckets.md [#56263](https://github.com/ClickHouse/ClickHouse/pull/56263) ([Nikita Taranov](https://github.com/nickitat)). - diff --git a/docs/changelogs/v23.5.1.3174-stable.md b/docs/changelogs/v23.5.1.3174-stable.md index 01e5425de71..2212eb6e893 100644 --- a/docs/changelogs/v23.5.1.3174-stable.md +++ b/docs/changelogs/v23.5.1.3174-stable.md @@ -596,4 +596,3 @@ sidebar_label: 2023 * Fix assertion from stress test [#50718](https://github.com/ClickHouse/ClickHouse/pull/50718) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix flaky unit test [#50719](https://github.com/ClickHouse/ClickHouse/pull/50719) ([Kseniia Sumarokova](https://github.com/kssenii)). * Show correct sharing state in system.query_cache [#50728](https://github.com/ClickHouse/ClickHouse/pull/50728) ([Robert Schulze](https://github.com/rschu1ze)). - diff --git a/docs/changelogs/v23.6.1.1524-stable.md b/docs/changelogs/v23.6.1.1524-stable.md index 6d295d61ef4..b91c5340789 100644 --- a/docs/changelogs/v23.6.1.1524-stable.md +++ b/docs/changelogs/v23.6.1.1524-stable.md @@ -298,4 +298,3 @@ sidebar_label: 2023 * Update version_date.tsv and changelogs after v23.4.5.22-stable [#51638](https://github.com/ClickHouse/ClickHouse/pull/51638) ([robot-clickhouse](https://github.com/robot-clickhouse)). * Update version_date.tsv and changelogs after v23.3.7.5-lts [#51639](https://github.com/ClickHouse/ClickHouse/pull/51639) ([robot-clickhouse](https://github.com/robot-clickhouse)). * Update parts.md [#51643](https://github.com/ClickHouse/ClickHouse/pull/51643) ([Ramazan Polat](https://github.com/ramazanpolat)). - diff --git a/docs/changelogs/v23.8.1.2992-lts.md b/docs/changelogs/v23.8.1.2992-lts.md index e3e0e4f0344..7c224b19350 100644 --- a/docs/changelogs/v23.8.1.2992-lts.md +++ b/docs/changelogs/v23.8.1.2992-lts.md @@ -588,4 +588,3 @@ sidebar_label: 2023 * tests: mark 02152_http_external_tables_memory_tracking as no-parallel [#54155](https://github.com/ClickHouse/ClickHouse/pull/54155) ([Azat Khuzhin](https://github.com/azat)). * The external logs have had colliding arguments [#54165](https://github.com/ClickHouse/ClickHouse/pull/54165) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). * Rename macro [#54169](https://github.com/ClickHouse/ClickHouse/pull/54169) ([Kseniia Sumarokova](https://github.com/kssenii)). - diff --git a/docs/changelogs/v23.9.1.1854-stable.md b/docs/changelogs/v23.9.1.1854-stable.md index aa27cd34478..bccd082bbaa 100644 --- a/docs/changelogs/v23.9.1.1854-stable.md +++ b/docs/changelogs/v23.9.1.1854-stable.md @@ -379,4 +379,3 @@ sidebar_label: 2023 * Fix typo in packager when ccache is used [#55104](https://github.com/ClickHouse/ClickHouse/pull/55104) ([Ilya Yatsishin](https://github.com/qoega)). * Reduce flakiness of 01455_opentelemetry_distributed [#55111](https://github.com/ClickHouse/ClickHouse/pull/55111) ([Michael Kolupaev](https://github.com/al13n321)). * Fix build [#55113](https://github.com/ClickHouse/ClickHouse/pull/55113) ([Alexey Milovidov](https://github.com/alexey-milovidov)). - diff --git a/docs/en/development/architecture.md b/docs/en/development/architecture.md index d3a29c9171b..6428c0e90d5 100644 --- a/docs/en/development/architecture.md +++ b/docs/en/development/architecture.md @@ -276,5 +276,3 @@ Besides, each replica stores its state in ZooKeeper as the set of parts and its :::note The ClickHouse cluster consists of independent shards, and each shard consists of replicas. 
The cluster is **not elastic**, so after adding a new shard, data is not rebalanced between shards automatically. Instead, the cluster load is supposed to be adjusted to be uneven. This implementation gives you more control, and it is ok for relatively small clusters, such as tens of nodes. But for clusters with hundreds of nodes that we are using in production, this approach becomes a significant drawback. We should implement a table engine that spans across the cluster with dynamically replicated regions that could be split and balanced between clusters automatically. ::: - -[Original article](https://clickhouse.com/docs/en/development/architecture/) diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md index c6525121667..0843ff1ac47 100644 --- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md +++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md @@ -19,6 +19,8 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32) ### Engine parameters +- `endpoint` — AzureBlobStorage endpoint URL with container & prefix. Optionally can contain account_name if the authentication method used needs it. (http://azurite1:{port}/[account_name]{container_name}/{data_prefix}) or these parameters can be provided separately using storage_account_url, account_name & container. For specifying prefix, endpoint should be used. +- `endpoint_contains_account_name` - This flag is used to specify if endpoint contains account_name as it is only needed for certain authentication methods. (Default : true) - `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key) - `container_name` - Container name - `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 228b2c8884f..58717b33aef 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -1242,7 +1242,9 @@ Configuration markup: ``` Connection parameters: -* `storage_account_url` - **Required**, Azure Blob Storage account URL, like `http://account.blob.core.windows.net` or `http://azurite1:10000/devstoreaccount1`. +* `endpoint` — AzureBlobStorage endpoint URL with container & prefix. Optionally can contain account_name if the authentication method used needs it. (`http://account.blob.core.windows.net:{port}/[account_name]{container_name}/{data_prefix}`) or these parameters can be provided separately using storage_account_url, account_name & container. For specifying prefix, endpoint should be used. +* `endpoint_contains_account_name` - This flag is used to specify if endpoint contains account_name as it is only needed for certain authentication methods. 
(Default : true) +* `storage_account_url` - Required if endpoint is not specified, Azure Blob Storage account URL, like `http://account.blob.core.windows.net` or `http://azurite1:10000/devstoreaccount1`. * `container_name` - Target container name, defaults to `default-container`. * `container_already_exists` - If set to `false`, a new container `container_name` is created in the storage account, if set to `true`, disk connects to the container directly, and if left unset, disk connects to the account, checks if the container `container_name` exists, and creates it if it doesn't exist yet. diff --git a/docs/en/engines/table-engines/mergetree-family/replication.md b/docs/en/engines/table-engines/mergetree-family/replication.md index 01782ac25bd..f70e275fd4e 100644 --- a/docs/en/engines/table-engines/mergetree-family/replication.md +++ b/docs/en/engines/table-engines/mergetree-family/replication.md @@ -304,6 +304,24 @@ We use the term `MergeTree` to refer to all table engines in the `MergeTree fami If you had a `MergeTree` table that was manually replicated, you can convert it to a replicated table. You might need to do this if you have already collected a large amount of data in a `MergeTree` table and now you want to enable replication. +`MergeTree` table can be automatically converted on server restart if `convert_to_replicated` flag is set at the table's data directory (`/var/lib/clickhouse/store/xxx/xxxyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy/` for `Atomic` database). +Create empty `convert_to_replicated` file and the table will be loaded as replicated on next server restart. + +This query can be used to get the table's data path. + +```sql +SELECT data_paths FROM system.tables WHERE table = 'table_name' AND database = 'database_name'; +``` + +Note that ReplicatedMergeTree table will be created with values of `default_replica_path` and `default_replica_name` settings. +To create a converted table on other replicas, you will need to explicitly specify its path in the first argument of the `ReplicatedMergeTree` engine. The following query can be used to get its path. + +```sql +SELECT zookeeper_path FROM system.replicas WHERE table = 'table_name'; +``` + +There is also a manual way to do this without server restart. + If the data differs on various replicas, first sync it, or delete this data on all the replicas except one. Rename the existing MergeTree table, then create a `ReplicatedMergeTree` table with the old name. 
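For illustration, a minimal sketch of this manual procedure might look as follows (the table name, columns, keys, and ZooKeeper path below are hypothetical; take the real definition from `SHOW CREATE TABLE` and change only the engine clause):

```sql
-- Step 1: move the existing non-replicated table out of the way.
RENAME TABLE db.events TO db.events_old;

-- Step 2: re-create the table under the old name with a Replicated engine,
-- keeping the original columns, PARTITION BY, and ORDER BY.
CREATE TABLE db.events
(
    event_date Date,
    event_id   UInt64,
    value      String
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/db/events', '{replica}')
ORDER BY (event_date, event_id);

-- Step 3: move the data. INSERT ... SELECT is the simplest option,
-- although it rewrites the data instead of reusing the existing parts.
INSERT INTO db.events SELECT * FROM db.events_old;
```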
diff --git a/docs/en/getting-started/example-datasets/covid19.md b/docs/en/getting-started/example-datasets/covid19.md index da9dc4aa96b..7dc4cea9be4 100644 --- a/docs/en/getting-started/example-datasets/covid19.md +++ b/docs/en/getting-started/example-datasets/covid19.md @@ -28,7 +28,7 @@ The CSV file has 10 columns: ```response ┌─name─────────────────┬─type─────────────┐ -│ date │ Nullable(String) │ +│ date │ Nullable(Date) │ │ location_key │ Nullable(String) │ │ new_confirmed │ Nullable(Int64) │ │ new_deceased │ Nullable(Int64) │ diff --git a/docs/en/getting-started/example-datasets/github.md b/docs/en/getting-started/example-datasets/github.md index 9ed8782e512..e5ffb15bb9a 100644 --- a/docs/en/getting-started/example-datasets/github.md +++ b/docs/en/getting-started/example-datasets/github.md @@ -23,7 +23,6 @@ As of November 8th, 2022, each TSV is approximately the following size and numbe # Table of Contents -- [ClickHouse GitHub data](#clickhouse-github-data) - [Table of Contents](#table-of-contents) - [Generating the data](#generating-the-data) - [Downloading and inserting the data](#downloading-and-inserting-the-data) diff --git a/docs/en/interfaces/postgresql.md b/docs/en/interfaces/postgresql.md index 1146274b012..7306575a4d3 100644 --- a/docs/en/interfaces/postgresql.md +++ b/docs/en/interfaces/postgresql.md @@ -69,5 +69,3 @@ psql "port=9005 host=127.0.0.1 user=alice dbname=default sslcert=/path/to/certif ``` View the [PostgreSQL docs](https://jdbc.postgresql.org/documentation/head/ssl-client.html) for more details on their SSL settings. - -[Original article](https://clickhouse.com/docs/en/interfaces/postgresql) diff --git a/docs/en/interfaces/third-party/gui.md b/docs/en/interfaces/third-party/gui.md index 900764b8128..0b3ca3db3a9 100644 --- a/docs/en/interfaces/third-party/gui.md +++ b/docs/en/interfaces/third-party/gui.md @@ -306,3 +306,18 @@ License: [commercial](https://tablum.io/pricing) product with 3-month free perio Try it out for free [in the cloud](https://tablum.io/try). Learn more about the product at [TABLUM.IO](https://tablum.io/) + +### CKMAN {#ckman} + +[CKMAN](https://www.github.com/housepower/ckman) is a tool for managing and monitoring ClickHouse clusters. + +Features: + +- Rapid and convenient automated deployment of clusters through a browser interface +- Clusters can be scaled up or down +- Load balance the data of the cluster +- Upgrade the cluster online +- Modify the cluster configuration on the page +- Provides cluster node monitoring and ZooKeeper monitoring +- Monitor the status of tables and partitions, and monitor slow SQL statements +- Provides an easy-to-use SQL execution page diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md index b1f2135c476..2d9bf2a2ee8 100644 --- a/docs/en/operations/backup.md +++ b/docs/en/operations/backup.md @@ -168,6 +168,28 @@ RESTORE TABLE test.table PARTITIONS '2', '3' FROM Disk('backups', 'filename.zip') ``` +### Backups as tar archives + +Backups can also be stored as tar archives. The functionality is the same as for zip, except that a password is not supported. + +Write a backup as a tar: +``` +BACKUP TABLE test.table TO Disk('backups', '1.tar') +``` + +Corresponding restore: +``` +RESTORE TABLE test.table FROM Disk('backups', '1.tar') +``` + +To change the compression method, the correct file suffix should be appended to the backup name.
For example, to compress the tar archive using gzip: +``` +BACKUP TABLE test.table TO Disk('backups', '1.tar.gz') +``` + +The supported compression file suffixes are `.tar.gz`, `.tgz`, `.tar.bz2`, `.tar.lzma`, `.tar.zst`, `.tzst`, and `.tar.xz`. + + ### Check the status of backups The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful to check the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file: diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 30dfde7c80b..07c9a2b88ab 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -200,17 +200,13 @@ Type: Bool Default: 0 -## dns_cache_max_size +## dns_cache_max_entries -Internal DNS cache max size in bytes. - -:::note -ClickHouse also has a reverse cache, so the actual memory usage could be twice as much. -::: +Maximum number of entries in the internal DNS cache. Type: UInt64 -Default: 1024 +Default: 10000 ## dns_cache_update_period @@ -2927,3 +2923,15 @@ If set to true, then alter operations will be surrounded by parentheses in forma Type: Bool Default: 0 + +## ignore_empty_sql_security_in_create_view_query {#ignore_empty_sql_security_in_create_view_query} + +If true, ClickHouse doesn't write defaults for an empty SQL security statement in `CREATE VIEW` queries. + +:::note +This setting is only necessary for the migration period and will become obsolete in 24.4. +::: + +Type: Bool + +Default: 1 diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md index 9265fffa323..f7d9586dd5b 100644 --- a/docs/en/operations/settings/settings-formats.md +++ b/docs/en/operations/settings/settings-formats.md @@ -1656,6 +1656,33 @@ Result: └─────────────────────────┴─────────┘ ``` +### output_format_pretty_single_large_number_tip_threshold {#output_format_pretty_single_large_number_tip_threshold} + +Print a readable number tip on the right side of the table if the block consists of a single number that exceeds +this value (except 0). + +Possible values: + +- 0 — The readable number tip will not be printed. +- Positive integer — The readable number tip will be printed if the single number exceeds this value. + +Default value: `1000000`. + +**Example** + +Query: + +```sql +SELECT 1000000000 as a; +``` + +Result: +```text +┌──────────a─┐ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +``` + ## Template format settings {#template-format-settings} ### format_template_resultset {#format_template_resultset} diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 5433a2866a2..622644a1543 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5378,6 +5378,24 @@ SELECT map('a', range(number), 'b', number, 'c', 'str_' || toString(number)) as Default value: `false`. + +## default_normal_view_sql_security {#default_normal_view_sql_security} + +Allows setting the default `SQL SECURITY` option when creating a normal view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security). + +The default value is `INVOKER`. + +## default_materialized_view_sql_security {#default_materialized_view_sql_security} + +Allows setting a default value for the `SQL SECURITY` option when creating a materialized view.
[More about SQL security](../../sql-reference/statements/create/view.md#sql_security). + +The default value is `DEFINER`. + +## default_view_definer {#default_view_definer} + +Allows to set default `DEFINER` option while creating a view. [More about SQL security](../../sql-reference/statements/create/view.md#sql_security). + +The default value is `CURRENT_USER`. + ## max_partition_size_to_drop Restriction on dropping partitions in query time. The value 0 means that you can drop partitions without any restrictions. diff --git a/docs/en/operations/system-tables/crash-log.md b/docs/en/operations/system-tables/crash-log.md index e83da3624b2..9877f674211 100644 --- a/docs/en/operations/system-tables/crash-log.md +++ b/docs/en/operations/system-tables/crash-log.md @@ -49,5 +49,3 @@ build_id: **See also** - [trace_log](../../operations/system-tables/trace_log.md) system table - -[Original article](https://clickhouse.com/docs/en/operations/system-tables/crash-log) diff --git a/docs/en/operations/system-tables/dns_cache.md b/docs/en/operations/system-tables/dns_cache.md index 824ce016a70..befeb9298aa 100644 --- a/docs/en/operations/system-tables/dns_cache.md +++ b/docs/en/operations/system-tables/dns_cache.md @@ -33,6 +33,6 @@ Result: **See also** - [disable_internal_dns_cache setting](../../operations/server-configuration-parameters/settings.md#disable_internal_dns_cache) -- [dns_cache_max_size setting](../../operations/server-configuration-parameters/settings.md#dns_cache_max_size) +- [dns_cache_max_entries setting](../../operations/server-configuration-parameters/settings.md#dns_cache_max_entries) - [dns_cache_update_period setting](../../operations/server-configuration-parameters/settings.md#dns_cache_update_period) - [dns_max_consecutive_failures setting](../../operations/server-configuration-parameters/settings.md#dns_max_consecutive_failures) diff --git a/docs/en/operations/system-tables/query_thread_log.md b/docs/en/operations/system-tables/query_thread_log.md index 0420a0392f2..a0712c78409 100644 --- a/docs/en/operations/system-tables/query_thread_log.md +++ b/docs/en/operations/system-tables/query_thread_log.md @@ -21,7 +21,7 @@ Columns: - `hostname` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) — Hostname of the server executing the query. - `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the thread has finished execution of the query. - `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query. -- `event_time_microsecinds` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query with microseconds precision. +- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query with microseconds precision. - `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution. - `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision. - `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution. @@ -32,8 +32,7 @@ Columns: - `memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The difference between the amount of allocated and freed memory in context of this thread. 
- `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The maximum difference between the amount of allocated and freed memory in context of this thread. - `thread_name` ([String](../../sql-reference/data-types/string.md)) — Name of the thread. -- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Internal thread ID. -- `thread_id` ([Int32](../../sql-reference/data-types/int-uint.md)) — thread ID. +- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — OS thread ID. - `master_thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — OS initial ID of initial thread. - `query` ([String](../../sql-reference/data-types/string.md)) — Query string. - `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Query type. Possible values: diff --git a/docs/en/operations/system-tables/settings_profile_elements.md b/docs/en/operations/system-tables/settings_profile_elements.md index c1fc562e1e9..8955c84fab2 100644 --- a/docs/en/operations/system-tables/settings_profile_elements.md +++ b/docs/en/operations/system-tables/settings_profile_elements.md @@ -26,6 +26,6 @@ Columns: - `max` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — The maximum value of the setting. NULL if not set. -- `readonly` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges))) — Profile that allows only read queries. +- `writability` ([Nullable](../../sql-reference/data-types/nullable.md)([Enum8](../../sql-reference/data-types/enum.md)('WRITABLE' = 0, 'CONST' = 1, 'CHANGEABLE_IN_READONLY' = 2))) — Sets the settings constraint writability kind. - `inherit_profile` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — A parent profile for this setting profile. `NULL` if not set. Setting profile will inherit all the settings' values and constraints (`min`, `max`, `readonly`) from its parent profiles. diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md index d0da4d37d8d..119684ba68d 100644 --- a/docs/en/operations/tips.md +++ b/docs/en/operations/tips.md @@ -297,8 +297,6 @@ end script If you use antivirus software configure it to skip folders with ClickHouse datafiles (`/var/lib/clickhouse`) otherwise performance may be reduced and you may experience unexpected errors during data ingestion and background merges. -[Original article](https://clickhouse.com/docs/en/operations/tips/) - ## Related Content - [Getting started with ClickHouse? Here are 13 "Deadly Sins" and how to avoid them](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse) diff --git a/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md b/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md new file mode 100644 index 00000000000..2bb43a9f665 --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md @@ -0,0 +1,55 @@ +--- +slug: /en/sql-reference/aggregate-functions/reference/approxtopk +sidebar_position: 212 +--- + +# approx_top_k + +Returns an array of the approximately most frequent values and their counts in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves). + + +``` sql +approx_top_k(N)(column) +approx_top_k(N, reserved)(column) +``` + +This function does not provide a guaranteed result. 
In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values. + +We recommend using the `N < 10` value; performance is reduced with large `N` values. Maximum value of `N = 65536`. + +**Parameters** + +- `N` — The number of elements to return. Optional. Default value: 10. +- `reserved` — Defines how many cells are reserved for values. If `uniq(column) > reserved`, the result will be approximate. Optional. Default value: N * 3. + +**Arguments** + +- `column` — The value for which to calculate the frequency. + +**Example** + +Query: + +``` sql +SELECT approx_top_k(2)(k) +FROM VALUES('k Char, w UInt64', ('y', 1), ('y', 1), ('x', 5), ('y', 1), ('z', 10)); +``` + +Result: + +``` text +┌─approx_top_k(2)(k)────┐ +│ [('y',3,0),('x',1,0)] │ +└───────────────────────┘ +``` + +# approx_top_count + +An alias for the `approx_top_k` function. + +**See Also** + +- [topK](../../../sql-reference/aggregate-functions/reference/topk.md) +- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md) +- [approx_top_sum](../../../sql-reference/aggregate-functions/reference/approxtopsum.md) + diff --git a/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md b/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md new file mode 100644 index 00000000000..aa884b26d8e --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md @@ -0,0 +1,51 @@ +--- +slug: /en/sql-reference/aggregate-functions/reference/approxtopsum +sidebar_position: 212 +--- + +# approx_top_sum + +Returns an array of the approximately most frequent values and their counts in the specified column. The resulting array is sorted in descending order of approximate frequency of values (not by the values themselves). Additionally, the weight of the value is taken into account. + +``` sql +approx_top_sum(N)(column, weight) +approx_top_sum(N, reserved)(column, weight) +``` + +This function does not provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values. + +We recommend using the `N < 10` value; performance is reduced with large `N` values. Maximum value of `N = 65536`. + +**Parameters** + +- `N` — The number of elements to return. Optional. Default value: 10. +- `reserved` — Defines how many cells are reserved for values. If `uniq(column) > reserved`, the result will be approximate. Optional. Default value: N * 3. A sketch using this parameter is shown after the argument list. + +**Arguments** + +- `column` — The value for which to calculate the frequency. +- `weight` — The weight. Every value is accounted `weight` times for frequency calculation. [UInt64](../../../sql-reference/data-types/int-uint.md).
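As an illustration of the two-parameter form declared above (the `reserved` value of 100 is made up; the input rows are the same as in the example below):

```sql
SELECT approx_top_sum(2, 100)(k, w)
FROM VALUES('k Char, w UInt64', ('y', 1), ('y', 1), ('x', 5), ('y', 1), ('z', 10));
```

Per the description above, when `reserved` is larger than the number of distinct values in `column`, the result is no longer approximate.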
+ + +**Example** + +Query: + +``` sql +SELECT approx_top_sum(2)(k, w) +FROM VALUES('k Char, w UInt64', ('y', 1), ('y', 1), ('x', 5), ('y', 1), ('z', 10)) +``` + +Result: + +``` text +┌─approx_top_sum(2)(k, w)─┐ +│ [('z',10,0),('x',5,0)] │ +└─────────────────────────┘ +``` + +**See Also** + +- [topK](../../../sql-reference/aggregate-functions/reference/topk.md) +- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md) +- [approx_top_k](../../../sql-reference/aggregate-functions/reference/approxtopk.md) diff --git a/docs/en/sql-reference/aggregate-functions/reference/topk.md b/docs/en/sql-reference/aggregate-functions/reference/topk.md index bde29275f79..dd4b2251a8a 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topk.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topk.md @@ -11,21 +11,23 @@ Implements the [Filtered Space-Saving](https://doi.org/10.1016/j.ins.2010.08.024 ``` sql topK(N)(column) +topK(N, load_factor)(column) +topK(N, load_factor, 'counts')(column) ``` This function does not provide a guaranteed result. In certain situations, errors might occur and it might return frequent values that aren’t the most frequent values. We recommend using the `N < 10` value; performance is reduced with large `N` values. Maximum value of `N = 65536`. +**Parameters** + +- `N` — The number of elements to return. Optional. Default value: 10. +- `load_factor` — Defines, how many cells reserved for values. If uniq(column) > N * load_factor, result of topK function will be approximate. Optional. Default value: 3. +- `counts` — Defines, should result contain approximate count and error value. + **Arguments** -- `N` – The number of elements to return. - -If the parameter is omitted, default value 10 is used. - -**Arguments** - -- `x` – The value to calculate frequency. +- `column` — The value to calculate frequency. **Example** @@ -41,3 +43,9 @@ FROM ontime │ [19393,19790,19805] │ └─────────────────────┘ ``` + +**See Also** + +- [topKWeighted](../../../sql-reference/aggregate-functions/reference/topkweighted.md) +- [approx_top_k](../../../sql-reference/aggregate-functions/reference/approxtopk.md) +- [approx_top_sum](../../../sql-reference/aggregate-functions/reference/approxtopsum.md) \ No newline at end of file diff --git a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md index 03932e88a6a..d2a469828fc 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md +++ b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md @@ -10,13 +10,20 @@ Returns an array of the approximately most frequent values in the specified colu **Syntax** ``` sql -topKWeighted(N)(x, weight) +topKWeighted(N)(column, weight) +topKWeighted(N, load_factor)(column, weight) +topKWeighted(N, load_factor, 'counts')(column, weight) ``` +**Parameters** + +- `N` — The number of elements to return. Optional. Default value: 10. +- `load_factor` — Defines, how many cells reserved for values. If uniq(column) > N * load_factor, result of topK function will be approximate. Optional. Default value: 3. +- `counts` — Defines, should result contain approximate count and error value. + **Arguments** -- `N` — The number of elements to return. -- `x` — The value. +- `column` — The value. - `weight` — The weight. Every value is accounted `weight` times for frequency calculation. [UInt64](../../../sql-reference/data-types/int-uint.md). 
**Returned value** @@ -40,6 +47,23 @@ Result: └────────────────────────┘ ``` +Query: + +``` sql +SELECT topKWeighted(2, 10, 'counts')(k, w) +FROM VALUES('k Char, w UInt64', ('y', 1), ('y', 1), ('x', 5), ('y', 1), ('z', 10)) +``` + +Result: + +``` text +┌─topKWeighted(2, 10, 'counts')(k, w)─┐ +│ [('z',10,0),('x',5,0)] │ +└─────────────────────────────────────┘ +``` + **See Also** - [topK](../../../sql-reference/aggregate-functions/reference/topk.md) +- [approx_top_k](../../../sql-reference/aggregate-functions/reference/approxtopk.md) +- [approx_top_sum](../../../sql-reference/aggregate-functions/reference/approxtopsum.md) \ No newline at end of file diff --git a/docs/en/sql-reference/data-types/variant.md b/docs/en/sql-reference/data-types/variant.md index f027e3fe343..7d10d4b0e97 100644 --- a/docs/en/sql-reference/data-types/variant.md +++ b/docs/en/sql-reference/data-types/variant.md @@ -12,6 +12,11 @@ has a value of either type `T1` or `T2` or ... or `TN` or none of them (`NULL` v The order of nested types doesn't matter: Variant(T1, T2) = Variant(T2, T1). Nested types can be arbitrary types except Nullable(...), LowCardinality(Nullable(...)) and Variant(...) types. +:::note +It's not recommended to use similar types as variants (for example different numeric types like `Variant(UInt32, Int64)` or different date types like `Variant(Date, DateTime)`), +because working with values of such types can lead to ambiguity. By default, creating such `Variant` type will lead to an exception, but can be enabled using setting `allow_suspicious_variant_types` +::: + :::note The Variant data type is an experimental feature. To use it, set `allow_experimental_variant_type = 1`. ::: @@ -272,3 +277,121 @@ $$) │ [1,2,3] │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ [1,2,3] │ └─────────────────────┴───────────────┴──────┴───────┴─────────────────────┴─────────┘ ``` + + +## Comparing values of Variant type + +Values of a `Variant` type can be compared only with values with the same `Variant` type. + +The result of operator `<` for values `v1` with underlying type `T1` and `v2` with underlying type `T2` of a type `Variant(..., T1, ... T2, ...)` is defined as follows: +- If `T1 = T2 = T`, the result will be `v1.T < v2.T` (underlying values will be compared). +- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared). 
+ +Examples: +```sql +CREATE TABLE test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) ENGINE=Memory; +INSERT INTO test VALUES (42, 42), (42, 43), (42, 'abc'), (42, [1, 2, 3]), (42, []), (42, NULL); +``` + +```sql +SELECT v2, variantType(v2) as v2_type from test order by v2; +``` + +```text +┌─v2──────┬─v2_type───────┐ +│ [] │ Array(UInt32) │ +│ [1,2,3] │ Array(UInt32) │ +│ abc │ String │ +│ 42 │ UInt64 │ +│ 43 │ UInt64 │ +│ ᴺᵁᴸᴸ │ None │ +└─────────┴───────────────┘ +``` + +```sql +SELECT v1, variantType(v1) as v1_type, v2, variantType(v2) as v2_type, v1 = v2, v1 < v2, v1 > v2 from test; +``` + +```text +┌─v1─┬─v1_type─┬─v2──────┬─v2_type───────┬─equals(v1, v2)─┬─less(v1, v2)─┬─greater(v1, v2)─┐ +│ 42 │ UInt64 │ 42 │ UInt64 │ 1 │ 0 │ 0 │ +│ 42 │ UInt64 │ 43 │ UInt64 │ 0 │ 1 │ 0 │ +│ 42 │ UInt64 │ abc │ String │ 0 │ 0 │ 1 │ +│ 42 │ UInt64 │ [1,2,3] │ Array(UInt32) │ 0 │ 0 │ 1 │ +│ 42 │ UInt64 │ [] │ Array(UInt32) │ 0 │ 0 │ 1 │ +│ 42 │ UInt64 │ ᴺᵁᴸᴸ │ None │ 0 │ 1 │ 0 │ +└────┴─────────┴─────────┴───────────────┴────────────────┴──────────────┴─────────────────┘ + +``` + +If you need to find the row with specific `Variant` value, you can do one of the following: + +- Cast value to the corresponding `Variant` type: + +```sql +SELECT * FROM test WHERE v2 == [1,2,3]::Array(UInt32)::Variant(String, UInt64, Array(UInt32)); +``` + +```text +┌─v1─┬─v2──────┐ +│ 42 │ [1,2,3] │ +└────┴─────────┘ +``` + +- Compare `Variant` subcolumn with required type: + +```sql +SELECT * FROM test WHERE v2.`Array(UInt32)` == [1,2,3] -- or using variantElement(v2, 'Array(UInt32)') +``` + +```text +┌─v1─┬─v2──────┐ +│ 42 │ [1,2,3] │ +└────┴─────────┘ +``` + +Sometimes it can be useful to make additional check on variant type as subcolumns with complex types like `Array/Map/Tuple` cannot be inside `Nullable` and will have default values instead of `NULL` on rows with different types: + +```sql +SELECT v2, v2.`Array(UInt32)`, variantType(v2) FROM test WHERE v2.`Array(UInt32)` == []; +``` + +```text +┌─v2───┬─v2.Array(UInt32)─┬─variantType(v2)─┐ +│ 42 │ [] │ UInt64 │ +│ 43 │ [] │ UInt64 │ +│ abc │ [] │ String │ +│ [] │ [] │ Array(UInt32) │ +│ ᴺᵁᴸᴸ │ [] │ None │ +└──────┴──────────────────┴─────────────────┘ +``` + +```sql +SELECT v2, v2.`Array(UInt32)`, variantType(v2) FROM test WHERE variantType(v2) == 'Array(UInt32)' AND v2.`Array(UInt32)` == []; +``` + +```text +┌─v2─┬─v2.Array(UInt32)─┬─variantType(v2)─┐ +│ [] │ [] │ Array(UInt32) │ +└────┴──────────────────┴─────────────────┘ +``` + +**Note:** values of variants with different numeric types are considered as different variants and not compared between each other, their type names are compared instead. 
+ +Example: + +```sql +SET allow_suspicious_variant_types = 1; +CREATE TABLE test (v Variant(UInt32, Int64)) ENGINE=Memory; +INSERT INTO test VALUES (1::UInt32), (1::Int64), (100::UInt32), (100::Int64); +SELECT v, variantType(v) FROM test ORDER by v; +``` + +```text +┌─v───┬─variantType(v)─┐ +│ 1 │ Int64 │ +│ 100 │ Int64 │ +│ 1 │ UInt32 │ +│ 100 │ UInt32 │ +└─────┴────────────────┘ +``` diff --git a/docs/en/sql-reference/functions/bit-functions.md b/docs/en/sql-reference/functions/bit-functions.md index 3c07fe8bcbe..0951c783aae 100644 --- a/docs/en/sql-reference/functions/bit-functions.md +++ b/docs/en/sql-reference/functions/bit-functions.md @@ -167,6 +167,10 @@ Result: └──────────────────────────────────────────┴───────────────────────────────┘ ``` +## byteSlice(s, offset, length) + +See function [substring](string-functions.md#substring). + ## bitTest Takes any integer and converts it into [binary form](https://en.wikipedia.org/wiki/Binary_number), returns the value of a bit at specified position. The countdown starts from 0 from the right to the left. diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index c5b3b4cc3ae..41503abfa2f 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -2287,10 +2287,43 @@ Result: ## today {#today} -Accepts zero arguments and returns the current date at one of the moments of query analysis. -The same as ‘toDate(now())’. +Returns the current date at moment of query analysis. It is the same as ‘toDate(now())’ and has aliases: `curdate`, `current_date`. -Aliases: `curdate`, `current_date`. +**Syntax** + +```sql +today() +``` + +**Arguments** + +- None + +**Returned value** + +- Current date + +Type: [DateTime](../../sql-reference/data-types/datetime.md). + +**Example** + +Query: + +```sql +SELECT today() AS today, curdate() AS curdate, current_date() AS current_date FORMAT Pretty +``` + +**Result**: + +Running the query above on the 3rd of March 2024 would have returned the following response: + +```response +┏━━━━━━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━┓ +┃ today ┃ curdate ┃ current_date ┃ +┡━━━━━━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━┩ +│ 2024-03-03 │ 2024-03-03 │ 2024-03-03 │ +└────────────┴────────────┴──────────────┘ +``` ## yesterday {#yesterday} @@ -3034,6 +3067,40 @@ Result: │ 2023-03-16 18:00:00.000 │ └─────────────────────────────────────────────────────────────────────────┘ ``` +## timeDiff + +Returns the difference between two dates or dates with time values. The difference is calculated in units of seconds. It is same as `dateDiff` and was added only for MySQL support. `dateDiff` is preferred. + +**Syntax** + +```sql +timeDiff(first_datetime, second_datetime) +``` + +*Arguments** + +- `first_datetime` — A DateTime/DateTime64 type const value or an expression . [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md) +- `second_datetime` — A DateTime/DateTime64 type const value or an expression . [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md) + +**Returned value** + +The difference between two dates or dates with time values in seconds. 
+ +**Example** + +Query: + +```sql +timeDiff(toDateTime64('1927-01-01 00:00:00', 3), toDate32('1927-01-02')); +``` + +**Result**: + +```response +┌─timeDiff(toDateTime64('1927-01-01 00:00:00', 3), toDate32('1927-01-02'))─┐ +│ 86400 │ +└──────────────────────────────────────────────────────────────────────────┘ +``` ## Related content diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index 9ae403be524..3b49e4954ed 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -558,6 +558,7 @@ substring(s, offset[, length]) Alias: - `substr` - `mid` +- `byteSlice` **Arguments** diff --git a/docs/en/sql-reference/functions/time-series-functions.md b/docs/en/sql-reference/functions/time-series-functions.md index ce36c89f473..e80a3fa9860 100644 --- a/docs/en/sql-reference/functions/time-series-functions.md +++ b/docs/en/sql-reference/functions/time-series-functions.md @@ -22,8 +22,8 @@ seriesOutliersDetectTukey(series, min_percentile, max_percentile, K); **Arguments** - `series` - An array of numeric values. -- `min_percentile` - The minimum percentile to be used to calculate inter-quantile range [(IQR)](https://en.wikipedia.org/wiki/Interquartile_range). The value must be in range [2,98]. The default is 25. -- `max_percentile` - The maximum percentile to be used to calculate inter-quantile range (IQR). The value must be in range [2,98]. The default is 75. +- `min_percentile` - The minimum percentile to be used to calculate inter-quantile range [(IQR)](https://en.wikipedia.org/wiki/Interquartile_range). The value must be in range [0.02,0.98]. The default is 0.25. +- `max_percentile` - The maximum percentile to be used to calculate inter-quantile range (IQR). The value must be in range [0.02,0.98]. The default is 0.75. - `K` - Non-negative constant value to detect mild or stronger outliers. The default value is 1.5. At least four data points are required in `series` to detect outliers. @@ -53,7 +53,7 @@ Result: Query: ``` sql -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], 20, 80, 1.5) AS print_0; +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], 0.2, 0.8, 1.5) AS print_0; ``` Result: diff --git a/docs/en/sql-reference/statements/create/view.md b/docs/en/sql-reference/statements/create/view.md index 028d0b09a1a..073a3c0d246 100644 --- a/docs/en/sql-reference/statements/create/view.md +++ b/docs/en/sql-reference/statements/create/view.md @@ -13,7 +13,9 @@ Creates a new view. Views can be [normal](#normal-view), [materialized](#materia Syntax: ``` sql -CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] AS SELECT ... +CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +AS SELECT ... ``` Normal views do not store any data. They just perform a read from another table on each access. In other words, a normal view is nothing more than a saved query. When reading from a view, this saved query is used as a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. @@ -52,7 +54,9 @@ SELECT * FROM view(column1=value1, column2=value2 ...) ## Materialized View ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... 
+CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +AS SELECT ... ``` :::tip @@ -91,6 +95,49 @@ Views look the same as normal tables. For example, they are listed in the result To delete a view, use [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Although `DROP TABLE` works for VIEWs as well. +## SQL security {#sql_security} + +`DEFINER` and `SQL SECURITY` allow you to specify which ClickHouse user to use when executing the view's underlying query. +`SQL SECURITY` has three legal values: `DEFINER`, `INVOKER`, or `NONE`. You can specify any existing user or `CURRENT_USER` in the `DEFINER` clause. + +The following table explains which rights are required for which user in order to select from a view. +Note that regardless of the SQL security option, in every case it is still required to have `GRANT SELECT ON <view>` in order to read from it. + +| SQL security option | View | Materialized View | +|---------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------| +| `DEFINER alice` | `alice` must have a `SELECT` grant for the view's source table. | `alice` must have a `SELECT` grant for the view's source table and an `INSERT` grant for the view's target table. | +| `INVOKER` | User must have a `SELECT` grant for the view's source table. | `SQL SECURITY INVOKER` can't be specified for materialized views. | +| `NONE` | - | - | + +:::note +`SQL SECURITY NONE` is a deprecated option. Any user with the rights to create views with `SQL SECURITY NONE` will be able to execute any arbitrary query. +Thus, it is required to have `GRANT ALLOW SQL SECURITY NONE TO <user>` in order to create a view with this option. +::: + +If `DEFINER`/`SQL SECURITY` aren't specified, the default values are used: +- `SQL SECURITY`: `INVOKER` for normal views and `DEFINER` for materialized views ([configurable by settings](../../../operations/settings/settings.md#default_normal_view_sql_security)) +- `DEFINER`: `CURRENT_USER` ([configurable by settings](../../../operations/settings/settings.md#default_view_definer)) + +If a view is attached without `DEFINER`/`SQL SECURITY` specified, the default value is `SQL SECURITY NONE` for the materialized view and `SQL SECURITY INVOKER` for the normal view. + +To change SQL security for an existing view, use: +```sql +ALTER TABLE [db.]table_name MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }] +``` + +### SQL security examples +```sql +CREATE VIEW test_view +DEFINER = alice SQL SECURITY DEFINER +AS SELECT ... +``` + +```sql +CREATE VIEW test_view +SQL SECURITY INVOKER +AS SELECT ... +``` + ## Live View [Deprecated] This feature is deprecated and will be removed in the future. diff --git a/docs/en/sql-reference/statements/grant.md b/docs/en/sql-reference/statements/grant.md index e6073f3523a..4e5476210e3 100644 --- a/docs/en/sql-reference/statements/grant.md +++ b/docs/en/sql-reference/statements/grant.md @@ -114,6 +114,7 @@ Hierarchy of privileges: - `ALTER VIEW` - `ALTER VIEW REFRESH` - `ALTER VIEW MODIFY QUERY` + - `ALTER VIEW MODIFY SQL SECURITY` - [CREATE](#grant-create) - `CREATE DATABASE` - `CREATE TABLE` @@ -307,6 +308,7 @@ Allows executing [ALTER](../../sql-reference/statements/alter/index.md) queries - `ALTER VIEW` Level: `GROUP` - `ALTER VIEW REFRESH`. Level: `VIEW`.
Aliases: `ALTER LIVE VIEW REFRESH`, `REFRESH VIEW` - `ALTER VIEW MODIFY QUERY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY QUERY` + - `ALTER VIEW MODIFY SQL SECURITY`. Level: `VIEW`. Aliases: `ALTER TABLE MODIFY SQL SECURITY` Examples of how this hierarchy is treated: @@ -409,6 +411,7 @@ Allows a user to execute queries that manage users, roles and row policies. - `SHOW_ROW_POLICIES`. Level: `GLOBAL`. Aliases: `SHOW POLICIES`, `SHOW CREATE ROW POLICY`, `SHOW CREATE POLICY` - `SHOW_QUOTAS`. Level: `GLOBAL`. Aliases: `SHOW CREATE QUOTA` - `SHOW_SETTINGS_PROFILES`. Level: `GLOBAL`. Aliases: `SHOW PROFILES`, `SHOW CREATE SETTINGS PROFILE`, `SHOW CREATE PROFILE` + - `ALLOW SQL SECURITY NONE`. Level: `GLOBAL`. Aliases: `CREATE SQL SECURITY NONE`, `SQL SECURITY NONE`, `SECURITY NONE` The `ROLE ADMIN` privilege allows a user to assign and revoke any roles including those which are not assigned to the user with the admin option. diff --git a/docs/en/sql-reference/statements/select/union.md b/docs/en/sql-reference/statements/select/union.md index 92a4ed1bb20..39ed3f2aceb 100644 --- a/docs/en/sql-reference/statements/select/union.md +++ b/docs/en/sql-reference/statements/select/union.md @@ -83,6 +83,3 @@ Queries that are parts of `UNION/UNION ALL/UNION DISTINCT` can be run simultaneo - [insert_null_as_default](../../../operations/settings/settings.md#insert_null_as_default) setting. - [union_default_mode](../../../operations/settings/settings.md#union-default-mode) setting. - - -[Original article](https://clickhouse.com/docs/en/sql-reference/statements/select/union/) diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 868571f3bb2..a128814f072 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -68,7 +68,7 @@ RELOAD FUNCTION [ON CLUSTER cluster_name] function_name Clears ClickHouse’s internal DNS cache. Sometimes (for old ClickHouse versions) it is necessary to use this command when changing the infrastructure (changing the IP address of another ClickHouse server or the server used by dictionaries). -For more convenient (automatic) cache management, see disable_internal_dns_cache, dns_cache_max_size, dns_cache_update_period parameters. +For more convenient (automatic) cache management, see disable_internal_dns_cache, dns_cache_max_entries, dns_cache_update_period parameters. 
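For example, after changing the IP address behind a hostname that is used by a dictionary or by another server in the cluster, the cache can be dropped manually:

```sql
SYSTEM DROP DNS CACHE
```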
## DROP MARK CACHE diff --git a/docs/en/sql-reference/table-functions/fileCluster.md b/docs/en/sql-reference/table-functions/fileCluster.md index 2646250311c..4677d2883a7 100644 --- a/docs/en/sql-reference/table-functions/fileCluster.md +++ b/docs/en/sql-reference/table-functions/fileCluster.md @@ -59,9 +59,7 @@ INSERT INTO TABLE FUNCTION file('file2.csv', 'CSV', 'i UInt32, s String') VALUES Now, read data contents of `test1.csv` and `test2.csv` via `fileCluster` table function: ```sql -SELECT * from fileCluster( - 'my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s String') ORDER BY (i, s)""" -) +SELECT * FROM fileCluster('my_cluster', 'file{1,2}.csv', 'CSV', 'i UInt32, s String') ORDER BY i, s ``` ``` diff --git a/docs/en/sql-reference/table-functions/mergeTreeIndex.md b/docs/en/sql-reference/table-functions/mergeTreeIndex.md new file mode 100644 index 00000000000..dccfd1cfc97 --- /dev/null +++ b/docs/en/sql-reference/table-functions/mergeTreeIndex.md @@ -0,0 +1,83 @@ +--- +slug: /en/sql-reference/table-functions/mergeTreeIndex +sidebar_position: 77 +sidebar_label: mergeTreeIndex +--- + +# mergeTreeIndex + +Represents the contents of index and marks files of MergeTree tables. It can be used for introspection. + +``` sql +mergeTreeIndex(database, table, [with_marks = true]) +``` + +**Arguments** + +- `database` - The database name to read index and marks from. +- `table` - The table name to read index and marks from. +- `with_marks` - Whether to include columns with marks in the result. + +**Returned Value** + +A table object with columns containing the values of the primary index of the source table, columns with the values of marks (if enabled) for all possible files in the data parts of the source table, and virtual columns: + +- `part_name` - The name of the data part. +- `mark_number` - The number of the current mark in the data part. +- `rows_in_granule` - The number of rows in the current granule. + +A marks column may contain a `(NULL, NULL)` value if the column is absent in the data part or if marks for one of its substreams are not written (e.g. in compact parts).
+ +## Usage Example + +```sql +CREATE TABLE test_table +( + `id` UInt64, + `n` UInt64, + `arr` Array(UInt64) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 3, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 8; + +INSERT INTO test_table SELECT number, number, range(number % 5) FROM numbers(5); + +INSERT INTO test_table SELECT number, number, range(number % 5) FROM numbers(10, 10); +``` + +```sql +SELECT * FROM mergeTreeIndex(currentDatabase(), test_table, with_marks = true); +``` + +```text +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─id─┬─id.mark─┬─n.mark──┬─arr.size0.mark─┬─arr.mark─┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ (0,0) │ (42,0) │ (NULL,NULL) │ (84,0) │ +│ all_1_1_0 │ 1 │ 2 │ 3 │ (133,0) │ (172,0) │ (NULL,NULL) │ (211,0) │ +│ all_1_1_0 │ 2 │ 0 │ 4 │ (271,0) │ (271,0) │ (NULL,NULL) │ (271,0) │ +└───────────┴─────────────┴─────────────────┴────┴─────────┴─────────┴────────────────┴──────────┘ +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─id─┬─id.mark─┬─n.mark─┬─arr.size0.mark─┬─arr.mark─┐ +│ all_2_2_0 │ 0 │ 3 │ 10 │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ +│ all_2_2_0 │ 1 │ 3 │ 13 │ (0,24) │ (0,24) │ (0,24) │ (0,24) │ +│ all_2_2_0 │ 2 │ 3 │ 16 │ (0,48) │ (0,48) │ (0,48) │ (0,80) │ +│ all_2_2_0 │ 3 │ 1 │ 19 │ (0,72) │ (0,72) │ (0,72) │ (0,128) │ +│ all_2_2_0 │ 4 │ 0 │ 19 │ (0,80) │ (0,80) │ (0,80) │ (0,160) │ +└───────────┴─────────────┴─────────────────┴────┴─────────┴────────┴────────────────┴──────────┘ +``` + +```sql +DESCRIBE mergeTreeIndex(currentDatabase(), test_table, with_marks = true) SETTINGS describe_compact_output = 1; +``` + +```text +┌─name────────────┬─type─────────────────────────────────────────────────────────────────────────────────────────────┐ +│ part_name │ String │ +│ mark_number │ UInt64 │ +│ rows_in_granule │ UInt64 │ +│ id │ UInt64 │ +│ id.mark │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │ +│ n.mark │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │ +│ arr.size0.mark │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │ +│ arr.mark │ Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) │ +└─────────────────┴──────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` diff --git a/docs/ru/interfaces/third-party/gui.md b/docs/ru/interfaces/third-party/gui.md index 34d2f0e371a..6bed32052ad 100644 --- a/docs/ru/interfaces/third-party/gui.md +++ b/docs/ru/interfaces/third-party/gui.md @@ -260,3 +260,19 @@ SeekTable [бесплатен](https://www.seektable.com/help/cloud-pricing) д Протестировать TABLUM.IO без разворачивания на собственном сервере можно [здесь](https://tablum.io/try). Подробно о продукте смотрите на [TABLUM.IO](https://tablum.io/) + + +### CKMAN {#ckman} + +[CKMAN] (https://www.github.com/housepower/ckman) — инструмент управления и мониторинга кластеров ClickHouse! 
+ +Основные возможности: + +- Быстрое и простое развертывание кластеров через веб-интерфейс +- Кластеры можно масштабировать или масштабировать +- Балансировка нагрузки данных кластера +- Обновление кластера в режиме онлайн +- Измените конфигурацию кластера на странице +- Обеспечивает мониторинг узлов кластера и zookeeper +- Мониторинг состояния таблиц и секций, а также медленные SQL-операторы +- Предоставляет простую в использовании страницу выполнения SQL diff --git a/docs/ru/sql-reference/statements/create/view.md b/docs/ru/sql-reference/statements/create/view.md index 543a4b21ad1..032bdc6e6d4 100644 --- a/docs/ru/sql-reference/statements/create/view.md +++ b/docs/ru/sql-reference/statements/create/view.md @@ -11,7 +11,9 @@ sidebar_label: "Представление" ## Обычные представления {#normal} ``` sql -CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] AS SELECT ... +CREATE [OR REPLACE] VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster_name] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +AS SELECT ... ``` Обычные представления не хранят никаких данных, они выполняют чтение данных из другой таблицы при каждом доступе. Другими словами, обычное представление — это не что иное, как сохраненный запрос. При чтении данных из представления этот сохраненный запрос используется как подзапрос в секции [FROM](../../../sql-reference/statements/select/from.md). @@ -37,7 +39,9 @@ SELECT a, b, c FROM (SELECT ...) ## Материализованные представления {#materialized} ``` sql -CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] AS SELECT ... +CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]name] [ENGINE = engine] [POPULATE] +[DEFINER = { user | CURRENT_USER }] [SQL SECURITY { DEFINER | INVOKER | NONE }] +AS SELECT ... ``` Материализованные (MATERIALIZED) представления хранят данные, преобразованные соответствующим запросом [SELECT](../../../sql-reference/statements/select/index.md). @@ -66,6 +70,52 @@ CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name [ON CLUSTER] [TO[db.]na Чтобы удалить представление, следует использовать [DROP VIEW](../../../sql-reference/statements/drop.md#drop-view). Впрочем, `DROP TABLE` тоже работает для представлений. +## SQL безопасность {#sql_security} + +Параметры `DEFINER` и `SQL SECURITY` позволяют задать правило от имени какого пользователя будут выполняться запросы к таблицам, на которые ссылается представление. +Для `SQL SECURITY` допустимо три значения: `DEFINER`, `INVOKER`, или `NONE`. +Для `DEFINER` можно указать имя любого существующего пользователя или же `CURRENT_USER`. + +Далее приведена таблица, объясняющая какие права необходимы каким пользователям при заданных параметрах SQL безопасности. +Обратите внимание, что, в независимости от заданных параметров SQL безопасности, +у пользователя должно быть право `GRANT SELECT ON ` для чтения из представления. + +| SQL security option | View | Materialized View | +|---------------------|----------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +| `DEFINER alice` | У `alice` должно быть право `SELECT` на таблицу-источник. | У `alice` должны быть права `SELECT` на таблицу-источник и `INSERT` на таблицу-назначение. | +| `INVOKER` | У пользователя выполняющего запрос к представлению должно быть право `SELECT` на таблицу-источник. 
| Тип `SQL SECURITY INVOKER` не может быть указан для материализованных представлений. | +| `NONE` | - | - | + +:::note +Тип `SQL SECURITY NONE` не безопасен для использования. Любой пользователь с правом создавать представления с `SQL SECURITY NONE` сможет исполнять любые запросы без проверки прав. +По умолчанию, у пользователей нет прав указывать `SQL SECURITY NONE`, однако, при необходимости, это право можно выдать с помощью `GRANT ALLOW SQL SECURITY NONE TO `. +::: + +Если `DEFINER`/`SQL SECURITY` не указан, будут использованы значения по умолчанию: +- `SQL SECURITY`: `INVOKER` для обычных представлений и `DEFINER` для материализованных ([изменяется в настройках](../../../operations/settings/settings.md#default_normal_view_sql_security)) +- `DEFINER`: `CURRENT_USER` ([изменяется в настройках](../../../operations/settings/settings.md#default_view_definer)) + +Если представление подключается с помощью ключевого слова `ATTACH` и настройки SQL безопасности не были заданы, +то по умолчанию будет использоваться `SQL SECURITY NONE` для материализованных представлений и `SQL SECURITY INVOKER` для обычных. + +Изменить параметры SQL безопасности возможно с помощью следующего запроса: +```sql +ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }] +``` + +### Примеры представлений с SQL безопасностью +```sql +CREATE test_view +DEFINER = alice SQL SECURITY DEFINER +AS SELECT ... +``` + +```sql +CREATE test_view +SQL SECURITY INVOKER +AS SELECT ... +``` + ## LIVE-представления [экспериментальный функционал] {#live-view} :::note Важно diff --git a/docs/zh/interfaces/third-party/gui.md b/docs/zh/interfaces/third-party/gui.md index 9dd32efc970..6cf1b99b640 100644 --- a/docs/zh/interfaces/third-party/gui.md +++ b/docs/zh/interfaces/third-party/gui.md @@ -129,3 +129,18 @@ ClickHouse Web 界面 [Tabix](https://github.com/tabixio/tabix). - 数据编辑器。 - 重构。 - 搜索和导航。 + +### CKMAN {#ckman} + +[CKMAN](https://www.github.com/housepower/ckman) 是一个用于管理和监控ClickHouse集群的可视化工具! 
+ +特征: + +- 非常快速便捷的通过浏览器界面自动化部署集群 +- 支持对集群进行扩缩容操作 +- 对集群的数据进行负载均衡 +- 对集群进行在线升级 +- 通过界面修改集群配置 +- 提供集群节点监控,zookeeper监控 +- 监控表、分区状态,慢SQL监控 +- 提供简单易操作的SQL执行页面 diff --git a/programs/keeper-converter/KeeperConverter.cpp b/programs/keeper-converter/KeeperConverter.cpp index 8cd50d0892f..7518227a070 100644 --- a/programs/keeper-converter/KeeperConverter.cpp +++ b/programs/keeper-converter/KeeperConverter.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 6dc33042a05..a10f47be0b8 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1774,7 +1774,7 @@ try } else { - DNSResolver::instance().setCacheMaxSize(server_settings.dns_cache_max_size); + DNSResolver::instance().setCacheMaxEntries(server_settings.dns_cache_max_entries); /// Initialize a watcher periodically updating DNS cache dns_cache_updater = std::make_unique( diff --git a/programs/server/config.d/filesystem_cache_log.xml b/programs/server/config.d/filesystem_cache_log.xml new file mode 120000 index 00000000000..aa89e44c64f --- /dev/null +++ b/programs/server/config.d/filesystem_cache_log.xml @@ -0,0 +1 @@ +../../../tests/config/config.d/filesystem_cache_log.xml \ No newline at end of file diff --git a/programs/server/config.d/filesystem_caches_path.xml b/programs/server/config.d/filesystem_caches_path.xml new file mode 100644 index 00000000000..87555d1f81c --- /dev/null +++ b/programs/server/config.d/filesystem_caches_path.xml @@ -0,0 +1,4 @@ + + /tmp/filesystem_caches/ + /tmp/filesystem_caches/ + diff --git a/programs/server/config.d/handlers.yaml b/programs/server/config.d/handlers.yaml new file mode 120000 index 00000000000..86dfc38179b --- /dev/null +++ b/programs/server/config.d/handlers.yaml @@ -0,0 +1 @@ +../../../tests/config/config.d/handlers.yaml \ No newline at end of file diff --git a/programs/server/config.xml b/programs/server/config.xml index f8c4f2bf9fa..d19f6d77f30 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -752,7 +752,7 @@ SQL_ @@ -1477,17 +1477,23 @@ --> mv_1_1 ---> ds_1_1 ---> mv_2_1 --┬-> ds_2_1 ---> mv_3_1 ---> ds_3_1 + * | | + * └--> mv_1_2 ---> ds_1_2 ---> mv_2_2 --┘ + * + * Here we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will + * be inserted into `ds_2_1`. + * + * We are forced to use view id instead of table id because there are some possible INSERT flows where no tables + * are involved. 
+ * + * Example: + * + * landing -┬--> mv_1_1 --┬-> ds_1_1 + * | | + * └--> mv_1_2 --┘ + * + */ + auto insert_deduplication_token = insert_settings.insert_deduplication_token.value; + + if (view_id.hasUUID()) + insert_deduplication_token += "_" + toString(view_id.uuid); + else + insert_deduplication_token += "_" + view_id.getFullNameNotQuoted(); + + insert_context->setSetting("insert_deduplication_token", insert_deduplication_token); + } + + // Processing of blocks for MVs is done block by block, and there will + // be no parallel reading after (plus it is not a costless operation) + select_context->setSetting("parallelize_output_from_storages", Field{false}); + + // Separate min_insert_block_size_rows/min_insert_block_size_bytes for children + if (insert_settings.min_insert_block_size_rows_for_materialized_views) + insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value); + if (insert_settings.min_insert_block_size_bytes_for_materialized_views) + insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value); + + ASTPtr query; + Chain out; + + /// We are creating a ThreadStatus per view to store its metrics individually + /// Since calling ThreadStatus() changes current_thread we save it and restore it after the calls + /// Later on, before doing any task related to a view, we'll switch to its ThreadStatus, do the work, + /// and switch back to the original thread_status. + auto * original_thread = current_thread; + SCOPE_EXIT({ current_thread = original_thread; }); + current_thread = nullptr; + std::unique_ptr view_thread_status_ptr = std::make_unique(/*check_current_thread_on_destruction=*/ false); + /// Copy of a ThreadStatus should be internal. + view_thread_status_ptr->setInternalThread(); + view_thread_status_ptr->attachToGroup(running_group); + + auto * view_thread_status = view_thread_status_ptr.get(); + views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr)); + + auto runtime_stats = std::make_unique(); + runtime_stats->target_name = view_id.getFullTableName(); + runtime_stats->thread_status = view_thread_status; + runtime_stats->event_time = std::chrono::system_clock::now(); + runtime_stats->event_status = QueryViewsLogElement::ViewStatus::EXCEPTION_BEFORE_START; + + auto & type = runtime_stats->type; + auto & target_name = runtime_stats->target_name; + auto * view_counter_ms = &runtime_stats->elapsed_ms; + + if (auto * materialized_view = dynamic_cast(view.get())) + { + auto lock = materialized_view->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); + + if (lock == nullptr) + { + // In case the materialized view is dropped/detached at this point, we register a warning and ignore it + assert(materialized_view->is_dropped || materialized_view->is_detached); + LOG_WARNING( + getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); + return std::nullopt; + } + + type = QueryViewsLogElement::ViewType::MATERIALIZED; + result_chain.addTableLock(lock); + + StoragePtr inner_table = materialized_view->tryGetTargetTable(); + /// If target table was dropped, ignore this materialized view. + if (!inner_table) + { + if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table) + return std::nullopt; + + throw Exception( + ErrorCodes::UNKNOWN_TABLE, + "Target table '{}' of view '{}' doesn't exists. 
To ignore this view use setting " + "ignore_materialized_views_with_dropped_target_table", + materialized_view->getTargetTableId().getFullTableName(), + view_id.getFullTableName()); + } + + auto inner_table_id = inner_table->getStorageID(); + auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr(); + + const auto & select_query = view_metadata_snapshot->getSelectQuery(); + if (select_query.select_table_id != views_data->source_storage_id) + { + /// It may happen if materialize view query was changed and it doesn't depend on this source table anymore. + /// See setting `allow_experimental_alter_materialized_view_structure` + LOG_DEBUG( + getLogger("PushingToViews"), "Table '{}' is not a source for view '{}' anymore, current source is '{}'", + select_query.select_table_id.getFullTableName(), view_id.getFullTableName(), views_data->source_storage_id); + return std::nullopt; + } + + query = select_query.inner_query; + + target_name = inner_table_id.getFullTableName(); + + Block header; + + /// Get list of columns we get from select query. + if (select_context->getSettingsRef().allow_experimental_analyzer) + header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context); + else + header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock(); + + /// Insert only columns returned by select. + Names insert_columns; + const auto & inner_table_columns = inner_metadata_snapshot->getColumns(); + for (const auto & column : header) + { + /// But skip columns which storage doesn't have. + if (inner_table_columns.hasNotAlias(column.name)) + insert_columns.emplace_back(column.name); + } + + InterpreterInsertQuery interpreter(nullptr, insert_context, false, false, false); + out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms, !materialized_view->hasInnerTable()); + + if (interpreter.shouldAddSquashingFroStorage(inner_table)) + { + bool table_prefers_large_blocks = inner_table->prefersLargeBlocks(); + const auto & settings = insert_context->getSettingsRef(); + + out.addSource(std::make_shared( + out.getInputHeader(), + table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size, + table_prefers_large_blocks ? 
settings.min_insert_block_size_bytes : 0ULL)); + } + + auto counting = std::make_shared(out.getInputHeader(), current_thread, insert_context->getQuota()); + counting->setProcessListElement(insert_context->getProcessListElement()); + counting->setProgressCallback(insert_context->getProgressCallback()); + out.addSource(std::move(counting)); + + out.addStorageHolder(view); + out.addStorageHolder(inner_table); + } + else if (auto * live_view = dynamic_cast(view.get())) + { + runtime_stats->type = QueryViewsLogElement::ViewType::LIVE; + query = live_view->getInnerQuery(); + out = buildPushingToViewsChain( + view, view_metadata_snapshot, insert_context, ASTPtr(), + /* no_destination= */ true, + thread_status_holder, running_group, view_counter_ms, async_insert, storage_header); + } + else if (auto * window_view = dynamic_cast(view.get())) + { + runtime_stats->type = QueryViewsLogElement::ViewType::WINDOW; + query = window_view->getMergeableQuery(); + out = buildPushingToViewsChain( + view, view_metadata_snapshot, insert_context, ASTPtr(), + /* no_destination= */ true, + thread_status_holder, running_group, view_counter_ms, async_insert); + } + else + out = buildPushingToViewsChain( + view, view_metadata_snapshot, insert_context, ASTPtr(), + /* no_destination= */ false, + thread_status_holder, running_group, view_counter_ms, async_insert); + + views_data->views.emplace_back(ViewRuntimeData{ + std::move(query), + out.getInputHeader(), + view_id, + nullptr, + std::move(runtime_stats)}); + + if (type == QueryViewsLogElement::ViewType::MATERIALIZED) + { + auto executing_inner_query = std::make_shared( + storage_header, views_data->views.back(), views_data); + executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms); + + out.addSource(std::move(executing_inner_query)); + } + + return out; +} + Chain buildPushingToViewsChain( const StoragePtr & storage, @@ -232,259 +469,45 @@ Chain buildPushingToViewsChain( auto table_id = storage->getStorageID(); auto views = DatabaseCatalog::instance().getDependentViews(table_id); - /// We need special context for materialized views insertions - ContextMutablePtr select_context; - ContextMutablePtr insert_context; ViewsDataPtr views_data; if (!views.empty()) { - select_context = Context::createCopy(context); - insert_context = Context::createCopy(context); - - const auto & insert_settings = insert_context->getSettingsRef(); - - // Do not deduplicate insertions into MV if the main insertion is Ok - if (disable_deduplication_for_children) - { - insert_context->setSetting("insert_deduplicate", Field{false}); - } - - // Processing of blocks for MVs is done block by block, and there will - // be no parallel reading after (plus it is not a costless operation) - select_context->setSetting("parallelize_output_from_storages", Field{false}); - - // Separate min_insert_block_size_rows/min_insert_block_size_bytes for children - if (insert_settings.min_insert_block_size_rows_for_materialized_views) - insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value); - if (insert_settings.min_insert_block_size_bytes_for_materialized_views) - insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value); - - views_data = std::make_shared(thread_status_holder, select_context, table_id, metadata_snapshot, storage); + auto process_context = Context::createCopy(context); /// This context will be used in `process` function + views_data = 
std::make_shared(thread_status_holder, process_context, table_id, metadata_snapshot, storage); } std::vector chains; for (const auto & view_id : views) { - auto view = DatabaseCatalog::instance().tryGetTable(view_id, context); - if (view == nullptr) + try { - LOG_WARNING( - getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); - continue; - } + auto out = generateViewChain( + context, view_id, running_group, result_chain, + views_data, thread_status_holder, async_insert, storage_header, disable_deduplication_for_children); - auto view_metadata_snapshot = view->getInMemoryMetadataPtr(); - - ASTPtr query; - Chain out; - - /// We are creating a ThreadStatus per view to store its metrics individually - /// Since calling ThreadStatus() changes current_thread we save it and restore it after the calls - /// Later on, before doing any task related to a view, we'll switch to its ThreadStatus, do the work, - /// and switch back to the original thread_status. - auto * original_thread = current_thread; - SCOPE_EXIT({ current_thread = original_thread; }); - current_thread = nullptr; - std::unique_ptr view_thread_status_ptr = std::make_unique(/*check_current_thread_on_destruction=*/ false); - /// Copy of a ThreadStatus should be internal. - view_thread_status_ptr->setInternalThread(); - view_thread_status_ptr->attachToGroup(running_group); - - auto * view_thread_status = view_thread_status_ptr.get(); - views_data->thread_status_holder->thread_statuses.push_front(std::move(view_thread_status_ptr)); - - auto runtime_stats = std::make_unique(); - runtime_stats->target_name = view_id.getFullTableName(); - runtime_stats->thread_status = view_thread_status; - runtime_stats->event_time = std::chrono::system_clock::now(); - runtime_stats->event_status = QueryViewsLogElement::ViewStatus::EXCEPTION_BEFORE_START; - - auto & type = runtime_stats->type; - auto & target_name = runtime_stats->target_name; - auto * view_counter_ms = &runtime_stats->elapsed_ms; - - const auto & insert_settings = insert_context->getSettingsRef(); - ContextMutablePtr view_insert_context = insert_context; - - if (!disable_deduplication_for_children && - insert_settings.update_insert_deduplication_token_in_dependent_materialized_views && - !insert_settings.insert_deduplication_token.value.empty()) - { - /** Update deduplication token passed to dependent MV with current view id. So it is possible to properly handle - * deduplication in complex INSERT flows. - * - * Example: - * - * landing -┬--> mv_1_1 ---> ds_1_1 ---> mv_2_1 --┬-> ds_2_1 ---> mv_3_1 ---> ds_3_1 - * | | - * └--> mv_1_2 ---> ds_1_2 ---> mv_2_2 --┘ - * - * Here we want to avoid deduplication for two different blocks generated from `mv_2_1` and `mv_2_2` that will - * be inserted into `ds_2_1`. - * - * We are forced to use view id instead of table id because there are some possible INSERT flows where no tables - * are involved. 
- * - * Example: - * - * landing -┬--> mv_1_1 --┬-> ds_1_1 - * | | - * └--> mv_1_2 --┘ - * - */ - auto insert_deduplication_token = insert_settings.insert_deduplication_token.value; - - if (view_id.hasUUID()) - insert_deduplication_token += "_" + toString(view_id.uuid); - else - insert_deduplication_token += "_" + view_id.getFullNameNotQuoted(); - - view_insert_context = Context::createCopy(insert_context); - view_insert_context->setSetting("insert_deduplication_token", insert_deduplication_token); - } - - if (auto * materialized_view = dynamic_cast(view.get())) - { - auto lock = materialized_view->tryLockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); - - if (lock == nullptr) - { - // In case the materialized view is dropped/detached at this point, we register a warning and ignore it - assert(materialized_view->is_dropped || materialized_view->is_detached); - LOG_WARNING( - getLogger("PushingToViews"), "Trying to access table {} but it doesn't exist", view_id.getFullTableName()); + if (!out.has_value()) continue; - } - type = QueryViewsLogElement::ViewType::MATERIALIZED; - result_chain.addTableLock(lock); + chains.emplace_back(std::move(*out)); - StoragePtr inner_table = materialized_view->tryGetTargetTable(); - /// If target table was dropped, ignore this materialized view. - if (!inner_table) + /// Add the view to the query access info so it can appear in system.query_log + /// hasQueryContext - for materialized tables with background replication process query context is not added + if (!no_destination && context->hasQueryContext()) { - if (context->getSettingsRef().ignore_materialized_views_with_dropped_target_table) - continue; + context->getQueryContext()->addQueryAccessInfo( + backQuoteIfNeed(view_id.getDatabaseName()), + views_data->views.back().runtime_stats->target_name, + /*column_names=*/ {}); - throw Exception( - ErrorCodes::UNKNOWN_TABLE, - "Target table '{}' of view '{}' doesn't exists. To ignore this view use setting " - "ignore_materialized_views_with_dropped_target_table", - materialized_view->getTargetTableId().getFullTableName(), - view_id.getFullTableName()); + context->getQueryContext()->addViewAccessInfo(view_id.getFullTableName()); } - - auto inner_table_id = inner_table->getStorageID(); - auto inner_metadata_snapshot = inner_table->getInMemoryMetadataPtr(); - - const auto & select_query = view_metadata_snapshot->getSelectQuery(); - if (select_query.select_table_id != table_id) - { - /// It may happen if materialize view query was changed and it doesn't depend on this source table anymore. - /// See setting `allow_experimental_alter_materialized_view_structure` - LOG_DEBUG( - getLogger("PushingToViews"), "Table '{}' is not a source for view '{}' anymore, current source is '{}'", - select_query.select_table_id.getFullTableName(), view_id.getFullTableName(), table_id); - continue; - } - - query = select_query.inner_query; - - target_name = inner_table_id.getFullTableName(); - - Block header; - - /// Get list of columns we get from select query. - if (select_context->getSettingsRef().allow_experimental_analyzer) - header = InterpreterSelectQueryAnalyzer::getSampleBlock(query, select_context); - else - header = InterpreterSelectQuery(query, select_context, SelectQueryOptions()).getSampleBlock(); - - /// Insert only columns returned by select. - Names insert_columns; - const auto & inner_table_columns = inner_metadata_snapshot->getColumns(); - for (const auto & column : header) - { - /// But skip columns which storage doesn't have. 
- if (inner_table_columns.hasNotAlias(column.name)) - insert_columns.emplace_back(column.name); - } - - InterpreterInsertQuery interpreter(nullptr, view_insert_context, false, false, false); - out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms); - - if (interpreter.shouldAddSquashingFroStorage(inner_table)) - { - bool table_prefers_large_blocks = inner_table->prefersLargeBlocks(); - const auto & settings = view_insert_context->getSettingsRef(); - - out.addSource(std::make_shared( - out.getInputHeader(), - table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size, - table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL)); - } - - auto counting = std::make_shared(out.getInputHeader(), current_thread, view_insert_context->getQuota()); - counting->setProcessListElement(view_insert_context->getProcessListElement()); - counting->setProgressCallback(view_insert_context->getProgressCallback()); - out.addSource(std::move(counting)); - - out.addStorageHolder(view); - out.addStorageHolder(inner_table); } - else if (auto * live_view = dynamic_cast(view.get())) + catch (const Exception & e) { - runtime_stats->type = QueryViewsLogElement::ViewType::LIVE; - query = live_view->getInnerQuery(); // Used only to log in system.query_views_log - out = buildPushingToViewsChain( - view, view_metadata_snapshot, view_insert_context, ASTPtr(), - /* no_destination= */ true, - thread_status_holder, running_group, view_counter_ms, async_insert, storage_header); - } - else if (auto * window_view = dynamic_cast(view.get())) - { - runtime_stats->type = QueryViewsLogElement::ViewType::WINDOW; - query = window_view->getMergeableQuery(); // Used only to log in system.query_views_log - out = buildPushingToViewsChain( - view, view_metadata_snapshot, view_insert_context, ASTPtr(), - /* no_destination= */ true, - thread_status_holder, running_group, view_counter_ms, async_insert); - } - else - out = buildPushingToViewsChain( - view, view_metadata_snapshot, view_insert_context, ASTPtr(), - /* no_destination= */ false, - thread_status_holder, running_group, view_counter_ms, async_insert); - - views_data->views.emplace_back(ViewRuntimeData{ - std::move(query), - out.getInputHeader(), - view_id, - nullptr, - std::move(runtime_stats)}); - - if (type == QueryViewsLogElement::ViewType::MATERIALIZED) - { - auto executing_inner_query = std::make_shared( - storage_header, views_data->views.back(), views_data); - executing_inner_query->setRuntimeData(view_thread_status, view_counter_ms); - - out.addSource(std::move(executing_inner_query)); - } - - chains.emplace_back(std::move(out)); - - /// Add the view to the query access info so it can appear in system.query_log - /// hasQueryContext - for materialized tables with background replication process query context is not added - if (!no_destination && context->hasQueryContext()) - { - context->getQueryContext()->addQueryAccessInfo( - backQuoteIfNeed(view_id.getDatabaseName()), - views_data->views.back().runtime_stats->target_name, - /*column_names=*/ {}); - - context->getQueryContext()->addViewAccessInfo(view_id.getFullTableName()); + LOG_ERROR(&Poco::Logger::get("PushingToViews"), "Failed to push block to view {}, {}", view_id, e.message()); + if (!context->getSettingsRef().materialized_views_ignore_errors) + throw; } } @@ -580,12 +603,12 @@ static QueryPipeline process(Block block, ViewRuntimeData & view, const ViewsDat if 
(local_context->getSettingsRef().allow_experimental_analyzer) { - InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions()); + InterpreterSelectQueryAnalyzer interpreter(view.query, local_context, local_context->getViewSource(), SelectQueryOptions().ignoreAccessCheck()); pipeline = interpreter.buildQueryPipeline(); } else { - InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions()); + InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions().ignoreAccessCheck()); pipeline = interpreter.buildQueryPipeline(); } diff --git a/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp b/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp index 6c7c7447070..8a13973b970 100644 --- a/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp +++ b/src/Processors/Transforms/getSourceFromASTInsertQuery.cpp @@ -37,7 +37,7 @@ InputFormatPtr getInputFormatFromASTInsertQuery( const auto * ast_insert_query = ast->as(); if (!ast_insert_query) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: query requires data to insert, but it is not INSERT query"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query requires data to insert, but it is not INSERT query"); if (ast_insert_query->infile && context->getApplicationType() == Context::ApplicationType::SERVER) throw Exception(ErrorCodes::UNKNOWN_TYPE_OF_QUERY, "Query has infile and was send directly to server"); @@ -47,7 +47,7 @@ InputFormatPtr getInputFormatFromASTInsertQuery( if (input_function) throw Exception(ErrorCodes::INVALID_USAGE_OF_INPUT, "FORMAT must be specified for function input()"); else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: INSERT query requires format to be set"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "INSERT query requires format to be set"); } /// Data could be in parsed (ast_insert_query.data) and in not parsed yet (input_buffer_tail_part) part of query. 
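For context on the `ignoreAccessCheck()` calls added to `process()` above: the view's inner SELECT is re-interpreted there over blocks that the outer INSERT was already allowed to push, and with the SQL SECURITY support added elsewhere in this diff access to the view's source tables is presumably governed by the view definition rather than by the inserting user, so per-table access checks are skipped for that interpretation. A minimal sketch of the assumed builder-style option (the field name is illustrative; the real `SelectQueryOptions` carries many more flags):

struct SelectQueryOptionsSketch
{
    /// Assumed flag consulted by the interpreter before running access checks.
    bool check_access = true;

    SelectQueryOptionsSketch & ignoreAccessCheck(bool value = true)
    {
        check_access = !value;
        return *this;
    }
};

/// Mirrors the call sites above, e.g.:
///     InterpreterSelectQuery interpreter(view.query, local_context, SelectQueryOptions().ignoreAccessCheck());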
@@ -105,7 +105,7 @@ std::unique_ptr getReadBufferFromASTInsertQuery(const ASTPtr & ast) { const auto * insert_query = ast->as(); if (!insert_query) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: query requires data to insert, but it is not INSERT query"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Query requires data to insert, but it is not INSERT query"); if (insert_query->infile) { diff --git a/src/QueryPipeline/ExecutionSpeedLimits.cpp b/src/QueryPipeline/ExecutionSpeedLimits.cpp index f8ae4c76d0f..05fd394db77 100644 --- a/src/QueryPipeline/ExecutionSpeedLimits.cpp +++ b/src/QueryPipeline/ExecutionSpeedLimits.cpp @@ -113,7 +113,7 @@ static bool handleOverflowMode(OverflowMode mode, int code, FormatStringHelper #include #include -#include #include #include #include @@ -54,7 +53,7 @@ private: struct Task : public AsyncTask { - Task(RemoteQueryExecutorReadContext & read_context_) : read_context(read_context_) {} + explicit Task(RemoteQueryExecutorReadContext & read_context_) : read_context(read_context_) {} RemoteQueryExecutorReadContext & read_context; diff --git a/src/Server/HTTP/HTTPServerResponse.cpp b/src/Server/HTTP/HTTPServerResponse.cpp index 3c2d54a67df..b6207f2d302 100644 --- a/src/Server/HTTP/HTTPServerResponse.cpp +++ b/src/Server/HTTP/HTTPServerResponse.cpp @@ -123,4 +123,20 @@ void HTTPServerResponse::requireAuthentication(const std::string & realm) set("WWW-Authenticate", auth); } +void HTTPServerResponse::redirect(const std::string & uri, HTTPStatus status) +{ + poco_assert(!stream); + + setContentLength(0); + setChunkedTransferEncoding(false); + + setStatusAndReason(status); + set("Location", uri); + + // Send header + Poco::Net::HTTPHeaderOutputStream hs(session); + write(hs); + hs.flush(); +} + } diff --git a/src/Server/HTTP/HTTPServerResponse.h b/src/Server/HTTP/HTTPServerResponse.h index 6efe48667eb..6c5be008bf8 100644 --- a/src/Server/HTTP/HTTPServerResponse.h +++ b/src/Server/HTTP/HTTPServerResponse.h @@ -231,6 +231,16 @@ public: /// Returns true if the response (header) has been sent. bool sent() const { return !!stream; } + /// Sets the status code, which must be one of + /// HTTP_MOVED_PERMANENTLY (301), HTTP_FOUND (302), + /// or HTTP_SEE_OTHER (303), + /// and sets the "Location" header field + /// to the given URI, which according to + /// the HTTP specification, must be absolute. + /// + /// Must not be called after send() has been called. 
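    /// A usage sketch (the handler below is illustrative and not part of this change):
    ///
    ///     void MyRedirectingHandler::handleRequest(HTTPServerRequest &, HTTPServerResponse & response, const ProfileEvents::Event &)
    ///     {
    ///         /// Sends an empty response with status 302 and "Location: https://example.com/new-location".
    ///         response.redirect("https://example.com/new-location");
    ///     }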
+ void redirect(const std::string & uri, HTTPStatus status = HTTP_FOUND); + Poco::Net::StreamSocket & getSocket() { return session.socket(); } void attachRequest(HTTPServerRequest * request_) { request = request_; } diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index e9157266901..9a67e576345 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -26,6 +26,42 @@ namespace ErrorCodes extern const int INVALID_CONFIG_PARAMETER; } +namespace +{ + +class RedirectRequestHandler : public HTTPRequestHandler +{ +private: + std::string url; + +public: + explicit RedirectRequestHandler(std::string url_) + : url(std::move(url_)) + { + } + + void handleRequest(HTTPServerRequest &, HTTPServerResponse & response, const ProfileEvents::Event &) override + { + response.redirect(url); + } +}; + +HTTPRequestHandlerFactoryPtr createRedirectHandlerFactory( + const Poco::Util::AbstractConfiguration & config, + const std::string & config_prefix) +{ + std::string url = config.getString(config_prefix + ".handler.location"); + + auto factory = std::make_shared>( + [my_url = std::move(url)]() { return std::make_unique(my_url); }); + + factory->addFiltersFromConfig(config, config_prefix); + return factory; +} + +} + + static void addCommonDefaultHandlersFactory(HTTPRequestHandlerFactoryMain & factory, IServer & server); static void addDefaultHandlersFactory( HTTPRequestHandlerFactoryMain & factory, @@ -73,6 +109,10 @@ static inline auto createHandlersFactoryFromConfig( { main_handler_factory->addHandler(createStaticHandlerFactory(server, config, prefix + "." + key)); } + else if (handler_type == "redirect") + { + main_handler_factory->addHandler(createRedirectHandlerFactory(config, prefix + "." + key)); + } else if (handler_type == "dynamic_query_handler") { main_handler_factory->addHandler(createDynamicHandlerFactory(server, config, prefix + "." 
+ key)); @@ -165,7 +205,7 @@ HTTPRequestHandlerFactoryPtr createHandlerFactory(IServer & server, const Poco:: return createPrometheusMainHandlerFactory(server, config, metrics_writer, name); } - throw Exception(ErrorCodes::LOGICAL_ERROR, "LOGICAL ERROR: Unknown HTTP handler factory name."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown HTTP handler factory name."); } diff --git a/src/Server/HTTPHandlerFactory.h b/src/Server/HTTPHandlerFactory.h index bfa2fdd1e4b..ac18c36e6c9 100644 --- a/src/Server/HTTPHandlerFactory.h +++ b/src/Server/HTTPHandlerFactory.h @@ -56,6 +56,8 @@ public: continue; else if (filter_type == "url") addFilter(urlFilter(config, prefix + ".url")); + else if (filter_type == "empty_query_string") + addFilter(emptyQueryStringFilter()); else if (filter_type == "headers") addFilter(headersFilter(config, prefix + ".headers")); else if (filter_type == "methods") diff --git a/src/Server/HTTPHandlerRequestFilter.h b/src/Server/HTTPHandlerRequestFilter.h index 1f5db283323..15e64cf7f48 100644 --- a/src/Server/HTTPHandlerRequestFilter.h +++ b/src/Server/HTTPHandlerRequestFilter.h @@ -37,7 +37,7 @@ static inline bool checkExpression(std::string_view match_str, const std::pair methods; Poco::StringTokenizer tokenizer(config.getString(config_path), ","); @@ -62,7 +62,7 @@ static inline auto getExpression(const std::string & expression) return std::make_pair(expression, compiled_regex); } -static inline auto urlFilter(const Poco::Util::AbstractConfiguration & config, const std::string & config_path) /// NOLINT +static inline auto urlFilter(const Poco::Util::AbstractConfiguration & config, const std::string & config_path) { return [expression = getExpression(config.getString(config_path))](const HTTPServerRequest & request) { @@ -73,7 +73,16 @@ static inline auto urlFilter(const Poco::Util::AbstractConfiguration & config, c }; } -static inline auto headersFilter(const Poco::Util::AbstractConfiguration & config, const std::string & prefix) /// NOLINT +static inline auto emptyQueryStringFilter() +{ + return [](const HTTPServerRequest & request) + { + const auto & uri = request.getURI(); + return std::string::npos == uri.find('?'); + }; +} + +static inline auto headersFilter(const Poco::Util::AbstractConfiguration & config, const std::string & prefix) { std::unordered_map> headers_expression; Poco::Util::AbstractConfiguration::Keys headers_name; diff --git a/src/Server/NotFoundHandler.h b/src/Server/NotFoundHandler.h index a484d237771..9820c185a3d 100644 --- a/src/Server/NotFoundHandler.h +++ b/src/Server/NotFoundHandler.h @@ -9,7 +9,7 @@ namespace DB class NotFoundHandler : public HTTPRequestHandler { public: - NotFoundHandler(std::vector hints_) : hints(std::move(hints_)) {} + explicit NotFoundHandler(std::vector hints_) : hints(std::move(hints_)) {} void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; private: std::vector hints; diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 1d16d77f9ad..d883029408c 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -942,7 +942,7 @@ void TCPHandler::processInsertQuery() auto wait_status = result.future.wait_for(std::chrono::milliseconds(timeout_ms)); if (wait_status == std::future_status::deferred) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: got future in deferred state"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Got future in deferred state"); if (wait_status == std::future_status::timeout) throw 
Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Wait for async insert timeout ({} ms) exceeded)", timeout_ms); diff --git a/src/Server/WebUIRequestHandler.h b/src/Server/WebUIRequestHandler.h index 1769773bba8..b84c8f6534d 100644 --- a/src/Server/WebUIRequestHandler.h +++ b/src/Server/WebUIRequestHandler.h @@ -15,7 +15,7 @@ class PlayWebUIRequestHandler : public HTTPRequestHandler private: IServer & server; public: - PlayWebUIRequestHandler(IServer & server_); + explicit PlayWebUIRequestHandler(IServer & server_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; @@ -24,7 +24,7 @@ class DashboardWebUIRequestHandler : public HTTPRequestHandler private: IServer & server; public: - DashboardWebUIRequestHandler(IServer & server_); + explicit DashboardWebUIRequestHandler(IServer & server_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; @@ -33,7 +33,7 @@ class BinaryWebUIRequestHandler : public HTTPRequestHandler private: IServer & server; public: - BinaryWebUIRequestHandler(IServer & server_); + explicit BinaryWebUIRequestHandler(IServer & server_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; @@ -42,7 +42,7 @@ class JavaScriptWebUIRequestHandler : public HTTPRequestHandler private: IServer & server; public: - JavaScriptWebUIRequestHandler(IServer & server_); + explicit JavaScriptWebUIRequestHandler(IServer & server_); void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event) override; }; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 766863ed9f9..b09200f06ff 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -442,6 +442,14 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.if_exists = command_ast->if_exists; return command; } + else if (command_ast->type == ASTAlterCommand::MODIFY_SQL_SECURITY) + { + AlterCommand command; + command.ast = command_ast->clone(); + command.type = AlterCommand::MODIFY_SQL_SECURITY; + command.sql_security = command_ast->sql_security->clone(); + return command; + } else return {}; } @@ -854,6 +862,8 @@ void AlterCommand::apply(StorageInMemoryMetadata & metadata, ContextPtr context) for (auto & index : metadata.secondary_indices) rename_visitor.visit(index.definition_ast); } + else if (type == MODIFY_SQL_SECURITY) + metadata.setSQLSecurity(sql_security->as()); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong parameter type in ALTER query"); } diff --git a/src/Storages/AlterCommands.h b/src/Storages/AlterCommands.h index d0d5d02b5f7..b1b6c8308f9 100644 --- a/src/Storages/AlterCommands.h +++ b/src/Storages/AlterCommands.h @@ -50,6 +50,7 @@ struct AlterCommand MODIFY_DATABASE_SETTING, COMMENT_TABLE, REMOVE_SAMPLE_BY, + MODIFY_SQL_SECURITY, }; /// Which property user wants to remove from column @@ -147,6 +148,9 @@ struct AlterCommand /// For MODIFY_QUERY ASTPtr select = nullptr; + /// For MODIFY_SQL_SECURITY + ASTPtr sql_security = nullptr; + /// For MODIFY_REFRESH ASTPtr refresh = nullptr; diff --git a/src/Storages/Cache/SchemaCache.h b/src/Storages/Cache/SchemaCache.h index 1bfc18bddab..bb6c91fc9f0 100644 --- a/src/Storages/Cache/SchemaCache.h +++ b/src/Storages/Cache/SchemaCache.h @@ -22,7 +22,7 @@ const size_t DEFAULT_SCHEMA_CACHE_ELEMENTS = 4096; class 
SchemaCache { public: - SchemaCache(size_t max_elements_); + explicit SchemaCache(size_t max_elements_); struct Key { diff --git a/src/Storages/ColumnsDescription.h b/src/Storages/ColumnsDescription.h index 59179aac17a..5ba655ee10d 100644 --- a/src/Storages/ColumnsDescription.h +++ b/src/Storages/ColumnsDescription.h @@ -43,7 +43,7 @@ struct GetColumnsOptions All = AllPhysical | Aliases | Ephemeral, }; - GetColumnsOptions(Kind kind_) : kind(kind_) {} + GetColumnsOptions(Kind kind_) : kind(kind_) {} /// NOLINT(google-explicit-constructor) GetColumnsOptions & withSubcolumns(bool value = true) { @@ -113,7 +113,7 @@ public: explicit ColumnsDescription(NamesAndTypesList ordinary); - explicit ColumnsDescription(std::initializer_list ordinary); + ColumnsDescription(std::initializer_list ordinary); explicit ColumnsDescription(NamesAndTypesList ordinary, NamesAndAliases aliases); diff --git a/src/Storages/Freeze.h b/src/Storages/Freeze.h index 5775653aaea..035786fc90d 100644 --- a/src/Storages/Freeze.h +++ b/src/Storages/Freeze.h @@ -32,7 +32,7 @@ public: class Unfreezer { public: - Unfreezer(ContextPtr context); + explicit Unfreezer(ContextPtr context); PartitionCommandsResultInfo unfreezePartitionsFromTableDirectory(MergeTreeData::MatcherFn matcher, const String & backup_name, const Disks & disks, const fs::path & table_directory); BlockIO systemUnfreeze(const String & backup_name); private: diff --git a/src/Storages/IStorageCluster.cpp b/src/Storages/IStorageCluster.cpp index 475a2e00351..ab45ce877c2 100644 --- a/src/Storages/IStorageCluster.cpp +++ b/src/Storages/IStorageCluster.cpp @@ -191,7 +191,11 @@ void ReadFromCluster::initializePipeline(QueryPipelineBuilder & pipeline, const extension); remote_query_executor->setLogger(log); - pipes.emplace_back(std::make_shared(remote_query_executor, add_agg_info, false, false)); + pipes.emplace_back(std::make_shared( + remote_query_executor, + add_agg_info, + current_settings.async_socket_for_remote, + current_settings.async_query_sending_for_remote)); } } diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index aa347fc719d..638f5fe2ef6 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -246,64 +246,83 @@ namespace const String CONFIG_KAFKA_TOPIC_TAG = "kafka_topic"; const String CONFIG_NAME_TAG = "name"; + void setKafkaConfigValue(cppkafka::Configuration & kafka_config, const String & key, const String & value) + { + if (key.starts_with(CONFIG_KAFKA_TOPIC_TAG) || key == CONFIG_NAME_TAG) /// multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + return; /// used by new per-topic configuration, ignore + + /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. + /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md + const String setting_name_in_kafka_config = (key == "log_level") ? 
key : boost::replace_all_copy(key, "_", "."); + kafka_config.set(setting_name_in_kafka_config, value); + } + /// Read server configuration into cppkafka configuration, used by global configuration and by legacy per-topic configuration - void loadFromConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix) + void loadFromConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String& collection_name, const String & config_prefix) { + if (!collection_name.empty()) + { + const auto & collection = NamedCollectionFactory::instance().get(collection_name); + for (const auto & key : collection->getKeys(-1, config_prefix)) + { + // Cut prefix with '.' before actual config tag. + const auto param_name = key.substr(config_prefix.size() + 1); + setKafkaConfigValue(kafka_config, param_name, collection->get(key)); + } + return; + } + /// Read all tags one level below Poco::Util::AbstractConfiguration::Keys tags; config.keys(config_prefix, tags); for (const auto & tag : tags) { - if (tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) /// multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. - continue; /// used by new per-topic configuration, ignore - - const String setting_path = config_prefix + "." + tag; - const String setting_value = config.getString(setting_path); - - /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. - /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md - const String setting_name_in_kafka_config = (tag == "log_level") ? tag : boost::replace_all_copy(tag, "_", "."); - kafka_config.set(setting_name_in_kafka_config, setting_value); + const String setting_path = fmt::format("{}.{}", config_prefix, tag); + setKafkaConfigValue(kafka_config, tag, config.getString(setting_path)); } } /// Read server configuration into cppkafa configuration, used by new per-topic configuration - void loadTopicConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const String & topic) + void loadTopicConfig(cppkafka::Configuration & kafka_config, const Poco::Util::AbstractConfiguration & config, const String& collection_name, const String& config_prefix, const String& topic) { - /// Read all tags one level below - Poco::Util::AbstractConfiguration::Keys tags; - config.keys(config_prefix, tags); - - for (const auto & tag : tags) + if (!collection_name.empty()) { - /// Only consider tag . Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. - if (!tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) - continue; - - /// Read topic name between ... - const String kafka_topic_path = config_prefix + "." + tag; - const String kafpa_topic_name_path = kafka_topic_path + "." + CONFIG_NAME_TAG; - - const String topic_name = config.getString(kafpa_topic_name_path); - if (topic_name == topic) + const auto topic_prefix = fmt::format("{}.{}", config_prefix, CONFIG_KAFKA_TOPIC_TAG); + const auto & collection = NamedCollectionFactory::instance().get(collection_name); + for (const auto & key : collection->getKeys(1, config_prefix)) { - /// Found it! Now read the per-topic configuration into cppkafka. - Poco::Util::AbstractConfiguration::Keys inner_tags; - config.keys(kafka_topic_path, inner_tags); - for (const auto & inner_tag : inner_tags) - { - if (inner_tag == CONFIG_NAME_TAG) - continue; // ignore + /// Only consider key . 
Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + if (!key.starts_with(topic_prefix)) + continue; - /// "log_level" has valid underscore, the remaining librdkafka setting use dot.separated.format which isn't acceptable for XML. - /// See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md - const String setting_path = kafka_topic_path + "." + inner_tag; - const String setting_value = config.getString(setting_path); + const String kafka_topic_path = config_prefix + "." + key; + const String kafka_topic_name_path = kafka_topic_path + "." + CONFIG_NAME_TAG; + if (topic == collection->get(kafka_topic_name_path)) + /// Found it! Now read the per-topic configuration into cppkafka. + loadFromConfig(kafka_config, config, collection_name, kafka_topic_path); + } + } + else + { + /// Read all tags one level below + Poco::Util::AbstractConfiguration::Keys tags; + config.keys(config_prefix, tags); - const String setting_name_in_kafka_config = (inner_tag == "log_level") ? inner_tag : boost::replace_all_copy(inner_tag, "_", "."); - kafka_config.set(setting_name_in_kafka_config, setting_value); - } + for (const auto & tag : tags) + { + /// Only consider tag . Multiple occurrences given as "kafka_topic", "kafka_topic[1]", etc. + if (!tag.starts_with(CONFIG_KAFKA_TOPIC_TAG)) + continue; + + /// Read topic name between ... + const String kafka_topic_path = fmt::format("{}.{}", config_prefix, tag); + const String kafka_topic_name_path = fmt::format("{}.{}", kafka_topic_path, CONFIG_NAME_TAG); + + const String topic_name = config.getString(kafka_topic_name_path); + if (topic_name == topic) + /// Found it! Now read the per-topic configuration into cppkafka. + loadFromConfig(kafka_config, config, collection_name, kafka_topic_path); } } } @@ -728,13 +747,6 @@ size_t StorageKafka::getPollTimeoutMillisecond() const : getContext()->getSettingsRef().stream_poll_timeout_ms.totalMilliseconds(); } -String StorageKafka::getConfigPrefix() const -{ - if (!collection_name.empty()) - return "named_collections." + collection_name + "." + CONFIG_KAFKA_TAG; /// Add one more level to separate librdkafka configuration. - return CONFIG_KAFKA_TAG; -} - void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config) { // Update consumer configuration from the configuration. Example: @@ -743,9 +755,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config) // 100000 // const auto & config = getContext()->getConfigRef(); - auto config_prefix = getConfigPrefix(); - if (config.has(config_prefix)) - loadFromConfig(kafka_config, config, config_prefix); + loadFromConfig(kafka_config, config, collection_name, CONFIG_KAFKA_TAG); #if USE_KRB5 if (kafka_config.has_property("sasl.kerberos.kinit.cmd")) @@ -784,9 +794,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config) // as are ugly. for (const auto & topic : topics) { - const auto topic_config_key = config_prefix + "_" + topic; - if (config.has(topic_config_key)) - loadFromConfig(kafka_config, config, topic_config_key); + loadFromConfig(kafka_config, config, collection_name, CONFIG_KAFKA_TAG + "_" + topic); } // Update consumer topic-specific configuration (new syntax). Example with topics "football" and "baseball": @@ -805,8 +813,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config) // Advantages: The period restriction no longer applies (e.g. sports.football will work), everything // Kafka-related is below . 
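    // Note on naming, applied by setKafkaConfigValue() for every key loaded above and below:
    // ClickHouse config tags use underscores while librdkafka expects dot-separated names,
    // so e.g. "auto_offset_reset" becomes "auto.offset.reset" and "queue_buffering_max_ms"
    // becomes "queue.buffering.max.ms"; "log_level" is the one setting kept verbatim.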
for (const auto & topic : topics) - if (config.has(config_prefix)) - loadTopicConfig(kafka_config, config, config_prefix, topic); + loadTopicConfig(kafka_config, config, collection_name, CONFIG_KAFKA_TAG, topic); // No need to add any prefix, messages can be distinguished kafka_config.set_log_callback([this](cppkafka::KafkaHandleBase &, int level, const std::string & facility, const std::string & message) @@ -817,7 +824,7 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config) /// NOTE: statistics should be consumed, otherwise it creates too much /// entries in the queue, that leads to memory leak and slow shutdown. - if (!config.has(config_prefix + "." + "statistics_interval_ms")) + if (!kafka_config.has_property("statistics.interval.ms")) { // every 3 seconds by default. set to 0 to disable. kafka_config.set("statistics.interval.ms", "3000"); diff --git a/src/Storages/Kafka/StorageKafka.h b/src/Storages/Kafka/StorageKafka.h index f9a1e3ff6f3..d5e319b8974 100644 --- a/src/Storages/Kafka/StorageKafka.h +++ b/src/Storages/Kafka/StorageKafka.h @@ -145,7 +145,6 @@ private: // Update Kafka configuration with values from CH user configuration. void updateConfiguration(cppkafka::Configuration & kafka_config); - String getConfigPrefix() const; void threadFunc(size_t idx); size_t getPollMaxBatchSize() const; diff --git a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp index 5210d14f3d0..c6407a99a4e 100644 --- a/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp +++ b/src/Storages/MergeTree/DataPartStorageOnDiskBase.cpp @@ -753,8 +753,12 @@ void DataPartStorageOnDiskBase::clearDirectory( /// Remove each expected file in directory, then remove directory itself. RemoveBatchRequest request; for (const auto & file : names_to_remove) - request.emplace_back(fs::path(dir) / file); + { + if (isGinFile(file) && (!disk->isFile(fs::path(dir) / file))) + continue; + request.emplace_back(fs::path(dir) / file); + } request.emplace_back(fs::path(dir) / "default_compression_codec.txt", true); request.emplace_back(fs::path(dir) / "delete-on-destroy.txt", true); request.emplace_back(fs::path(dir) / "txn_version.txt", true); diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index ce70fbe18e5..168c5f729ce 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -903,7 +903,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( || part_name.empty() || std::string::npos != tmp_prefix.find_first_of("/.") || std::string::npos != part_name.find_first_of("/.")) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: tmp_prefix and part_name cannot be empty or contain '.' or '/' characters."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "`tmp_prefix` and `part_name` cannot be empty or contain '.' or '/' characters."); auto part_dir = tmp_prefix + part_name; auto part_relative_path = data.getRelativeDataPath() + String(to_detached ? 
"detached/" : ""); diff --git a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp index 1ffb5177430..cbdeabffa97 100644 --- a/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp +++ b/src/Storages/MergeTree/EphemeralLockInZooKeeper.cpp @@ -17,7 +17,7 @@ EphemeralLockInZooKeeper::EphemeralLockInZooKeeper(const String & path_prefix_, : zookeeper(zookeeper_), path_prefix(path_prefix_), path(path_), conflict_path(conflict_path_) { if (conflict_path.empty() && path.size() <= path_prefix.size()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: name of the main node is shorter than prefix."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Name of the main node is shorter than prefix."); } template @@ -179,7 +179,7 @@ EphemeralLocksInAllPartitions::EphemeralLocksInAllPartitions( size_t prefix_size = block_numbers_path.size() + 1 + partitions[i].size() + 1 + path_prefix.size(); const String & path = dynamic_cast(*lock_responses[i]).path_created; if (path.size() <= prefix_size) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: name of the sequential node is shorter than prefix."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Name of the sequential node is shorter than prefix."); UInt64 number = parse(path.c_str() + prefix_size, path.size() - prefix_size); locks.push_back(LockInfo{path, partitions[i], number}); diff --git a/src/Storages/MergeTree/GinIndexStore.h b/src/Storages/MergeTree/GinIndexStore.h index 3ed624995e5..ad14a142318 100644 --- a/src/Storages/MergeTree/GinIndexStore.h +++ b/src/Storages/MergeTree/GinIndexStore.h @@ -300,4 +300,9 @@ private: std::mutex mutex; }; +inline bool isGinFile(const String &file_name) +{ + return (file_name.ends_with(".gin_dict") || file_name.ends_with(".gin_post") || file_name.ends_with(".gin_seg") || file_name.ends_with(".gin_sid")); +} + } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 6d5e486f6a1..8aa188cfe5c 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -870,7 +870,7 @@ void MergeTreeData::MergingParams::check(const StorageInMemoryMetadata & metadat if (is_optional) return; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: Sign column for storage {} is empty", storage); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Sign column for storage {} is empty", storage); } bool miss_column = true; @@ -897,7 +897,7 @@ void MergeTreeData::MergingParams::check(const StorageInMemoryMetadata & metadat if (is_optional) return; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: Version column for storage {} is empty", storage); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Version column for storage {} is empty", storage); } bool miss_column = true; @@ -926,12 +926,12 @@ void MergeTreeData::MergingParams::check(const StorageInMemoryMetadata & metadat if (is_optional) return; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: is_deleted ({}) column for storage {} is empty", is_deleted_column, storage); + throw Exception(ErrorCodes::LOGICAL_ERROR, "`is_deleted` ({}) column for storage {} is empty", is_deleted_column, storage); } else { if (version_column.empty() && !is_optional) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: Version column ({}) for storage {} is empty while is_deleted ({}) is not.", + throw Exception(ErrorCodes::LOGICAL_ERROR, "Version column ({}) for storage {} is empty while is_deleted ({}) is not.", version_column, 
storage, is_deleted_column); bool miss_is_deleted_column = true; diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index ab265715688..4475f2b6f12 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -1354,7 +1354,7 @@ protected: size_t max_postpone_time_ms; size_t max_postpone_power; - PartMutationInfo(size_t max_postpone_time_ms_) + explicit PartMutationInfo(size_t max_postpone_time_ms_) : retry_count(0ull) , latest_fail_time_us(static_cast(Poco::Timestamp().epochMicroseconds())) , max_postpone_time_ms(max_postpone_time_ms_) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 58fddde7b54..1bf1d4a3c29 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -85,7 +85,7 @@ UInt64 MergeTreeDataMergerMutator::getMaxSourcePartsSizeForMerge(size_t max_coun if (scheduled_tasks_count > max_count) { throw Exception(ErrorCodes::LOGICAL_ERROR, - "Logical error: invalid argument passed to getMaxSourcePartsSize: scheduled_tasks_count = {} > max_count = {}", + "Invalid argument passed to getMaxSourcePartsSize: scheduled_tasks_count = {} > max_count = {}", scheduled_tasks_count, max_count); } @@ -511,7 +511,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( /// Do not allow to "merge" part with itself for regular merges, unless it is a TTL-merge where it is ok to remove some values with expired ttl if (parts_to_merge.size() == 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: merge selector returned only one part to merge"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Merge selector returned only one part to merge"); if (parts_to_merge.empty()) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp index aa1968794f9..d60f4cc7354 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -60,7 +61,7 @@ void MergeTreeDataPartChecksum::checkEqual(const MergeTreeDataPartChecksum & rhs void MergeTreeDataPartChecksum::checkSize(const IDataPartStorage & storage, const String & name) const { /// Skip inverted index files, these have a default MergeTreeDataPartChecksum with file_size == 0 - if (name.ends_with(".gin_dict") || name.ends_with(".gin_post") || name.ends_with(".gin_seg") || name.ends_with(".gin_sid")) + if (isGinFile(name)) return; if (!storage.exists(name)) diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index c9c16b59f9e..ebf887f5e9e 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -384,13 +384,13 @@ Block MergeTreeDataWriter::mergeBlock( /// Check that after first merge merging_algorithm is waiting for data from input 0. if (status.required_source != 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: required source after the first merge is not 0. Chunk rows: {}, is_finished: {}, required_source: {}, algorithm: {}", status.chunk.getNumRows(), status.is_finished, status.required_source, merging_algorithm->getName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Required source after the first merge is not 0. 
Chunk rows: {}, is_finished: {}, required_source: {}, algorithm: {}", status.chunk.getNumRows(), status.is_finished, status.required_source, merging_algorithm->getName()); status = merging_algorithm->merge(); /// Check that merge is finished. if (!status.is_finished) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: merge is not finished after the second merge."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Merge is not finished after the second merge."); /// Merged Block is sorted and we don't need to use permutation anymore permutation = nullptr; @@ -439,7 +439,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( auto max_month = date_lut.toNumYYYYMM(max_date); if (min_month != max_month) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: part spans more than one month."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Part spans more than one month."); part_name = new_part_info.getPartNameV0(min_date, max_date); } diff --git a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index da49814b83a..f506230b5ea 100644 --- a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@ -59,7 +59,7 @@ bool maybeTrueOnBloomFilter(const IColumn * hash_column, const BloomFilterPtr & const auto * non_const_column = typeid_cast(hash_column); if (!const_column && !non_const_column) - throw Exception(ErrorCodes::LOGICAL_ERROR, "LOGICAL ERROR: hash column must be Const Column or UInt64 Column."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Hash column must be Const or UInt64."); if (const_column) { diff --git a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h index db85c804d8d..8029d6d405b 100644 --- a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.h @@ -53,7 +53,7 @@ public: if (const auto & bf_granule = typeid_cast(granule.get())) return mayBeTrueOnGranule(bf_granule); - throw Exception(ErrorCodes::LOGICAL_ERROR, "LOGICAL ERROR: require bloom filter index granule."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Requires bloom filter index granule."); } private: diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp index 4e339964de3..da89d52a9ff 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.cpp @@ -54,9 +54,9 @@ MarkType::MarkType(bool adaptive_, bool compressed_, MergeTreeDataPartType::Valu : adaptive(adaptive_), compressed(compressed_), part_type(part_type_) { if (!adaptive && part_type != MergeTreeDataPartType::Wide) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: non-Wide data part type with non-adaptive granularity"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Non-Wide data part type with non-adaptive granularity"); if (part_type == MergeTreeDataPartType::Unknown) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: unknown data part type"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown data part type"); } bool MarkType::isMarkFileExtension(std::string_view extension) @@ -71,7 +71,7 @@ std::string MarkType::getFileExtension() const if (!adaptive) { if (part_type != MergeTreeDataPartType::Wide) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: non-Wide data 
part type with non-adaptive granularity"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Non-Wide data part type with non-adaptive granularity"); return res; } @@ -84,7 +84,7 @@ std::string MarkType::getFileExtension() const case MergeTreeDataPartType::InMemory: return ""; case MergeTreeDataPartType::Unknown: - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: unknown data part type"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown data part type"); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h index 4cb35ee64b1..af008866919 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularityInfo.h @@ -18,7 +18,7 @@ class MergeTreeData; */ struct MarkType { - MarkType(std::string_view extension); + explicit MarkType(std::string_view extension); MarkType(bool adaptive_, bool compressed_, MergeTreeDataPartType::Value part_type_); static bool isMarkFileExtension(std::string_view extension); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index cce7e56dda9..50d1216cdc2 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -448,21 +448,16 @@ static ColumnPtr andFilters(ColumnPtr c1, ColumnPtr c2) throw Exception(ErrorCodes::LOGICAL_ERROR, "Size of filters don't match: {} and {}", c1->size(), c2->size()); - // TODO: use proper vectorized implementation of AND? auto res = ColumnUInt8::create(c1->size()); auto & res_data = res->getData(); const auto & c1_data = typeid_cast(*c1).getData(); const auto & c2_data = typeid_cast(*c2).getData(); const size_t size = c1->size(); - const size_t step = 16; - size_t i = 0; - /// NOTE: '&&' must be used instead of '&' for 'AND' operation because UInt8 columns might contain any non-zero - /// value for true and we cannot bitwise AND them to get the correct result. - for (; i + step < size; i += step) - for (size_t j = 0; j < step; ++j) - res_data[i+j] = (c1_data[i+j] && c2_data[i+j]); - for (; i < size; ++i) - res_data[i] = (c1_data[i] && c2_data[i]); + /// The double NOT operators (!!) convert the non-zeros to the bool value of true (0x01) and zeros to false (0x00). + /// After casting them to UInt8, '&' could replace '&&' for the 'AND' operation implementation and at the same + /// time enable the auto vectorization. 
+ for (size_t i = 0; i < size; ++i) + res_data[i] = (static_cast(!!c1_data[i]) & static_cast(!!c2_data[i])); return res; } diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index 8ed7a9d8707..68d57bf7b06 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -16,14 +16,15 @@ namespace ProfileEvents extern const Event ReadBackoff; } -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - namespace DB { +namespace ErrorCodes +{ +extern const int CANNOT_SCHEDULE_TASK; +extern const int LOGICAL_ERROR; +} + size_t getApproxSizeOfPart(const IMergeTreeDataPart & part, const Names & columns_to_read) { ColumnSize columns_size{}; @@ -217,6 +218,9 @@ void MergeTreeReadPool::profileFeedback(ReadBufferFromFileBase::ProfileInfo info void MergeTreeReadPool::fillPerThreadInfo(size_t threads, size_t sum_marks) { + if (threads > 1000000ull) + throw Exception(ErrorCodes::CANNOT_SCHEDULE_TASK, "Too many threads ({}) requested", threads); + threads_tasks.resize(threads); if (parts_ranges.empty()) return; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 743ca7fc258..b17e7819946 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -499,7 +499,7 @@ class BaseMergePredicate { public: BaseMergePredicate() = default; - BaseMergePredicate(std::optional && partition_ids_hint_) : partition_ids_hint(std::move(partition_ids_hint_)) {} + explicit BaseMergePredicate(std::optional && partition_ids_hint_) : partition_ids_hint(std::move(partition_ids_hint_)) {} /// Depending on the existence of left part checks a merge predicate for two parts or for single part. bool operator()(const MergeTreeData::DataPartPtr & left, @@ -550,7 +550,7 @@ protected: class LocalMergePredicate : public BaseMergePredicate { public: - LocalMergePredicate(ReplicatedMergeTreeQueue & queue_); + explicit LocalMergePredicate(ReplicatedMergeTreeQueue & queue_); }; class ReplicatedMergeTreeMergePredicate : public BaseMergePredicate diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h b/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h index a0b0d026693..f0f737cb1e6 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQuorumAddedParts.h @@ -22,7 +22,7 @@ struct ReplicatedMergeTreeQuorumAddedParts MergeTreeDataFormatVersion format_version; - ReplicatedMergeTreeQuorumAddedParts(const MergeTreeDataFormatVersion format_version_) + explicit ReplicatedMergeTreeQuorumAddedParts(const MergeTreeDataFormatVersion format_version_) : format_version(format_version_) {} diff --git a/src/Storages/MergeTree/checkDataPart.cpp b/src/Storages/MergeTree/checkDataPart.cpp index 8ae9b54b6e9..c74063cfd4b 100644 --- a/src/Storages/MergeTree/checkDataPart.cpp +++ b/src/Storages/MergeTree/checkDataPart.cpp @@ -254,7 +254,7 @@ static IMergeTreeDataPart::Checksums checkDataPart( } /// Exclude files written by inverted index from check. No correct checksums are available for them currently. 
- if (file_name.ends_with(".gin_dict") || file_name.ends_with(".gin_post") || file_name.ends_with(".gin_seg") || file_name.ends_with(".gin_sid")) + if (isGinFile(file_name)) continue; auto checksum_it = checksums_data.files.find(file_name); diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index da38b3ca07c..2b24a56e994 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -1,6 +1,5 @@ #include #include -#include #include #include #include @@ -9,16 +8,12 @@ #include #include #include -#include -#include -#include #include #include #include #include -#include #include #include diff --git a/src/Storages/MergeTree/tests/gtest_combine_filters.cpp b/src/Storages/MergeTree/tests/gtest_combine_filters.cpp index 53696474eb8..9a9e6caad81 100644 --- a/src/Storages/MergeTree/tests/gtest_combine_filters.cpp +++ b/src/Storages/MergeTree/tests/gtest_combine_filters.cpp @@ -138,6 +138,57 @@ bool testCombineColumns(size_t size) return true; } +/* To ensure the vectorized DB::andFilters works as its scalar implementation, this test validates the AND (&&) + * of any combinations of the UInt8 values. + */ +bool testAndFilters(size_t size) +{ + auto generateFastIncrementColumn = [](size_t len)->ColumnPtr + { + auto filter = ColumnUInt8::create(len); + auto & filter_data = filter->getData(); + + for (size_t i = 0; i < len; ++i) + filter_data[i] = static_cast(i & 0xFF); + + return filter; + }; + + auto generateSlowIncrementColumn = [](size_t len)->ColumnPtr + { + auto filter = ColumnUInt8::create(len); + auto & filter_data = filter->getData(); + + for (size_t i = 0; i < len; ++i) + filter_data[i] = static_cast((i >> 8) & 0xFF); + + return filter; + }; + + auto first_filter = generateFastIncrementColumn(size); + auto second_filter = generateSlowIncrementColumn(size); + + auto result = andFilters(first_filter, second_filter); + + const auto & first_filter_data = typeid_cast(first_filter.get())->getData(); + const auto & second_filter_data = typeid_cast(second_filter.get())->getData(); + const auto & result_data = typeid_cast(result.get())->getData(); + + if (result->size() != size) + { + return false; + } + + for (size_t i = 0; i < size; i++) + { + UInt8 expected = first_filter_data[i] && second_filter_data[i]; + if (result_data[i] != expected) + return false; + } + + return true; +} + TEST(MergeTree, CombineFilters) { /// Tests with only 0/1 and fixed intervals. 
@@ -159,3 +210,18 @@ TEST(MergeTree, CombineFilters) EXPECT_TRUE(testCombineColumns(2000)); EXPECT_TRUE(testCombineColumns(200000)); } + +TEST(MergeTree, AndFilters) +{ + EXPECT_TRUE(testAndFilters(1)); + EXPECT_TRUE(testAndFilters(2)); + EXPECT_TRUE(testAndFilters(15)); + EXPECT_TRUE(testAndFilters(16)); + EXPECT_TRUE(testAndFilters(200)); + EXPECT_TRUE(testAndFilters(201)); + EXPECT_TRUE(testAndFilters(2000)); + EXPECT_TRUE(testAndFilters(65535)); + EXPECT_TRUE(testAndFilters(65536)); + EXPECT_TRUE(testAndFilters(65537)); + EXPECT_TRUE(testAndFilters(200000)); +} diff --git a/src/Storages/NamedCollectionsHelpers.cpp b/src/Storages/NamedCollectionsHelpers.cpp index f832c7f53f1..c1e744e8d79 100644 --- a/src/Storages/NamedCollectionsHelpers.cpp +++ b/src/Storages/NamedCollectionsHelpers.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -61,8 +62,31 @@ namespace auto value = literal_value->as()->value; return std::pair{key, Field(value)}; } + + std::pair getKeyValueFromAST(ASTPtr ast, ContextPtr context) + { + auto res = getKeyValueFromAST(ast, true, context); + + if (!res || !std::holds_alternative(res->second)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Failed to get key value from ast '{}'", queryToString(ast)); + + return {res->first, std::get(res->second)}; + } } +std::map getParamsMapFromAST(ASTs asts, ContextPtr context) +{ + std::map params; + for (const auto & ast : asts) + { + auto [key, value] = getKeyValueFromAST(ast, context); + bool inserted = params.emplace(key, value).second; + if (!inserted) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Duplicated key '{}' in params", key); + } + + return params; +} MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides( ASTs asts, ContextPtr context, bool throw_unknown_collection, std::vector> * complex_args) diff --git a/src/Storages/NamedCollectionsHelpers.h b/src/Storages/NamedCollectionsHelpers.h index 657ad91e825..a1909f514ea 100644 --- a/src/Storages/NamedCollectionsHelpers.h +++ b/src/Storages/NamedCollectionsHelpers.h @@ -21,10 +21,16 @@ namespace DB /// Table engines have collection name as first argument of ast and other arguments are key-value overrides. MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides( ASTs asts, ContextPtr context, bool throw_unknown_collection = true, std::vector> * complex_args = nullptr); + /// Helper function to get named collection for dictionary source. /// Dictionaries have collection name as name argument of dict configuration and other arguments are overrides. MutableNamedCollectionPtr tryGetNamedCollectionWithOverrides(const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, ContextPtr context); +/// Parses asts as key value pairs and returns a map of them. +/// If key or value cannot be parsed as literal or interpreted +/// as constant expression throws an exception. 
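/// For illustration: arguments written as `key1 = 'value1', key2 = 'value2'` yield the map
/// {"key1" -> "value1", "key2" -> "value2"}; a repeated key throws BAD_ARGUMENTS (see the
/// "Duplicated key" check in the definition above).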
+std::map getParamsMapFromAST(ASTs asts, ContextPtr context); + HTTPHeaderEntries getHeadersFromNamedCollection(const NamedCollection & collection); struct ExternalDatabaseEqualKeysSet @@ -45,9 +51,9 @@ struct RedisEqualKeysSet template struct NamedCollectionValidateKey { NamedCollectionValidateKey() = default; - NamedCollectionValidateKey(const char * value_) : value(value_) {} - NamedCollectionValidateKey(std::string_view value_) : value(value_) {} - NamedCollectionValidateKey(const String & value_) : value(value_) {} + NamedCollectionValidateKey(const char * value_) : value(value_) {} /// NOLINT(google-explicit-constructor) + NamedCollectionValidateKey(std::string_view value_) : value(value_) {} /// NOLINT(google-explicit-constructor) + NamedCollectionValidateKey(const String & value_) : value(value_) {} /// NOLINT(google-explicit-constructor) std::string_view value; diff --git a/src/Storages/StorageAzureBlob.h b/src/Storages/StorageAzureBlob.h index 2ab96c84e49..e1d1c3abd33 100644 --- a/src/Storages/StorageAzureBlob.h +++ b/src/Storages/StorageAzureBlob.h @@ -149,7 +149,7 @@ public: class IIterator : public WithContext { public: - IIterator(const ContextPtr & context_):WithContext(context_) {} + explicit IIterator(const ContextPtr & context_):WithContext(context_) {} virtual ~IIterator() = default; virtual RelativePathWithMetadata next() = 0; diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 64ff224fc10..8e5195d497f 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -1,5 +1,8 @@ #include +#include +#include + #include #include #include @@ -7,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -23,6 +27,7 @@ namespace ErrorCodes extern const int NOT_FOUND_COLUMN_IN_BLOCK; extern const int TYPE_MISMATCH; extern const int EMPTY_LIST_OF_COLUMNS_PASSED; + extern const int LOGICAL_ERROR; } StorageInMemoryMetadata::StorageInMemoryMetadata(const StorageInMemoryMetadata & other) @@ -41,6 +46,8 @@ StorageInMemoryMetadata::StorageInMemoryMetadata(const StorageInMemoryMetadata & , settings_changes(other.settings_changes ? other.settings_changes->clone() : nullptr) , select(other.select) , refresh(other.refresh ? other.refresh->clone() : nullptr) + , definer(other.definer) + , sql_security_type(other.sql_security_type) , comment(other.comment) , metadata_version(other.metadata_version) { @@ -71,6 +78,8 @@ StorageInMemoryMetadata & StorageInMemoryMetadata::operator=(const StorageInMemo settings_changes.reset(); select = other.select; refresh = other.refresh ? 
other.refresh->clone() : nullptr; + definer = other.definer; + sql_security_type = other.sql_security_type; comment = other.comment; metadata_version = other.metadata_version; return *this; @@ -81,6 +90,69 @@ void StorageInMemoryMetadata::setComment(const String & comment_) comment = comment_; } +void StorageInMemoryMetadata::setSQLSecurity(const ASTSQLSecurity & sql_security) +{ + if (sql_security.definer) + definer = sql_security.definer->toString(); + + sql_security_type = sql_security.type; +} + +UUID StorageInMemoryMetadata::getDefinerID(DB::ContextPtr context) const +{ + if (!definer) + { + if (const auto definer_id = context->getUserID()) + return *definer_id; + + throw Exception(ErrorCodes::LOGICAL_ERROR, "No user in context for sub query execution."); + } + + const auto & access_control = context->getAccessControl(); + return access_control.getID(*definer); +} + +ContextMutablePtr StorageInMemoryMetadata::getSQLSecurityOverriddenContext(ContextPtr context) const +{ + if (!sql_security_type) + return Context::createCopy(context); + + if (sql_security_type == SQLSecurityType::INVOKER) + return Context::createCopy(context); + + auto new_context = Context::createCopy(context->getGlobalContext()); + new_context->setClientInfo(context->getClientInfo()); + new_context->makeQueryContext(); + + const auto & database = context->getCurrentDatabase(); + if (!database.empty()) + new_context->setCurrentDatabase(database); + + new_context->setInsertionTable(context->getInsertionTable(), context->getInsertionTableColumnNames()); + new_context->setProgressCallback(context->getProgressCallback()); + new_context->setProcessListElement(context->getProcessListElement()); + + if (context->getCurrentTransaction()) + new_context->setCurrentTransaction(context->getCurrentTransaction()); + + if (context->getZooKeeperMetadataTransaction()) + new_context->initZooKeeperMetadataTransaction(context->getZooKeeperMetadataTransaction()); + + if (sql_security_type == SQLSecurityType::NONE) + { + new_context->applySettingsChanges(context->getSettingsRef().changes()); + return new_context; + } + + new_context->setUser(getDefinerID(context)); + + auto changed_settings = context->getSettingsRef().changes(); + new_context->clampToSettingsConstraints(changed_settings, SettingSource::QUERY); + new_context->applySettingsChanges(changed_settings); + + return new_context; +} + void StorageInMemoryMetadata::setColumns(ColumnsDescription columns_) { if (columns_.getAllPhysical().empty()) diff --git a/src/Storages/StorageInMemoryMetadata.h b/src/Storages/StorageInMemoryMetadata.h index ecc30f7b756..2823aba1224 100644 --- a/src/Storages/StorageInMemoryMetadata.h +++ b/src/Storages/StorageInMemoryMetadata.h @@ -1,5 +1,7 @@ #pragma once +#include +#include #include #include #include @@ -51,6 +53,14 @@ struct StorageInMemoryMetadata /// Materialized view REFRESH parameters. ASTPtr refresh; + /// DEFINER . Allows to specify a definer of the table. + /// Supported for MaterializedView and View. + std::optional definer; + + /// SQL SECURITY + /// Supported for MaterializedView and View. + std::optional sql_security_type; + String comment; /// Version of metadata. Managed properly by ReplicatedMergeTree only @@ -105,6 +115,15 @@ struct StorageInMemoryMetadata /// Get copy of current metadata with metadata_version_ StorageInMemoryMetadata withMetadataVersion(int32_t metadata_version_) const; + /// Sets SQL security for the storage. 
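A simplified standalone sketch of the three-way decision implemented by getSQLSecurityOverriddenContext above (standard C++; MiniContext and overrideForSQLSecurity are illustrative stand-ins, not ClickHouse types): INVOKER, or an unset SQL security, keeps the caller's context as-is; NONE keeps the caller's settings without switching users; DEFINER switches to the definer's identity and clamps the caller's setting changes to constraints:

#include <optional>
#include <string>

enum class SQLSecurityType { DEFINER, INVOKER, NONE };

struct MiniContext
{
    std::string user;              // identity whose grants will be checked
    bool settings_clamped = false; // whether the caller's setting changes were clamped
};

MiniContext overrideForSQLSecurity(const MiniContext & invoker,
                                   std::optional<SQLSecurityType> type,
                                   const std::string & definer)
{
    if (!type || *type == SQLSecurityType::INVOKER)
        return invoker;                 // plain copy: run with the caller's identity

    MiniContext result = invoker;
    if (*type == SQLSecurityType::NONE)
        return result;                  // keep the caller's settings, no user switch

    result.user = definer;              // DEFINER: run with the view owner's grants
    result.settings_clamped = true;     // constrain the caller's setting changes
    return result;
}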
+ void setSQLSecurity(const ASTSQLSecurity & sql_security); + UUID getDefinerID(ContextPtr context) const; + + /// Returns a copy of the context with the correct user from SQL security options. + /// If the SQL security wasn't set, this is equivalent to `Context::createCopy(context)`. + /// The context from this function must be used every time whenever views execute any read/write operations or subqueries. + ContextMutablePtr getSQLSecurityOverriddenContext(ContextPtr context) const; + /// Returns combined set of columns const ColumnsDescription & getColumns() const; diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index b9e082c0b22..b122674466f 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -500,7 +500,7 @@ protected: Chunk chunk; if (!joinDispatch(join->kind, join->strictness, join->data->maps.front(), [&](auto kind, auto strictness, auto & map) { chunk = createChunk(map); })) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: unknown JOIN strictness"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown JOIN strictness"); return chunk; } diff --git a/src/Storages/StorageKeeperMap.cpp b/src/Storages/StorageKeeperMap.cpp index 5113dccda5b..40a4190a413 100644 --- a/src/Storages/StorageKeeperMap.cpp +++ b/src/Storages/StorageKeeperMap.cpp @@ -367,7 +367,7 @@ StorageKeeperMap::StorageKeeperMap( zk_metadata_path = metadata_path_fs; zk_tables_path = metadata_path_fs / "tables"; - auto table_unique_id = toString(table_id.uuid) + toString(ServerUUID::get()); + table_unique_id = toString(table_id.uuid) + toString(ServerUUID::get()); zk_table_path = fs::path(zk_tables_path) / table_unique_id; zk_dropped_path = metadata_path_fs / "dropped"; @@ -753,14 +753,12 @@ private: void StorageKeeperMap::backupData(BackupEntriesCollector & backup_entries_collector, const String & data_path_in_backup, const std::optional & /*partitions*/) { - auto table_id = toString(getStorageID().uuid); - auto coordination = backup_entries_collector.getBackupCoordination(); - coordination->addKeeperMapTable(zk_root_path, table_id, data_path_in_backup); + coordination->addKeeperMapTable(zk_root_path, table_unique_id, data_path_in_backup); /// This task will be executed after all tables have registered their root zk path and the coordination is ready to /// assign each path to a single table only. - auto post_collecting_task = [my_table_id = std::move(table_id), coordination, &backup_entries_collector, my_data_path_in_backup = data_path_in_backup, this] + auto post_collecting_task = [coordination, &backup_entries_collector, my_data_path_in_backup = data_path_in_backup, this] { auto path_with_data = coordination->getKeeperMapDataPath(zk_root_path); if (path_with_data != my_data_path_in_backup) @@ -798,8 +796,7 @@ void StorageKeeperMap::restoreDataFromBackup(RestorerFromBackup & restorer, cons if (!backup->hasFiles(data_path_in_backup)) return; - auto table_id = toString(getStorageID().uuid); - if (!restorer.getRestoreCoordination()->acquireInsertingDataForKeeperMap(zk_root_path, table_id)) + if (!restorer.getRestoreCoordination()->acquireInsertingDataForKeeperMap(zk_root_path, table_unique_id)) { /// Other table is already restoring the data for this Keeper path. 
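The StorageKeeperMap change above promotes table_unique_id (the table UUID concatenated with the server UUID) to a member and keys backup coordination on it. A loose standalone analogy of that coordination (standard C++; MiniKeeperMapCoordination is invented and much simpler than the real BackupCoordination interface): the first table to register a Keeper root path owns the data path in the backup, and other tables sharing the path reuse it instead of writing the data again:

#include <map>
#include <string>
#include <utility>

class MiniKeeperMapCoordination
{
public:
    void addKeeperMapTable(const std::string & root_path, const std::string & table_unique_id, const std::string & data_path)
    {
        // Only the first registration for a root path wins; later tables share its data path.
        owners.try_emplace(root_path, table_unique_id, data_path);
    }

    const std::string & getKeeperMapDataPath(const std::string & root_path) const
    {
        return owners.at(root_path).second;
    }

private:
    std::map<std::string, std::pair<std::string, std::string>> owners; // root path -> (owner id, data path)
};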
/// Tables defined on the same path share data diff --git a/src/Storages/StorageKeeperMap.h b/src/Storages/StorageKeeperMap.h index 9dca96a24a3..d65548ed428 100644 --- a/src/Storages/StorageKeeperMap.h +++ b/src/Storages/StorageKeeperMap.h @@ -125,10 +125,10 @@ private: std::string primary_key; std::string zk_data_path; - std::string zk_metadata_path; - std::string zk_tables_path; + + std::string table_unique_id; std::string zk_table_path; std::string zk_dropped_path; diff --git a/src/Storages/StorageLog.cpp b/src/Storages/StorageLog.cpp index 9fbeac5e4f3..7459760b0f5 100644 --- a/src/Storages/StorageLog.cpp +++ b/src/Storages/StorageLog.cpp @@ -241,7 +241,7 @@ void LogSource::readData(const NameAndTypePair & name_and_type, ColumnPtr & colu const auto & data_file_it = storage.data_files_by_names.find(data_file_name); if (data_file_it == storage.data_files_by_names.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: no information about file {} in StorageLog", data_file_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "No information about file {} in StorageLog", data_file_name); const auto & data_file = *data_file_it->second; size_t offset = stream_for_prefix ? 0 : offsets[data_file.index]; @@ -448,7 +448,7 @@ ISerialization::OutputStreamGetter LogSink::createStreamGetter(const NameAndType String data_file_name = ISerialization::getFileNameForStream(name_and_type, path); auto it = streams.find(data_file_name); if (it == streams.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: stream was not created when writing data in LogSink"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Stream was not created when writing data in LogSink"); Stream & stream = it->second; if (stream.written) @@ -473,7 +473,7 @@ void LogSink::writeData(const NameAndTypePair & name_and_type, const IColumn & c { const auto & data_file_it = storage.data_files_by_names.find(data_file_name); if (data_file_it == storage.data_files_by_names.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: no information about file {} in StorageLog", data_file_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "No information about file {} in StorageLog", data_file_name); const auto & data_file = *data_file_it->second; const auto & columns = metadata_snapshot->getColumns(); diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 34edc5482f4..1d0898a2f11 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -39,6 +39,7 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int NOT_IMPLEMENTED; extern const int INCORRECT_QUERY; + extern const int QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW; extern const int TOO_MANY_MATERIALIZED_VIEWS; } @@ -77,6 +78,11 @@ StorageMaterializedView::StorageMaterializedView( { StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns_); + if (query.sql_security) + storage_metadata.setSQLSecurity(query.sql_security->as()); + + if (storage_metadata.sql_security_type == SQLSecurityType::INVOKER) + throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW, "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW"); if (!query.select) throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName()); @@ -175,19 +181,28 @@ void StorageMaterializedView::read( const size_t max_block_size, const size_t num_streams) { + auto context = 
getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(local_context); auto storage = getTargetTable(); - auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); + auto lock = storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); auto target_metadata_snapshot = storage->getInMemoryMetadataPtr(); - auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, local_context); + auto target_storage_snapshot = storage->getStorageSnapshot(target_metadata_snapshot, context); if (query_info.order_optimizer) - query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, local_context); + query_info.input_order_info = query_info.order_optimizer->getInputOrder(target_metadata_snapshot, context); - storage->read(query_plan, column_names, target_storage_snapshot, query_info, local_context, processed_stage, max_block_size, num_streams); + if (!getInMemoryMetadataPtr()->select.select_table_id.empty()) + context->checkAccess(AccessType::SELECT, getInMemoryMetadataPtr()->select.select_table_id, column_names); + + auto storage_id = storage->getStorageID(); + /// We don't need to check access if the inner table was created automatically. + if (!has_inner_table && !storage_id.empty()) + context->checkAccess(AccessType::SELECT, storage_id, column_names); + + storage->read(query_plan, column_names, target_storage_snapshot, query_info, context, processed_stage, max_block_size, num_streams); if (query_plan.isInitialized()) { - auto mv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, local_context, processed_stage); + auto mv_header = getHeaderForProcessingStage(column_names, storage_snapshot, query_info, context, processed_stage); auto target_header = query_plan.getCurrentDataStream().header; /// No need to convert columns that does not exists in MV @@ -222,11 +237,20 @@ void StorageMaterializedView::read( SinkToStoragePtr StorageMaterializedView::write(const ASTPtr & query, const StorageMetadataPtr & /*metadata_snapshot*/, ContextPtr local_context, bool async_insert) { + auto context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(local_context); auto storage = getTargetTable(); - auto lock = storage->lockForShare(local_context->getCurrentQueryId(), local_context->getSettingsRef().lock_acquire_timeout); - + auto lock = storage->lockForShare(context->getCurrentQueryId(), context->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = storage->getInMemoryMetadataPtr(); - auto sink = storage->write(query, metadata_snapshot, local_context, async_insert); + + auto storage_id = storage->getStorageID(); + /// We don't need to check access if the inner table was created automatically. + if (!has_inner_table && !storage_id.empty()) + { + auto query_sample_block = InterpreterInsertQuery::getSampleBlock(query->as(), storage, metadata_snapshot, context); + context->checkAccess(AccessType::INSERT, storage_id, query_sample_block.getNames()); + } + + auto sink = storage->write(query, metadata_snapshot, context, async_insert); sink->addTableLock(lock); return sink; @@ -297,7 +321,7 @@ bool StorageMaterializedView::optimize( std::tuple> StorageMaterializedView::prepareRefresh() const { - auto refresh_context = Context::createCopy(getContext()); + auto refresh_context = getInMemoryMetadataPtr()->getSQLSecurityOverriddenContext(getContext()); /// Generate a random query id. 
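A hedged sketch of the access rule added to StorageMaterializedView::read/write above (standard C++; the grant set and names are simplified stand-ins for Context::checkAccess): when the materialized view targets an explicit TO table rather than an implicitly created .inner table, the SQL-security context must still hold SELECT or INSERT on that table's columns:

#include <set>
#include <stdexcept>
#include <string>

void checkMaterializedViewTargetAccess(const std::set<std::string> & grants,
                                       bool has_inner_table,
                                       const std::string & target_table,
                                       const std::string & required_grant)
{
    // Implicitly created .inner targets are fully managed by the view: no extra check.
    if (has_inner_table || target_table.empty())
        return;

    if (!grants.count(required_grant + " ON " + target_table))
        throw std::runtime_error("Not enough privileges: " + required_grant + " ON " + target_table);
}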
refresh_context->setCurrentQueryId(""); @@ -378,15 +402,24 @@ void StorageMaterializedView::checkAlterIsPossible(const AlterCommands & command { for (const auto & command : commands) { - if (command.isCommentAlter()) + if (command.type == AlterCommand::MODIFY_SQL_SECURITY) + { + if (command.sql_security->as().type == SQLSecurityType::INVOKER) + throw Exception(ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW, "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW"); + continue; - if (command.type == AlterCommand::MODIFY_QUERY) + } + else if (command.isCommentAlter()) continue; - if (command.type == AlterCommand::MODIFY_REFRESH && refresher) + else if (command.type == AlterCommand::MODIFY_QUERY) continue; + else if (command.type == AlterCommand::MODIFY_REFRESH && refresher) + continue; + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}", - command.type, getName()); + command.type, getName()); } + } void StorageMaterializedView::checkMutationIsPossible(const MutationCommands & commands, const Settings & settings) const diff --git a/src/Storages/StorageMergeTreeIndex.cpp b/src/Storages/StorageMergeTreeIndex.cpp new file mode 100644 index 00000000000..d875611bb50 --- /dev/null +++ b/src/Storages/StorageMergeTreeIndex.cpp @@ -0,0 +1,310 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int BAD_ARGUMENTS; + extern const int NO_SUCH_COLUMN_IN_TABLE; + extern const int NOT_IMPLEMENTED; +} + +class MergeTreeIndexSource : public ISource, WithContext +{ +public: + MergeTreeIndexSource( + Block header_, + Block index_header_, + MergeTreeData::DataPartsVector data_parts_, + ContextPtr context_, + bool with_marks_) + : ISource(header_) + , WithContext(context_) + , header(std::move(header_)) + , index_header(std::move(index_header_)) + , data_parts(std::move(data_parts_)) + , with_marks(with_marks_) + { + } + + String getName() const override { return "MergeTreeIndex"; } + +protected: + Chunk generate() override + { + if (part_index >= data_parts.size()) + return {}; + + const auto & part = data_parts[part_index]; + const auto & index_granularity = part->index_granularity; + + std::shared_ptr marks_loader; + if (with_marks && isCompactPart(part)) + marks_loader = createMarksLoader(part, MergeTreeDataPartCompact::DATA_FILE_NAME, part->getColumns().size()); + + size_t num_columns = header.columns(); + size_t num_rows = index_granularity.getMarksCount(); + + const auto & part_name_column = StorageMergeTreeIndex::part_name_column; + const auto & mark_number_column = StorageMergeTreeIndex::mark_number_column; + const auto & rows_in_granule_column = StorageMergeTreeIndex::rows_in_granule_column; + + const auto & index = part->getIndex(); + Columns result_columns(num_columns); + for (size_t pos = 0; pos < num_columns; ++pos) + { + const auto & column_name = header.getByPosition(pos).name; + const auto & column_type = header.getByPosition(pos).type; + + if (index_header.has(column_name)) + { + size_t index_position = index_header.getPositionByName(column_name); + result_columns[pos] = index[index_position]; + } + else if (column_name == part_name_column.name) + { + auto column = column_type->createColumnConst(num_rows, part->name); + result_columns[pos] = column->convertToFullColumnIfConst(); + } + else if (column_name == mark_number_column.name) + { + auto column = 
column_type->createColumn(); + auto & data = assert_cast(*column).getData(); + + data.resize(num_rows); + std::iota(data.begin(), data.end(), 0); + + result_columns[pos] = std::move(column); + } + else if (column_name == rows_in_granule_column.name) + { + auto column = column_type->createColumn(); + auto & data = assert_cast(*column).getData(); + + data.resize(num_rows); + for (size_t i = 0; i < num_rows; ++i) + data[i] = index_granularity.getMarkRows(i); + + result_columns[pos] = std::move(column); + } + else if (auto [first, second] = Nested::splitName(column_name, true); with_marks && second == "mark") + { + result_columns[pos] = fillMarks(part, marks_loader, *column_type, first); + } + else + { + throw Exception(ErrorCodes::NO_SUCH_COLUMN_IN_TABLE, "No such column {}", column_name); + } + } + + ++part_index; + return Chunk(std::move(result_columns), num_rows); + } + +private: + std::shared_ptr createMarksLoader(const MergeTreeDataPartPtr & part, const String & prefix_name, size_t num_columns) + { + auto info_for_read = std::make_shared(part, std::make_shared()); + auto local_context = getContext(); + + return std::make_shared( + info_for_read, + local_context->getMarkCache().get(), + info_for_read->getIndexGranularityInfo().getMarksFilePath(prefix_name), + info_for_read->getMarksCount(), + info_for_read->getIndexGranularityInfo(), + /*save_marks_in_cache=*/ false, + local_context->getReadSettings(), + /*load_marks_threadpool=*/ nullptr, + num_columns); + } + + ColumnPtr fillMarks( + MergeTreeDataPartPtr part, + std::shared_ptr marks_loader, + const IDataType & data_type, + const String & column_name) + { + size_t col_idx = 0; + bool has_marks_in_part = false; + size_t num_rows = part->index_granularity.getMarksCount(); + + if (isWidePart(part)) + { + if (auto stream_name = part->getStreamNameOrHash(column_name, part->checksums)) + { + col_idx = 0; + has_marks_in_part = true; + marks_loader = createMarksLoader(part, *stream_name, /*num_columns=*/ 1); + } + } + else if (isCompactPart(part)) + { + auto unescaped_name = unescapeForFileName(column_name); + if (auto col_idx_opt = part->getColumnPosition(unescaped_name)) + { + col_idx = *col_idx_opt; + has_marks_in_part = true; + } + } + else + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Parts with type {} are not supported", part->getTypeName()); + } + + if (!has_marks_in_part) + { + auto column = data_type.createColumnConstWithDefaultValue(num_rows); + return column->convertToFullColumnIfConst(); + } + + auto compressed = ColumnUInt64::create(num_rows); + auto uncompressed = ColumnUInt64::create(num_rows); + + auto & compressed_data = compressed->getData(); + auto & uncompressed_data = uncompressed->getData(); + + for (size_t i = 0; i < num_rows; ++i) + { + auto mark = marks_loader->getMark(i, col_idx); + + compressed_data[i] = mark.offset_in_compressed_file; + uncompressed_data[i] = mark.offset_in_decompressed_block; + } + + auto compressed_nullable = ColumnNullable::create(std::move(compressed), ColumnUInt8::create(num_rows, 0)); + auto uncompressed_nullable = ColumnNullable::create(std::move(uncompressed), ColumnUInt8::create(num_rows, 0)); + + return ColumnTuple::create(Columns{std::move(compressed_nullable), std::move(uncompressed_nullable)}); + } + + Block header; + Block index_header; + MergeTreeData::DataPartsVector data_parts; + bool with_marks; + + size_t part_index = 0; +}; + +const ColumnWithTypeAndName StorageMergeTreeIndex::part_name_column{std::make_shared(), "part_name"}; +const ColumnWithTypeAndName 
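A standalone sketch of the values fillMarks above extracts per granule (standard C++; the Mark struct just mirrors the two offsets a MergeTree mark stores): for every granule of a part, the offset of its compressed block in the data file and the offset inside the decompressed block:

#include <cstdint>
#include <utility>
#include <vector>

struct Mark
{
    uint64_t offset_in_compressed_file = 0;
    uint64_t offset_in_decompressed_block = 0;
};

// One (compressed, decompressed) offset pair per granule of the part.
std::vector<std::pair<uint64_t, uint64_t>> collectMarkOffsets(const std::vector<Mark> & marks)
{
    std::vector<std::pair<uint64_t, uint64_t>> result;
    result.reserve(marks.size());
    for (const auto & mark : marks)
        result.emplace_back(mark.offset_in_compressed_file, mark.offset_in_decompressed_block);
    return result;
}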
StorageMergeTreeIndex::mark_number_column{std::make_shared(), "mark_number"}; +const ColumnWithTypeAndName StorageMergeTreeIndex::rows_in_granule_column{std::make_shared(), "rows_in_granule"}; +const Block StorageMergeTreeIndex::virtuals_sample_block{part_name_column, mark_number_column, rows_in_granule_column}; + +StorageMergeTreeIndex::StorageMergeTreeIndex( + const StorageID & table_id_, + const StoragePtr & source_table_, + const ColumnsDescription & columns, + bool with_marks_) + : IStorage(table_id_) + , source_table(source_table_) + , with_marks(with_marks_) + , log(&Poco::Logger::get("StorageMergeTreeIndex")) +{ + const auto * merge_tree = dynamic_cast(source_table.get()); + if (!merge_tree) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage MergeTreeIndex expected MergeTree table, got: {}", source_table->getName()); + + data_parts = merge_tree->getDataPartsVectorForInternalUsage(); + key_sample_block = merge_tree->getInMemoryMetadataPtr()->getPrimaryKey().sample_block; + + StorageInMemoryMetadata storage_metadata; + storage_metadata.setColumns(columns); + setInMemoryMetadata(storage_metadata); +} + +Pipe StorageMergeTreeIndex::read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum, + size_t /*max_block_size*/, + size_t /*num_streams*/) +{ + const auto & storage_columns = source_table->getInMemoryMetadataPtr()->getColumns(); + Names columns_from_storage; + + for (const auto & column_name : column_names) + { + if (storage_columns.hasColumnOrSubcolumn(GetColumnsOptions::All, column_name)) + { + columns_from_storage.push_back(column_name); + continue; + } + + if (with_marks) + { + auto [first, second] = Nested::splitName(column_name, true); + auto unescaped_name = unescapeForFileName(first); + + if (second == "mark" && storage_columns.hasColumnOrSubcolumn(GetColumnsOptions::All, unescapeForFileName(unescaped_name))) + { + columns_from_storage.push_back(unescaped_name); + continue; + } + } + } + + context->checkAccess(AccessType::SELECT, source_table->getStorageID(), columns_from_storage); + + auto header = storage_snapshot->getSampleBlockForColumns(column_names); + auto filtered_parts = getFilteredDataParts(query_info, context); + + LOG_DEBUG(log, "Reading index{}from {} parts of table {}", + with_marks ? 
" with marks " : " ", + filtered_parts.size(), + source_table->getStorageID().getNameForLogs()); + + return Pipe(std::make_shared(std::move(header), key_sample_block, std::move(filtered_parts), context, with_marks)); +} + +MergeTreeData::DataPartsVector StorageMergeTreeIndex::getFilteredDataParts(SelectQueryInfo & query_info, const ContextPtr & context) const +{ + const auto * select_query = query_info.query->as(); + if (!select_query || !select_query->where()) + return data_parts; + + auto all_part_names = ColumnString::create(); + for (const auto & part : data_parts) + all_part_names->insert(part->name); + + Block filtered_block{{std::move(all_part_names), std::make_shared(), part_name_column.name}}; + VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context); + + if (!filtered_block.rows()) + return {}; + + auto part_names = filtered_block.getByPosition(0).column; + const auto & part_names_str = assert_cast(*part_names); + + HashSet part_names_set; + for (size_t i = 0; i < part_names_str.size(); ++i) + part_names_set.insert(part_names_str.getDataAt(i)); + + MergeTreeData::DataPartsVector filtered_parts; + for (const auto & part : data_parts) + if (part_names_set.has(part->name)) + filtered_parts.push_back(part); + + return filtered_parts; +} + +} diff --git a/src/Storages/StorageMergeTreeIndex.h b/src/Storages/StorageMergeTreeIndex.h new file mode 100644 index 00000000000..b610d391655 --- /dev/null +++ b/src/Storages/StorageMergeTreeIndex.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include + +namespace DB +{ + +/// Internal temporary storage for table function mergeTreeIndex(...) +class StorageMergeTreeIndex final : public IStorage +{ +public: + static const ColumnWithTypeAndName part_name_column; + static const ColumnWithTypeAndName mark_number_column; + static const ColumnWithTypeAndName rows_in_granule_column; + static const Block virtuals_sample_block; + + StorageMergeTreeIndex( + const StorageID & table_id_, + const StoragePtr & source_table_, + const ColumnsDescription & columns, + bool with_marks_); + + Pipe read( + const Names & column_names, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & query_info, + ContextPtr context, + QueryProcessingStage::Enum processing_stage, + size_t max_block_size, + size_t num_streams) override; + + String getName() const override { return "MergeTreeIndex"; } + +private: + MergeTreeData::DataPartsVector getFilteredDataParts(SelectQueryInfo & query_info, const ContextPtr & context) const; + + StoragePtr source_table; + bool with_marks; + + MergeTreeData::DataPartsVector data_parts; + Block key_sample_block; + Poco::Logger * log; +}; + +} diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 1702b52fa35..1c66a5f6ecd 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -2050,7 +2050,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry, bool need_to_che if (entry.quorum) { if (entry.type != LogEntry::GET_PART) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: log entry with quorum but type is not GET_PART"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Log entry with quorum but type is not GET_PART"); LOG_DEBUG(log, "No active replica has part {} which needs to be written with quorum. 
Will try to mark that quorum as failed.", entry.new_part_name); @@ -2113,7 +2113,7 @@ bool StorageReplicatedMergeTree::executeFetch(LogEntry & entry, bool need_to_che auto part_info = MergeTreePartInfo::fromPartName(entry.new_part_name, format_version); if (part_info.min_block != part_info.max_block) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: log entry with quorum for part covering more than one block number"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Log entry with quorum for part covering more than one block number"); ops.emplace_back(zkutil::makeCreateRequest( fs::path(zookeeper_path) / "quorum" / "failed_parts" / entry.new_part_name, @@ -6800,7 +6800,7 @@ bool StorageReplicatedMergeTree::tryWaitForReplicaToProcessLogEntry( } else { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: unexpected name of log node: {}", entry.znode_name); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected name of log node: {}", entry.znode_name); } /** Second - find the corresponding entry in the queue of the specified replica. @@ -7176,7 +7176,7 @@ void StorageReplicatedMergeTree::fetchPartition( } if (best_replica.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: cannot choose best replica."); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot choose best replica."); LOG_INFO(log, "Found {} replicas, {} of them are active. Selected {} to fetch from.", replicas.size(), active_replicas.size(), best_replica); diff --git a/src/Storages/StorageReplicatedMergeTree.h b/src/Storages/StorageReplicatedMergeTree.h index 41d18f7eb49..f2679f8f7dd 100644 --- a/src/Storages/StorageReplicatedMergeTree.h +++ b/src/Storages/StorageReplicatedMergeTree.h @@ -231,14 +231,6 @@ public: /// Checks ability to use granularity bool canUseAdaptiveGranularity() const override; - /// Returns the default path to the table in ZooKeeper. - /// It's used if not set in engine's arguments while creating a replicated table. - static String getDefaultReplicaPath(const ContextPtr & context_); - - /// Returns the default replica name in ZooKeeper. - /// It's used if not set in engine's arguments while creating a replicated table. - static String getDefaultReplicaName(const ContextPtr & context_); - /// Modify a CREATE TABLE query to make a variant which must be written to a backup. void adjustCreateQueryForBackup(ASTPtr & create_query) const override; @@ -360,6 +352,8 @@ public: bool isTableReadOnly () { return is_readonly; } + std::optional hasMetadataInZooKeeper () { return has_metadata_in_zookeeper; } + /// Get a sequential consistent view of current parts. 
ReplicatedMergeTreeQuorumAddedParts::PartitionIdToMaxBlock getMaxAddedBlocks() const; diff --git a/src/Storages/StorageS3.cpp b/src/Storages/StorageS3.cpp index 9282bdcd5ca..da90dbb4076 100644 --- a/src/Storages/StorageS3.cpp +++ b/src/Storages/StorageS3.cpp @@ -548,7 +548,15 @@ StorageS3Source::KeyWithInfoPtr StorageS3Source::ReadTaskIterator::next(size_t) if (current_index >= buffer.size()) return std::make_shared(callback()); - return buffer[current_index]; + while (current_index < buffer.size()) + { + if (const auto & key_info = buffer[current_index]; key_info && !key_info->key.empty()) + return buffer[current_index]; + + current_index = index.fetch_add(1, std::memory_order_relaxed); + } + + return nullptr; } size_t StorageS3Source::ReadTaskIterator::estimatedKeysCount() diff --git a/src/Storages/StorageS3Cluster.cpp b/src/Storages/StorageS3Cluster.cpp index 0ea224f6ee9..31c241a5b13 100644 --- a/src/Storages/StorageS3Cluster.cpp +++ b/src/Storages/StorageS3Cluster.cpp @@ -1,7 +1,5 @@ #include "Storages/StorageS3Cluster.h" -#include "config.h" - #if USE_AWS_S3 #include diff --git a/src/Storages/StorageS3Cluster.h b/src/Storages/StorageS3Cluster.h index ac25c506337..03155b6e707 100644 --- a/src/Storages/StorageS3Cluster.h +++ b/src/Storages/StorageS3Cluster.h @@ -4,10 +4,7 @@ #if USE_AWS_S3 -#include -#include - -#include "Client/Connection.h" +#include #include #include #include diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 181fd0ac61c..673ca61cd50 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -35,6 +36,7 @@ namespace ErrorCodes { extern const int INCORRECT_QUERY; extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; } @@ -90,10 +92,10 @@ bool hasJoin(const ASTSelectWithUnionQuery & ast) /** There are no limits on the maximum size of the result for the view. * Since the result of the view is not the result of the entire query. 
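For the StorageS3Source::ReadTaskIterator::next change above, a hedged standalone sketch of the pattern (standard C++; names invented): worker threads share an atomic cursor into the key buffer, and entries with an empty key are skipped rather than returned:

#include <atomic>
#include <cstddef>
#include <optional>
#include <string>
#include <vector>

std::optional<std::string> nextNonEmptyKey(const std::vector<std::string> & buffer, std::atomic<size_t> & cursor)
{
    size_t current = cursor.fetch_add(1, std::memory_order_relaxed);
    while (current < buffer.size())
    {
        if (!buffer[current].empty())
            return buffer[current];
        // Skip the empty entry and atomically claim the next index.
        current = cursor.fetch_add(1, std::memory_order_relaxed);
    }
    return std::nullopt; // buffer exhausted
}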
*/ -ContextPtr getViewContext(ContextPtr context) +ContextPtr getViewContext(ContextPtr context, const StorageSnapshotPtr & storage_snapshot) { - auto view_context = Context::createCopy(context); - Settings view_settings = context->getSettings(); + auto view_context = storage_snapshot->metadata->getSQLSecurityOverriddenContext(context); + Settings view_settings = view_context->getSettings(); view_settings.max_result_rows = 0; view_settings.max_result_bytes = 0; view_settings.extremes = false; @@ -122,6 +124,8 @@ StorageView::StorageView( storage_metadata.setColumns(columns_); storage_metadata.setComment(comment); + if (query.sql_security) + storage_metadata.setSQLSecurity(query.sql_security->as()); if (!query.select) throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName()); @@ -160,13 +164,13 @@ void StorageView::read( if (context->getSettingsRef().allow_experimental_analyzer) { - InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context), options); + InterpreterSelectQueryAnalyzer interpreter(current_inner_query, getViewContext(context, storage_snapshot), options); interpreter.addStorageLimits(*query_info.storage_limits); query_plan = std::move(interpreter).extractQueryPlan(); } else { - InterpreterSelectWithUnionQuery interpreter(current_inner_query, getViewContext(context), options, column_names); + InterpreterSelectWithUnionQuery interpreter(current_inner_query, getViewContext(context, storage_snapshot), options, column_names); interpreter.addStorageLimits(*query_info.storage_limits); interpreter.buildQueryPlan(query_plan); } @@ -207,12 +211,12 @@ void StorageView::read( static ASTTableExpression * getFirstTableExpression(ASTSelectQuery & select_query) { if (!select_query.tables() || select_query.tables()->children.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: no table expression in view select AST"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "No table expression in view select AST"); auto * select_element = select_query.tables()->children[0]->as(); if (!select_element->table_expression) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: incorrect table expression"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect table expression"); return select_element->table_expression->as(); } @@ -243,7 +247,7 @@ void StorageView::replaceWithSubquery(ASTSelectQuery & outer_query, ASTPtr view_ } if (!table_expression->database_and_table_name) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: incorrect table expression"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect table expression"); } DatabaseAndTableWithAlias db_table(table_expression->database_and_table_name); @@ -270,7 +274,7 @@ ASTPtr StorageView::restoreViewName(ASTSelectQuery & select_query, const ASTPtr ASTTableExpression * table_expression = getFirstTableExpression(select_query); if (!table_expression->subquery) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: incorrect table expression"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Incorrect table expression"); ASTPtr subquery = table_expression->subquery; table_expression->subquery = {}; @@ -282,6 +286,15 @@ ASTPtr StorageView::restoreViewName(ASTSelectQuery & select_query, const ASTPtr return subquery->children[0]; } +void StorageView::checkAlterIsPossible(const AlterCommands & commands, ContextPtr /* local_context */) const +{ + for (const auto & command : commands) + { + if (!command.isCommentAlter() && command.type != 
AlterCommand::MODIFY_SQL_SECURITY) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Alter of type '{}' is not supported by storage {}", command.type, getName()); + } +} + void registerStorageView(StorageFactory & factory) { factory.registerStorage("View", [](const StorageFactory::Arguments & args) diff --git a/src/Storages/StorageView.h b/src/Storages/StorageView.h index b8bf5585c0f..4d265eed86b 100644 --- a/src/Storages/StorageView.h +++ b/src/Storages/StorageView.h @@ -26,6 +26,8 @@ public: bool supportsSampling() const override { return true; } bool supportsFinal() const override { return true; } + void checkAlterIsPossible(const AlterCommands & commands, ContextPtr local_context) const override; + void read( QueryPlan & query_plan, const Names & column_names, diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index b5a985fec9b..0ad22082863 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -12,6 +12,7 @@ const char * auto_contributors[] { "7vikpeculiar", "821008736@qq.com", "94rain", + "9611008+johnnymatthews@users.noreply.github.com", "AN", "ANDREI STAROVEROV", "AVMusorin", @@ -102,6 +103,7 @@ const char * auto_contributors[] { "Alexey Tronov", "Alexey Vasiliev", "Alexey Zatelepin", + "AlexeyGrezz", "Alexsey Shestakov", "AlfVII", "Alfonso Martinez", @@ -177,6 +179,7 @@ const char * auto_contributors[] { "Antonio Bonuccelli", "Aram Peres", "Ariel Robaldo", + "Aris Tritas", "Arsen Hakobyan", "Arslan G", "ArtCorp", @@ -251,6 +254,7 @@ const char * auto_contributors[] { "Chang Chen", "Chao Ma", "Chao Wang", + "Charlie", "CheSema", "Chebarykov Pavel", "Chen Lixiang", @@ -397,6 +401,7 @@ const char * auto_contributors[] { "Filipe Caixeta", "Filipp Ozinov", "Filippov Denis", + "Fille", "Flowyi", "Francisco Barón", "Frank Chen", @@ -454,6 +459,7 @@ const char * auto_contributors[] { "Hongbin", "Hongbin Ma", "Hosun Lee", + "HowePa", "HuFuwang", "Hui Wang", "ILya Limarenko", @@ -549,7 +555,9 @@ const char * auto_contributors[] { "Joris Giovannangeli", "Jose", "Josh Taylor", + "Joshua Hildred", "João Figueiredo", + "Juan Madurga", "Julia Kartseva", "Julian Gilyadov", "Julian Maicher", @@ -577,6 +585,7 @@ const char * auto_contributors[] { "Kirill Danshin", "Kirill Ershov", "Kirill Malev", + "Kirill Nikiforov", "Kirill Shvakov", "KitKatKKK", "Koblikov Mihail", @@ -767,6 +776,7 @@ const char * auto_contributors[] { "Nick-71", "Nickita", "Nickita Taranov", + "Nickolaj Jepsen", "Nickolay Yastrebov", "Nico Mandery", "Nico Piderman", @@ -787,6 +797,7 @@ const char * auto_contributors[] { "Nikita Tikhomirov", "Nikita Vasilev", "NikitaEvs", + "Nikolai Fedorovskikh", "Nikolai Kochetov", "Nikolai Sorokin", "Nikolay", @@ -823,6 +834,7 @@ const char * auto_contributors[] { "PHO", "Pablo Alegre", "Pablo Marcos", + "Pablo Musa", "Palash Goel", "Paramtamtam", "Patrick Zippenfenig", @@ -899,6 +911,7 @@ const char * auto_contributors[] { "Roman Vasin", "Roman Vlasenko", "Roman Zhukov", + "Ronald Bradford", "Rory Crispin", "Roy Bellingan", "Ruslan", @@ -1146,6 +1159,7 @@ const char * auto_contributors[] { "Yağızcan Değirmenci", "Yegor Andreenko", "Yegor Levankov", + "YenchangChan", "Yingchun Lai", "Yingfan Chen", "Yinzheng-Sun", @@ -1288,6 +1302,7 @@ const char * auto_contributors[] { "cnmade", "comunodi", "congbaoyangrou", + "conicliu", "copperybean", "coraxster", "cwkyaoyao", @@ -1434,6 +1449,7 @@ const char * auto_contributors[] { "jianmei 
zhang", "jinjunzh", "jiyoungyoooo", + "jktng", "jkuklis", "joelynch", "johanngan", @@ -1555,6 +1571,7 @@ const char * auto_contributors[] { "miha-g", "mikael", "mikepop7", + "mikhnenko", "millb", "minhthucdao", "mlkui", @@ -1644,6 +1661,7 @@ const char * auto_contributors[] { "robot-clickhouse-ci-2", "robot-metrika-test", "rodrigargar", + "rogeryk", "roman", "romanzhukov", "rondo_1895", @@ -1721,6 +1739,7 @@ const char * auto_contributors[] { "turbo jason", "tyrionhuang", "ubuntu", + "una", "unbyte", "unegare", "unknown", @@ -1834,6 +1853,7 @@ const char * auto_contributors[] { "Иванов Евгений", "Илья Исаев", "Илья Коргун", + "Кирилл Гарбар", "Коренберг Марк", "Коренберг ☢️ Марк", "Павел Литвиненко", diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index 48dab8c4777..3babdd5c7c1 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -138,7 +138,7 @@ StoragesInfoStream::StoragesInfoStream(const SelectQueryInfo & query_info, Conte String engine_name = storage->getName(); UUID storage_uuid = storage->getStorageID().uuid; - if (database->getEngineName() == "Ordinary") + if (storage_uuid == UUIDHelpers::Nil) { SipHash hash; hash.update(database_name); diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index 0a45d0f9dfe..1127a8906e5 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -37,7 +37,7 @@ struct StoragesInfo class StoragesInfoStreamBase { public: - StoragesInfoStreamBase(ContextPtr context) + explicit StoragesInfoStreamBase(ContextPtr context) : query_id(context->getCurrentQueryId()), settings(context->getSettingsRef()), next_row(0), rows(0) {} diff --git a/src/Storages/System/StorageSystemPrivileges.cpp b/src/Storages/System/StorageSystemPrivileges.cpp index f45f3c6ed01..a2d3e699c17 100644 --- a/src/Storages/System/StorageSystemPrivileges.cpp +++ b/src/Storages/System/StorageSystemPrivileges.cpp @@ -29,6 +29,7 @@ namespace VIEW, COLUMN, NAMED_COLLECTION, + USER_NAME, }; DataTypeEnum8::Values getLevelEnumValues() @@ -41,6 +42,7 @@ namespace enum_values.emplace_back("VIEW", static_cast(VIEW)); enum_values.emplace_back("COLUMN", static_cast(COLUMN)); enum_values.emplace_back("NAMED_COLLECTION", static_cast(NAMED_COLLECTION)); + enum_values.emplace_back("USER_NAME", static_cast(USER_NAME)); return enum_values; } } diff --git a/src/Storages/System/StorageSystemStackTrace.cpp b/src/Storages/System/StorageSystemStackTrace.cpp index c9758004a4d..74864bb50e1 100644 --- a/src/Storages/System/StorageSystemStackTrace.cpp +++ b/src/Storages/System/StorageSystemStackTrace.cpp @@ -167,7 +167,7 @@ bool wait(int timeout_ms) continue; /// Drain delayed notifications. 
} - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: read wrong number of bytes from pipe"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Read wrong number of bytes from pipe"); } } diff --git a/src/Storages/System/attachInformationSchemaTables.cpp b/src/Storages/System/attachInformationSchemaTables.cpp index fc8481362ba..3482867bbf7 100644 --- a/src/Storages/System/attachInformationSchemaTables.cpp +++ b/src/Storages/System/attachInformationSchemaTables.cpp @@ -35,8 +35,9 @@ static constexpr std::string_view schemata = R"( `DEFAULT_CHARACTER_SET_SCHEMA` Nullable(String), `DEFAULT_CHARACTER_SET_NAME` Nullable(String), `SQL_PATH` Nullable(String) - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT name AS catalog_name, name AS schema_name, 'default' AS schema_owner, @@ -73,8 +74,9 @@ static constexpr std::string_view tables = R"( `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String) - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, @@ -122,8 +124,9 @@ static constexpr std::string_view views = R"( `IS_TRIGGER_UPDATABLE` Enum8('NO' = 0, 'YES' = 1), `IS_TRIGGER_DELETABLE` Enum8('NO' = 0, 'YES' = 1), `IS_TRIGGER_INSERTABLE_INTO` Enum8('NO' = 0, 'YES' = 1) - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, @@ -203,8 +206,9 @@ static constexpr std::string_view columns = R"( `EXTRA` Nullable(String), `COLUMN_COMMENT` String, `COLUMN_TYPE` String - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT database AS table_catalog, database AS table_schema, table AS table_name, @@ -291,8 +295,9 @@ static constexpr std::string_view key_column_usage = R"( `REFERENCED_TABLE_SCHEMA` Nullable(String), `REFERENCED_TABLE_NAME` Nullable(String), `REFERENCED_COLUMN_NAME` Nullable(String) - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT 'def' AS constraint_catalog, database AS constraint_schema, 'PRIMARY' AS constraint_name, @@ -346,8 +351,9 @@ static constexpr std::string_view referential_constraints = R"( `DELETE_RULE` String, `TABLE_NAME` String, `REFERENCED_TABLE_NAME` String - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT '' AS constraint_catalog, NULL AS constraint_name, '' AS constraint_schema, @@ -412,8 +418,9 @@ static constexpr std::string_view statistics = R"( `INDEX_COMMENT` String, `IS_VISIBLE` String, `EXPRESSION` Nullable(String) - ) AS - SELECT + ) + SQL SECURITY INVOKER + AS SELECT '' AS table_catalog, '' AS table_schema, '' AS table_name, diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 33ff6e7104f..07ac61c110d 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -223,7 +223,7 @@ bool prepareFilterBlockWithQuery(const ASTPtr & query, ContextPtr context, Block auto expression_actions = std::make_shared(actions); auto block_with_constants = block; - expression_actions->execute(block_with_constants); + expression_actions->execute(block_with_constants, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); return block_with_constants.has(expr_column_name) && isColumnConst(*block_with_constants.getByName(expr_column_name).column); }; @@ -266,7 +266,7 @@ void filterBlockWithDAG(ActionsDAGPtr dag, Block & block, ContextPtr context) auto actions = std::make_shared(dag); makeSets(actions, context); Block block_with_filter = block; - actions->execute(block_with_filter); + 
actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); /// Filter the block. String filter_column_name = dag->getOutputs().at(0)->result_name; @@ -313,7 +313,7 @@ void filterBlockWithQuery(const ASTPtr & query, Block & block, ContextPtr contex makeSets(actions, context); Block block_with_filter = block; - actions->execute(block_with_filter); + actions->execute(block_with_filter, /*dry_run=*/ false, /*allow_duplicates_in_input=*/ true); /// Filter the block. String filter_column_name = expression_ast->getColumnName(); diff --git a/src/Storages/transformQueryForExternalDatabase.cpp b/src/Storages/transformQueryForExternalDatabase.cpp index 4526a38a1c3..afc458ea612 100644 --- a/src/Storages/transformQueryForExternalDatabase.cpp +++ b/src/Storages/transformQueryForExternalDatabase.cpp @@ -145,7 +145,7 @@ bool isCompatible(ASTPtr & node) return false; if (!function->arguments) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: function->arguments is not set"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "function->arguments is not set"); String name = function->name; diff --git a/src/TableFunctions/TableFunctionMergeTreeIndex.cpp b/src/TableFunctions/TableFunctionMergeTreeIndex.cpp new file mode 100644 index 00000000000..435ed4bdf0d --- /dev/null +++ b/src/TableFunctions/TableFunctionMergeTreeIndex.cpp @@ -0,0 +1,204 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; + extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; +} + +class TableFunctionMergeTreeIndex : public ITableFunction +{ +public: + static constexpr auto name = "mergeTreeIndex"; + std::string getName() const override { return name; } + + void parseArguments(const ASTPtr & ast_function, ContextPtr context) override; + ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override; + +private: + StoragePtr executeImpl( + const ASTPtr & ast_function, + ContextPtr context, + const std::string & table_name, + ColumnsDescription cached_columns, + bool is_insert_query) const override; + + const char * getStorageTypeName() const override { return "MergeTreeIndex"; } + + StorageID source_table_id{StorageID::createEmpty()}; + bool with_marks = false; +}; + +void TableFunctionMergeTreeIndex::parseArguments(const ASTPtr & ast_function, ContextPtr context) +{ + ASTs & args_func = ast_function->children; + if (args_func.size() != 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table function ({}) must have arguments.", quoteString(getName())); + + ASTs & args = args_func.at(0)->children; + if (args.size() < 2 || args.size() > 3) + throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Table function '{}' must have 2 or 3 arguments, got: {}", getName(), args.size()); + + args[0] = evaluateConstantExpressionForDatabaseName(args[0], context); + args[1] = evaluateConstantExpressionOrIdentifierAsLiteral(args[1], context); + + auto database = checkAndGetLiteralArgument(args[0], "database"); + auto table = checkAndGetLiteralArgument(args[1], "table"); + + ASTs rest_args(args.begin() + 2, args.end()); + if (!rest_args.empty()) + { + auto params = getParamsMapFromAST(rest_args, context); + auto param = params.extract("with_marks"); + + if (!param.empty()) + { + auto & value = param.mapped(); + if (value.getType() != Field::Types::Bool && value.getType() != Field::Types::UInt64) + throw 
Exception(ErrorCodes::BAD_ARGUMENTS, + "Table function '{}' expected bool flag for 'with_marks' argument", getName()); + + if (value.getType() == Field::Types::Bool) + with_marks = value.get(); + else + with_marks = value.get(); + } + + if (!params.empty()) + { + throw Exception(ErrorCodes::BAD_ARGUMENTS, + "Unexpected arguments '{}' for table function '{}'", + fmt::join(params | boost::adaptors::map_keys, ","), getName()); + } + } + + source_table_id = StorageID{database, table}; +} + +static NameSet getAllPossibleStreamNames( + const NameAndTypePair & column, + const MergeTreeDataPartsVector & data_parts) +{ + NameSet all_streams; + + /// Add the stream with the name of column + /// because it may be abcent in serialization streams (e.g. for Tuple type) + /// but in compact parts we write only marks for whole columns, not subsubcolumns. + auto main_stream_name = escapeForFileName(column.name); + all_streams.insert(Nested::concatenateName(main_stream_name, "mark")); + + auto callback = [&](const auto & substream_path) + { + auto stream_name = ISerialization::getFileNameForStream(column, substream_path); + all_streams.insert(Nested::concatenateName(stream_name, "mark")); + }; + + auto serialization = IDataType::getSerialization(column); + serialization->enumerateStreams(callback); + + if (!column.type->supportsSparseSerialization()) + return all_streams; + + /// If there is at least one part with sparse serialization + /// add columns with marks of its substreams to the table. + for (const auto & part : data_parts) + { + serialization = part->tryGetSerialization(column.name); + if (serialization && serialization->getKind() == ISerialization::Kind::SPARSE) + { + serialization->enumerateStreams(callback); + break; + } + } + + return all_streams; +} + +ColumnsDescription TableFunctionMergeTreeIndex::getActualTableStructure(ContextPtr context, bool /*is_insert_query*/) const +{ + auto source_table = DatabaseCatalog::instance().getTable(source_table_id, context); + auto metadata_snapshot = source_table->getInMemoryMetadataPtr(); + + ColumnsDescription columns; + for (const auto & column : StorageMergeTreeIndex::virtuals_sample_block) + columns.add({column.name, column.type}); + + for (const auto & column : metadata_snapshot->getPrimaryKey().sample_block) + columns.add({column.name, column.type}); + + if (with_marks) + { + auto element_type = std::make_shared(std::make_shared()); + auto mark_type = std::make_shared( + DataTypes{element_type, element_type}, + Names{"offset_in_compressed_file", "offset_in_decompressed_block"}); + + const auto * merge_tree = dynamic_cast(source_table.get()); + if (!merge_tree) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Table function mergeTreeIndex expected MergeTree table, got: {}", source_table->getName()); + + auto data_parts = merge_tree->getDataPartsVectorForInternalUsage(); + auto columns_list = Nested::convertToSubcolumns(metadata_snapshot->getColumns().getAllPhysical()); + + for (const auto & column : columns_list) + { + auto all_streams = getAllPossibleStreamNames(column, data_parts); + for (const auto & stream_name : all_streams) + { + /// There may be shared substreams of columns (e.g. 
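getAllPossibleStreamNames above collects every "<stream>.mark" column a part might expose: the whole-column stream plus one entry per serialization substream, with sparse-serialized parts contributing their extra substreams. A rough standalone sketch of the name construction (standard C++; the substream list is assumed to come from the type's serialization enumeration):

#include <set>
#include <string>
#include <vector>

std::set<std::string> possibleMarkStreamNames(const std::string & column_stream_name,
                                              const std::vector<std::string> & substream_file_names)
{
    std::set<std::string> result;
    // The whole-column stream is always listed, even if serialization never writes it
    // on its own (e.g. Tuple), because compact parts keep marks per column.
    result.insert(column_stream_name + ".mark");
    for (const auto & substream : substream_file_names)
        result.insert(substream + ".mark");
    return result;
}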
for Nested type) + if (!columns.has(stream_name)) + columns.add({stream_name, mark_type}); + } + } + } + + return columns; +} + +StoragePtr TableFunctionMergeTreeIndex::executeImpl( + const ASTPtr & /*ast_function*/, + ContextPtr context, + const std::string & table_name, + ColumnsDescription /*cached_columns*/, + bool is_insert_query) const +{ + auto source_table = DatabaseCatalog::instance().getTable(source_table_id, context); + auto columns = getActualTableStructure(context, is_insert_query); + + StorageID storage_id(getDatabaseName(), table_name); + auto res = std::make_shared(std::move(storage_id), std::move(source_table), std::move(columns), with_marks); + + res->startup(); + return res; +} + +void registerTableFunctionMergeTreeIndex(TableFunctionFactory & factory) +{ + factory.registerFunction( + { + .documentation = + { + .description = "Represents the contents of index and marks files of MergeTree tables. It can be used for introspection", + .examples = {{"mergeTreeIndex", "SELECT * FROM mergeTreeIndex(currentDatabase(), mt_table, with_marks = true)", ""}}, + .categories = {"Other"}, + }, + .allow_readonly = true, + }); +} + +} diff --git a/src/TableFunctions/registerTableFunctions.cpp b/src/TableFunctions/registerTableFunctions.cpp index 8c18c298f45..2b84bd347aa 100644 --- a/src/TableFunctions/registerTableFunctions.cpp +++ b/src/TableFunctions/registerTableFunctions.cpp @@ -23,6 +23,7 @@ void registerTableFunctions() registerTableFunctionGenerate(factory); registerTableFunctionMongoDB(factory); registerTableFunctionRedis(factory); + registerTableFunctionMergeTreeIndex(factory); #if USE_RAPIDJSON || USE_SIMDJSON registerTableFunctionFuzzJSON(factory); #endif diff --git a/src/TableFunctions/registerTableFunctions.h b/src/TableFunctions/registerTableFunctions.h index fae763e7dc8..6984eac619e 100644 --- a/src/TableFunctions/registerTableFunctions.h +++ b/src/TableFunctions/registerTableFunctions.h @@ -20,6 +20,7 @@ void registerTableFunctionInput(TableFunctionFactory & factory); void registerTableFunctionGenerate(TableFunctionFactory & factory); void registerTableFunctionMongoDB(TableFunctionFactory & factory); void registerTableFunctionRedis(TableFunctionFactory & factory); +void registerTableFunctionMergeTreeIndex(TableFunctionFactory & factory); #if USE_RAPIDJSON || USE_SIMDJSON void registerTableFunctionFuzzJSON(TableFunctionFactory & factory); #endif diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py index 8f8f2b28935..94e429ad77b 100644 --- a/tests/ci/build_report_check.py +++ b/tests/ci/build_report_check.py @@ -1,34 +1,31 @@ #!/usr/bin/env python3 -import json import logging import os import sys from pathlib import Path from typing import List +from ci_config import CI_CONFIG, Build from env_helper import ( GITHUB_JOB_URL, GITHUB_REPOSITORY, GITHUB_SERVER_URL, - TEMP_PATH, REPORT_PATH, + TEMP_PATH, ) +from pr_info import PRInfo from report import ( - BuildResult, ERROR, PENDING, SUCCESS, + BuildResult, JobReport, create_build_html_report, get_worst_status, ) - -from pr_info import PRInfo -from ci_config import CI_CONFIG from stopwatch import Stopwatch - # Old way to read the neads_data NEEDS_DATA_PATH = os.getenv("NEEDS_DATA_PATH", "") # Now it's set here. 
Two-steps migration for backward compatibility @@ -48,60 +45,43 @@ def main(): ) build_check_name = sys.argv[1] - needs_data: List[str] = [] - required_builds = 0 - - if NEEDS_DATA: - needs_data = json.loads(NEEDS_DATA) - # drop non build jobs if any - needs_data = [d for d in needs_data if "Build" in d] - elif os.path.exists(NEEDS_DATA_PATH): - with open(NEEDS_DATA_PATH, "rb") as file_handler: - needs_data = list(json.load(file_handler).keys()) - else: - assert False, "NEEDS_DATA env var required" - - required_builds = len(needs_data) - - if needs_data: - logging.info("The next builds are required: %s", ", ".join(needs_data)) pr_info = PRInfo() - builds_for_check = CI_CONFIG.get_builds_for_report(build_check_name) - required_builds = required_builds or len(builds_for_check) + builds_for_check = CI_CONFIG.get_builds_for_report( + build_check_name, + release=pr_info.is_release(), + backport=pr_info.head_ref.startswith("backport"), + ) + required_builds = len(builds_for_check) + missing_builds = 0 # Collect reports from json artifacts - build_results = [] + build_results = [] # type: List[BuildResult] for build_name in builds_for_check: build_result = BuildResult.load_any( build_name, pr_info.number, pr_info.head_ref ) if not build_result: - logging.warning("Build results for %s are missing", build_name) - continue - assert ( - pr_info.head_ref == build_result.head_ref or pr_info.number > 0 - ), "BUG. if not a PR, report must be created on the same branch" - build_results.append(build_result) - - # The code to collect missing reports for failed jobs - missing_job_names = [ - name - for name in needs_data - if not any(1 for br in build_results if br.job_name.startswith(name)) - ] - missing_builds = len(missing_job_names) - for job_name in reversed(missing_job_names): - build_result = BuildResult.missing_result("missing") - build_result.job_name = job_name - build_result.status = PENDING - logging.info( - "There is missing report for %s, created a dummy result %s", - job_name, - build_result, - ) - build_results.insert(0, build_result) + if build_name == Build.FUZZERS: + logging.info("Build [%s] is missing - skip", Build.FUZZERS) + continue + logging.warning("Build results for %s is missing", build_name) + build_result = BuildResult.missing_result("missing") + build_result.job_name = build_name + build_result.status = PENDING + logging.info( + "There is missing report for %s, created a dummy result %s", + build_name, + build_result, + ) + missing_builds += 1 + build_results.insert(0, build_result) + else: + assert ( + pr_info.head_ref == build_result.head_ref or pr_info.number > 0 + ), "BUG. if not a PR, report must be created on the same branch" + build_results.append(build_result) # Calculate artifact groups like packages and binaries total_groups = sum(len(br.grouped_urls) for br in build_results) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index b7f76a2e652..234eec48463 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -645,7 +645,7 @@ class CiCache: if not jobs_with_params: return {} poll_interval_sec = 300 - TIMEOUT = 3600 + TIMEOUT = 3590 MAX_ROUNDS_TO_WAIT = 6 MAX_JOB_NUM_TO_WAIT = 3 await_finished: Dict[str, List[int]] = {} @@ -953,10 +953,18 @@ def _mark_success_action( # FIXME: find generic design for propagating and handling job status (e.g. 
stop using statuses in GH api) # now job ca be build job w/o status data, any other job that exit with 0 with or w/o status data if CI_CONFIG.is_build_job(job): - # there is no status for build jobs - # create dummy success to mark it as done + # there is no CommitStatus for build jobs + # create dummy status relying on JobReport # FIXME: consider creating commit status for build jobs too, to treat everything the same way - CommitStatusData(SUCCESS, "dummy description", "dummy_url").dump_status() + job_report = JobReport.load() if JobReport.exist() else None + if job_report and job_report.status == SUCCESS: + CommitStatusData( + SUCCESS, + "dummy description", + "dummy_url", + pr_num=pr_info.number, + sha=pr_info.sha, + ).dump_status() job_status = None if CommitStatusData.exist(): @@ -1682,7 +1690,7 @@ def main() -> int: if not args.skip_jobs: ci_cache = CiCache(s3, jobs_data["digests"]) - if pr_info.is_release_branch(): + if pr_info.is_master(): # wait for pending jobs to be finished, await_jobs is a long blocking call # wait pending jobs (for now only on release/master branches) ready_jobs_batches_dict = ci_cache.await_jobs( diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 4757341ce7c..60581e8a49f 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -21,6 +21,7 @@ class Labels(metaclass=WithIter): CI_SET_REDUCED = "ci_set_reduced" CI_SET_ARM = "ci_set_arm" CI_SET_INTEGRATION = "ci_set_integration" + CI_SET_ANALYZER = "ci_set_analyzer" libFuzzer = "libFuzzer" @@ -275,6 +276,7 @@ class BuildReportConfig: builds: List[str] job_config: JobConfig = field( default_factory=lambda: JobConfig( + run_command='build_report_check.py "$CHECK_NAME"', digest=DigestConfig( include_paths=[ "./tests/ci/build_report_check.py", @@ -398,6 +400,20 @@ bugfix_validate_check = DigestConfig( ], ) # common test params +docker_server_job_config = JobConfig( + required_on_release_branch=True, + run_command='docker_server.py --check-name "$CHECK_NAME" --release-type head --allow-build-reuse', + digest=DigestConfig( + include_paths=[ + "tests/ci/docker_server.py", + "./docker/server", + ] + ), +) +compatibility_test_common_params = { + "digest": compatibility_check_digest, + "run_command": "compatibility_check.py", +} statless_test_common_params = { "digest": stateless_check_digest, "run_command": 'functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT', @@ -555,7 +571,25 @@ class CIConfig: for check_name in config: # type: ignore yield check_name - def get_builds_for_report(self, report_name: str) -> List[str]: + def get_builds_for_report( + self, report_name: str, release: bool = False, backport: bool = False + ) -> List[str]: + # hack to modify build list for release and bp wf + assert not (release and backport), "Invalid input" + if backport and report_name == JobNames.BUILD_CHECK: + return [ + Build.PACKAGE_RELEASE, + Build.PACKAGE_AARCH64, + Build.PACKAGE_ASAN, + Build.PACKAGE_TSAN, + Build.PACKAGE_DEBUG, + ] + if release and report_name == JobNames.BUILD_CHECK_SPECIAL: + return [ + Build.BINARY_DARWIN, + Build.BINARY_DARWIN_AARCH64, + ] + return self.builds_report_config[report_name].builds @classmethod @@ -647,6 +681,16 @@ CI_CONFIG = CIConfig( JobNames.INTEGRATION_TEST, ] ), + Labels.CI_SET_ANALYZER: LabelConfig( + run_jobs=[ + JobNames.STYLE_CHECK, + JobNames.FAST_TEST, + Build.PACKAGE_RELEASE, + Build.PACKAGE_ASAN, + JobNames.STATELESS_TEST_ANALYZER_S3_REPLICATED_RELEASE, + JobNames.INTEGRATION_TEST_ASAN_ANALYZER, + ] + ), Labels.CI_SET_REDUCED: LabelConfig( run_jobs=[ job @@ 
-812,9 +856,6 @@ CI_CONFIG = CIConfig( Build.PACKAGE_TSAN, Build.PACKAGE_MSAN, Build.PACKAGE_DEBUG, - Build.PACKAGE_RELEASE_COVERAGE, - Build.BINARY_RELEASE, - Build.FUZZERS, ] ), JobNames.BUILD_CHECK_SPECIAL: BuildReportConfig( @@ -830,33 +871,15 @@ CI_CONFIG = CIConfig( Build.BINARY_S390X, Build.BINARY_AMD64_COMPAT, Build.BINARY_AMD64_MUSL, + Build.PACKAGE_RELEASE_COVERAGE, + Build.BINARY_RELEASE, + Build.FUZZERS, ] ), }, other_jobs_configs={ - JobNames.DOCKER_SERVER: TestConfig( - "", - job_config=JobConfig( - required_on_release_branch=True, - digest=DigestConfig( - include_paths=[ - "tests/ci/docker_server.py", - "./docker/server", - ] - ), - ), - ), - JobNames.DOCKER_KEEPER: TestConfig( - "", - job_config=JobConfig( - digest=DigestConfig( - include_paths=[ - "tests/ci/docker_server.py", - "./docker/keeper", - ] - ), - ), - ), + JobNames.DOCKER_SERVER: TestConfig("", job_config=docker_server_job_config), + JobNames.DOCKER_KEEPER: TestConfig("", job_config=docker_server_job_config), JobNames.DOCS_CHECK: TestConfig( "", job_config=JobConfig( @@ -1038,13 +1061,13 @@ CI_CONFIG = CIConfig( JobNames.COMPATIBILITY_TEST: TestConfig( Build.PACKAGE_RELEASE, job_config=JobConfig( - required_on_release_branch=True, digest=compatibility_check_digest + required_on_release_branch=True, **compatibility_test_common_params # type: ignore ), ), JobNames.COMPATIBILITY_TEST_ARM: TestConfig( Build.PACKAGE_AARCH64, job_config=JobConfig( - required_on_release_branch=True, digest=compatibility_check_digest + required_on_release_branch=True, **compatibility_test_common_params # type: ignore ), ), JobNames.UNIT_TEST: TestConfig( diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py index b2e3686f813..8009ef24760 100644 --- a/tests/ci/compatibility_check.py +++ b/tests/ci/compatibility_check.py @@ -2,6 +2,7 @@ import argparse import logging +import os import subprocess import sys from pathlib import Path @@ -122,11 +123,7 @@ def get_run_commands_distributions( def parse_args(): parser = argparse.ArgumentParser("Check compatibility with old distributions") - parser.add_argument("--check-name", required=True) - parser.add_argument("--check-glibc", action="store_true") - parser.add_argument( - "--check-distributions", action="store_true" - ) # currently hardcoded to x86, don't enable for ARM + parser.add_argument("--check-name", required=False) return parser.parse_args() @@ -134,6 +131,13 @@ def main(): logging.basicConfig(level=logging.INFO) args = parse_args() + check_name = args.check_name or os.getenv("CHECK_NAME") + assert check_name + check_glibc = True + # currently hardcoded to x86, don't enable for ARM + check_distributions = ( + "aarch64" not in check_name.lower() and "arm64" not in check_name.lower() + ) stopwatch = Stopwatch() @@ -150,7 +154,7 @@ def main(): "clickhouse-common-static_" in url or "clickhouse-server_" in url ) - download_builds_filter(args.check_name, reports_path, packages_path, url_filter) + download_builds_filter(check_name, reports_path, packages_path, url_filter) for package in packages_path.iterdir(): if package.suffix == ".deb": @@ -166,11 +170,11 @@ def main(): run_commands = [] - if args.check_glibc: + if check_glibc: check_glibc_commands = get_run_commands_glibc(packages_path, result_path) run_commands.extend(check_glibc_commands) - if args.check_distributions: + if check_distributions: centos_image = pull_image(get_docker_image(IMAGE_CENTOS)) ubuntu_image = pull_image(get_docker_image(IMAGE_UBUNTU)) check_distributions_commands = 
get_run_commands_distributions( @@ -195,9 +199,9 @@ def main(): # See https://sourceware.org/glibc/wiki/Glibc%20Timeline max_glibc_version = "" - if "amd64" in args.check_name: + if "amd64" in check_name: max_glibc_version = "2.4" - elif "aarch64" in args.check_name: + elif "aarch64" in check_name: max_glibc_version = "2.18" # because of build with newer sysroot? else: raise Exception("Can't determine max glibc version") @@ -205,8 +209,8 @@ def main(): state, description, test_results, additional_logs = process_result( result_path, server_log_path, - args.check_glibc, - args.check_distributions, + check_glibc, + check_distributions, max_glibc_version, ) diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index af0416d83dc..ad497a00eba 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -25,7 +25,6 @@ from stopwatch import Stopwatch from tee_popen import TeePopen from upload_result_helper import upload_results -NAME = "Push to Dockerhub" TEMP_PATH = Path(RUNNER_TEMP) / "docker_images_check" TEMP_PATH.mkdir(parents=True, exist_ok=True) @@ -177,6 +176,9 @@ def main(): stopwatch = Stopwatch() args = parse_args() + + NAME = f"Push to Dockerhub {args.suffix}" + if args.push: logging.info("login to docker hub") docker_login() diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 7f53034fd0f..38d0ea6d86b 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -51,7 +51,11 @@ def parse_args() -> argparse.Namespace: description="A program to build clickhouse-server image, both alpine and " "ubuntu versions", ) - + parser.add_argument( + "--check-name", + required=False, + default="", + ) parser.add_argument( "--version", type=version_arg, @@ -71,13 +75,13 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--image-path", type=str, - default="docker/server", + default="", help="a path to docker context directory", ) parser.add_argument( "--image-repo", type=str, - default="clickhouse/clickhouse-server", + default="", help="image name on docker hub", ) parser.add_argument( @@ -92,14 +96,7 @@ def parse_args() -> argparse.Namespace: default=argparse.SUPPRESS, help="don't push reports to S3 and github", ) - parser.add_argument("--push", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-push-images", - action="store_false", - dest="push", - default=argparse.SUPPRESS, - help="don't push images to docker hub", - ) + parser.add_argument("--push", action="store_true", help=argparse.SUPPRESS) parser.add_argument("--os", default=["ubuntu", "alpine"], help=argparse.SUPPRESS) parser.add_argument( "--no-ubuntu", @@ -337,13 +334,37 @@ def main(): makedirs(TEMP_PATH, exist_ok=True) args = parse_args() - image = DockerImageData(args.image_path, args.image_repo, False) + + pr_info = PRInfo() + + if args.check_name: + assert not args.image_path and not args.image_repo + if "server image" in args.check_name: + image_path = "docker/server" + image_repo = "clickhouse/clickhouse-server" + elif "keeper image" in args.check_name: + image_path = "docker/keeper" + image_repo = "clickhouse/clickhouse-keeper" + else: + assert False, "Invalid --check-name" + else: + assert args.image_path and args.image_repo + image_path = args.image_path + image_repo = args.image_repo + + push = args.push + del args.image_path + del args.image_repo + del args.push + + if pr_info.is_master(): + push = True + + image = DockerImageData(image_path, image_repo, False) args.release_type = auto_release_type(args.version, 
args.release_type) tags = gen_tags(args.version, args.release_type) - pr_info = None repo_urls = dict() direct_urls: Dict[str, List[str]] = dict() - pr_info = PRInfo() release_or_pr, _ = get_release_or_pr(pr_info, args.version) for arch, build_name in zip(ARCH, ("package_release", "package_aarch64")): @@ -355,13 +376,13 @@ def main(): repo_urls[arch] = f"{args.bucket_prefix}/{build_name}" if args.allow_build_reuse: # read s3 urls from pre-downloaded build reports - if "clickhouse-server" in args.image_repo: + if "clickhouse-server" in image_repo: PACKAGES = [ "clickhouse-client", "clickhouse-server", "clickhouse-common-static", ] - elif "clickhouse-keeper" in args.image_repo: + elif "clickhouse-keeper" in image_repo: PACKAGES = ["clickhouse-keeper"] else: assert False, "BUG" @@ -375,7 +396,7 @@ def main(): if any(package in url for package in PACKAGES) and "-dbg" not in url ] - if args.push: + if push: docker_login() logging.info("Following tags will be created: %s", ", ".join(tags)) @@ -385,7 +406,7 @@ def main(): for tag in tags: test_results.extend( build_and_push_image( - image, args.push, repo_urls, os, tag, args.version, direct_urls + image, push, repo_urls, os, tag, args.version, direct_urls ) ) if test_results[-1].status != "OK": diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index da2dea60fc1..888305aa166 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -231,10 +231,10 @@ def main(): run_changed_tests = flaky_check or validate_bugfix_check pr_info = PRInfo(need_changed_files=run_changed_tests) tests_to_run = [] + assert ( + not validate_bugfix_check or args.report_to_file + ), "JobReport file path must be provided with --validate-bugfix" if run_changed_tests: - assert ( - args.report_to_file - ), "JobReport file path must be provided with --validate-bugfix" tests_to_run = _get_statless_tests_to_run(pr_info) if "RUN_BY_HASH_NUM" in os.environ: diff --git a/tests/ci/lambda_shared_package/lambda_shared/pr.py b/tests/ci/lambda_shared_package/lambda_shared/pr.py index 1b4f827cc0a..4ac787229c0 100644 --- a/tests/ci/lambda_shared_package/lambda_shared/pr.py +++ b/tests/ci/lambda_shared_package/lambda_shared/pr.py @@ -78,6 +78,7 @@ LABELS = { "Not for changelog", ], "pr-performance": ["Performance Improvement"], + "pr-ci": ["CI Fix or Improvement (changelog entry is not required)"], } CATEGORY_TO_LABEL = {c: lb for lb, categories in LABELS.items() for c in categories} @@ -149,10 +150,7 @@ def check_pr_description(pr_body: str, repo_name: str) -> Tuple[str, str]: if not category: description_error = "Changelog category is empty" # Filter out the PR categories that are not for changelog. 
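# The hunk below replaces the regex-based changelog-category filter with a plain
# suffix check: any category carrying the "(changelog entry is not required)"
# marker is skipped. A minimal standalone sketch of the new rule follows; it is
# illustrative only, not the actual check_pr_description() code, and the sample
# category strings (other than the pr-ci one added above) are hypothetical.
NO_CHANGELOG_MARKER = "(changelog entry is not required)"


def changelog_entry_required(category: str) -> bool:
    """Return True when the chosen PR category must come with a changelog entry."""
    return NO_CHANGELOG_MARKER not in category


assert not changelog_entry_required("CI Fix or Improvement (changelog entry is not required)")
assert changelog_entry_required("Improvement")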
- elif re.match( - r"(?i)doc|((non|in|not|un)[-\s]*significant)|(not[ ]*for[ ]*changelog)", - category, - ): + elif "(changelog entry is not required)" in category: pass # to not check the rest of the conditions elif category not in CATEGORY_TO_LABEL: description_error, category = f"Category '{category}' is not valid", "" diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 4aac7ad73df..aba32d88c0a 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -2,6 +2,7 @@ import json import logging import os +import re from typing import Dict, List, Set, Union from urllib.parse import quote @@ -288,6 +289,11 @@ class PRInfo: def is_master(self) -> bool: return self.number == 0 and self.head_ref == "master" + def is_release(self) -> bool: + return self.number == 0 and bool( + re.match(r"^2[1-9]\.[1-9][0-9]*$", self.head_ref) + ) + def is_release_branch(self) -> bool: return self.number == 0 diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py index 09d50c902d8..6187656983e 100644 --- a/tests/ci/run_check.py +++ b/tests/ci/run_check.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -import atexit import logging import sys from typing import Tuple @@ -17,7 +16,6 @@ from commit_status_helper import ( post_commit_status, post_labels, remove_labels, - update_mergeable_check, ) from env_helper import GITHUB_REPOSITORY, GITHUB_SERVER_URL from get_robot_token import get_best_robot_token @@ -27,7 +25,8 @@ from lambda_shared_package.lambda_shared.pr import ( check_pr_description, ) from pr_info import PRInfo -from report import FAILURE, PENDING +from report import FAILURE, PENDING, SUCCESS +from cherry_pick import Labels TRUSTED_ORG_IDS = { 54801242, # clickhouse @@ -38,6 +37,8 @@ CAN_BE_TESTED_LABEL = "can be tested" FEATURE_LABEL = "pr-feature" SUBMODULE_CHANGED_LABEL = "submodule changed" PR_CHECK = "PR Check" +# pr-bugfix autoport can lead to issues in releases, let's do ci fixes only +AUTO_BACKPORT_LABELS = ["pr-ci"] def pr_is_by_trusted_user(pr_user_login, pr_user_orgs): @@ -99,7 +100,6 @@ def main(): description = format_description(description) gh = Github(get_best_robot_token(), per_page=100) commit = get_commit(gh, pr_info.sha) - atexit.register(update_mergeable_check, commit, pr_info, PR_CHECK) description_error, category = check_pr_description(pr_info.body, GITHUB_REPOSITORY) pr_labels_to_add = [] @@ -123,6 +123,15 @@ def main(): elif SUBMODULE_CHANGED_LABEL in pr_info.labels: pr_labels_to_remove.append(SUBMODULE_CHANGED_LABEL) + if any(label in AUTO_BACKPORT_LABELS for label in pr_labels_to_add): + backport_labels = [Labels.MUST_BACKPORT, Labels.MUST_BACKPORT_CLOUD] + pr_labels_to_add += [ + label for label in backport_labels if label not in pr_info.labels + ] + print( + f"::notice :: Add backport labels [{backport_labels}] for a given PR category" + ) + print(f"Change labels: add {pr_labels_to_add}, remove {pr_labels_to_remove}") if pr_labels_to_add: post_labels(gh, pr_info, pr_labels_to_add) @@ -181,6 +190,15 @@ def main(): print("::notice ::Cannot run") sys.exit(1) + post_commit_status( + commit, + SUCCESS, + "", + "ok", + PR_CHECK, + pr_info, + ) + ci_report_url = create_ci_report(pr_info, []) print("::notice ::Can run") post_commit_status( diff --git a/tests/ci/worker/dockerhub_proxy_template.sh b/tests/ci/worker/dockerhub_proxy_template.sh index 7ca8d581df5..0e375dd5f04 100644 --- a/tests/ci/worker/dockerhub_proxy_template.sh +++ b/tests/ci/worker/dockerhub_proxy_template.sh @@ -1,19 +1,7 @@ #!/usr/bin/env bash set -xeuo pipefail -# Add cloudflare DNS as a fallback -# Get 
default gateway interface -IFACE=$(ip --json route list | jq '.[]|select(.dst == "default").dev' --raw-output) -# `Link 2 (eth0): 172.31.0.2` -ETH_DNS=$(resolvectl dns "$IFACE") || : -CLOUDFLARE_NS=1.1.1.1 -if [[ "$ETH_DNS" ]] && [[ "${ETH_DNS#*: }" != *"$CLOUDFLARE_NS"* ]]; then - # Cut the leading legend - ETH_DNS=${ETH_DNS#*: } - # shellcheck disable=SC2206 - new_dns=(${ETH_DNS} "$CLOUDFLARE_NS") - resolvectl dns "$IFACE" "${new_dns[@]}" -fi +bash /usr/local/share/scripts/init-network.sh # tune sysctl for network performance cat > /etc/sysctl.d/10-network-memory.conf << EOF diff --git a/tests/ci/worker/init_runner.sh b/tests/ci/worker/init_runner.sh index 017d847739f..b211128cf10 100644 --- a/tests/ci/worker/init_runner.sh +++ b/tests/ci/worker/init_runner.sh @@ -60,19 +60,7 @@ export RUNNER_URL="https://github.com/${RUNNER_ORG}" INSTANCE_ID=$(ec2metadata --instance-id) export INSTANCE_ID -# Add cloudflare DNS as a fallback -# Get default gateway interface -IFACE=$(ip --json route list | jq '.[]|select(.dst == "default").dev' --raw-output) -# `Link 2 (eth0): 172.31.0.2` -ETH_DNS=$(resolvectl dns "$IFACE") || : -CLOUDFLARE_NS=1.1.1.1 -if [[ "$ETH_DNS" ]] && [[ "${ETH_DNS#*: }" != *"$CLOUDFLARE_NS"* ]]; then - # Cut the leading legend - ETH_DNS=${ETH_DNS#*: } - # shellcheck disable=SC2206 - new_dns=(${ETH_DNS} "$CLOUDFLARE_NS") - resolvectl dns "$IFACE" "${new_dns[@]}" -fi +bash /usr/local/share/scripts/init-network.sh # combine labels RUNNER_TYPE=$(/usr/local/bin/aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --query "Tags[?Key=='github:runner-type'].Value" --output text) diff --git a/tests/ci/worker/prepare-ci-ami.sh b/tests/ci/worker/prepare-ci-ami.sh index c27d956c834..281dff5b1c2 100644 --- a/tests/ci/worker/prepare-ci-ami.sh +++ b/tests/ci/worker/prepare-ci-ami.sh @@ -9,7 +9,7 @@ set -xeuo pipefail echo "Running prepare script" export DEBIAN_FRONTEND=noninteractive -export RUNNER_VERSION=2.311.0 +export RUNNER_VERSION=2.313.0 export RUNNER_HOME=/home/ubuntu/actions-runner deb_arch() { @@ -138,6 +138,49 @@ dpkg -i /tmp/amazon-cloudwatch-agent.deb aws ssm get-parameter --region us-east-1 --name AmazonCloudWatch-github-runners --query 'Parameter.Value' --output text > /opt/aws/amazon-cloudwatch-agent/etc/amazon-cloudwatch-agent.json systemctl enable amazon-cloudwatch-agent.service + +echo "Install tailscale" +# Build get-authkey for tailscale +docker run --rm -v /usr/local/bin/:/host-local-bin -i golang:alpine sh -ex <<'EOF' + CGO_ENABLED=0 go install -tags tag:svc-core-ci-github tailscale.com/cmd/get-authkey@main + mv /go/bin/get-authkey /host-local-bin +EOF + +# install tailscale +curl -fsSL "https://pkgs.tailscale.com/stable/ubuntu/$(lsb_release -cs).noarmor.gpg" > /usr/share/keyrings/tailscale-archive-keyring.gpg +curl -fsSL "https://pkgs.tailscale.com/stable/ubuntu/$(lsb_release -cs).tailscale-keyring.list" > /etc/apt/sources.list.d/tailscale.list +apt-get update +apt-get install tailscale --yes --no-install-recommends + + +# Create a common script for the instances +mkdir /usr/local/share/scripts -p +cat > /usr/local/share/scripts/init-network.sh << 'EOF' +#!/usr/bin/env bash + +# Add cloudflare DNS as a fallback +# Get default gateway interface +IFACE=$(ip --json route list | jq '.[]|select(.dst == "default").dev' --raw-output) +# `Link 2 (eth0): 172.31.0.2` +ETH_DNS=$(resolvectl dns "$IFACE") || : +CLOUDFLARE_NS=1.1.1.1 +if [[ "$ETH_DNS" ]] && [[ "${ETH_DNS#*: }" != *"$CLOUDFLARE_NS"* ]]; then + # Cut the leading legend + ETH_DNS=${ETH_DNS#*: } + # 
shellcheck disable=SC2206 + new_dns=(${ETH_DNS} "$CLOUDFLARE_NS") + resolvectl dns "$IFACE" "${new_dns[@]}" +fi + +# Setup tailscale, the very first action +TS_API_CLIENT_ID=$(aws ssm get-parameter --region us-east-1 --name /tailscale/api-client-id --query 'Parameter.Value' --output text --with-decryption) +TS_API_CLIENT_SECRET=$(aws ssm get-parameter --region us-east-1 --name /tailscale/api-client-secret --query 'Parameter.Value' --output text --with-decryption) +export TS_API_CLIENT_ID TS_API_CLIENT_SECRET +TS_AUTHKEY=$(get-authkey -tags tag:svc-core-ci-github -reusable -ephemeral) +tailscale up --ssh --auth-key="$TS_AUTHKEY" --hostname="ci-runner-$INSTANCE_ID" +EOF + + # The following line is used in aws TOE check. touch /var/tmp/clickhouse-ci-ami.success # END OF THE SCRIPT diff --git a/tests/config/config.d/handlers.yaml b/tests/config/config.d/handlers.yaml new file mode 100644 index 00000000000..c77dd6578c1 --- /dev/null +++ b/tests/config/config.d/handlers.yaml @@ -0,0 +1,8 @@ +http_handlers: + rule: + url: '/upyachka' + empty_query_string: + handler: + type: redirect + location: "/?query=SELECT+'Pepyaka'" + defaults: diff --git a/tests/config/install.sh b/tests/config/install.sh index cac4e626594..467636cfa40 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -66,6 +66,7 @@ ln -sf $SRC_PATH/config.d/filesystem_caches_path.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/validate_tcp_client_information.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/zero_copy_destructive_operations.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/block_number.xml $DEST_SERVER_PATH/config.d/ +ln -sf $SRC_PATH/config.d/handlers.yaml $DEST_SERVER_PATH/config.d/ # Not supported with fasttest. if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 5cb50a0aa22..d6292c51bbe 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -70,6 +70,11 @@ CLICKHOUSE_LOG_FILE = "/var/log/clickhouse-server/clickhouse-server.log" CLICKHOUSE_ERROR_LOG_FILE = "/var/log/clickhouse-server/clickhouse-server.err.log" +# Minimum version we use in integration tests to check compatibility with old releases +# Keep in mind that we only support upgrading between releases that are at most 1 year different. 
+# This means that this minimum need to be, at least, 1 year older than the current release +CLICKHOUSE_CI_MIN_TESTED_VERSION = "22.8" + # to create docker-compose env file def _create_env_file(path, variables): diff --git a/tests/integration/test_backup_restore_new/test.py b/tests/integration/test_backup_restore_new/test.py index cac458f616d..34ffdf7a8df 100644 --- a/tests/integration/test_backup_restore_new/test.py +++ b/tests/integration/test_backup_restore_new/test.py @@ -591,6 +591,138 @@ def test_zip_archive_with_bad_compression_method(): ) +def test_tar_archive(): + backup_name = f"Disk('backups', 'archive.tar')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + + assert os.path.isfile(get_path_to_backup(backup_name)) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_tar_bz2_archive(): + backup_name = f"Disk('backups', 'archive.tar.bz2')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + + assert os.path.isfile(get_path_to_backup(backup_name)) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_tar_gz_archive(): + backup_name = f"Disk('backups', 'archive.tar.gz')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + + assert os.path.isfile(get_path_to_backup(backup_name)) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_tar_lzma_archive(): + backup_name = f"Disk('backups', 'archive.tar.lzma')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + + assert os.path.isfile(get_path_to_backup(backup_name)) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_tar_zst_archive(): + backup_name = f"Disk('backups', 'archive.tar.zst')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + + assert os.path.isfile(get_path_to_backup(backup_name)) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_tar_xz_archive(): + backup_name = f"Disk('backups', 'archive.tar.xz')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") 
== "100\t4950\n" + instance.query(f"BACKUP TABLE test.table TO {backup_name}") + + assert os.path.isfile(get_path_to_backup(backup_name)) + + instance.query("DROP TABLE test.table") + assert instance.query("EXISTS test.table") == "0\n" + + instance.query(f"RESTORE TABLE test.table FROM {backup_name}") + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + +def test_tar_archive_with_password(): + backup_name = f"Disk('backups', 'archive_with_password.tar')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + expected_error = "Setting a password is not currently supported for libarchive" + assert expected_error in instance.query_and_get_error( + f"BACKUP TABLE test.table TO {backup_name} SETTINGS id='tar_archive_with_password', password='password123'" + ) + assert ( + instance.query( + "SELECT status FROM system.backups WHERE id='tar_archive_with_password'" + ) + == "BACKUP_FAILED\n" + ) + + +def test_tar_archive_with_bad_compression_method(): + backup_name = f"Disk('backups', 'archive_with_bad_compression_method.tar')" + create_and_fill_table() + + assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" + + expected_error = "Using compression_method and compression_level options are not supported for tar archives" + assert expected_error in instance.query_and_get_error( + f"BACKUP TABLE test.table TO {backup_name} SETTINGS id='tar_archive_with_bad_compression_method', compression_method='foobar'" + ) + assert ( + instance.query( + "SELECT status FROM system.backups WHERE id='tar_archive_with_bad_compression_method'" + ) + == "BACKUP_FAILED\n" + ) + + def test_async(): create_and_fill_table() assert instance.query("SELECT count(), sum(x) FROM test.table") == "100\t4950\n" @@ -885,7 +1017,7 @@ def test_required_privileges(): instance.query("CREATE USER u1") backup_name = new_backup_name() - expected_error = "necessary to have the grant BACKUP ON test.table" + expected_error = "necessary to have the grant BACKUP ON test.`table`" assert expected_error in instance.query_and_get_error( f"BACKUP TABLE test.table TO {backup_name}", user="u1" ) @@ -893,7 +1025,7 @@ def test_required_privileges(): instance.query("GRANT BACKUP ON test.table TO u1") instance.query(f"BACKUP TABLE test.table TO {backup_name}", user="u1") - expected_error = "necessary to have the grant INSERT, CREATE TABLE ON test.table" + expected_error = "necessary to have the grant INSERT, CREATE TABLE ON test.`table`" assert expected_error in instance.query_and_get_error( f"RESTORE TABLE test.table FROM {backup_name}", user="u1" ) @@ -910,7 +1042,7 @@ def test_required_privileges(): instance.query("DROP TABLE test.table") - expected_error = "necessary to have the grant INSERT, CREATE TABLE ON test.table" + expected_error = "necessary to have the grant INSERT, CREATE TABLE ON test.`table`" assert expected_error in instance.query_and_get_error( f"RESTORE ALL FROM {backup_name}", user="u1" ) @@ -963,7 +1095,7 @@ def test_system_users(): instance.query("GRANT r1 TO r2 WITH ADMIN OPTION") instance.query("GRANT r2 TO u1") - instance.query("CREATE SETTINGS PROFILE prof1 SETTINGS custom_b=2 TO u1") + instance.query("CREATE SETTINGS PROFILE `prof1` SETTINGS custom_b=2 TO u1") instance.query("CREATE ROW POLICY rowpol1 ON test.table USING x<50 TO u1") instance.query("CREATE QUOTA q1 TO r1") @@ -984,7 +1116,7 @@ def test_system_users(): assert ( instance.query("SHOW CREATE USER u1") - == "CREATE USER u1 IDENTIFIED WITH 
sha256_password SETTINGS PROFILE default, custom_a = 1\n" + == "CREATE USER u1 IDENTIFIED WITH sha256_password SETTINGS PROFILE `default`, custom_a = 1\n" ) assert instance.query("SHOW GRANTS FOR u1") == TSV( ["GRANT SELECT ON test.* TO u1", "GRANT r2 TO u1"] @@ -998,11 +1130,11 @@ def test_system_users(): assert ( instance.query("SHOW CREATE SETTINGS PROFILE prof1") - == "CREATE SETTINGS PROFILE prof1 SETTINGS custom_b = 2 TO u1\n" + == "CREATE SETTINGS PROFILE `prof1` SETTINGS custom_b = 2 TO u1\n" ) assert ( instance.query("SHOW CREATE ROW POLICY rowpol1") - == "CREATE ROW POLICY rowpol1 ON test.table FOR SELECT USING x < 50 TO u1\n" + == "CREATE ROW POLICY rowpol1 ON test.`table` FOR SELECT USING x < 50 TO u1\n" ) assert instance.query("SHOW CREATE QUOTA q1") == "CREATE QUOTA q1 TO r1\n" diff --git a/tests/integration/test_backup_restore_new/test_cancel_backup.py b/tests/integration/test_backup_restore_new/test_cancel_backup.py index 06bcb5eadfc..6016bac9197 100644 --- a/tests/integration/test_backup_restore_new/test_cancel_backup.py +++ b/tests/integration/test_backup_restore_new/test_cancel_backup.py @@ -196,7 +196,8 @@ def test_cancel_backup(): start_restore(try_restore_id_1, backup_id) cancel_restore(try_restore_id_1) - node.query(f"DROP TABLE tbl SYNC") + # IF EXISTS because it's unknown whether RESTORE had managed to create a table before it got cancelled. + node.query(f"DROP TABLE IF EXISTS tbl SYNC") restore_id = uuid.uuid4().hex start_restore(restore_id, backup_id) diff --git a/tests/integration/test_backup_restore_s3/test.py b/tests/integration/test_backup_restore_s3/test.py index 634f14621c0..4d3ee8200a3 100644 --- a/tests/integration/test_backup_restore_s3/test.py +++ b/tests/integration/test_backup_restore_s3/test.py @@ -454,6 +454,48 @@ def test_backup_to_zip(): check_backup_and_restore(storage_policy, backup_destination) +def test_backup_to_tar(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.tar', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_tar_gz(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.tar.gz', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_tar_bz2(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.tar.bz2', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_tar_lzma(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.tar.lzma', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_tar_zst(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.tar.zst', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) + + +def test_backup_to_tar_xz(): + storage_policy = "default" + backup_name = new_backup_name() + backup_destination = f"S3('http://minio1:9001/root/data/backups/{backup_name}.tar.xz', 'minio', 'minio123')" + check_backup_and_restore(storage_policy, backup_destination) + + def test_user_specific_auth(start_cluster): def 
create_user(user): node.query(f"CREATE USER {user}") diff --git a/tests/integration/test_backward_compatibility/test.py b/tests/integration/test_backward_compatibility/test.py index 847483f2b9b..098fc8c1025 100644 --- a/tests/integration/test_backward_compatibility/test.py +++ b/tests/integration/test_backward_compatibility/test.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="19.16.9.37", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py index 94bc1d3bfc9..b0c0f5d17c7 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_fixed_key.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_aggregate_function_state.py b/tests/integration/test_backward_compatibility/test_aggregate_function_state.py index 7789d13be0b..5972f57b928 100644 --- a/tests/integration/test_backward_compatibility/test_aggregate_function_state.py +++ b/tests/integration/test_backward_compatibility/test_aggregate_function_state.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="19.16.9.37", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -15,8 +15,8 @@ node1 = cluster.add_instance( node2 = cluster.add_instance( "node2", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="19.16.9.37", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_convert_ordinary.py b/tests/integration/test_backward_compatibility/test_convert_ordinary.py index 034a68e0f30..8e7d773ad2c 100644 --- a/tests/integration/test_backward_compatibility/test_convert_ordinary.py +++ b/tests/integration/test_backward_compatibility/test_convert_ordinary.py @@ -1,11 +1,11 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", - image="yandex/clickhouse-server", - tag="19.16.9.37", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_zookeeper=True, with_installed_binary=True, @@ -28,16 
+28,22 @@ def q(query): def check_convert_system_db_to_atomic(): - q( - "CREATE TABLE t(date Date, id UInt32) ENGINE = MergeTree PARTITION BY toYYYYMM(date) ORDER BY id" + node.query( + "CREATE DATABASE default2 ENGINE=Ordinary", + settings={"allow_deprecated_database_ordinary": 1}, + ) + q( + "CREATE TABLE default2.t(date Date, id UInt32) ENGINE = MergeTree PARTITION BY toYYYYMM(date) ORDER BY id" + ) + q("INSERT INTO default2.t VALUES (today(), 1)") + q( + "INSERT INTO default2.t SELECT number % 1000, number FROM system.numbers LIMIT 1000000" ) - q("INSERT INTO t VALUES (today(), 1)") - q("INSERT INTO t SELECT number % 1000, number FROM system.numbers LIMIT 1000000") - assert "1000001\n" == q("SELECT count() FROM t") - assert "499999500001\n" == q("SELECT sum(id) FROM t") + assert "1000001\n" == q("SELECT count() FROM default2.t") + assert "499999500001\n" == q("SELECT sum(id) FROM default2.t") assert "1970-01-01\t1000\t499500000\n1970-01-02\t1000\t499501000\n" == q( - "SELECT date, count(), sum(id) FROM t GROUP BY date ORDER BY date LIMIT 2" + "SELECT date, count(), sum(id) FROM default2.t GROUP BY date ORDER BY date LIMIT 2" ) q("SYSTEM FLUSH LOGS") @@ -49,7 +55,7 @@ def check_convert_system_db_to_atomic(): node.restart_with_latest_version(fix_metadata=True) - assert "Ordinary" in node.query("SHOW CREATE DATABASE default") + assert "Ordinary" in node.query("SHOW CREATE DATABASE default2") assert "Atomic" in node.query("SHOW CREATE DATABASE system") assert "query_log" in node.query("SHOW TABLES FROM system") assert "part_log" in node.query("SHOW TABLES FROM system") @@ -60,10 +66,11 @@ def check_convert_system_db_to_atomic(): assert "1\n" == node.query("SELECT count() != 0 FROM system.query_log_0") assert "1\n" == node.query("SELECT count() != 0 FROM system.part_log_0") assert "1970-01-01\t1000\t499500000\n1970-01-02\t1000\t499501000\n" == node.query( - "SELECT date, count(), sum(id) FROM t GROUP BY date ORDER BY date LIMIT 2" + "SELECT date, count(), sum(id) FROM default2.t GROUP BY date ORDER BY date LIMIT 2" ) - assert "INFORMATION_SCHEMA\ndefault\ninformation_schema\nsystem\n" == node.query( - "SELECT name FROM system.databases ORDER BY name" + assert ( + "INFORMATION_SCHEMA\ndefault\ndefault2\ninformation_schema\nsystem\n" + == node.query("SELECT name FROM system.databases ORDER BY name") ) errors_count = node.count_in_log("") @@ -213,7 +220,7 @@ def check_convert_all_dbs_to_atomic(): node.restart_clickhouse() assert ( - ".o r d i n a r y.\natomic\ndefault\nordinary\nother\nsystem\n" + ".o r d i n a r y.\natomic\ndefault\ndefault2\nordinary\nother\nsystem\n" == node.query( "SELECT name FROM system.databases WHERE engine='Atomic' ORDER BY name" ) diff --git a/tests/integration/test_backward_compatibility/test_cte_distributed.py b/tests/integration/test_backward_compatibility/test_cte_distributed.py index d47ae3aa255..e612bf2989a 100644 --- a/tests/integration/test_backward_compatibility/test_cte_distributed.py +++ b/tests/integration/test_backward_compatibility/test_cte_distributed.py @@ -1,14 +1,14 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=False, allow_analyzer=False) node2 = cluster.add_instance( "node2", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="21.6", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, 
with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_data_skipping_indices.py b/tests/integration/test_backward_compatibility/test_data_skipping_indices.py deleted file mode 100644 index 46ab27d2ab0..00000000000 --- a/tests/integration/test_backward_compatibility/test_data_skipping_indices.py +++ /dev/null @@ -1,55 +0,0 @@ -# pylint: disable=line-too-long -# pylint: disable=unused-argument -# pylint: disable=redefined-outer-name - -import pytest -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance( - "node", - image="yandex/clickhouse-server", - tag="21.6", - stay_alive=True, - with_installed_binary=True, - allow_analyzer=False, -) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - yield cluster - - finally: - cluster.shutdown() - - -# TODO: cover other types too, but for this we need to add something like -# restart_with_tagged_version(), since right now it is not possible to -# switch to old tagged clickhouse version. -def test_index(start_cluster): - node.query( - """ - CREATE TABLE data - ( - key Int, - value Nullable(Int), - INDEX value_index value TYPE minmax GRANULARITY 1 - ) - ENGINE = MergeTree - ORDER BY key; - - INSERT INTO data SELECT number, number FROM numbers(10000); - - SELECT * FROM data WHERE value = 20000 SETTINGS force_data_skipping_indices = 'value_index' SETTINGS force_data_skipping_indices = 'value_index', max_rows_to_read=1; - """ - ) - node.restart_with_latest_version() - node.query( - """ - SELECT * FROM data WHERE value = 20000 SETTINGS force_data_skipping_indices = 'value_index' SETTINGS force_data_skipping_indices = 'value_index', max_rows_to_read=1; - DROP TABLE data; - """ - ) diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py index b6b6ef28de5..0217c46a660 100644 --- a/tests/integration/test_backward_compatibility/test_functions.py +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -5,7 +5,7 @@ import logging import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) @@ -13,11 +13,7 @@ upstream = cluster.add_instance("upstream", allow_analyzer=False) backward = cluster.add_instance( "backward", image="clickhouse/clickhouse-server", - # Note that a bug changed the string representation of several aggregations in 22.9 and 22.10 and some minor - # releases of 22.8, 22.7 and 22.3 - # See https://github.com/ClickHouse/ClickHouse/issues/42916 - # Affected at least: singleValueOrNull, last_value, min, max, any, anyLast, anyHeavy, first_value, argMin, argMax - tag="22.6", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_insert_profile_events.py b/tests/integration/test_backward_compatibility/test_insert_profile_events.py index d38bece7855..30958127ee5 100644 --- a/tests/integration/test_backward_compatibility/test_insert_profile_events.py +++ b/tests/integration/test_backward_compatibility/test_insert_profile_events.py @@ -4,14 +4,14 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) 
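# The same substitution repeats across the backward-compatibility suites in this
# directory: hard-coded image tags of old releases are replaced with the shared
# CLICKHOUSE_CI_MIN_TESTED_VERSION constant from helpers/cluster.py. A condensed
# sketch of the fixture pattern these tests share is shown here; it is a
# hypothetical file, assuming only the helpers.cluster API already visible in
# the surrounding hunks.
import pytest

from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION

cluster = ClickHouseCluster(__file__)
node_new = cluster.add_instance("node_new", allow_analyzer=False)
node_old = cluster.add_instance(
    "node_old",
    image="clickhouse/clickhouse-server",
    tag=CLICKHOUSE_CI_MIN_TESTED_VERSION,  # oldest release still tested by CI
    stay_alive=True,
    with_installed_binary=True,
    allow_analyzer=False,
)


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()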
upstream_node = cluster.add_instance("upstream_node", allow_analyzer=False) old_node = cluster.add_instance( "old_node", image="clickhouse/clickhouse-server", - tag="22.6", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, allow_analyzer=False, ) diff --git a/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py b/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py index 04016755a24..02c81ddbd52 100644 --- a/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py +++ b/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) # Version 21.6.3.14 has incompatible partition id for tables with UUID in partition key. -node_22_6 = cluster.add_instance( - "node_22_6", +node = cluster.add_instance( + "node", image="clickhouse/clickhouse-server", - tag="22.6", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -25,18 +25,18 @@ def start_cluster(): def test_ip_types_binary_compatibility(start_cluster): - node_22_6.query( + node.query( "create table tab (ipv4 IPv4, ipv6 IPv6) engine = MergeTree order by tuple()" ) - node_22_6.query( + node.query( "insert into tab values ('123.231.213.132', '0123:4567:89ab:cdef:fedc:ba98:7654:3210')" ) - res_22_6 = node_22_6.query("select * from tab") + res_old = node.query("select * from tab") - node_22_6.restart_with_latest_version() + node.restart_with_latest_version() - res_latest = node_22_6.query("select * from tab") + res_latest = node.query("select * from tab") - assert res_22_6 == res_latest + assert res_old == res_latest - node_22_6.query("drop table tab") + node.query("drop table tab") diff --git a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py index 5261a279a4f..337a967e309 100644 --- a/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py +++ b/tests/integration/test_backward_compatibility/test_memory_bound_aggregation.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -15,8 +15,8 @@ node1 = cluster.add_instance( node2 = cluster.add_instance( "node2", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py index cf7a25e8dc1..7f6c3fc92e1 100644 --- a/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py +++ b/tests/integration/test_backward_compatibility/test_normalized_count_comparison.py @@ -1,14 +1,14 @@ import pytest -from helpers.cluster import 
ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=False, allow_analyzer=False) node2 = cluster.add_instance( "node2", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="21.6", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py index ec1d7fedac5..be161df0640 100644 --- a/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py +++ b/tests/integration/test_backward_compatibility/test_select_aggregate_alias_column.py @@ -1,14 +1,14 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance("node1", with_zookeeper=False, allow_analyzer=False) node2 = cluster.add_instance( "node2", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="21.6", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py index e4fda618031..5d0e5e24af5 100644 --- a/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py +++ b/tests/integration/test_backward_compatibility/test_short_strings_aggregation.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="19.16.9.37", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -15,8 +15,8 @@ node1 = cluster.add_instance( node2 = cluster.add_instance( "node2", with_zookeeper=False, - image="yandex/clickhouse-server", - tag="19.16.9.37", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py b/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py index 9c9d1a4d312..d3730357989 100644 --- a/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py +++ b/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py @@ -1,13 +1,13 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node_old = cluster.add_instance( "node1", image="clickhouse/clickhouse-server", - tag="22.8", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, with_zookeeper=True, diff --git a/tests/integration/test_compression_codec_read/test.py b/tests/integration/test_compression_codec_read/test.py deleted file mode 100644 index 
b39e5147d38..00000000000 --- a/tests/integration/test_compression_codec_read/test.py +++ /dev/null @@ -1,60 +0,0 @@ -import pytest - -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry - -cluster = ClickHouseCluster(__file__) - -node1 = cluster.add_instance( - "node1", - image="yandex/clickhouse-server", - tag="20.8.11.17", - with_installed_binary=True, - stay_alive=True, - allow_analyzer=False, -) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - cluster.start() - - yield cluster - finally: - cluster.shutdown() - - -def test_default_codec_read(start_cluster): - node1.query("DROP TABLE IF EXISTS test_18340") - - node1.query( - """ - CREATE TABLE test_18340 - ( - `lns` LowCardinality(Nullable(String)), - `ns` Nullable(String), - `s` String, - `ni64` Nullable(Int64), - `ui64` UInt64, - `alns` Array(LowCardinality(Nullable(String))), - `ans` Array(Nullable(String)), - `dt` DateTime, - `i32` Int32 - ) - ENGINE = MergeTree() - PARTITION BY i32 - ORDER BY (s, farmHash64(s)) - SAMPLE BY farmHash64(s) - """ - ) - - node1.query( - "insert into test_18340 values ('test', 'test', 'test', 0, 0, ['a'], ['a'], now(), 0)" - ) - - assert node1.query("SELECT COUNT() FROM test_18340") == "1\n" - - node1.restart_with_latest_version() - - assert node1.query("SELECT COUNT() FROM test_18340") == "1\n" diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index ffe22c62325..4fe899f15e2 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -25,15 +25,6 @@ node2 = cluster.add_instance( ], with_zookeeper=True, ) -node3 = cluster.add_instance( - "node3", - main_configs=["configs/default_compression.xml"], - image="yandex/clickhouse-server", - tag="19.16.9.37", - stay_alive=True, - with_installed_binary=True, - allow_analyzer=False, -) node4 = cluster.add_instance("node4") @@ -413,88 +404,6 @@ def test_default_codec_multiple(start_cluster): node2.query("DROP TABLE compression_table_multiple SYNC") -def test_default_codec_version_update(start_cluster): - node3.query( - """ - CREATE TABLE compression_table ( - key UInt64 CODEC(LZ4HC(7)), - data1 String - ) ENGINE = MergeTree ORDER BY tuple() PARTITION BY key; - """ - ) - - node3.query("INSERT INTO compression_table VALUES (1, 'x')") - node3.query( - "INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048)) - ) - node3.query( - "INSERT INTO compression_table VALUES (3, '{}')".format( - get_random_string(22048) - ) - ) - - old_version = node3.query("SELECT version()") - node3.restart_with_latest_version(fix_metadata=True) - new_version = node3.query("SELECT version()") - logging.debug(f"Updated from {old_version} to {new_version}") - assert ( - node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'" - ) - == "ZSTD(1)\n" - ) - assert ( - node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_2_2_0'" - ) - == "ZSTD(1)\n" - ) - assert ( - node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_0'" - ) - == "ZSTD(1)\n" - ) - - node3.query("OPTIMIZE TABLE compression_table FINAL") - - assert ( - node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_1'" - ) - == "ZSTD(10)\n" - ) - 
assert ( - node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '2_2_2_1'" - ) - == "LZ4HC(5)\n" - ) - assert ( - node3.query( - "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'" - ) - == "LZ4\n" - ) - - node3.query("DROP TABLE compression_table SYNC") - - def callback(n): - n.exec_in_container( - [ - "bash", - "-c", - "rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system ", - ], - user="root", - ) - - node3.restart_with_original_version(callback_onstop=callback) - - cur_version = node3.query("SELECT version()") - logging.debug(f"End with {cur_version}") - - def test_default_codec_for_compact_parts(start_cluster): node4.query( """ diff --git a/tests/integration/test_disk_access_storage/test.py b/tests/integration/test_disk_access_storage/test.py index bcfc9d718a7..a710295505e 100644 --- a/tests/integration/test_disk_access_storage/test.py +++ b/tests/integration/test_disk_access_storage/test.py @@ -46,7 +46,7 @@ def test_create(): def check(): assert ( instance.query("SHOW CREATE USER u1") - == "CREATE USER u1 SETTINGS PROFILE s1\n" + == "CREATE USER u1 SETTINGS PROFILE `s1`\n" ) assert ( instance.query("SHOW CREATE USER u2") @@ -64,16 +64,16 @@ def test_create(): assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx TO u2\n" assert ( instance.query("SHOW CREATE ROLE rx") - == "CREATE ROLE rx SETTINGS PROFILE s1\n" + == "CREATE ROLE rx SETTINGS PROFILE `s1`\n" ) assert instance.query("SHOW GRANTS FOR rx") == "" assert ( instance.query("SHOW CREATE SETTINGS PROFILE s1") - == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" + == "CREATE SETTINGS PROFILE `s1` SETTINGS max_memory_usage = 123456789 MIN 100000000 MAX 200000000\n" ) assert ( instance.query("SHOW CREATE SETTINGS PROFILE s2") - == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + == "CREATE SETTINGS PROFILE `s2` SETTINGS INHERIT `s1` TO u2\n" ) check() @@ -99,7 +99,7 @@ def test_alter(): def check(): assert ( instance.query("SHOW CREATE USER u1") - == "CREATE USER u1 SETTINGS PROFILE s1\n" + == "CREATE USER u1 SETTINGS PROFILE `s1`\n" ) assert ( instance.query("SHOW CREATE USER u2") @@ -112,7 +112,7 @@ def test_alter(): assert instance.query("SHOW GRANTS FOR u2") == "GRANT rx, ry TO u2\n" assert ( instance.query("SHOW CREATE ROLE rx") - == "CREATE ROLE rx SETTINGS PROFILE s2\n" + == "CREATE ROLE rx SETTINGS PROFILE `s2`\n" ) assert instance.query("SHOW CREATE ROLE ry") == "CREATE ROLE ry\n" assert ( @@ -124,11 +124,11 @@ def test_alter(): ) assert ( instance.query("SHOW CREATE SETTINGS PROFILE s1") - == "CREATE SETTINGS PROFILE s1 SETTINGS max_memory_usage = 987654321 CONST\n" + == "CREATE SETTINGS PROFILE `s1` SETTINGS max_memory_usage = 987654321 CONST\n" ) assert ( instance.query("SHOW CREATE SETTINGS PROFILE s2") - == "CREATE SETTINGS PROFILE s2 SETTINGS INHERIT s1 TO u2\n" + == "CREATE SETTINGS PROFILE `s2` SETTINGS INHERIT `s1` TO u2\n" ) check() @@ -150,7 +150,7 @@ def test_drop(): assert instance.query("SHOW CREATE USER u1") == "CREATE USER u1\n" assert ( instance.query("SHOW CREATE SETTINGS PROFILE s2") - == "CREATE SETTINGS PROFILE s2\n" + == "CREATE SETTINGS PROFILE `s2`\n" ) assert "There is no user `u2`" in instance.query_and_get_error( "SHOW CREATE USER u2" diff --git a/tests/integration/test_disk_over_web_server/test.py b/tests/integration/test_disk_over_web_server/test.py index 4b175d188ef..e84209a03a1 100644 --- 
a/tests/integration/test_disk_over_web_server/test.py +++ b/tests/integration/test_disk_over_web_server/test.py @@ -1,6 +1,6 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION uuids = [] @@ -38,7 +38,7 @@ def cluster(): stay_alive=True, with_installed_binary=True, image="clickhouse/clickhouse-server", - tag="22.6", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, allow_analyzer=False, ) diff --git a/tests/integration/test_distributed_insert_backward_compatibility/test.py b/tests/integration/test_distributed_insert_backward_compatibility/test.py index 7cfea61ffff..839e1008df1 100644 --- a/tests/integration/test_distributed_insert_backward_compatibility/test.py +++ b/tests/integration/test_distributed_insert_backward_compatibility/test.py @@ -1,6 +1,6 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION from helpers.client import QueryRuntimeException cluster = ClickHouseCluster(__file__) @@ -10,8 +10,8 @@ node_shard = cluster.add_instance("node1", main_configs=["configs/remote_servers node_dist = cluster.add_instance( "node2", main_configs=["configs/remote_servers.xml"], - image="yandex/clickhouse-server", - tag="21.6", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index a5b353cc030..88f051b022b 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -26,15 +26,6 @@ def make_instance(name, cfg, *args, **kwargs): # _n1/_n2 contains cluster with different -- should fail n1 = make_instance("n1", "configs/remote_servers_n1.xml") n2 = make_instance("n2", "configs/remote_servers_n2.xml") -backward = make_instance( - "backward", - "configs/remote_servers_backward.xml", - image="clickhouse/clickhouse-server", - # version without DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 - tag="22.6", - with_installed_binary=True, - allow_analyzer=False, -) users = pytest.mark.parametrize( "user,password", @@ -408,25 +399,3 @@ def test_per_user_protocol_settings_secure_cluster(user, password): assert int(get_query_setting_on_shard(n1, id_, "max_memory_usage_for_user")) == int( 1e9 ) - - -@users -def test_user_secure_cluster_with_backward(user, password): - id_ = "with-backward-query-dist_secure-" + user - n1.query( - f"SELECT *, '{id_}' FROM dist_secure_backward", user=user, password=password - ) - assert get_query_user_info(n1, id_) == [user, user] - assert get_query_user_info(backward, id_) == [user, user] - - -@users -def test_user_secure_cluster_from_backward(user, password): - id_ = "from-backward-query-dist_secure-" + user - backward.query(f"SELECT *, '{id_}' FROM dist_secure", user=user, password=password) - assert get_query_user_info(n1, id_) == [user, user] - assert get_query_user_info(backward, id_) == [user, user] - - assert n1.contains_in_log( - "Using deprecated interserver protocol because the client is too old. Consider upgrading all nodes in cluster." 
- ) diff --git a/tests/integration/test_external_http_authenticator/test.py b/tests/integration/test_external_http_authenticator/test.py index ca5225142f8..286d3a334c1 100644 --- a/tests/integration/test_external_http_authenticator/test.py +++ b/tests/integration/test_external_http_authenticator/test.py @@ -59,7 +59,7 @@ def started_cluster(): def test_user_from_config_basic_auth_pass(started_cluster): assert ( instance.query("SHOW CREATE USER good_user") - == "CREATE USER good_user IDENTIFIED WITH http SERVER \\'basic_server\\' SCHEME \\'BASIC\\' SETTINGS PROFILE default\n" + == "CREATE USER good_user IDENTIFIED WITH http SERVER \\'basic_server\\' SCHEME \\'BASIC\\' SETTINGS PROFILE `default`\n" ) assert ( instance.query( diff --git a/tests/integration/test_failed_mutations/test.py b/tests/integration/test_failed_mutations/test.py index 851569b702d..27bdcc3dd24 100644 --- a/tests/integration/test_failed_mutations/test.py +++ b/tests/integration/test_failed_mutations/test.py @@ -56,43 +56,36 @@ def started_cluster(): @pytest.mark.parametrize( - ("node, found_in_log"), + ("node"), [ - ( - node_with_backoff, - True, - ), - ( - node_no_backoff, - False, - ), + (node_with_backoff), ], ) -def test_exponential_backoff_with_merge_tree(started_cluster, node, found_in_log): +def test_exponential_backoff_with_merge_tree(started_cluster, node): prepare_cluster(False) # Executing incorrect mutation. node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" ) - assert node.contains_in_log(POSPONE_MUTATION_LOG) == found_in_log + assert node.wait_for_log_line(POSPONE_MUTATION_LOG) node.rotate_logs() node.query("KILL MUTATION WHERE table='test_mutations'") # Check that after kill new parts mutations are postponing. 
node.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" ) - assert node.contains_in_log(POSPONE_MUTATION_LOG) == found_in_log + assert node.wait_for_log_line(POSPONE_MUTATION_LOG) def test_exponential_backoff_with_replicated_tree(started_cluster): prepare_cluster(True) node_with_backoff.query( - "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" + "ALTER TABLE test_mutations DELETE WHERE x IN (SELECT x FROM notexist_table) SETTINGS allow_nondeterministic_mutations=1" ) assert node_with_backoff.wait_for_log_line(REPLICATED_POSPONE_MUTATION_LOG) diff --git a/tests/integration/test_grant_and_revoke/test.py b/tests/integration/test_grant_and_revoke/test.py index a86a1208f49..46d8d254a0a 100644 --- a/tests/integration/test_grant_and_revoke/test.py +++ b/tests/integration/test_grant_and_revoke/test.py @@ -77,53 +77,53 @@ def test_revoke_requires_grant_option(): instance.query("CREATE USER B") instance.query("GRANT SELECT ON test.table TO B") - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" expected_error = "Not enough privileges" assert expected_error in instance.query_and_get_error( "REVOKE SELECT ON test.table FROM B", user="A" ) - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("GRANT SELECT ON test.table TO A") expected_error = "privileges have been granted, but without grant option" assert expected_error in instance.query_and_get_error( "REVOKE SELECT ON test.table FROM B", user="A" ) - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION") - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("REVOKE SELECT ON test.table FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT SELECT ON test.table TO B") - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("REVOKE SELECT ON test.* FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT SELECT ON test.table TO B") - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("REVOKE ALL ON test.* FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("GRANT SELECT ON test.table TO B") - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("REVOKE ALL ON *.* FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" instance.query("REVOKE GRANT OPTION FOR ALL ON *.* FROM A") instance.query("GRANT SELECT ON test.table TO B") - assert 
instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" expected_error = "privileges have been granted, but without grant option" assert expected_error in instance.query_and_get_error( "REVOKE SELECT ON test.table FROM B", user="A" ) - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("GRANT SELECT ON test.* TO A WITH GRANT OPTION") instance.query("GRANT SELECT ON test.table TO B") - assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.table TO B\n" + assert instance.query("SHOW GRANTS FOR B") == "GRANT SELECT ON test.`table` TO B\n" instance.query("REVOKE SELECT ON test.table FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" @@ -186,10 +186,7 @@ def test_grant_all_on_table(): instance.query("GRANT ALL ON test.table TO B", user="A") assert ( instance.query("SHOW GRANTS FOR B") - == "GRANT SHOW TABLES, SHOW COLUMNS, SHOW DICTIONARIES, SELECT, INSERT, ALTER TABLE, ALTER VIEW, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, " - "DROP TABLE, DROP VIEW, DROP DICTIONARY, UNDROP TABLE, TRUNCATE, OPTIMIZE, BACKUP, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, SHOW ROW POLICIES, " - "SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, " - "SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM FLUSH DISTRIBUTED, dictGet ON test.table TO B\n" + == "GRANT SHOW TABLES, SHOW COLUMNS, SHOW DICTIONARIES, SELECT, INSERT, ALTER TABLE, ALTER VIEW, CREATE TABLE, CREATE VIEW, CREATE DICTIONARY, DROP TABLE, DROP VIEW, DROP DICTIONARY, UNDROP TABLE, TRUNCATE, OPTIMIZE, BACKUP, CREATE ROW POLICY, ALTER ROW POLICY, DROP ROW POLICY, SHOW ROW POLICIES, SYSTEM MERGES, SYSTEM TTL MERGES, SYSTEM FETCHES, SYSTEM MOVES, SYSTEM PULLING REPLICATION LOG, SYSTEM CLEANUP, SYSTEM VIEWS, SYSTEM SENDS, SYSTEM REPLICATION QUEUES, SYSTEM VIRTUAL PARTS UPDATE, SYSTEM DROP REPLICA, SYSTEM SYNC REPLICA, SYSTEM RESTART REPLICA, SYSTEM RESTORE REPLICA, SYSTEM WAIT LOADING PARTS, SYSTEM FLUSH DISTRIBUTED, dictGet ON test.`table` TO B\n" ) instance.query("REVOKE ALL ON test.table FROM B", user="A") assert instance.query("SHOW GRANTS FOR B") == "" @@ -219,7 +216,9 @@ def test_implicit_show_grants(): ) instance.query("GRANT SELECT(x) ON test.table TO A") - assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT(x) ON test.table TO A\n" + assert ( + instance.query("SHOW GRANTS FOR A") == "GRANT SELECT(x) ON test.`table` TO A\n" + ) assert ( instance.query( "select count() FROM system.databases WHERE name='test'", user="A" @@ -242,7 +241,7 @@ def test_implicit_show_grants(): ) instance.query("GRANT SELECT ON test.table TO A") - assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.table TO A\n" + assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.`table` TO A\n" assert ( instance.query( "select count() FROM system.databases WHERE name='test'", user="A" @@ -395,12 +394,12 @@ def test_introspection(): [ "CREATE USER A", "CREATE USER B", - "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default", + "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE `default`", ] ) assert instance.query("SHOW GRANTS FOR A") == 
TSV( - ["GRANT SELECT ON test.table TO A"] + ["GRANT SELECT ON test.`table` TO A"] ) assert instance.query("SHOW GRANTS FOR B") == TSV( ["GRANT CREATE ON *.* TO B WITH GRANT OPTION"] @@ -410,40 +409,40 @@ def test_introspection(): ) assert instance.query("SHOW GRANTS FOR A,B") == TSV( [ - "GRANT SELECT ON test.table TO A", + "GRANT SELECT ON test.`table` TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", ] ) assert instance.query("SHOW GRANTS FOR B,A") == TSV( [ - "GRANT SELECT ON test.table TO A", + "GRANT SELECT ON test.`table` TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", ] ) assert instance.query("SHOW GRANTS FOR ALL") == TSV( [ - "GRANT SELECT ON test.table TO A", + "GRANT SELECT ON test.`table` TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT ALL ON *.* TO default WITH GRANT OPTION", ] ) assert instance.query("SHOW GRANTS", user="A") == TSV( - ["GRANT SELECT ON test.table TO A"] + ["GRANT SELECT ON test.`table` TO A"] ) assert instance.query("SHOW GRANTS", user="B") == TSV( ["GRANT CREATE ON *.* TO B WITH GRANT OPTION"] ) assert instance.query("SHOW GRANTS FOR ALL", user="A") == TSV( - ["GRANT SELECT ON test.table TO A"] + ["GRANT SELECT ON test.`table` TO A"] ) assert instance.query("SHOW GRANTS FOR ALL", user="B") == TSV( ["GRANT CREATE ON *.* TO B WITH GRANT OPTION"] ) assert instance.query("SHOW GRANTS FOR ALL") == TSV( [ - "GRANT SELECT ON test.table TO A", + "GRANT SELECT ON test.`table` TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT ALL ON *.* TO default WITH GRANT OPTION", ] @@ -455,10 +454,10 @@ def test_introspection(): expected_access1 = ( "CREATE USER A\n" "CREATE USER B\n" - "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" + "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE `default`" ) expected_access2 = ( - "GRANT SELECT ON test.table TO A\n" + "GRANT SELECT ON test.`table` TO A\n" "GRANT CREATE ON *.* TO B WITH GRANT OPTION\n" "GRANT ALL ON *.* TO default WITH GRANT OPTION\n" ) @@ -513,10 +512,10 @@ def test_current_database(): instance.query("GRANT SELECT ON table TO A", database="test") assert instance.query("SHOW GRANTS FOR A") == TSV( - ["GRANT SELECT ON test.table TO A"] + ["GRANT SELECT ON test.`table` TO A"] ) assert instance.query("SHOW GRANTS FOR A", database="test") == TSV( - ["GRANT SELECT ON test.table TO A"] + ["GRANT SELECT ON test.`table` TO A"] ) assert instance.query("SELECT * FROM test.table", user="A") == "1\t5\n2\t10\n" @@ -537,12 +536,12 @@ def test_grant_with_replace_option(): instance.query("CREATE USER A") instance.query("GRANT SELECT ON test.table TO A") assert instance.query("SHOW GRANTS FOR A") == TSV( - ["GRANT SELECT ON test.table TO A"] + ["GRANT SELECT ON test.`table` TO A"] ) instance.query("GRANT INSERT ON test.table TO A WITH REPLACE OPTION") assert instance.query("SHOW GRANTS FOR A") == TSV( - ["GRANT INSERT ON test.table TO A"] + ["GRANT INSERT ON test.`table` TO A"] ) instance.query("GRANT NONE ON *.* TO A WITH REPLACE OPTION") @@ -552,41 +551,41 @@ def test_grant_with_replace_option(): instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR A") == TSV([]) assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT SELECT ON test.table TO B"] + ["GRANT SELECT ON test.`table` TO B"] ) expected_error = ( - "it's necessary to have the grant INSERT ON test.table WITH GRANT OPTION" + "it's necessary to have the grant INSERT ON test.`table` WITH GRANT OPTION" ) assert expected_error in instance.query_and_get_error( - 
"GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user="A" + "GRANT INSERT ON test.`table` TO B WITH REPLACE OPTION", user="A" ) assert instance.query("SHOW GRANTS FOR A") == TSV([]) assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT SELECT ON test.table TO B"] + ["GRANT SELECT ON test.`table` TO B"] ) instance.query("GRANT INSERT ON test.table TO A WITH GRANT OPTION") expected_error = ( - "it's necessary to have the grant SELECT ON test.table WITH GRANT OPTION" + "it's necessary to have the grant SELECT ON test.`table` WITH GRANT OPTION" ) assert expected_error in instance.query_and_get_error( - "GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user="A" + "GRANT INSERT ON test.`table` TO B WITH REPLACE OPTION", user="A" ) assert instance.query("SHOW GRANTS FOR A") == TSV( - ["GRANT INSERT ON test.table TO A WITH GRANT OPTION"] + ["GRANT INSERT ON test.`table` TO A WITH GRANT OPTION"] ) assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT SELECT ON test.table TO B"] + ["GRANT SELECT ON test.`table` TO B"] ) - instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION") - instance.query("GRANT INSERT ON test.table TO B WITH REPLACE OPTION", user="A") + instance.query("GRANT SELECT ON test.`table` TO A WITH GRANT OPTION") + instance.query("GRANT INSERT ON test.`table` TO B WITH REPLACE OPTION", user="A") assert instance.query("SHOW GRANTS FOR A") == TSV( - ["GRANT SELECT, INSERT ON test.table TO A WITH GRANT OPTION"] + ["GRANT SELECT, INSERT ON test.`table` TO A WITH GRANT OPTION"] ) assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT INSERT ON test.table TO B"] + ["GRANT INSERT ON test.`table` TO B"] ) @@ -632,7 +631,7 @@ def test_grant_current_grants_with_partial_revoke(): "GRANT CREATE TABLE ON *.* TO A", "GRANT SELECT ON *.* TO A WITH GRANT OPTION", "REVOKE SELECT, CREATE TABLE ON test.* FROM A", - "GRANT SELECT, CREATE TABLE ON test.table TO A WITH GRANT OPTION", + "GRANT SELECT, CREATE TABLE ON test.`table` TO A WITH GRANT OPTION", "GRANT SELECT ON test.table2 TO A", ] ) @@ -643,7 +642,7 @@ def test_grant_current_grants_with_partial_revoke(): [ "GRANT SELECT ON *.* TO B", "REVOKE SELECT ON test.* FROM B", - "GRANT SELECT, CREATE TABLE ON test.table TO B", + "GRANT SELECT, CREATE TABLE ON test.`table` TO B", ] ) @@ -654,7 +653,7 @@ def test_grant_current_grants_with_partial_revoke(): [ "GRANT SELECT ON *.* TO B WITH GRANT OPTION", "REVOKE SELECT ON test.* FROM B", - "GRANT SELECT, CREATE TABLE ON test.table TO B WITH GRANT OPTION", + "GRANT SELECT, CREATE TABLE ON test.`table` TO B WITH GRANT OPTION", ] ) @@ -665,7 +664,7 @@ def test_grant_current_grants_with_partial_revoke(): assert instance.query("SHOW GRANTS FOR C") == TSV( [ "GRANT SELECT ON *.* TO C", - "GRANT CREATE TABLE ON test.table TO C", + "GRANT CREATE TABLE ON test.`table` TO C", ] ) @@ -674,7 +673,7 @@ def test_grant_current_grants_with_partial_revoke(): instance.query("GRANT CURRENT GRANTS ON test.* TO B WITH GRANT OPTION", user="A") assert instance.query("SHOW GRANTS FOR B") == TSV( [ - "GRANT SELECT, CREATE TABLE ON test.table TO B WITH GRANT OPTION", + "GRANT SELECT, CREATE TABLE ON test.`table` TO B WITH GRANT OPTION", ] ) @@ -693,7 +692,7 @@ def test_current_grants_override(): instance.query("CREATE USER B") instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT SELECT ON test.table TO B"] + ["GRANT SELECT ON test.`table` TO B"] ) instance.query("GRANT CURRENT GRANTS ON *.* TO B", user="A") @@ -701,7 +700,7 @@ def 
test_current_grants_override(): [ "GRANT SELECT ON *.* TO B", "REVOKE SELECT ON test.* FROM B", - "GRANT SELECT ON test.table TO B", + "GRANT SELECT ON test.`table` TO B", ] ) @@ -709,7 +708,7 @@ def test_current_grants_override(): instance.query("CREATE USER B") instance.query("GRANT SELECT ON test.table TO B") assert instance.query("SHOW GRANTS FOR B") == TSV( - ["GRANT SELECT ON test.table TO B"] + ["GRANT SELECT ON test.`table` TO B"] ) instance.query("GRANT CURRENT GRANTS ON *.* TO B WITH REPLACE OPTION", user="A") diff --git a/tests/integration/test_groupBitmapAnd_on_distributed/test.py b/tests/integration/test_groupBitmapAnd_on_distributed/test.py index 5d3dda8ecf2..5119a4e0e36 100644 --- a/tests/integration/test_groupBitmapAnd_on_distributed/test.py +++ b/tests/integration/test_groupBitmapAnd_on_distributed/test.py @@ -16,20 +16,6 @@ node2 = cluster.add_instance( with_zookeeper=True, allow_analyzer=False, ) -node3 = cluster.add_instance( - "node3", - main_configs=["configs/clusters.xml"], - with_zookeeper=True, - allow_analyzer=False, -) -node4 = cluster.add_instance( - "node4", - main_configs=["configs/clusters.xml"], - image="yandex/clickhouse-server", - tag="21.6", - with_zookeeper=True, - allow_analyzer=False, -) def insert_data(node, table_name): @@ -85,62 +71,3 @@ def test_groupBitmapAnd_on_distributed_table(start_cluster): "select groupBitmapAnd(z) FROM {};".format(distributed_table_name) ).strip() assert result == expected - - -def test_groupBitmapAnd_function_versioning(start_cluster): - local_table_name = "bitmap_column_expr_versioning_test" - distributed_table_name = "bitmap_column_expr_versioning_test_dst" - cluster_name = "test_version_cluster" - - for node in (node3, node4): - node.query( - """CREATE TABLE {} - ( - z AggregateFunction(groupBitmap, UInt32) - ) - ENGINE = MergeTree() - ORDER BY tuple()""".format( - local_table_name - ) - ) - - node.query( - """CREATE TABLE {} - ( - z AggregateFunction(groupBitmap, UInt32) - ) - ENGINE = Distributed('{}', 'default', '{}')""".format( - distributed_table_name, cluster_name, local_table_name - ) - ) - - node.query( - """INSERT INTO {} VALUES - (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));""".format( - local_table_name - ) - ) - - expected = "10" - new_version_distributed_result = node3.query( - "select groupBitmapAnd(z) FROM {};".format(distributed_table_name) - ).strip() - old_version_distributed_result = node4.query( - "select groupBitmapAnd(z) FROM {};".format(distributed_table_name) - ).strip() - assert new_version_distributed_result == expected - assert old_version_distributed_result == expected - - result_from_old_to_new_version = node3.query( - "select groupBitmapAnd(z) FROM remote('node4', default.{})".format( - local_table_name - ) - ).strip() - assert result_from_old_to_new_version == expected - - result_from_new_to_old_version = node4.query( - "select groupBitmapAnd(z) FROM remote('node3', default.{})".format( - local_table_name - ) - ).strip() - assert result_from_new_to_old_version == expected diff --git a/tests/integration/test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py b/tests/integration/test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py index 115e6009801..237acf6b9e0 100644 --- a/tests/integration/test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py +++ b/tests/integration/test_groupBitmapAnd_on_distributed/test_groupBitmapAndState_on_distributed_table.py @@ -16,21 +16,6 @@ node2 = 
cluster.add_instance( with_zookeeper=True, allow_analyzer=False, ) -node3 = cluster.add_instance( - "node3", - main_configs=["configs/clusters.xml"], - with_zookeeper=True, - allow_analyzer=False, -) -node4 = cluster.add_instance( - "node4", - main_configs=["configs/clusters.xml"], - image="yandex/clickhouse-server", - tag="21.6", - with_installed_binary=True, - with_zookeeper=True, - allow_analyzer=False, -) @pytest.fixture(scope="module") @@ -91,60 +76,3 @@ def test_groupBitmapAndState_on_distributed_table(start_cluster): ) ).strip() assert result == expected - - -def test_groupBitmapAndState_on_different_version_nodes(start_cluster): - local_table_name = "test_group_bitmap_state_versioned" - distributed_table_name = "test_group_bitmap_state_versioned_dst" - cluster_name = "test_version_cluster" - - for node in (node3, node4): - node.query( - """CREATE TABLE {} - ( - z AggregateFunction(groupBitmap, UInt32) - ) - ENGINE = MergeTree() - ORDER BY tuple()""".format( - local_table_name - ) - ) - - node.query( - """CREATE TABLE {} - ( - z AggregateFunction(groupBitmap, UInt32) - ) - ENGINE = Distributed('{}', 'default', '{}')""".format( - distributed_table_name, cluster_name, local_table_name - ) - ) - - node3.query( - """INSERT INTO {} VALUES - (bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32))));""".format( - local_table_name - ) - ) - - node4.query( - """INSERT INTO {} VALUES - (bitmapBuild(cast([0] as Array(UInt32))));""".format( - local_table_name - ) - ) - - # We will get wrong result when query distribute table if the cluster contains old version server - result = node3.query( - "select bitmapCardinality(groupBitmapAndState(z)) FROM {};".format( - distributed_table_name - ) - ).strip() - assert result == "10" - - result = node4.query( - "select bitmapCardinality(groupBitmapAndState(z)) FROM {};".format( - distributed_table_name - ) - ).strip() - assert result == "1" diff --git a/tests/integration/test_keeper_snapshots/test.py b/tests/integration/test_keeper_snapshots/test.py index 2e126ed1152..f6f746c892e 100644 --- a/tests/integration/test_keeper_snapshots/test.py +++ b/tests/integration/test_keeper_snapshots/test.py @@ -7,8 +7,7 @@ import helpers.keeper_utils as keeper_utils import random import string import os -import time -from kazoo.client import KazooClient, KazooState +from kazoo.client import KazooClient cluster = ClickHouseCluster(__file__) @@ -161,3 +160,54 @@ def test_ephemeral_after_restart(started_cluster): node_zk2.close() except: pass + + +def test_invalid_snapshot(started_cluster): + keeper_utils.wait_until_connected(started_cluster, node) + node_zk = None + try: + node_zk = get_connection_zk("node") + node_zk.create("/test_invalid_snapshot", b"somevalue") + keeper_utils.send_4lw_cmd(started_cluster, node, "csnp") + node.stop_clickhouse() + snapshots = ( + node.exec_in_container(["ls", "/var/lib/clickhouse/coordination/snapshots"]) + .strip() + .split("\n") + ) + + def snapshot_sort_key(snapshot_name): + snapshot_prefix_size = len("snapshot_") + last_log_idx = snapshot_name.split(".")[0][snapshot_prefix_size:] + return int(last_log_idx) + + snapshots.sort(key=snapshot_sort_key) + last_snapshot = snapshots[-1] + node.exec_in_container( + [ + "truncate", + "-s", + "0", + f"/var/lib/clickhouse/coordination/snapshots/{last_snapshot}", + ] + ) + node.start_clickhouse(expected_to_fail=True) + assert node.contains_in_log( + "Aborting because of failure to load from latest snapshot with index" + ) + + node.stop_clickhouse() + node.exec_in_container( + [ + "rm", + 
f"/var/lib/clickhouse/coordination/snapshots/{last_snapshot}", + ] + ) + node.start_clickhouse() + finally: + try: + if node_zk is not None: + node_zk.stop() + node_zk.close() + except: + pass diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py index 80785c6c01f..251da7e4e09 100644 --- a/tests/integration/test_mask_sensitive_info/test.py +++ b/tests/integration/test_mask_sensitive_info/test.py @@ -249,10 +249,10 @@ def test_create_table(): "CREATE TABLE table5 (x int) ENGINE = S3('http://minio1:9001/root/data/test3.csv.gz', 'CSV', 'gzip')", "CREATE TABLE table6 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test4.csv', 'minio', '[HIDDEN]', 'CSV')", "CREATE TABLE table7 (`x` int) ENGINE = S3('http://minio1:9001/root/data/test5.csv.gz', 'minio', '[HIDDEN]', 'CSV', 'gzip')", - "CREATE TABLE table8 (`x` int) ENGINE = MySQL(named_collection_1, host = 'mysql80', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')", - "CREATE TABLE table9 (`x` int) ENGINE = MySQL(named_collection_2, database = 'mysql_db', host = 'mysql80', port = 3306, password = '[HIDDEN]', table = 'mysql_table', user = 'mysql_user')", + "CREATE TABLE table8 (`x` int) ENGINE = MySQL(named_collection_1, host = 'mysql80', port = 3306, database = 'mysql_db', `table` = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')", + "CREATE TABLE table9 (`x` int) ENGINE = MySQL(named_collection_2, database = 'mysql_db', host = 'mysql80', port = 3306, password = '[HIDDEN]', `table` = 'mysql_table', user = 'mysql_user')", "CREATE TABLE table10 (x int) ENGINE = MySQL(named_collection_3, database = 'mysql_db', host = 'mysql80', port = 3306, table = 'mysql_table')", - "CREATE TABLE table11 (`x` int) ENGINE = PostgreSQL(named_collection_4, host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user', password = '[HIDDEN]')", + "CREATE TABLE table11 (`x` int) ENGINE = PostgreSQL(named_collection_4, host = 'postgres1', port = 5432, database = 'postgres_db', `table` = 'postgres_table', user = 'postgres_user', password = '[HIDDEN]')", "CREATE TABLE table12 (`x` int) ENGINE = MongoDB(named_collection_5, host = 'mongo1', port = 5432, db = 'mongo_db', collection = 'mongo_col', user = 'mongo_user', password = '[HIDDEN]'", "CREATE TABLE table13 (`x` int) ENGINE = S3(named_collection_6, url = 'http://minio1:9001/root/data/test8.csv', access_key_id = 'minio', secret_access_key = '[HIDDEN]', format = 'CSV')", "CREATE TABLE table14 (x int) ENGINE = S3('http://minio1:9001/root/data/test9.csv.gz', 'NOSIGN', 'CSV', 'gzip')", @@ -429,11 +429,11 @@ def test_table_functions(): "CREATE TABLE tablefunc22 (`x` int) AS remote('127.{2..11}', numbers(10), 'remote_user', '[HIDDEN]', rand())", "CREATE TABLE tablefunc23 (`x` int) AS remoteSecure('127.{2..11}', 'default', 'remote_table', 'remote_user', '[HIDDEN]')", "CREATE TABLE tablefunc24 (x int) AS remoteSecure('127.{2..11}', 'default', 'remote_table', 'remote_user', rand())", - "CREATE TABLE tablefunc25 (`x` int) AS mysql(named_collection_1, host = 'mysql80', port = 3306, database = 'mysql_db', table = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')", - "CREATE TABLE tablefunc26 (`x` int) AS postgresql(named_collection_2, password = '[HIDDEN]', host = 'postgres1', port = 5432, database = 'postgres_db', table = 'postgres_table', user = 'postgres_user')", + "CREATE TABLE tablefunc25 (`x` int) AS mysql(named_collection_1, host = 'mysql80', 
port = 3306, database = 'mysql_db', `table` = 'mysql_table', user = 'mysql_user', password = '[HIDDEN]')", + "CREATE TABLE tablefunc26 (`x` int) AS postgresql(named_collection_2, password = '[HIDDEN]', host = 'postgres1', port = 5432, database = 'postgres_db', `table` = 'postgres_table', user = 'postgres_user')", "CREATE TABLE tablefunc27 (`x` int) AS s3(named_collection_2, url = 'http://minio1:9001/root/data/test4.csv', access_key_id = 'minio', secret_access_key = '[HIDDEN]')", - "CREATE TABLE tablefunc28 (`x` int) AS remote(named_collection_6, addresses_expr = '127.{2..11}', database = 'default', table = 'remote_table', user = 'remote_user', password = '[HIDDEN]', sharding_key = rand())", - "CREATE TABLE tablefunc29 (`x` int) AS remoteSecure(named_collection_6, addresses_expr = '127.{2..11}', database = 'default', table = 'remote_table', user = 'remote_user', password = '[HIDDEN]')", + "CREATE TABLE tablefunc28 (`x` int) AS remote(named_collection_6, addresses_expr = '127.{2..11}', database = 'default', `table` = 'remote_table', user = 'remote_user', password = '[HIDDEN]', sharding_key = rand())", + "CREATE TABLE tablefunc29 (`x` int) AS remoteSecure(named_collection_6, addresses_expr = '127.{2..11}', database = 'default', `table` = 'remote_table', user = 'remote_user', password = '[HIDDEN]')", "CREATE TABLE tablefunc30 (x int) AS s3('http://minio1:9001/root/data/test9.csv.gz', 'NOSIGN', 'CSV')", "CREATE TABLE tablefunc31 (`x` int) AS s3('http://minio1:9001/root/data/test10.csv.gz', 'minio', '[HIDDEN]')", "CREATE TABLE tablefunc32 (`x` int) AS deltaLake('http://minio1:9001/root/data/test11.csv.gz', 'minio', '[HIDDEN]')", diff --git a/tests/integration/test_merge_tree_azure_blob_storage/test.py b/tests/integration/test_merge_tree_azure_blob_storage/test.py index f3e113c95d3..55deb87a97e 100644 --- a/tests/integration/test_merge_tree_azure_blob_storage/test.py +++ b/tests/integration/test_merge_tree_azure_blob_storage/test.py @@ -613,7 +613,8 @@ def test_endpoint(cluster): container_client = cluster.blob_service_client.get_container_client(container_name) container_client.create_container() - node.query( + azure_query( + node, f""" DROP TABLE IF EXISTS test SYNC; @@ -622,13 +623,128 @@ def test_endpoint(cluster): SETTINGS disk = disk( type = azure_blob_storage, endpoint = 'http://azurite1:{port}/{account_name}/{container_name}/{data_prefix}', + endpoint_contains_account_name = 'true', account_name = 'devstoreaccount1', account_key = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', container_already_exists = 1, skip_access_check = 0); INSERT INTO test SELECT number FROM numbers(10); - """ + """, ) assert 10 == int(node.query("SELECT count() FROM test")) + + +def test_endpoint_new_container(cluster): + node = cluster.instances[NODE_NAME] + account_name = "devstoreaccount1" + container_name = "cont3" + data_prefix = "data_prefix" + port = cluster.azurite_port + + azure_query( + node, + f""" + DROP TABLE IF EXISTS test SYNC; + + CREATE TABLE test (a Int32) + ENGINE = MergeTree() ORDER BY tuple() + SETTINGS disk = disk( + type = azure_blob_storage, + endpoint = 'http://azurite1:{port}/{account_name}/{container_name}/{data_prefix}', + endpoint_contains_account_name = 'true', + account_name = 'devstoreaccount1', + account_key = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + skip_access_check = 0); + + INSERT INTO test SELECT number FROM numbers(10); + """, + ) + + assert 10 == int(node.query("SELECT count() 
FROM test")) + + +def test_endpoint_without_prefix(cluster): + node = cluster.instances[NODE_NAME] + account_name = "devstoreaccount1" + container_name = "cont4" + port = cluster.azurite_port + + azure_query( + node, + f""" + DROP TABLE IF EXISTS test SYNC; + + CREATE TABLE test (a Int32) + ENGINE = MergeTree() ORDER BY tuple() + SETTINGS disk = disk( + type = azure_blob_storage, + endpoint = 'http://azurite1:{port}/{account_name}/{container_name}', + endpoint_contains_account_name = 'true', + account_name = 'devstoreaccount1', + account_key = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + skip_access_check = 0); + + INSERT INTO test SELECT number FROM numbers(10); + """, + ) + + assert 10 == int(node.query("SELECT count() FROM test")) + + +def test_endpoint_error_check(cluster): + node = cluster.instances[NODE_NAME] + account_name = "devstoreaccount1" + port = cluster.azurite_port + + query = f""" + DROP TABLE IF EXISTS test SYNC; + + CREATE TABLE test (a Int32) + ENGINE = MergeTree() ORDER BY tuple() + SETTINGS disk = disk( + type = azure_blob_storage, + endpoint = 'http://azurite1:{port}/{account_name}', + endpoint_contains_account_name = 'true', + account_name = 'devstoreaccount1', + account_key = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + skip_access_check = 0); + """ + + expected_err_msg = "Expected container_name in endpoint" + assert expected_err_msg in azure_query(node, query, expect_error="true") + + query = f""" + DROP TABLE IF EXISTS test SYNC; + + CREATE TABLE test (a Int32) + ENGINE = MergeTree() ORDER BY tuple() + SETTINGS disk = disk( + type = azure_blob_storage, + endpoint = 'http://azurite1:{port}', + endpoint_contains_account_name = 'true', + account_name = 'devstoreaccount1', + account_key = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + skip_access_check = 0); + """ + + expected_err_msg = "Expected account_name in endpoint" + assert expected_err_msg in azure_query(node, query, expect_error="true") + + query = f""" + DROP TABLE IF EXISTS test SYNC; + + CREATE TABLE test (a Int32) + ENGINE = MergeTree() ORDER BY tuple() + SETTINGS disk = disk( + type = azure_blob_storage, + endpoint = 'http://azurite1:{port}', + endpoint_contains_account_name = 'false', + account_name = 'devstoreaccount1', + account_key = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + skip_access_check = 0); + """ + + expected_err_msg = "Expected container_name in endpoint" + assert expected_err_msg in azure_query(node, query, expect_error="true") diff --git a/tests/integration/test_modify_engine_on_restart/__init__.py b/tests/integration/test_modify_engine_on_restart/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_modify_engine_on_restart/common.py b/tests/integration/test_modify_engine_on_restart/common.py new file mode 100644 index 00000000000..18b6c2dc1d7 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/common.py @@ -0,0 +1,29 @@ +from helpers.cluster import ClickHouseCluster + + +def get_table_path(node, table, database): + return node.query( + sql=f"SELECT data_paths FROM system.tables WHERE table = '{table}' and database = '{database}'" + ).strip("'[]\n") + + +def check_flags_deleted(node, database_name, tables): + for table in tables: + assert "convert_to_replicated" not in node.exec_in_container( + [ + "bash", + "-c", + f"ls 
{get_table_path(node, table, database_name)}", + ] + ) + + +def set_convert_flags(node, database_name, tables): + for table in tables: + node.exec_in_container( + [ + "bash", + "-c", + f"touch {get_table_path(node, table, database_name)}convert_to_replicated", + ] + ) diff --git a/tests/integration/test_modify_engine_on_restart/configs/config.d/clusters.xml b/tests/integration/test_modify_engine_on_restart/configs/config.d/clusters.xml new file mode 100644 index 00000000000..d3a9d4fb8f0 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/configs/config.d/clusters.xml @@ -0,0 +1,22 @@ + + + + + true + + ch1 + 9000 + + + ch2 + 9000 + + + + + + + 01 + + + \ No newline at end of file diff --git a/tests/integration/test_modify_engine_on_restart/configs/config.d/clusters_unusual.xml b/tests/integration/test_modify_engine_on_restart/configs/config.d/clusters_unusual.xml new file mode 100644 index 00000000000..812291335b8 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/configs/config.d/clusters_unusual.xml @@ -0,0 +1,20 @@ + + + + + true + + ch1 + 9000 + + + + + + + 01 + + +/lol/kek/'/{uuid} + + diff --git a/tests/integration/test_modify_engine_on_restart/configs/config.d/distributed_ddl.xml b/tests/integration/test_modify_engine_on_restart/configs/config.d/distributed_ddl.xml new file mode 100644 index 00000000000..45555338de5 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/configs/config.d/distributed_ddl.xml @@ -0,0 +1,5 @@ + + + /clickhouse/task_queue/ddl + + \ No newline at end of file diff --git a/tests/integration/test_modify_engine_on_restart/test.py b/tests/integration/test_modify_engine_on_restart/test.py new file mode 100644 index 00000000000..289b25dd89e --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/test.py @@ -0,0 +1,161 @@ +import pytest +from test_modify_engine_on_restart.common import check_flags_deleted, set_convert_flags +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) +ch2 = cluster.add_instance( + "ch2", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, + macros={"replica": "node2"}, +) + +database_name = "modify_engine" + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(node, query): + return node.query(database=database_name, sql=query) + + +def create_tables(): + # MergeTree table that will be converted + q( + ch1, + "CREATE TABLE rmt ( A Int64, D Date, S String ) ENGINE MergeTree() PARTITION BY toYYYYMM(D) ORDER BY A;", + ) + + q(ch1, "INSERT INTO rmt SELECT number, today(), '' FROM numbers(1e6);") + q(ch1, "INSERT INTO rmt SELECT number, today()-60, '' FROM numbers(1e5);") + + # ReplacingMergeTree table that will be converted to check unusual engine kinds + q( + ch1, + "CREATE TABLE replacing ( A Int64, D Date, S String ) ENGINE ReplacingMergeTree() PARTITION BY toYYYYMM(D) ORDER BY A;", + ) + + q(ch1, "INSERT INTO replacing SELECT number, today(), '' FROM numbers(1e6);") + q(ch1, "INSERT INTO replacing SELECT number, today()-60, '' FROM numbers(1e5);") + + # MergeTree table that will not be converted + q( + ch1, + "CREATE TABLE mt ( A Int64, D Date, S String ) ENGINE MergeTree() 
PARTITION BY toYYYYMM(D) ORDER BY A;", + ) + + # Not MergeTree table + q(ch1, "CREATE TABLE log ( A Int64, D Date, S String ) ENGINE Log;") + + +def check_tables(converted): + engine_prefix = "" + if converted: + engine_prefix = "Replicated" + + # Check tables exists + assert ( + q( + ch1, + "SHOW TABLES", + ).strip() + == "log\nmt\nreplacing\nrmt" + ) + + # Check engines + assert ( + q( + ch1, + f"SELECT name, engine FROM system.tables WHERE database = '{database_name}' AND (name != 'log' AND name != 'mt')", + ).strip() + == f"replacing\t{engine_prefix}ReplacingMergeTree\nrmt\t{engine_prefix}MergeTree" + ) + assert ( + q( + ch1, + f"SELECT name, engine FROM system.tables WHERE database = '{database_name}' AND (name = 'log' OR name = 'mt')", + ).strip() + == "log\tLog\nmt\tMergeTree" + ) + + # Check values + for table in ["rmt", "replacing"]: + assert ( + q( + ch1, + f"SELECT count() FROM {table}", + ).strip() + == "1100000" + ) + + +def check_replica_added(): + # Add replica to check if zookeeper path is correct and consistent with table uuid + + uuid = q( + ch1, + f"SELECT uuid FROM system.tables WHERE table = 'rmt' AND database = '{database_name}'", + ).strip() + + q( + ch2, + f"CREATE TABLE rmt ( A Int64, D Date, S String ) ENGINE ReplicatedMergeTree('/clickhouse/tables/{uuid}/{{shard}}', '{{replica}}') PARTITION BY toYYYYMM(D) ORDER BY A", + ) + + ch2.query(database=database_name, sql="SYSTEM SYNC REPLICA rmt", timeout=20) + + # Check values + assert ( + q( + ch2, + f"SELECT count() FROM rmt", + ).strip() + == "1100000" + ) + + +def test_modify_engine_on_restart(started_cluster): + ch1.query("CREATE DATABASE " + database_name + " ON CLUSTER cluster") + + create_tables() + + check_tables(False) + + ch1.restart_clickhouse() + + check_tables(False) + + set_convert_flags(ch1, database_name, ["rmt", "replacing", "log"]) + + ch1.restart_clickhouse() + + check_flags_deleted(ch1, database_name, ["rmt", "replacing"]) + + check_tables(True) + + check_replica_added() + + ch1.restart_clickhouse() + + check_tables(True) diff --git a/tests/integration/test_modify_engine_on_restart/test_args.py b/tests/integration/test_modify_engine_on_restart/test_args.py new file mode 100644 index 00000000000..f83d540bfb9 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/test_args.py @@ -0,0 +1,92 @@ +import pytest +from test_modify_engine_on_restart.common import check_flags_deleted, set_convert_flags +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) + +database_name = "modify_engine_args" + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(node, query): + return node.query(database=database_name, sql=query) + + +def create_tables(): + # Check one argument + q( + ch1, + "CREATE TABLE replacing_ver ( A Int64, D Date, S String ) ENGINE = ReplacingMergeTree(D) PARTITION BY toYYYYMM(D) ORDER BY A", + ) + + # Check more than one argument + q( + ch1, + "CREATE TABLE collapsing_ver ( ID UInt64, Sign Int8, Version UInt8 ) ENGINE = VersionedCollapsingMergeTree(Sign, Version) ORDER BY ID", + ) + + +def check_tables(): + # Check tables exists + assert ( + q( + ch1, + "SHOW TABLES", + ).strip() + == "collapsing_ver\nreplacing_ver" + ) + + # Check engines + assert ( + 
q( + ch1, + f"SELECT engine_full FROM system.tables WHERE database = '{database_name}' and name = 'replacing_ver'", + ) + .strip() + .startswith( + "ReplicatedReplacingMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\', D)" + ) + ) + assert ( + q( + ch1, + f"SELECT engine_full FROM system.tables WHERE database = '{database_name}' and name = 'collapsing_ver'", + ) + .strip() + .startswith( + "ReplicatedVersionedCollapsingMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\', Sign, Version)" + ) + ) + + +def test_modify_engine_on_restart_with_arguments(started_cluster): + ch1.query("CREATE DATABASE " + database_name) + + create_tables() + + set_convert_flags(ch1, database_name, ["replacing_ver", "collapsing_ver"]) + + ch1.restart_clickhouse() + + check_flags_deleted(ch1, database_name, ["replacing_ver", "collapsing_ver"]) + + check_tables() diff --git a/tests/integration/test_modify_engine_on_restart/test_mv.py b/tests/integration/test_modify_engine_on_restart/test_mv.py new file mode 100644 index 00000000000..30cb2ddc5e5 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/test_mv.py @@ -0,0 +1,132 @@ +import pytest +from test_modify_engine_on_restart.common import check_flags_deleted, set_convert_flags +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) + +database_name = "modify_engine_with_mv" + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(node, query): + return node.query(database=database_name, sql=query) + + +def create_tables(): + q( + ch1, + "CREATE TABLE hourly_data(`domain_name` String, `event_time` DateTime, `count_views` UInt64) ENGINE = MergeTree ORDER BY (domain_name, event_time)", + ) + + q( + ch1, + "CREATE TABLE monthly_aggregated_data\ + (`domain_name` String, `month` Date, `sumCountViews` AggregateFunction(sum, UInt64))\ + ENGINE = AggregatingMergeTree ORDER BY (domain_name, month)", + ) + + q( + ch1, + "CREATE MATERIALIZED VIEW monthly_aggregated_data_mv\ + TO monthly_aggregated_data\ + AS\ + SELECT\ + toDate(toStartOfMonth(event_time)) AS month,\ + domain_name,\ + sumState(count_views) AS sumCountViews\ + FROM hourly_data\ + GROUP BY\ + domain_name,\ + month", + ) + + q( + ch1, + "INSERT INTO hourly_data (domain_name, event_time, count_views)\ + VALUES ('clickhouse.com', '2019-01-01 10:00:00', 1),\ + ('clickhouse.com', '2019-02-02 00:00:00', 2),\ + ('clickhouse.com', '2019-02-01 00:00:00', 3),\ + ('clickhouse.com', '2020-01-01 00:00:00', 6)", + ) + + +def check_tables(converted): + engine_prefix = "" + if converted: + engine_prefix = "Replicated" + + # Check engines + assert ( + q( + ch1, + f"SELECT name, engine FROM system.tables WHERE database = '{database_name}'", + ).strip() + == f"hourly_data\t{engine_prefix}MergeTree\nmonthly_aggregated_data\t{engine_prefix}AggregatingMergeTree\nmonthly_aggregated_data_mv\tMaterializedView" + ) + + # Check values + assert ( + q( + ch1, + "SELECT sumMerge(sumCountViews) as sumCountViews\ + FROM monthly_aggregated_data_mv", + ).strip() + == "12" + ) + assert q(ch1, "SELECT count() FROM hourly_data").strip() == "4" + + if converted: + # Insert new values to check if new dependencies are set correctly + q( + ch1, + "INSERT INTO hourly_data 
(domain_name, event_time, count_views)\ + VALUES ('clickhouse.com', '2019-01-01 10:00:00', 1),\ + ('clickhouse.com', '2019-02-02 00:00:00', 2),\ + ('clickhouse.com', '2019-02-01 00:00:00', 3),\ + ('clickhouse.com', '2020-01-01 00:00:00', 6)", + ) + + assert ( + q( + ch1, + "SELECT sumMerge(sumCountViews) as sumCountViews\ + FROM monthly_aggregated_data_mv", + ).strip() + == "24" + ) + assert q(ch1, "SELECT count() FROM hourly_data").strip() == "8" + + +def test_modify_engine_on_restart_with_materialized_view(started_cluster): + ch1.query(f"CREATE DATABASE {database_name}") + + create_tables() + + check_tables(False) + + set_convert_flags(ch1, database_name, ["hourly_data", "monthly_aggregated_data"]) + + ch1.restart_clickhouse() + + check_flags_deleted(ch1, database_name, ["hourly_data", "monthly_aggregated_data"]) + + check_tables(True) diff --git a/tests/integration/test_modify_engine_on_restart/test_ordinary.py b/tests/integration/test_modify_engine_on_restart/test_ordinary.py new file mode 100644 index 00000000000..fd86417a216 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/test_ordinary.py @@ -0,0 +1,94 @@ +import pytest +from test_modify_engine_on_restart.common import check_flags_deleted, set_convert_flags +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters.xml", + "configs/config.d/distributed_ddl.xml", + ], + with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) + +database_name = "modify_engine_on_ordinary" + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(node, query): + return node.query(database=database_name, sql=query) + + +def create_tables(): + q( + ch1, + "CREATE TABLE mt ( A Int64, D Date, S String ) ENGINE MergeTree() PARTITION BY toYYYYMM(D) ORDER BY A;", + ) + + +def check_tables(): + # Check tables exists + assert ( + q( + ch1, + "SHOW TABLES", + ).strip() + == "mt" + ) + + # Check engines + assert ( + q( + ch1, + f"SELECT name, engine FROM system.tables WHERE database = '{database_name}'", + ).strip() + == f"mt\tMergeTree" + ) + + +def remove_convert_flags(): + ch1.exec_in_container( + [ + "bash", + "-c", + f"rm /var/lib/clickhouse/data/{database_name}/mt/convert_to_replicated", + ] + ) + + +def test_modify_engine_on_restart_ordinary_database(started_cluster): + ch1.query( + sql=f"CREATE DATABASE {database_name} ENGINE = Ordinary", + settings={"allow_deprecated_database_ordinary": 1}, + ) + + create_tables() + + check_tables() + + set_convert_flags(ch1, database_name, ["mt"]) + + cannot_start = False + try: + ch1.restart_clickhouse() + except: + cannot_start = True + assert cannot_start + + remove_convert_flags() + + ch1.restart_clickhouse() + + check_tables() diff --git a/tests/integration/test_modify_engine_on_restart/test_unusual_path.py b/tests/integration/test_modify_engine_on_restart/test_unusual_path.py new file mode 100644 index 00000000000..e82f48e8b34 --- /dev/null +++ b/tests/integration/test_modify_engine_on_restart/test_unusual_path.py @@ -0,0 +1,92 @@ +import pytest +from test_modify_engine_on_restart.common import check_flags_deleted, set_convert_flags +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + main_configs=[ + "configs/config.d/clusters_unusual.xml", + "configs/config.d/distributed_ddl.xml", + ], + 
with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) + +database_name = "modify_engine_unusual_path" + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(node, query): + return node.query(database=database_name, sql=query) + + +def create_tables(): + # Check one argument + q( + ch1, + "CREATE TABLE replacing_ver ( A Int64, D Date, S String ) ENGINE = ReplacingMergeTree(D) PARTITION BY toYYYYMM(D) ORDER BY A", + ) + + # Check more than one argument + q( + ch1, + "CREATE TABLE collapsing_ver ( ID UInt64, Sign Int8, Version UInt8 ) ENGINE = VersionedCollapsingMergeTree(Sign, Version) ORDER BY ID", + ) + + +def check_tables(): + # Check tables exists + assert ( + q( + ch1, + "SHOW TABLES", + ).strip() + == "collapsing_ver\nreplacing_ver" + ) + + # Check engines + assert ( + q( + ch1, + f"SELECT engine_full FROM system.tables WHERE database = '{database_name}' and name = 'replacing_ver'", + ) + .strip() + .startswith( + "ReplicatedReplacingMergeTree(\\'/lol/kek/\\\\\\'/{uuid}\\', \\'{replica}\\', D)" + ) + ) + assert ( + q( + ch1, + f"SELECT engine_full FROM system.tables WHERE database = '{database_name}' and name = 'collapsing_ver'", + ) + .strip() + .startswith( + "ReplicatedVersionedCollapsingMergeTree(\\'/lol/kek/\\\\\\'/{uuid}\\', \\'{replica}\\', Sign, Version)" + ) + ) + + +def test_modify_engine_on_restart_with_unusual_path(started_cluster): + ch1.query("CREATE DATABASE " + database_name) + + create_tables() + + set_convert_flags(ch1, database_name, ["replacing_ver", "collapsing_ver"]) + + ch1.restart_clickhouse() + + check_flags_deleted(ch1, database_name, ["replacing_ver", "collapsing_ver"]) + + check_tables() diff --git a/tests/integration/test_mysql57_database_engine/test.py b/tests/integration/test_mysql57_database_engine/test.py index a5a13a88b1b..26db6637bc6 100644 --- a/tests/integration/test_mysql57_database_engine/test.py +++ b/tests/integration/test_mysql57_database_engine/test.py @@ -964,7 +964,7 @@ def test_predefined_connection_configuration(started_cluster): result = clickhouse_node.query("show create table test_database.test_table") assert ( result.strip() - == "CREATE TABLE test_database.test_table\\n(\\n `id` Int32\\n)\\nENGINE = MySQL(mysql1, table = \\'test_table\\')" + == "CREATE TABLE test_database.test_table\\n(\\n `id` Int32\\n)\\nENGINE = MySQL(mysql1, `table` = \\'test_table\\')" ) clickhouse_node.query("DROP DATABASE test_database") diff --git a/tests/integration/test_mysql_database_engine/test.py b/tests/integration/test_mysql_database_engine/test.py index 64a38679121..44c23374b3a 100644 --- a/tests/integration/test_mysql_database_engine/test.py +++ b/tests/integration/test_mysql_database_engine/test.py @@ -943,7 +943,7 @@ def test_predefined_connection_configuration(started_cluster): result = clickhouse_node.query("show create table test_database.test_table") assert ( result.strip() - == "CREATE TABLE test_database.test_table\\n(\\n `id` Int32\\n)\\nENGINE = MySQL(mysql1, table = \\'test_table\\')" + == "CREATE TABLE test_database.test_table\\n(\\n `id` Int32\\n)\\nENGINE = MySQL(mysql1, `table` = \\'test_table\\')" ) clickhouse_node.query("DROP DATABASE test_database") diff --git a/tests/integration/test_old_versions/test.py b/tests/integration/test_old_versions/test.py index b59bfcc4f6b..43f91b7d265 100644 --- a/tests/integration/test_old_versions/test.py +++ b/tests/integration/test_old_versions/test.py @@ -1,66 +1,18 @@ import pytest -from 
helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION from helpers.test_tools import assert_eq_with_retry cluster = ClickHouseCluster(__file__) -node18_14 = cluster.add_instance( - "node18_14", - image="yandex/clickhouse-server", - tag="18.14.19", +node_oldest = cluster.add_instance( + "node_oldest", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, main_configs=["configs/config.d/test_cluster.xml"], allow_analyzer=False, ) -node19_1 = cluster.add_instance( - "node19_1", - image="yandex/clickhouse-server", - tag="19.1.16", - with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"], - allow_analyzer=False, -) -node19_4 = cluster.add_instance( - "node19_4", - image="yandex/clickhouse-server", - tag="19.4.5.35", - with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"], - allow_analyzer=False, -) -node19_8 = cluster.add_instance( - "node19_8", - image="yandex/clickhouse-server", - tag="19.8.3.8", - with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"], - allow_analyzer=False, -) -node19_11 = cluster.add_instance( - "node19_11", - image="yandex/clickhouse-server", - tag="19.11.13.74", - with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"], - allow_analyzer=False, -) -node19_13 = cluster.add_instance( - "node19_13", - image="yandex/clickhouse-server", - tag="19.13.7.57", - with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"], - allow_analyzer=False, -) -node19_16 = cluster.add_instance( - "node19_16", - image="yandex/clickhouse-server", - tag="19.16.9.37", - with_installed_binary=True, - main_configs=["configs/config.d/test_cluster.xml"], - allow_analyzer=False, -) -old_nodes = [node18_14, node19_1, node19_4, node19_8, node19_11, node19_13, node19_16] +old_nodes = [node_oldest] new_node = cluster.add_instance("node_new") @@ -127,18 +79,17 @@ def test_server_is_older_than_client(setup_nodes): def test_distributed_query_initiator_is_older_than_shard(setup_nodes): - distributed_query_initiator_old_nodes = [node18_14, node19_13, node19_16] shard = new_node - for i, initiator in enumerate(distributed_query_initiator_old_nodes): + for i, initiator in enumerate(old_nodes): initiator.query("INSERT INTO dist_table VALUES (3, {})".format(i)) assert_eq_with_retry( shard, "SELECT COUNT() FROM test_table WHERE id=3", - str(len(distributed_query_initiator_old_nodes)), + str(len(old_nodes)), ) assert_eq_with_retry( initiator, "SELECT COUNT() FROM dist_table WHERE id=3", - str(len(distributed_query_initiator_old_nodes)), + str(len(old_nodes)), ) diff --git a/tests/integration/test_polymorphic_parts/test.py b/tests/integration/test_polymorphic_parts/test.py index ba9b5ec6cac..01bc4804c9f 100644 --- a/tests/integration/test_polymorphic_parts/test.py +++ b/tests/integration/test_polymorphic_parts/test.py @@ -4,7 +4,7 @@ import string import struct import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION from helpers.test_tools import TSV from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry @@ -359,8 +359,8 @@ node7 = cluster.add_instance( "node7", user_configs=["configs_old/users.d/not_optimize_count.xml"], with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + 
tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_postgresql_database_engine/test.py b/tests/integration/test_postgresql_database_engine/test.py index ee6c19707f3..c44fa176599 100644 --- a/tests/integration/test_postgresql_database_engine/test.py +++ b/tests/integration/test_postgresql_database_engine/test.py @@ -287,7 +287,7 @@ def test_predefined_connection_configuration(started_cluster): ) print(f"kssenii: {result}") assert result.strip().endswith( - "ENGINE = PostgreSQL(postgres1, table = \\'test_table\\')" + "ENGINE = PostgreSQL(postgres1, `table` = \\'test_table\\')" ) node1.query( diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index c7dae2359c4..5e04c9e4d12 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -723,6 +723,7 @@ def test_materialized_view(started_cluster): pg_manager.execute(f"INSERT INTO test_table SELECT 3, 4") check_tables_are_synchronized(instance, "test_table") assert "1\t2\n3\t4" == instance.query("SELECT * FROM mv ORDER BY 1, 2").strip() + instance.query("DROP VIEW mv") pg_manager.drop_materialized_db() diff --git a/tests/integration/test_reload_clusters_config/test.py b/tests/integration/test_reload_clusters_config/test.py index cb003bbe04e..f5baae77297 100644 --- a/tests/integration/test_reload_clusters_config/test.py +++ b/tests/integration/test_reload_clusters_config/test.py @@ -13,8 +13,6 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance( "node", with_zookeeper=True, main_configs=["configs/remote_servers.xml"] ) -node_1 = cluster.add_instance("node_1", with_zookeeper=True) -node_2 = cluster.add_instance("node_2", with_zookeeper=True) @pytest.fixture(scope="module") @@ -32,9 +30,6 @@ def started_cluster(): Distributed('test_cluster2', 'default', 'replicated')""" ) - cluster.pause_container("node_1") - cluster.pause_container("node_2") - yield cluster finally: diff --git a/tests/integration/test_replicated_fetches_bandwidth/test.py b/tests/integration/test_replicated_fetches_bandwidth/test.py index cd969746c31..1e84c09a523 100644 --- a/tests/integration/test_replicated_fetches_bandwidth/test.py +++ b/tests/integration/test_replicated_fetches_bandwidth/test.py @@ -129,7 +129,7 @@ def test_limited_fetches_for_server(start_cluster): node3.query(f"SYSTEM STOP FETCHES limited_fetches{j}") for i in range(5): node1.query( - "INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format( + "INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format( j, i ) ) @@ -175,7 +175,7 @@ def test_limited_sends_for_server(start_cluster): node1.query(f"SYSTEM STOP FETCHES limited_sends{j}") for i in range(5): node3.query( - "INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format( + "INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format( j, i ) ) diff --git a/tests/integration/test_replicated_merge_tree_compatibility/test.py b/tests/integration/test_replicated_merge_tree_compatibility/test.py index 32a44aa65b9..25cf3caa50c 100644 --- a/tests/integration/test_replicated_merge_tree_compatibility/test.py +++ b/tests/integration/test_replicated_merge_tree_compatibility/test.py @@ -1,12 
+1,12 @@ import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, @@ -14,8 +14,8 @@ node1 = cluster.add_instance( node2 = cluster.add_instance( "node2", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, allow_analyzer=False, diff --git a/tests/integration/test_role/test.py b/tests/integration/test_role/test.py index 0e1bbea9cea..b3b18dc8271 100644 --- a/tests/integration/test_role/test.py +++ b/tests/integration/test_role/test.py @@ -246,7 +246,7 @@ def test_introspection(): ) assert instance.query("SHOW GRANTS FOR A") == TSV( - ["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"] + ["GRANT SELECT ON test.`table` TO A", "GRANT R1 TO A"] ) assert instance.query("SHOW GRANTS FOR B") == TSV( [ @@ -256,11 +256,14 @@ def test_introspection(): ) assert instance.query("SHOW GRANTS FOR R1") == "" assert instance.query("SHOW GRANTS FOR R2") == TSV( - ["GRANT SELECT ON test.table TO R2", "REVOKE SELECT(x) ON test.table FROM R2"] + [ + "GRANT SELECT ON test.`table` TO R2", + "REVOKE SELECT(x) ON test.`table` FROM R2", + ] ) assert instance.query("SHOW GRANTS", user="A") == TSV( - ["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"] + ["GRANT SELECT ON test.`table` TO A", "GRANT R1 TO A"] ) assert instance.query("SHOW GRANTS FOR R1", user="A") == TSV([]) diff --git a/tests/integration/test_row_policy/test.py b/tests/integration/test_row_policy/test.py index d88a8f2a243..98653bf6106 100644 --- a/tests/integration/test_row_policy/test.py +++ b/tests/integration/test_row_policy/test.py @@ -803,9 +803,9 @@ def test_tags_with_db_and_table_names(): assert node.query("SHOW CREATE POLICIES default") == TSV( [ "CREATE ROW POLICY default ON mydb.`.filtered_table4` FOR SELECT USING c = 2 TO default", + "CREATE ROW POLICY default ON mydb.`table` FOR SELECT USING a = 0 TO default", "CREATE ROW POLICY default ON mydb.filtered_table2 FOR SELECT USING c > (d + 5) TO default", "CREATE ROW POLICY default ON mydb.filtered_table3 FOR SELECT USING c = 0 TO default", - "CREATE ROW POLICY default ON mydb.table FOR SELECT USING a = 0 TO default", ] ) diff --git a/tests/integration/test_settings_profile/test.py b/tests/integration/test_settings_profile/test.py index 70740104d63..61237af08c5 100644 --- a/tests/integration/test_settings_profile/test.py +++ b/tests/integration/test_settings_profile/test.py @@ -68,7 +68,7 @@ def test_smoke(): ) assert ( instance.query("SHOW CREATE SETTINGS PROFILE xyz") - == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin\n" + == "CREATE SETTINGS PROFILE `xyz` SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin\n" ) assert ( instance.query( @@ -108,7 +108,7 @@ def test_smoke(): instance.query("ALTER SETTINGS PROFILE xyz TO NONE") assert ( instance.query("SHOW CREATE SETTINGS PROFILE xyz") - == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" + == "CREATE SETTINGS PROFILE `xyz` SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000\n" ) 
assert ( instance.query( @@ -128,7 +128,7 @@ def test_smoke(): instance.query("ALTER USER robin SETTINGS PROFILE xyz") assert ( instance.query("SHOW CREATE USER robin") - == "CREATE USER robin SETTINGS PROFILE xyz\n" + == "CREATE USER robin SETTINGS PROFILE `xyz`\n" ) assert ( instance.query( @@ -174,11 +174,11 @@ def test_settings_from_granted_role(): instance.query("GRANT worker TO robin") assert ( instance.query("SHOW CREATE SETTINGS PROFILE xyz") - == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" + == "CREATE SETTINGS PROFILE `xyz` SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" ) assert ( instance.query("SHOW CREATE ROLE worker") - == "CREATE ROLE worker SETTINGS PROFILE xyz\n" + == "CREATE ROLE worker SETTINGS PROFILE `xyz`\n" ) assert ( instance.query( @@ -260,7 +260,7 @@ def test_settings_from_granted_role(): instance.query("ALTER SETTINGS PROFILE xyz TO worker") assert ( instance.query("SHOW CREATE SETTINGS PROFILE xyz") - == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000 TO worker\n" + == "CREATE SETTINGS PROFILE `xyz` SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000 TO worker\n" ) assert ( instance.query( @@ -282,7 +282,7 @@ def test_settings_from_granted_role(): instance.query("ALTER SETTINGS PROFILE xyz TO NONE") assert ( instance.query("SHOW CREATE SETTINGS PROFILE xyz") - == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" + == "CREATE SETTINGS PROFILE `xyz` SETTINGS max_memory_usage = 100000001 MAX 110000000, max_ast_depth = 2000\n" ) assert ( instance.query( @@ -304,11 +304,11 @@ def test_inheritance(): instance.query("CREATE SETTINGS PROFILE alpha SETTINGS PROFILE xyz TO robin") assert ( instance.query("SHOW CREATE SETTINGS PROFILE xyz") - == "CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 CONST\n" + == "CREATE SETTINGS PROFILE `xyz` SETTINGS max_memory_usage = 100000002 CONST\n" ) assert ( instance.query("SHOW CREATE SETTINGS PROFILE alpha") - == "CREATE SETTINGS PROFILE alpha SETTINGS INHERIT xyz TO robin\n" + == "CREATE SETTINGS PROFILE `alpha` SETTINGS INHERIT `xyz` TO robin\n" ) assert ( instance.query( @@ -453,11 +453,13 @@ def test_show_profiles(): assert instance.query("SHOW SETTINGS PROFILES") == "default\nreadonly\nxyz\n" assert instance.query("SHOW PROFILES") == "default\nreadonly\nxyz\n" - assert instance.query("SHOW CREATE PROFILE xyz") == "CREATE SETTINGS PROFILE xyz\n" + assert ( + instance.query("SHOW CREATE PROFILE xyz") == "CREATE SETTINGS PROFILE `xyz`\n" + ) query_possible_response = [ - "CREATE SETTINGS PROFILE default\n", - "CREATE SETTINGS PROFILE default SETTINGS allow_experimental_analyzer = true\n", + "CREATE SETTINGS PROFILE `default`\n", + "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n", ] assert ( instance.query("SHOW CREATE SETTINGS PROFILE default") @@ -465,24 +467,24 @@ def test_show_profiles(): ) query_possible_response = [ - "CREATE SETTINGS PROFILE default\n" - "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" - "CREATE SETTINGS PROFILE xyz\n", - "CREATE SETTINGS PROFILE default SETTINGS allow_experimental_analyzer = true\n" - "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" - "CREATE SETTINGS PROFILE xyz\n", + "CREATE SETTINGS PROFILE `default`\n" + "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" + "CREATE SETTINGS PROFILE 
`xyz`\n", + "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n" + "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" + "CREATE SETTINGS PROFILE `xyz`\n", ] assert instance.query("SHOW CREATE PROFILES") in query_possible_response expected_access = ( - "CREATE SETTINGS PROFILE default\n" - "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" - "CREATE SETTINGS PROFILE xyz\n" + "CREATE SETTINGS PROFILE `default`\n" + "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" + "CREATE SETTINGS PROFILE `xyz`\n" ) expected_access_analyzer = ( - "CREATE SETTINGS PROFILE default SETTINGS allow_experimental_analyzer = true\n" - "CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1\n" - "CREATE SETTINGS PROFILE xyz\n" + "CREATE SETTINGS PROFILE `default` SETTINGS allow_experimental_analyzer = true\n" + "CREATE SETTINGS PROFILE `readonly` SETTINGS readonly = 1\n" + "CREATE SETTINGS PROFILE `xyz`\n" ) query_response = instance.query("SHOW ACCESS") diff --git a/tests/integration/test_storage_kafka/configs/kafka.xml b/tests/integration/test_storage_kafka/configs/kafka.xml index c6075aff715..3bd1b681c9c 100644 --- a/tests/integration/test_storage_kafka/configs/kafka.xml +++ b/tests/integration/test_storage_kafka/configs/kafka.xml @@ -11,8 +11,8 @@ cgrp,consumer,topic,protocol - - 600 + + 600 consumer_hang diff --git a/tests/integration/test_storage_kerberized_kafka/kerberos_image_config.sh b/tests/integration/test_storage_kerberized_kafka/kerberos_image_config.sh index 07437c42359..1c7419a05e7 100644 --- a/tests/integration/test_storage_kerberized_kafka/kerberos_image_config.sh +++ b/tests/integration/test_storage_kerberized_kafka/kerberos_image_config.sh @@ -105,6 +105,9 @@ create_keytabs() { kadmin.local -q "addprinc -randkey kafkauser/instance@${REALM}" kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab kafkauser/instance@${REALM}" + kadmin.local -q "addprinc -randkey anotherkafkauser/instance@${REALM}" + kadmin.local -q "ktadd -norandkey -k /tmp/keytab/clickhouse.keytab anotherkafkauser/instance@${REALM}" + chmod g+r /tmp/keytab/clickhouse.keytab } diff --git a/tests/integration/test_storage_kerberized_kafka/test.py b/tests/integration/test_storage_kerberized_kafka/test.py index 7856361deda..451e1ab2ccf 100644 --- a/tests/integration/test_storage_kerberized_kafka/test.py +++ b/tests/integration/test_storage_kerberized_kafka/test.py @@ -227,6 +227,58 @@ def test_kafka_json_as_string_no_kdc(kafka_cluster): assert instance.contains_in_log("KerberosInit failure:") +def test_kafka_config_from_sql_named_collection(kafka_cluster): + kafka_produce( + kafka_cluster, + "kafka_json_as_string", + [ + '{"t": 123, "e": {"x": "woof"} }', + "", + '{"t": 124, "e": {"x": "test"} }', + '{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}', + ], + ) + + instance.query( + """ + CREATE NAMED COLLECTION kafka_config AS + kafka.security_protocol = 'SASL_PLAINTEXT', + kafka.sasl_mechanism = 'GSSAPI', + kafka.sasl_kerberos_service_name = 'kafka', + kafka.sasl_kerberos_keytab = '/tmp/keytab/clickhouse.keytab', + kafka.sasl_kerberos_principal = 'anotherkafkauser/instance@TEST.CLICKHOUSE.TECH', + kafka.debug = 'security', + kafka.api_version_request = 'false', + + kafka_broker_list = 'kerberized_kafka1:19092', + kafka_topic_list = 'kafka_json_as_string', + kafka_commit_on_select = 1, + kafka_group_name = 'kafka_json_as_string', + kafka_format = 'JSONAsString', + kafka_flush_interval_ms=1000; + """ + ) + instance.query( + """ 
+ CREATE TABLE test.kafka (field String) + ENGINE = Kafka(kafka_config); + """ + ) + + time.sleep(3) + + result = instance.query("SELECT * FROM test.kafka;") + expected = """\ +{"t": 123, "e": {"x": "woof"} } +{"t": 124, "e": {"x": "test"} } +{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"} +""" + assert TSV(result) == TSV(expected) + assert instance.contains_in_log( + "Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows" + ) + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index 9818a8183d7..b131d8fe51c 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -655,7 +655,7 @@ def test_predefined_connection_configuration(started_cluster): assert node1.query(f"SELECT count() FROM test_table").rstrip() == "100" assert "Connection pool cannot have zero size" in node1.query_and_get_error( - "SELECT count() FROM mysql(mysql1, table='test_table', connection_pool_size=0)" + "SELECT count() FROM mysql(mysql1, `table`='test_table', connection_pool_size=0)" ) assert "Connection pool cannot have zero size" in node1.query_and_get_error( "SELECT count() FROM mysql(mysql4)" diff --git a/tests/integration/test_ttl_replicated/test.py b/tests/integration/test_ttl_replicated/test.py index 119a211ae45..b20b761ef47 100644 --- a/tests/integration/test_ttl_replicated/test.py +++ b/tests/integration/test_ttl_replicated/test.py @@ -2,7 +2,7 @@ import time import helpers.client as client import pytest -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION from helpers.test_tools import TSV, exec_query_with_retry from helpers.wait_for_helpers import wait_for_delete_inactive_parts from helpers.wait_for_helpers import wait_for_delete_empty_parts @@ -16,8 +16,8 @@ node3 = cluster.add_instance("node3", with_zookeeper=True) node4 = cluster.add_instance( "node4", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, main_configs=[ @@ -29,8 +29,8 @@ node4 = cluster.add_instance( node5 = cluster.add_instance( "node5", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, main_configs=[ @@ -41,8 +41,8 @@ node5 = cluster.add_instance( node6 = cluster.add_instance( "node6", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, stay_alive=True, with_installed_binary=True, main_configs=[ diff --git a/tests/integration/test_version_update/test.py b/tests/integration/test_version_update/test.py index a752960bc76..ab3eb1860f3 100644 --- a/tests/integration/test_version_update/test.py +++ b/tests/integration/test_version_update/test.py @@ -1,7 +1,6 @@ import pytest -from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION cluster = ClickHouseCluster(__file__) @@ -11,43 +10,8 @@ node1 = cluster.add_instance("node1", stay_alive=True) node2 = 
cluster.add_instance( "node2", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", - with_installed_binary=True, - stay_alive=True, - allow_analyzer=False, -) - -# Use different nodes because if there is node.restart_from_latest_version(), then in later tests -# it will be with latest version, but shouldn't, order of tests in CI is shuffled. -node3 = cluster.add_instance( - "node3", - image="yandex/clickhouse-server", - tag="21.6", - with_installed_binary=True, - stay_alive=True, - allow_analyzer=False, -) -node4 = cluster.add_instance( - "node4", - image="yandex/clickhouse-server", - tag="21.6", - with_installed_binary=True, - stay_alive=True, - allow_analyzer=False, -) -node5 = cluster.add_instance( - "node5", - image="yandex/clickhouse-server", - tag="21.6", - with_installed_binary=True, - stay_alive=True, - allow_analyzer=False, -) -node6 = cluster.add_instance( - "node6", - image="yandex/clickhouse-server", - tag="21.6", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, stay_alive=True, allow_analyzer=False, @@ -124,211 +88,3 @@ def test_modulo_partition_key_issue_23508(start_cluster): assert partition_data == node2.query( "SELECT partition, name FROM system.parts WHERE table='test' ORDER BY partition" ) - - -# Test from issue 16587 -def test_aggregate_function_versioning_issue_16587(start_cluster): - for node in [node1, node3]: - node.query("DROP TABLE IF EXISTS test_table;") - node.query( - """ - CREATE TABLE test_table (`col1` DateTime, `col2` Int64) - ENGINE = MergeTree() ORDER BY col1""" - ) - node.query( - "insert into test_table select '2020-10-26 00:00:00', 1929292 from numbers(300)" - ) - - expected = "([1],[600])" - - result_on_old_version = node3.query( - "select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);" - ).strip() - assert result_on_old_version != expected - - result_on_new_version = node1.query( - "select sumMap(sm) from (select sumMap([1],[1]) as sm from remote('127.0.0.{1,2}', default.test_table) group by col1, col2);" - ).strip() - assert result_on_new_version == expected - - -def test_aggregate_function_versioning_fetch_data_from_old_to_new_server(start_cluster): - for node in [node1, node4]: - create_table(node) - insert_data(node) - - expected = "([1],[300])" - - new_server_data = node1.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert new_server_data == expected - - old_server_data = node4.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert old_server_data != expected - - data_from_old_to_new_server = node1.query( - "select finalizeAggregation(col3) from remote('node4', default.test_table);" - ).strip() - assert data_from_old_to_new_server == old_server_data - - -def test_aggregate_function_versioning_server_upgrade(start_cluster): - for node in [node1, node5]: - create_table(node) - insert_data(node1, col2=5) - insert_data(node5, col2=1) - - # Serialization with version 0, server does not support versioning of aggregate function states. - old_server_data = node5.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert old_server_data == "([1],[44])" - create = node5.query("describe table default.test_table;").strip() - assert create.strip().endswith( - "col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))" - ) - print("Ok 1") - - # Upgrade server. 
- node5.restart_with_latest_version() - - # Deserialized with version 0, server supports versioning. - upgraded_server_data = node5.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert upgraded_server_data == "([1],[44])" - create = node5.query("describe table default.test_table;").strip() - assert create.strip().endswith( - "col3\tAggregateFunction(sumMap, Array(UInt8), Array(UInt8))" - ) - print("Ok 2") - - create = node1.query("describe table default.test_table;").strip() - print(create) - assert create.strip().endswith( - "col3\tAggregateFunction(1, sumMap, Array(UInt8), Array(UInt8))" - ) - - # Data from upgraded server to new server. Deserialize with version 0. - data_from_upgraded_to_new_server = node1.query( - "select finalizeAggregation(col3) from remote('node5', default.test_table);" - ).strip() - assert data_from_upgraded_to_new_server == upgraded_server_data == "([1],[44])" - print("Ok 3") - - # Data is serialized according to version 0 (though one of the states is version 1, but result is version 0). - upgraded_server_data = node5.query( - "select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);" - ).strip() - assert upgraded_server_data == "([1],[44])\n([1],[44])" - print("Ok 4") - - # Check insertion after server upgarde. - insert_data(node5, col2=2) - - # Check newly inserted data is still serialized with 0 version. - upgraded_server_data = node5.query( - "select finalizeAggregation(col3) from default.test_table order by col2;" - ).strip() - assert upgraded_server_data == "([1],[44])\n([1],[44])" - print("Ok 5") - - # New table has latest version. - new_server_data = node1.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert new_server_data == "([1],[300])" - print("Ok 6") - - # Insert from new server (with version 1) to upgraded server (where version will be 0), result version 0. - node1.query( - "insert into table function remote('node5', default.test_table) select * from default.test_table;" - ).strip() - upgraded_server_data = node5.query( - "select finalizeAggregation(col3) from default.test_table order by col2;" - ).strip() - assert upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])" - print("Ok 7") - - # But new table gets data with latest version. - insert_data(node1) - new_server_data = node1.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert new_server_data == "([1],[300])\n([1],[300])" - print("Ok 8") - - # Create table with column implicitly with older version (version 0). - create_table(node1, name="test_table_0", version=0) - insert_data(node1, table_name="test_table_0", col2=3) - data = node1.query( - "select finalizeAggregation(col3) from default.test_table_0;" - ).strip() - assert data == "([1],[44])" - print("Ok") - - # Insert from new server to upgraded server to a new table but the version was set implicitly to 0, so data version 0. 
- node1.query( - "insert into table function remote('node5', default.test_table) select * from default.test_table_0;" - ).strip() - upgraded_server_data = node5.query( - "select finalizeAggregation(col3) from default.test_table order by col2;" - ).strip() - assert upgraded_server_data == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])" - print("Ok") - - -def test_aggregate_function_versioning_persisting_metadata(start_cluster): - for node in [node1, node6]: - create_table(node) - insert_data(node) - data = node1.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert data == "([1],[300])" - data = node6.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert data == "([1],[44])" - - node6.restart_with_latest_version() - - for node in [node1, node6]: - node.query("DETACH TABLE test_table") - node.query("ATTACH TABLE test_table") - - for node in [node1, node6]: - insert_data(node) - - new_server_data = node1.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert new_server_data == "([1],[300])\n([1],[300])" - - upgraded_server_data = node6.query( - "select finalizeAggregation(col3) from default.test_table;" - ).strip() - assert upgraded_server_data == "([1],[44])\n([1],[44])" - - for node in [node1, node6]: - node.restart_clickhouse() - insert_data(node) - - result = node1.query( - "select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);" - ).strip() - assert ( - result - == "([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])\n([1],[300])" - ) - - result = node6.query( - "select finalizeAggregation(col3) from remote('127.0.0.{1,2}', default.test_table);" - ).strip() - assert ( - result - == "([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])\n([1],[44])" - ) diff --git a/tests/integration/test_version_update_after_mutation/test.py b/tests/integration/test_version_update_after_mutation/test.py index 4e84b4c10ca..90f8d283a6a 100644 --- a/tests/integration/test_version_update_after_mutation/test.py +++ b/tests/integration/test_version_update_after_mutation/test.py @@ -1,7 +1,7 @@ import pytest import time -from helpers.cluster import ClickHouseCluster +from helpers.cluster import ClickHouseCluster, CLICKHOUSE_CI_MIN_TESTED_VERSION from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry cluster = ClickHouseCluster(__file__) @@ -9,8 +9,8 @@ cluster = ClickHouseCluster(__file__) node1 = cluster.add_instance( "node1", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, stay_alive=True, main_configs=[ @@ -21,8 +21,8 @@ node1 = cluster.add_instance( node2 = cluster.add_instance( "node2", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, stay_alive=True, main_configs=[ @@ -33,8 +33,8 @@ node2 = cluster.add_instance( node3 = cluster.add_instance( "node3", with_zookeeper=True, - image="yandex/clickhouse-server", - tag="20.8.11.17", + image="clickhouse/clickhouse-server", + tag=CLICKHOUSE_CI_MIN_TESTED_VERSION, with_installed_binary=True, stay_alive=True, main_configs=[ diff --git a/tests/performance/agg_functions_argmin_argmax.xml b/tests/performance/agg_functions_argmin_argmax.xml new file mode 100644 index 00000000000..e8eed2a82de --- /dev/null +++ 
b/tests/performance/agg_functions_argmin_argmax.xml @@ -0,0 +1,24 @@ + + + + group_scale + + 1000000 + + + + +select argMin(Title, EventTime) from hits_100m_single where Title != '' group by intHash32(UserID) % {group_scale} FORMAT Null +select argMinIf(Title, EventTime, Title != '') from hits_100m_single group by intHash32(UserID) % {group_scale} FORMAT Null +select argMinIf(Title::Nullable(String), EventTime::Nullable(DateTime), Title::Nullable(String) != '') from hits_100m_single group by intHash32(UserID) % {group_scale} FORMAT Null + +select argMin(RegionID, EventTime) from hits_100m_single where Title != '' group by intHash32(UserID) % {group_scale} FORMAT Null +select argMin((Title, RegionID), EventTime) from hits_100m_single where Title != '' group by intHash32(UserID) % {group_scale} FORMAT Null +select argMinIf(Title, EventTime, Title != '') from hits_100m_single group by intHash32(UserID) % {group_scale} FORMAT Null + +select argMax(WatchID, Age) from hits_100m_single FORMAT Null +select argMax(WatchID, Age::Nullable(UInt8)) from hits_100m_single FORMAT Null +select argMax(WatchID, (EventDate, EventTime)) from hits_100m_single where Title != '' group by intHash32(UserID) % {group_scale} FORMAT Null +select argMax(MobilePhone, MobilePhoneModel) from hits_100m_single + + diff --git a/tests/performance/aggregate_with_serialized_method.xml b/tests/performance/aggregate_with_serialized_method.xml index 91763c69bb9..5964e7e2227 100644 --- a/tests/performance/aggregate_with_serialized_method.xml +++ b/tests/performance/aggregate_with_serialized_method.xml @@ -1,8 +1,7 @@ 8 - 0 - 4 + 1 @@ -29,4 +28,4 @@ select toDecimal64(key_int64_1, 3),toDecimal64(key_int64_2, 3),toDecimal64(key_int64_3, 3),toDecimal64(key_int64_4, 3),toDecimal64(key_int64_5, 3), min(m1) from t_nullable group by toDecimal64(key_int64_1, 3),toDecimal64(key_int64_2, 3),toDecimal64(key_int64_3, 3),toDecimal64(key_int64_4, 3),toDecimal64(key_int64_5, 3) limit 10 drop table if exists t_nullable - \ No newline at end of file + diff --git a/tests/performance/jit_aggregate_functions.xml b/tests/performance/jit_aggregate_functions.xml index 8abb901439a..a16b81f610c 100644 --- a/tests/performance/jit_aggregate_functions.xml +++ b/tests/performance/jit_aggregate_functions.xml @@ -30,7 +30,7 @@ - CREATE TABLE jit_test_merge_tree_nullable ( + CREATE TABLE jit_test_memory_nullable ( key UInt64, value_1 Nullable(UInt64), value_2 Nullable(UInt64), @@ -42,7 +42,7 @@ - CREATE TABLE jit_test_memory_nullable ( + CREATE TABLE jit_test_merge_tree_nullable ( key UInt64, value_1 Nullable(UInt64), value_2 Nullable(UInt64), diff --git a/tests/performance/order_with_limit.xml b/tests/performance/order_with_limit.xml index 1e1cb52267c..d1ad2afade8 100644 --- a/tests/performance/order_with_limit.xml +++ b/tests/performance/order_with_limit.xml @@ -1,4 +1,5 @@ + SELECT number AS n FROM numbers_mt(200000000) ORDER BY n DESC LIMIT 1 FORMAT Null SELECT number AS n FROM numbers_mt(200000000) ORDER BY n DESC LIMIT 10 FORMAT Null SELECT number AS n FROM numbers_mt(200000000) ORDER BY n DESC LIMIT 100 FORMAT Null SELECT number AS n FROM numbers_mt(200000000) ORDER BY n DESC LIMIT 1500 FORMAT Null @@ -7,6 +8,7 @@ SELECT number AS n FROM numbers_mt(200000000) ORDER BY n DESC LIMIT 10000 FORMAT Null SELECT number AS n FROM numbers_mt(200000000) ORDER BY n DESC LIMIT 65535 FORMAT Null + SELECT intHash64(number) AS n FROM numbers_mt(500000000) ORDER BY n LIMIT 1 FORMAT Null SELECT intHash64(number) AS n FROM numbers_mt(500000000) ORDER BY n LIMIT 10 FORMAT 
Null SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n LIMIT 100 FORMAT Null SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n LIMIT 1500 FORMAT Null @@ -15,6 +17,7 @@ SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n LIMIT 10000 FORMAT Null SELECT intHash64(number) AS n FROM numbers_mt(100000000) ORDER BY n LIMIT 65535 FORMAT Null + SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n, n + 1, n + 2 LIMIT 1 FORMAT Null SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n, n + 1, n + 2 LIMIT 10 FORMAT Null SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n, n + 1, n + 2 LIMIT 100 FORMAT Null SELECT intHash64(number) AS n FROM numbers_mt(200000000) ORDER BY n, n + 1, n + 2 LIMIT 1500 FORMAT Null diff --git a/tests/queries/0_stateless/00027_argMinMax.sql b/tests/queries/0_stateless/00027_argMinMax.sql index 2b67b99ec77..dbf7c9176d2 100644 --- a/tests/queries/0_stateless/00027_argMinMax.sql +++ b/tests/queries/0_stateless/00027_argMinMax.sql @@ -5,4 +5,12 @@ select argMin(x.1, x.2), argMax(x.1, x.2) from (select (toDate(number, 'UTC'), t select argMin(x.1, x.2), argMax(x.1, x.2) from (select (toDecimal32(number, 2), toDecimal64(number, 2) + 1) as x from numbers(10)); -- array -SELECT argMinArray(id, num), argMaxArray(id, num) FROM (SELECT arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num, arrayJoin([[1, 2, 4], [2, 3, 3]]) AS id); +SELECT + argMinArray(id, num), + argMaxArray(id, num) +FROM +( + SELECT + arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num, + arrayJoin([[1, 2, 4]]) AS id +) diff --git a/tests/queries/0_stateless/00027_simple_argMinArray.sql b/tests/queries/0_stateless/00027_simple_argMinArray.sql index b681a2c53cf..bdee2b058b8 100644 --- a/tests/queries/0_stateless/00027_simple_argMinArray.sql +++ b/tests/queries/0_stateless/00027_simple_argMinArray.sql @@ -1 +1 @@ -SELECT argMinArray(id, num), argMaxArray(id, num) FROM (SELECT arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num, arrayJoin([[1, 2, 4], [2, 3, 3]]) AS id) +SELECT argMinArray(id, num), argMaxArray(id, num) FROM (SELECT arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num, arrayJoin([[1, 2, 4]]) AS id) diff --git a/tests/queries/0_stateless/00047_stored_aggregates_complex.sql b/tests/queries/0_stateless/00047_stored_aggregates_complex.sql index 2e416f91d5d..df5305c97db 100644 --- a/tests/queries/0_stateless/00047_stored_aggregates_complex.sql +++ b/tests/queries/0_stateless/00047_stored_aggregates_complex.sql @@ -1,5 +1,7 @@ DROP TABLE IF EXISTS stored_aggregates; +set max_insert_threads = 1; + set allow_deprecated_syntax_for_merge_tree=1; CREATE TABLE stored_aggregates ( diff --git a/tests/queries/0_stateless/00340_squashing_insert_select.sql b/tests/queries/0_stateless/00340_squashing_insert_select.sql index 102eb061bad..6b7133c155e 100644 --- a/tests/queries/0_stateless/00340_squashing_insert_select.sql +++ b/tests/queries/0_stateless/00340_squashing_insert_select.sql @@ -7,6 +7,8 @@ SET max_block_size = 10000; SET min_insert_block_size_rows = 1000000; SET min_insert_block_size_bytes = 0; +set max_insert_threads = 1; + INSERT INTO numbers_squashed SELECT * FROM system.numbers LIMIT 10000000; SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC; diff --git a/tests/queries/0_stateless/00599_create_view_with_subquery.reference b/tests/queries/0_stateless/00599_create_view_with_subquery.reference index 0458f650fd0..39a5f99df03 100644 --- 
a/tests/queries/0_stateless/00599_create_view_with_subquery.reference +++ b/tests/queries/0_stateless/00599_create_view_with_subquery.reference @@ -1 +1 @@ -CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n) AS\nSELECT *\nFROM default.test_00599\nWHERE id = (\n SELECT 1\n) +CREATE VIEW default.test_view_00599\n(\n `id` UInt64\n)\nAS SELECT *\nFROM default.test_00599\nWHERE id = (\n SELECT 1\n) diff --git a/tests/queries/0_stateless/00751_default_databasename_for_view.reference b/tests/queries/0_stateless/00751_default_databasename_for_view.reference index 4899e230924..2cd5019defa 100644 --- a/tests/queries/0_stateless/00751_default_databasename_for_view.reference +++ b/tests/queries/0_stateless/00751_default_databasename_for_view.reference @@ -6,8 +6,8 @@ CREATE MATERIALIZED VIEW default.t_mv_00751 ) ENGINE = MergeTree ORDER BY date -SETTINGS index_granularity = 8192 AS -SELECT +SETTINGS index_granularity = 8192 +AS SELECT date, platform, app diff --git a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql index 01a2f53bf93..0b289ad7c1d 100644 --- a/tests/queries/0_stateless/00804_test_delta_codec_compression.sql +++ b/tests/queries/0_stateless/00804_test_delta_codec_compression.sql @@ -16,6 +16,8 @@ CREATE TABLE default_codec_synthetic id UInt64 Codec(ZSTD(3)) ) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1; +set max_insert_threads = 1; + INSERT INTO delta_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000; INSERT INTO default_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000; diff --git a/tests/queries/0_stateless/00916_create_or_replace_view.reference b/tests/queries/0_stateless/00916_create_or_replace_view.reference index 50323e47556..66aefd5cf46 100644 --- a/tests/queries/0_stateless/00916_create_or_replace_view.reference +++ b/tests/queries/0_stateless/00916_create_or_replace_view.reference @@ -1,2 +1,2 @@ -CREATE VIEW default.t\n(\n `number` UInt64\n) AS\nSELECT number\nFROM system.numbers -CREATE VIEW default.t\n(\n `next_number` UInt64\n) AS\nSELECT number + 1 AS next_number\nFROM system.numbers +CREATE VIEW default.t\n(\n `number` UInt64\n)\nAS SELECT number\nFROM system.numbers +CREATE VIEW default.t\n(\n `next_number` UInt64\n)\nAS SELECT number + 1 AS next_number\nFROM system.numbers diff --git a/tests/queries/0_stateless/00984_parser_stack_overflow.reference b/tests/queries/0_stateless/00984_parser_stack_overflow.reference index 0cf6a1f96df..e28ada842c0 100644 --- a/tests/queries/0_stateless/00984_parser_stack_overflow.reference +++ b/tests/queries/0_stateless/00984_parser_stack_overflow.reference @@ -1,4 +1,6 @@ -exceeded -exceeded +1 +1 +0 +0 20002 1 diff --git a/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/tests/queries/0_stateless/00984_parser_stack_overflow.sh index a7854b91ee2..dc836388cf6 100755 --- a/tests/queries/0_stateless/00984_parser_stack_overflow.sh +++ b/tests/queries/0_stateless/00984_parser_stack_overflow.sh @@ -9,10 +9,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh # Too deep recursion -perl -e 'print "(" x 10000' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' -perl -e 'print "SELECT " . ("[" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' -perl -e 'print "SELECT " . 
("([" x 5000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' -perl -e 'print "SELECT 1" . ("+1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -oF 'exceeded' +perl -e 'print "(" x 10000' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -cP 'exceeded|too large' +perl -e 'print "SELECT " . ("[" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -cP 'exceeded|too large' +perl -e 'print "SELECT " . ("([" x 5000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -cP 'exceeded|too large' +perl -e 'print "SELECT 1" . ("+1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | grep -cP 'exceeded|too large' # But this is Ok perl -e 'print "SELECT 1" . (",1" x 10000)' | $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" --data-binary @- | wc -c diff --git a/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql b/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql index 9fbc3f951fe..777effe9e81 100644 --- a/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql +++ b/tests/queries/0_stateless/01019_Buffer_and_max_memory_usage.sql @@ -25,6 +25,7 @@ CREATE TABLE buffer_ (key UInt64) Engine=Buffer(currentDatabase(), null_, SET max_memory_usage=10e6; SET max_block_size=100e3; +SET max_insert_threads=1; -- Check that max_memory_usage is ignored only on flush and not on squash SET min_insert_block_size_bytes=9e6; diff --git a/tests/queries/0_stateless/01033_function_substring.reference b/tests/queries/0_stateless/01033_function_substring.reference index b0fac36e24a..362a14f80f3 100644 --- a/tests/queries/0_stateless/01033_function_substring.reference +++ b/tests/queries/0_stateless/01033_function_substring.reference @@ -170,4 +170,6 @@ g -UBSAN bug +-- UBSAN bug +-- Alias +el diff --git a/tests/queries/0_stateless/01033_function_substring.sql b/tests/queries/0_stateless/01033_function_substring.sql index 82c6b5859e2..9955700f302 100644 --- a/tests/queries/0_stateless/01033_function_substring.sql +++ b/tests/queries/0_stateless/01033_function_substring.sql @@ -132,7 +132,7 @@ SELECT substring(s, l, r) FROM t; DROP table if exists t; -SELECT 'UBSAN bug'; +SELECT '-- UBSAN bug'; /** NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size: * substring: @@ -144,3 +144,6 @@ SELECT 'UBSAN bug'; * This may be subject for change. */ SELECT substringUTF8('hello, пÑ�ивеÑ�', -9223372036854775808, number) FROM numbers(16) FORMAT Null; + +SELECT '-- Alias'; +SELECT byteSlice('hello', 2, 2); diff --git a/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql b/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql index e292447512c..f17f3ac63b0 100644 --- a/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql +++ b/tests/queries/0_stateless/01047_window_view_parser_inner_table.sql @@ -1,8 +1,9 @@ - +SET send_logs_level = 'fatal'; SET allow_experimental_analyzer = 0; SET allow_experimental_window_view = 1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Ordinary; DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.mt; diff --git a/tests/queries/0_stateless/01048_window_view_parser.sql b/tests/queries/0_stateless/01048_window_view_parser.sql index f87d9aa023e..adcb4a6364d 100644 --- a/tests/queries/0_stateless/01048_window_view_parser.sql +++ b/tests/queries/0_stateless/01048_window_view_parser.sql @@ -1,9 +1,11 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; SET allow_experimental_analyzer = 0; SET allow_experimental_window_view = 1; DROP DATABASE IF EXISTS test_01048; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE test_01048 ENGINE=Ordinary; DROP TABLE IF EXISTS test_01048.mt; diff --git a/tests/queries/0_stateless/01053_drop_database_mat_view.sql b/tests/queries/0_stateless/01053_drop_database_mat_view.sql index 2642430eb05..6ab31fce644 100644 --- a/tests/queries/0_stateless/01053_drop_database_mat_view.sql +++ b/tests/queries/0_stateless/01053_drop_database_mat_view.sql @@ -1,6 +1,8 @@ +SET send_logs_level = 'fatal'; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Ordinary; -- Different inner table name with Atomic set allow_deprecated_syntax_for_merge_tree=1; diff --git a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql index ae90dc3cc72..f5ac347cfff 100644 --- a/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql +++ b/tests/queries/0_stateless/01064_incremental_streaming_from_2_src_with_feedback.sql @@ -25,7 +25,8 @@ AS minState( toUInt64(-1) ) as fastest_session, maxState( toUInt64(0) ) as biggest_inactivity_period FROM numbers(50000) -GROUP BY id; +GROUP BY id +SETTINGS max_insert_threads=1; -- source table #1 diff --git a/tests/queries/0_stateless/01073_grant_and_revoke.reference b/tests/queries/0_stateless/01073_grant_and_revoke.reference index 449f21e896a..b91820914e6 100644 --- a/tests/queries/0_stateless/01073_grant_and_revoke.reference +++ b/tests/queries/0_stateless/01073_grant_and_revoke.reference @@ -4,12 +4,12 @@ B C GRANT INSERT, ALTER DELETE ON *.* TO test_user_01073 GRANT SELECT ON db1.* TO test_user_01073 -GRANT SELECT ON db2.table TO test_user_01073 -GRANT SELECT(col1) ON db3.table TO test_user_01073 -GRANT SELECT(col1, col2) ON db4.table TO test_user_01073 +GRANT SELECT ON db2.`table` TO test_user_01073 +GRANT SELECT(col1) ON db3.`table` TO test_user_01073 +GRANT SELECT(col1, col2) ON db4.`table` TO test_user_01073 D GRANT ALTER DELETE ON *.* TO test_user_01073 -GRANT SELECT(col1) ON db4.table TO test_user_01073 +GRANT SELECT(col1) ON db4.`table` TO test_user_01073 E GRANT SELECT ON db1.* TO test_role_01073 REVOKE SELECT(c1, c2, c3, c4, c5) ON db1.table1 FROM test_role_01073 diff --git a/tests/queries/0_stateless/01074_partial_revokes.reference b/tests/queries/0_stateless/01074_partial_revokes.reference index 43e44f3c941..9ffa2878ad4 100644 --- a/tests/queries/0_stateless/01074_partial_revokes.reference +++ b/tests/queries/0_stateless/01074_partial_revokes.reference @@ -4,21 +4,21 @@ REVOKE SELECT ON db.* FROM test_user_01074 --cleanup --simple 2 GRANT SELECT ON db.* TO test_user_01074 -REVOKE SELECT ON db.table FROM test_user_01074 +REVOKE SELECT ON 
db.`table` FROM test_user_01074 --cleanup --simple 3 -GRANT SELECT ON db.table TO test_user_01074 -REVOKE SELECT(col1) ON db.table FROM test_user_01074 +GRANT SELECT ON db.`table` TO test_user_01074 +REVOKE SELECT(col1) ON db.`table` FROM test_user_01074 --cleanup --complex 1 GRANT SELECT ON *.* TO test_user_01074 -REVOKE SELECT(col1, col2) ON db.table FROM test_user_01074 +REVOKE SELECT(col1, col2) ON db.`table` FROM test_user_01074 --cleanup --complex 2 GRANT SELECT ON *.* TO test_user_01074 REVOKE SELECT ON db.* FROM test_user_01074 -GRANT SELECT ON db.table TO test_user_01074 -REVOKE SELECT(col1) ON db.table FROM test_user_01074 +GRANT SELECT ON db.`table` TO test_user_01074 +REVOKE SELECT(col1) ON db.`table` FROM test_user_01074 ┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┓ ┃ user_name  ┃ role_name ┃ access_type ┃ database ┃ table ┃ column ┃ is_partial_revoke ┃ grant_option ┃ ┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━┩ @@ -40,7 +40,7 @@ GRANT SELECT ON *.* TO test_user_01074 --cleanup --grant option 1 GRANT SELECT ON *.* TO test_user_01074 WITH GRANT OPTION -REVOKE GRANT OPTION FOR SELECT(col1) ON db.table FROM test_user_01074 +REVOKE GRANT OPTION FOR SELECT(col1) ON db.`table` FROM test_user_01074 ┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━┳━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┓ ┃ user_name  ┃ role_name ┃ access_type ┃ database ┃ table ┃ column ┃ is_partial_revoke ┃ grant_option ┃ ┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━╇━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━┩ @@ -51,7 +51,7 @@ REVOKE GRANT OPTION FOR SELECT(col1) ON db.table FROM test_user_01074 --cleanup --grant option 2 GRANT SELECT ON *.* TO test_user_01074 WITH GRANT OPTION -REVOKE SELECT(col1) ON db.table FROM test_user_01074 +REVOKE SELECT(col1) ON db.`table` FROM test_user_01074 --cleanup --grant option 3 GRANT SELECT ON *.* TO test_user_01074 diff --git a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference index b25cfadd0ec..19db37f852a 100644 --- a/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference +++ b/tests/queries/0_stateless/01083_expressions_in_engine_arguments.reference @@ -6,6 +6,6 @@ CREATE TABLE default.distributed\n(\n `n` Int8\n)\nENGINE = Distributed(\'tes CREATE TABLE default.distributed_tf\n(\n `n` Int8\n) AS cluster(\'test_shard_localhost\', \'default\', \'buffer\') CREATE TABLE default.url\n(\n `n` UInt64,\n `col` String\n)\nENGINE = URL(\'https://localhost:8443/?query=select+n,+_table+from+default.merge+format+CSV\', \'CSV\') CREATE TABLE default.rich_syntax\n(\n `n` Int64\n) AS remote(\'localhos{x|y|t}\', cluster(\'test_shard_localhost\', remote(\'127.0.0.{1..4}\', \'default\', \'view\'))) -CREATE VIEW default.view\n(\n `n` Int64\n) AS\nSELECT toInt64(n) AS n\nFROM\n(\n SELECT toString(n) AS n\n FROM default.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM default.file +CREATE VIEW default.view\n(\n `n` Int64\n)\nAS SELECT toInt64(n) AS n\nFROM\n(\n SELECT toString(n) AS n\n FROM default.merge\n WHERE _table != \'qwerty\'\n ORDER BY _table ASC\n)\nUNION ALL\nSELECT *\nFROM default.file CREATE DICTIONARY default.dict\n(\n `n` UInt64,\n `col` String DEFAULT \'42\'\n)\nPRIMARY KEY n\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9440 SECURE 1 USER \'default\' TABLE 
\'url\'))\nLIFETIME(MIN 0 MAX 1)\nLAYOUT(CACHE(SIZE_IN_CELLS 1)) 16 diff --git a/tests/queries/0_stateless/01085_window_view_attach.sql b/tests/queries/0_stateless/01085_window_view_attach.sql index 51a88a04f95..a40c6f20a1a 100644 --- a/tests/queries/0_stateless/01085_window_view_attach.sql +++ b/tests/queries/0_stateless/01085_window_view_attach.sql @@ -1,9 +1,11 @@ +SET send_logs_level = 'fatal'; SET allow_experimental_analyzer = 0; SET allow_experimental_window_view = 1; DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Ordinary; DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.mt; diff --git a/tests/queries/0_stateless/01086_window_view_cleanup.sh b/tests/queries/0_stateless/01086_window_view_cleanup.sh index b078b4718c0..01bf50bc14d 100755 --- a/tests/queries/0_stateless/01086_window_view_cleanup.sh +++ b/tests/queries/0_stateless/01086_window_view_cleanup.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # Tags: no-parallel +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01109_exchange_tables.sql b/tests/queries/0_stateless/01109_exchange_tables.sql index c118945887b..b10377436f9 100644 --- a/tests/queries/0_stateless/01109_exchange_tables.sql +++ b/tests/queries/0_stateless/01109_exchange_tables.sql @@ -1,4 +1,5 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; DROP DATABASE IF EXISTS test_01109; CREATE DATABASE test_01109 ENGINE=Atomic; @@ -31,6 +32,7 @@ DROP DATABASE IF EXISTS test_01109_other_atomic; DROP DATABASE IF EXISTS test_01109_ordinary; CREATE DATABASE test_01109_other_atomic; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE test_01109_ordinary ENGINE=Ordinary; CREATE TABLE test_01109_other_atomic.t3 ENGINE=MergeTree() ORDER BY tuple() @@ -63,6 +65,3 @@ DROP DATABASE test_01109; DROP DATABASE test_01109_other_atomic; DROP DATABASE test_01109_ordinary; DROP DATABASE test_01109_rename_exists; - - - diff --git a/tests/queries/0_stateless/01114_database_atomic.reference b/tests/queries/0_stateless/01114_database_atomic.reference index 93e89e3a2ec..f42cd099d4e 100644 --- a/tests/queries/0_stateless/01114_database_atomic.reference +++ b/tests/queries/0_stateless/01114_database_atomic.reference @@ -1,4 +1,4 @@ -2 +1 CREATE DATABASE test_01114_1\nENGINE = Atomic CREATE DATABASE test_01114_2\nENGINE = Atomic CREATE DATABASE test_01114_3\nENGINE = Ordinary diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh index 3e1f9eb1f43..1b1f064ae0b 100755 --- a/tests/queries/0_stateless/01114_database_atomic.sh +++ b/tests/queries/0_stateless/01114_database_atomic.sh @@ -2,6 +2,9 @@ # Tags: no-parallel, no-fasttest # Tag no-fasttest: 45 seconds running +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01119_wierd_user_names.reference b/tests/queries/0_stateless/01119_weird_user_names.reference similarity index 100% rename from tests/queries/0_stateless/01119_wierd_user_names.reference rename to tests/queries/0_stateless/01119_weird_user_names.reference diff --git a/tests/queries/0_stateless/01119_wierd_user_names.sql b/tests/queries/0_stateless/01119_weird_user_names.sql similarity index 89% rename from tests/queries/0_stateless/01119_wierd_user_names.sql rename to tests/queries/0_stateless/01119_weird_user_names.sql index 7a28016f4f3..0d6f02786b0 100644 --- a/tests/queries/0_stateless/01119_wierd_user_names.sql +++ b/tests/queries/0_stateless/01119_weird_user_names.sql @@ -13,10 +13,10 @@ create user " "; create user ' spaces'; create user 'spaces '; create user ` INTERSERVER SECRET `; -- { serverError BAD_ARGUMENTS } -create user ''; -- { serverError BAD_ARGUMENTS } +create user ''; -- { clientError SYNTAX_ERROR } create user 'test 01119'; alter user `test 01119` rename to " spaces "; -alter user " spaces " rename to ''; -- { serverError BAD_ARGUMENTS } +alter user " spaces " rename to ''; -- { clientError SYNTAX_ERROR } alter user " spaces " rename to " INTERSERVER SECRET "; -- { serverError BAD_ARGUMENTS } create user "Вася Пупкин"; create user "无名氏 "; diff --git a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql index 505c406c2cc..fc3fcb34fc0 100644 --- a/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql +++ b/tests/queries/0_stateless/01148_zookeeper_path_macros_unfolding.sql @@ -1,5 +1,7 @@ -- Tags: zookeeper, no-replicated-database, no-parallel, no-ordinary-database +SET send_logs_level = 'fatal'; + DROP TABLE IF EXISTS rmt; DROP TABLE IF EXISTS rmt1; DROP TABLE IF EXISTS rmt2; @@ -32,6 +34,7 @@ SHOW CREATE TABLE test_01148_atomic.rmt3; DROP DATABASE IF EXISTS test_01148_ordinary; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
CREATE DATABASE test_01148_ordinary ENGINE=Ordinary; RENAME TABLE test_01148_atomic.rmt3 to test_01148_ordinary.rmt3; -- { serverError 48 } DROP DATABASE test_01148_ordinary; diff --git a/tests/queries/0_stateless/01153_attach_mv_uuid.reference b/tests/queries/0_stateless/01153_attach_mv_uuid.reference index e37fe28e303..ca0a4b6ddbe 100644 --- a/tests/queries/0_stateless/01153_attach_mv_uuid.reference +++ b/tests/queries/0_stateless/01153_attach_mv_uuid.reference @@ -4,18 +4,18 @@ 2 4 3 9 4 16 -CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src +CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n\nAS SELECT\n n,\n n * n AS n2\nFROM default.src 1 1 2 4 -CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src +CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n\nAS SELECT\n n,\n n * n AS n2\nFROM default.src 1 1 2 4 3 9 4 16 -CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src +CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n\nAS SELECT\n n,\n n * n AS n2\nFROM default.src 1 1 2 4 -CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n AS\nSELECT\n n,\n n * n AS n2\nFROM default.src +CREATE MATERIALIZED VIEW default.mv UUID \'e15f3ab5-6cae-4df3-b879-f40deafd82c2\' TO INNER UUID \'3bd68e3c-2693-4352-ad66-a66eba9e345e\'\n(\n `n` Int32,\n `n2` Int64\n)\nENGINE = MergeTree\nPARTITION BY n % 10\nORDER BY n\nAS SELECT\n n,\n n * n AS n2\nFROM default.src 1 1 2 4 3 9 diff --git a/tests/queries/0_stateless/01155_rename_move_materialized_view.sql b/tests/queries/0_stateless/01155_rename_move_materialized_view.sql index 1eff1c0779a..80ed707b695 100644 --- a/tests/queries/0_stateless/01155_rename_move_materialized_view.sql +++ b/tests/queries/0_stateless/01155_rename_move_materialized_view.sql @@ -1,11 +1,13 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; SET prefer_localhost_replica = 1; DROP DATABASE IF EXISTS test_01155_ordinary; DROP DATABASE IF EXISTS test_01155_atomic; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE test_01155_ordinary ENGINE=Ordinary; CREATE DATABASE test_01155_atomic ENGINE=Atomic; @@ -70,7 +72,10 @@ RENAME DATABASE test_01155_ordinary TO test_01155_atomic; SET check_table_dependencies=1; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
+SET send_logs_level='fatal'; CREATE DATABASE test_01155_ordinary ENGINE=Ordinary; +SET send_logs_level='warning'; SHOW CREATE DATABASE test_01155_atomic; RENAME TABLE test_01155_atomic.mv1 TO test_01155_ordinary.mv1; diff --git a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh index d1a7144e886..1ac01fe6abc 100755 --- a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh +++ b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # Tags: zookeeper, no-parallel, no-fasttest +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01201_read_single_thread_in_order.sql b/tests/queries/0_stateless/01201_read_single_thread_in_order.sql index 24ed935a125..1c2b3eb3dbf 100644 --- a/tests/queries/0_stateless/01201_read_single_thread_in_order.sql +++ b/tests/queries/0_stateless/01201_read_single_thread_in_order.sql @@ -1,3 +1,5 @@ +-- Tags: long + DROP TABLE IF EXISTS t; CREATE TABLE t @@ -8,7 +10,7 @@ ENGINE = MergeTree ORDER BY number SETTINGS index_granularity = 128, ratio_of_defaults_for_sparse_serialization = 1.0, index_granularity_bytes = '10Mi'; -SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_insert_threads = 1; INSERT INTO t SELECT number FROM numbers(10000000); SET max_threads = 1, max_block_size = 12345; diff --git a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql index 5db92e70650..2a1b202c6dd 100644 --- a/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql +++ b/tests/queries/0_stateless/01224_no_superfluous_dict_reload.sql @@ -1,8 +1,11 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + DROP DATABASE IF EXISTS dict_db_01224; DROP DATABASE IF EXISTS dict_db_01224_dictionary; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE dict_db_01224 ENGINE=Ordinary; -- Different internal dictionary name with Atomic CREATE DATABASE dict_db_01224_dictionary Engine=Dictionary; diff --git a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql index bc733a0c546..28a5a0d9d55 100644 --- a/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql +++ b/tests/queries/0_stateless/01225_show_create_table_from_dictionary.sql @@ -1,8 +1,11 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + DROP DATABASE IF EXISTS dict_db_01225; DROP DATABASE IF EXISTS dict_db_01225_dictionary; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
CREATE DATABASE dict_db_01225 ENGINE=Ordinary; -- Different internal dictionary name with Atomic CREATE DATABASE dict_db_01225_dictionary Engine=Dictionary; diff --git a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.sql b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.sql index d187a2e4d4e..0c9cfafa496 100644 --- a/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.sql +++ b/tests/queries/0_stateless/01249_bad_arguments_for_bloom_filter.sql @@ -1,7 +1,10 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + DROP DATABASE IF EXISTS test_01249; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE test_01249 ENGINE=Ordinary; -- Full ATTACH requires UUID with Atomic USE test_01249; diff --git a/tests/queries/0_stateless/01271_show_privileges.reference b/tests/queries/0_stateless/01271_show_privileges.reference index de5f9a82b15..12cc95b914a 100644 --- a/tests/queries/0_stateless/01271_show_privileges.reference +++ b/tests/queries/0_stateless/01271_show_privileges.reference @@ -48,6 +48,7 @@ ALTER TABLE [] \N ALTER ALTER DATABASE [] \N ALTER ALTER VIEW MODIFY QUERY ['ALTER TABLE MODIFY QUERY'] VIEW ALTER VIEW ALTER VIEW MODIFY REFRESH ['ALTER TABLE MODIFY QUERY'] VIEW ALTER VIEW +ALTER VIEW MODIFY SQL SECURITY ['ALTER TABLE MODIFY SQL SECURITY'] VIEW ALTER VIEW ALTER VIEW [] \N ALTER ALTER [] \N ALL CREATE DATABASE [] DATABASE CREATE @@ -89,6 +90,7 @@ DROP QUOTA [] GLOBAL ACCESS MANAGEMENT CREATE SETTINGS PROFILE ['CREATE PROFILE'] GLOBAL ACCESS MANAGEMENT ALTER SETTINGS PROFILE ['ALTER PROFILE'] GLOBAL ACCESS MANAGEMENT DROP SETTINGS PROFILE ['DROP PROFILE'] GLOBAL ACCESS MANAGEMENT +ALLOW SQL SECURITY NONE ['CREATE SQL SECURITY NONE','ALLOW SQL SECURITY NONE','SQL SECURITY NONE','SECURITY NONE'] GLOBAL ACCESS MANAGEMENT SHOW USERS ['SHOW CREATE USER'] GLOBAL SHOW ACCESS SHOW ROLES ['SHOW CREATE ROLE'] GLOBAL SHOW ACCESS SHOW ROW POLICIES ['SHOW POLICIES','SHOW CREATE ROW POLICY','SHOW CREATE POLICY'] TABLE SHOW ACCESS @@ -100,6 +102,7 @@ SHOW NAMED COLLECTIONS ['SHOW NAMED COLLECTIONS'] NAMED_COLLECTION NAMED COLLECT SHOW NAMED COLLECTIONS SECRETS ['SHOW NAMED COLLECTIONS SECRETS'] NAMED_COLLECTION NAMED COLLECTION ADMIN NAMED COLLECTION ['NAMED COLLECTION USAGE','USE NAMED COLLECTION'] NAMED_COLLECTION NAMED COLLECTION ADMIN NAMED COLLECTION ADMIN ['NAMED COLLECTION CONTROL'] NAMED_COLLECTION ALL +SET DEFINER [] USER_NAME ALL SYSTEM SHUTDOWN ['SYSTEM KILL','SHUTDOWN'] GLOBAL SYSTEM SYSTEM DROP DNS CACHE ['SYSTEM DROP DNS','DROP DNS CACHE','DROP DNS'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP MARK CACHE ['SYSTEM DROP MARK','DROP MARK CACHE','DROP MARKS'] GLOBAL SYSTEM DROP CACHE @@ -108,6 +111,7 @@ SYSTEM DROP MMAP CACHE ['SYSTEM DROP MMAP','DROP MMAP CACHE','DROP MMAP'] GLOBAL SYSTEM DROP QUERY CACHE ['SYSTEM DROP QUERY','DROP QUERY CACHE','DROP QUERY'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP COMPILED EXPRESSION CACHE ['SYSTEM DROP COMPILED EXPRESSION','DROP COMPILED EXPRESSION CACHE','DROP COMPILED EXPRESSIONS'] GLOBAL SYSTEM DROP CACHE SYSTEM DROP FILESYSTEM CACHE ['SYSTEM DROP FILESYSTEM CACHE','DROP FILESYSTEM CACHE'] GLOBAL SYSTEM DROP CACHE +SYSTEM DROP DISTRIBUTED CACHE ['SYSTEM DROP DISTRIBUTED CACHE','DROP DISTRIBUTED CACHE'] GLOBAL SYSTEM DROP CACHE SYSTEM SYNC FILESYSTEM CACHE ['SYSTEM REPAIR FILESYSTEM CACHE','REPAIR FILESYSTEM CACHE','SYNC FILESYSTEM CACHE'] GLOBAL SYSTEM SYSTEM DROP SCHEMA CACHE ['SYSTEM DROP SCHEMA CACHE','DROP SCHEMA CACHE'] GLOBAL SYSTEM DROP CACHE 
SYSTEM DROP FORMAT SCHEMA CACHE ['SYSTEM DROP FORMAT SCHEMA CACHE','DROP FORMAT SCHEMA CACHE'] GLOBAL SYSTEM DROP CACHE @@ -133,6 +137,7 @@ SYSTEM DISTRIBUTED SENDS ['SYSTEM STOP DISTRIBUTED SENDS','SYSTEM START DISTRIBU SYSTEM REPLICATED SENDS ['SYSTEM STOP REPLICATED SENDS','SYSTEM START REPLICATED SENDS','STOP REPLICATED SENDS','START REPLICATED SENDS'] TABLE SYSTEM SENDS SYSTEM SENDS ['SYSTEM STOP SENDS','SYSTEM START SENDS','STOP SENDS','START SENDS'] \N SYSTEM SYSTEM REPLICATION QUEUES ['SYSTEM STOP REPLICATION QUEUES','SYSTEM START REPLICATION QUEUES','STOP REPLICATION QUEUES','START REPLICATION QUEUES'] TABLE SYSTEM +SYSTEM VIRTUAL PARTS UPDATE ['SYSTEM STOP VIRTUAL PARTS UPDATE','SYSTEM START VIRTUAL PARTS UPDATE','STOP VIRTUAL PARTS UPDATE','START VIRTUAL PARTS UPDATE'] TABLE SYSTEM SYSTEM DROP REPLICA ['DROP REPLICA'] TABLE SYSTEM SYSTEM SYNC REPLICA ['SYNC REPLICA'] TABLE SYSTEM SYSTEM REPLICA READINESS ['SYSTEM REPLICA READY','SYSTEM REPLICA UNREADY'] GLOBAL SYSTEM diff --git a/tests/queries/0_stateless/01278_format_multiple_queries.reference b/tests/queries/0_stateless/01278_format_multiple_queries.reference index 001b10b0990..9e3ae2250b3 100644 --- a/tests/queries/0_stateless/01278_format_multiple_queries.reference +++ b/tests/queries/0_stateless/01278_format_multiple_queries.reference @@ -1,7 +1,7 @@ SELECT a, b AS x -FROM table AS t +FROM `table` AS t INNER JOIN table2 AS t2 ON t.id = t2.t_id WHERE 1 = 1 ; diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index 08cc97c84bf..713d187cd88 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -76,7 +76,7 @@ insert into data_01278 select reinterpretAsString(number), // s6 reinterpretAsString(number), // s7 reinterpretAsString(number) // s8 -from numbers(100000); -- { serverError 241 }" > /dev/null 2>&1 +from numbers(200000); -- { serverError 241 }" > /dev/null 2>&1 local ret_code=$? 
if [[ $ret_code -eq 0 ]]; then diff --git a/tests/queries/0_stateless/01292_create_user.reference b/tests/queries/0_stateless/01292_create_user.reference index b7c30b304bf..d5841a74a2c 100644 --- a/tests/queries/0_stateless/01292_create_user.reference +++ b/tests/queries/0_stateless/01292_create_user.reference @@ -56,14 +56,14 @@ CREATE USER u1_01292 HOST LOCAL CREATE USER `u2_01292@%.myhost.com` -- settings CREATE USER u1_01292 -CREATE USER u2_01292 SETTINGS PROFILE default +CREATE USER u2_01292 SETTINGS PROFILE `default` CREATE USER u3_01292 SETTINGS max_memory_usage = 5000000 CREATE USER u4_01292 SETTINGS max_memory_usage MIN 5000000 CREATE USER u5_01292 SETTINGS max_memory_usage MAX 5000000 CREATE USER u6_01292 SETTINGS max_memory_usage CONST CREATE USER u7_01292 SETTINGS max_memory_usage WRITABLE CREATE USER u8_01292 SETTINGS max_memory_usage = 5000000 MIN 4000000 MAX 6000000 CONST -CREATE USER u9_01292 SETTINGS PROFILE default, max_memory_usage = 5000000 WRITABLE +CREATE USER u9_01292 SETTINGS PROFILE `default`, max_memory_usage = 5000000 WRITABLE CREATE USER u1_01292 SETTINGS readonly = 1 CREATE USER u2_01292 SETTINGS readonly = 1 CREATE USER u3_01292 @@ -82,7 +82,7 @@ CREATE USER u5_01292 DEFAULT ROLE ALL EXCEPT r1_01292 CREATE USER u6_01292 DEFAULT ROLE NONE -- complex CREATE USER u1_01292 IDENTIFIED WITH plaintext_password HOST LOCAL SETTINGS readonly = 1 -CREATE USER u1_01292 HOST LIKE \'%.%.myhost.com\' DEFAULT ROLE NONE SETTINGS PROFILE default +CREATE USER u1_01292 HOST LIKE \'%.%.myhost.com\' DEFAULT ROLE NONE SETTINGS PROFILE `default` -- if not exists CREATE USER u1_01292 GRANT r1_01292 TO u1_01292 diff --git a/tests/queries/0_stateless/01293_create_role.reference b/tests/queries/0_stateless/01293_create_role.reference index 42f091bddd5..f1bd76322d4 100644 --- a/tests/queries/0_stateless/01293_create_role.reference +++ b/tests/queries/0_stateless/01293_create_role.reference @@ -11,14 +11,14 @@ CREATE ROLE `r2_01293@%.myhost.com` CREATE ROLE `r2_01293@%.myhost.com` -- settings CREATE ROLE r1_01293 -CREATE ROLE r2_01293 SETTINGS PROFILE default +CREATE ROLE r2_01293 SETTINGS PROFILE `default` CREATE ROLE r3_01293 SETTINGS max_memory_usage = 5000000 CREATE ROLE r4_01293 SETTINGS max_memory_usage MIN 5000000 CREATE ROLE r5_01293 SETTINGS max_memory_usage MAX 5000000 CREATE ROLE r6_01293 SETTINGS max_memory_usage CONST CREATE ROLE r7_01293 SETTINGS max_memory_usage WRITABLE CREATE ROLE r8_01293 SETTINGS max_memory_usage = 5000000 MIN 4000000 MAX 6000000 CONST -CREATE ROLE r9_01293 SETTINGS PROFILE default, max_memory_usage = 5000000 WRITABLE +CREATE ROLE r9_01293 SETTINGS PROFILE `default`, max_memory_usage = 5000000 WRITABLE CREATE ROLE r1_01293 SETTINGS readonly = 1 CREATE ROLE r2_01293 SETTINGS readonly = 1 CREATE ROLE r3_01293 diff --git a/tests/queries/0_stateless/01294_create_settings_profile.reference b/tests/queries/0_stateless/01294_create_settings_profile.reference index a10d5758752..7e1838c8dae 100644 --- a/tests/queries/0_stateless/01294_create_settings_profile.reference +++ b/tests/queries/0_stateless/01294_create_settings_profile.reference @@ -1,57 +1,57 @@ -- default -CREATE SETTINGS PROFILE s1_01294 +CREATE SETTINGS PROFILE `s1_01294` -- same as default -CREATE SETTINGS PROFILE s2_01294 -CREATE SETTINGS PROFILE s3_01294 +CREATE SETTINGS PROFILE `s2_01294` +CREATE SETTINGS PROFILE `s3_01294` -- rename -CREATE SETTINGS PROFILE s2_01294_renamed +CREATE SETTINGS PROFILE `s2_01294_renamed` -- settings -CREATE SETTINGS PROFILE s1_01294 -CREATE SETTINGS PROFILE 
s2_01294 SETTINGS INHERIT default -CREATE SETTINGS PROFILE s3_01294 SETTINGS max_memory_usage = 5000000 -CREATE SETTINGS PROFILE s4_01294 SETTINGS max_memory_usage MIN 5000000 -CREATE SETTINGS PROFILE s5_01294 SETTINGS max_memory_usage MAX 5000000 -CREATE SETTINGS PROFILE s6_01294 SETTINGS max_memory_usage CONST -CREATE SETTINGS PROFILE s7_01294 SETTINGS max_memory_usage WRITABLE -CREATE SETTINGS PROFILE s8_01294 SETTINGS max_memory_usage = 5000000 MIN 4000000 MAX 6000000 CONST -CREATE SETTINGS PROFILE s9_01294 SETTINGS INHERIT default, max_memory_usage = 5000000 WRITABLE -CREATE SETTINGS PROFILE s10_01294 SETTINGS INHERIT s1_01294, INHERIT s3_01294, INHERIT default, readonly = 0, max_memory_usage MAX 6000000 -CREATE SETTINGS PROFILE s1_01294 SETTINGS readonly = 0 -CREATE SETTINGS PROFILE s2_01294 SETTINGS readonly = 1 -CREATE SETTINGS PROFILE s3_01294 +CREATE SETTINGS PROFILE `s1_01294` +CREATE SETTINGS PROFILE `s2_01294` SETTINGS INHERIT `default` +CREATE SETTINGS PROFILE `s3_01294` SETTINGS max_memory_usage = 5000000 +CREATE SETTINGS PROFILE `s4_01294` SETTINGS max_memory_usage MIN 5000000 +CREATE SETTINGS PROFILE `s5_01294` SETTINGS max_memory_usage MAX 5000000 +CREATE SETTINGS PROFILE `s6_01294` SETTINGS max_memory_usage CONST +CREATE SETTINGS PROFILE `s7_01294` SETTINGS max_memory_usage WRITABLE +CREATE SETTINGS PROFILE `s8_01294` SETTINGS max_memory_usage = 5000000 MIN 4000000 MAX 6000000 CONST +CREATE SETTINGS PROFILE `s9_01294` SETTINGS INHERIT `default`, max_memory_usage = 5000000 WRITABLE +CREATE SETTINGS PROFILE `s10_01294` SETTINGS INHERIT `s1_01294`, INHERIT `s3_01294`, INHERIT `default`, readonly = 0, max_memory_usage MAX 6000000 +CREATE SETTINGS PROFILE `s1_01294` SETTINGS readonly = 0 +CREATE SETTINGS PROFILE `s2_01294` SETTINGS readonly = 1 +CREATE SETTINGS PROFILE `s3_01294` -- to roles -CREATE SETTINGS PROFILE s1_01294 -CREATE SETTINGS PROFILE s2_01294 TO ALL -CREATE SETTINGS PROFILE s3_01294 TO r1_01294 -CREATE SETTINGS PROFILE s4_01294 TO u1_01294 -CREATE SETTINGS PROFILE s5_01294 TO r1_01294, u1_01294 -CREATE SETTINGS PROFILE s6_01294 TO ALL EXCEPT r1_01294 -CREATE SETTINGS PROFILE s7_01294 TO ALL EXCEPT r1_01294, u1_01294 -CREATE SETTINGS PROFILE s1_01294 TO u1_01294 -CREATE SETTINGS PROFILE s2_01294 +CREATE SETTINGS PROFILE `s1_01294` +CREATE SETTINGS PROFILE `s2_01294` TO ALL +CREATE SETTINGS PROFILE `s3_01294` TO r1_01294 +CREATE SETTINGS PROFILE `s4_01294` TO u1_01294 +CREATE SETTINGS PROFILE `s5_01294` TO r1_01294, u1_01294 +CREATE SETTINGS PROFILE `s6_01294` TO ALL EXCEPT r1_01294 +CREATE SETTINGS PROFILE `s7_01294` TO ALL EXCEPT r1_01294, u1_01294 +CREATE SETTINGS PROFILE `s1_01294` TO u1_01294 +CREATE SETTINGS PROFILE `s2_01294` -- complex -CREATE SETTINGS PROFILE s1_01294 SETTINGS readonly = 0 TO r1_01294 -CREATE SETTINGS PROFILE s1_01294 SETTINGS INHERIT default +CREATE SETTINGS PROFILE `s1_01294` SETTINGS readonly = 0 TO r1_01294 +CREATE SETTINGS PROFILE `s1_01294` SETTINGS INHERIT `default` -- multiple profiles in one command -CREATE SETTINGS PROFILE s1_01294 SETTINGS max_memory_usage = 5000000 -CREATE SETTINGS PROFILE s2_01294 SETTINGS max_memory_usage = 5000000 -CREATE SETTINGS PROFILE s3_01294 TO ALL -CREATE SETTINGS PROFILE s4_01294 TO ALL -CREATE SETTINGS PROFILE s1_01294 SETTINGS max_memory_usage = 6000000 -CREATE SETTINGS PROFILE s2_01294 SETTINGS max_memory_usage = 6000000 -CREATE SETTINGS PROFILE s3_01294 TO ALL -CREATE SETTINGS PROFILE s4_01294 TO ALL -CREATE SETTINGS PROFILE s1_01294 SETTINGS max_memory_usage = 6000000 -CREATE SETTINGS PROFILE 
s2_01294 SETTINGS max_memory_usage = 6000000 TO r1_01294 -CREATE SETTINGS PROFILE s3_01294 TO r1_01294 -CREATE SETTINGS PROFILE s4_01294 TO r1_01294 +CREATE SETTINGS PROFILE `s1_01294` SETTINGS max_memory_usage = 5000000 +CREATE SETTINGS PROFILE `s2_01294` SETTINGS max_memory_usage = 5000000 +CREATE SETTINGS PROFILE `s3_01294` TO ALL +CREATE SETTINGS PROFILE `s4_01294` TO ALL +CREATE SETTINGS PROFILE `s1_01294` SETTINGS max_memory_usage = 6000000 +CREATE SETTINGS PROFILE `s2_01294` SETTINGS max_memory_usage = 6000000 +CREATE SETTINGS PROFILE `s3_01294` TO ALL +CREATE SETTINGS PROFILE `s4_01294` TO ALL +CREATE SETTINGS PROFILE `s1_01294` SETTINGS max_memory_usage = 6000000 +CREATE SETTINGS PROFILE `s2_01294` SETTINGS max_memory_usage = 6000000 TO r1_01294 +CREATE SETTINGS PROFILE `s3_01294` TO r1_01294 +CREATE SETTINGS PROFILE `s4_01294` TO r1_01294 -- readonly ambiguity -CREATE SETTINGS PROFILE s1_01294 SETTINGS readonly = 1 -CREATE SETTINGS PROFILE s2_01294 SETTINGS readonly CONST -CREATE SETTINGS PROFILE s3_01294 SETTINGS INHERIT readonly -CREATE SETTINGS PROFILE s4_01294 SETTINGS INHERIT readonly, INHERIT readonly -CREATE SETTINGS PROFILE s5_01294 SETTINGS INHERIT readonly, readonly = 1 -CREATE SETTINGS PROFILE s6_01294 SETTINGS INHERIT readonly, readonly CONST +CREATE SETTINGS PROFILE `s1_01294` SETTINGS readonly = 1 +CREATE SETTINGS PROFILE `s2_01294` SETTINGS readonly CONST +CREATE SETTINGS PROFILE `s3_01294` SETTINGS INHERIT `readonly` +CREATE SETTINGS PROFILE `s4_01294` SETTINGS INHERIT `readonly`, INHERIT `readonly` +CREATE SETTINGS PROFILE `s5_01294` SETTINGS INHERIT `readonly`, readonly = 1 +CREATE SETTINGS PROFILE `s6_01294` SETTINGS INHERIT `readonly`, readonly CONST -- system.settings_profiles s1_01294 local_directory 0 0 [] [] s2_01294 local_directory 1 0 ['r1_01294'] [] diff --git a/tests/queries/0_stateless/01295_create_row_policy.reference b/tests/queries/0_stateless/01295_create_row_policy.reference index d73d9752bc1..2c300332a89 100644 --- a/tests/queries/0_stateless/01295_create_row_policy.reference +++ b/tests/queries/0_stateless/01295_create_row_policy.reference @@ -1,35 +1,35 @@ -- default -CREATE ROW POLICY p1_01295 ON db.table +CREATE ROW POLICY p1_01295 ON db.`table` -- same as default -CREATE ROW POLICY p2_01295 ON db.table -CREATE ROW POLICY p3_01295 ON db.table +CREATE ROW POLICY p2_01295 ON db.`table` +CREATE ROW POLICY p3_01295 ON db.`table` -- rename -CREATE ROW POLICY p2_01295_renamed ON db.table +CREATE ROW POLICY p2_01295_renamed ON db.`table` -- filter -CREATE ROW POLICY p1_01295 ON db.table FOR SELECT USING (a < b) AND (c > d) -CREATE ROW POLICY p2_01295 ON db.table AS restrictive FOR SELECT USING id = currentUser() -CREATE ROW POLICY p3_01295 ON db.table FOR SELECT USING 1 -CREATE ROW POLICY p1_01295 ON db.table AS restrictive FOR SELECT USING 0 +CREATE ROW POLICY p1_01295 ON db.`table` FOR SELECT USING (a < b) AND (c > d) +CREATE ROW POLICY p2_01295 ON db.`table` AS restrictive FOR SELECT USING id = currentUser() +CREATE ROW POLICY p3_01295 ON db.`table` FOR SELECT USING 1 +CREATE ROW POLICY p1_01295 ON db.`table` AS restrictive FOR SELECT USING 0 -- to roles -CREATE ROW POLICY p1_01295 ON db.table -CREATE ROW POLICY p2_01295 ON db.table TO ALL -CREATE ROW POLICY p3_01295 ON db.table TO r1_01295 -CREATE ROW POLICY p4_01295 ON db.table TO u1_01295 -CREATE ROW POLICY p5_01295 ON db.table TO r1_01295, u1_01295 -CREATE ROW POLICY p6_01295 ON db.table TO ALL EXCEPT r1_01295 -CREATE ROW POLICY p7_01295 ON db.table TO ALL EXCEPT r1_01295, u1_01295 -CREATE 
ROW POLICY p1_01295 ON db.table TO u1_01295 -CREATE ROW POLICY p2_01295 ON db.table +CREATE ROW POLICY p1_01295 ON db.`table` +CREATE ROW POLICY p2_01295 ON db.`table` TO ALL +CREATE ROW POLICY p3_01295 ON db.`table` TO r1_01295 +CREATE ROW POLICY p4_01295 ON db.`table` TO u1_01295 +CREATE ROW POLICY p5_01295 ON db.`table` TO r1_01295, u1_01295 +CREATE ROW POLICY p6_01295 ON db.`table` TO ALL EXCEPT r1_01295 +CREATE ROW POLICY p7_01295 ON db.`table` TO ALL EXCEPT r1_01295, u1_01295 +CREATE ROW POLICY p1_01295 ON db.`table` TO u1_01295 +CREATE ROW POLICY p2_01295 ON db.`table` -- multiple policies in one command -CREATE ROW POLICY p1_01295 ON db.table FOR SELECT USING 1 -CREATE ROW POLICY p2_01295 ON db.table FOR SELECT USING 1 -CREATE ROW POLICY p3_01295 ON db.table TO u1_01295 +CREATE ROW POLICY p1_01295 ON db.`table` FOR SELECT USING 1 +CREATE ROW POLICY p2_01295 ON db.`table` FOR SELECT USING 1 +CREATE ROW POLICY p3_01295 ON db.`table` TO u1_01295 CREATE ROW POLICY p3_01295 ON db2.table2 TO u1_01295 -CREATE ROW POLICY p4_01295 ON db.table FOR SELECT USING a = b +CREATE ROW POLICY p4_01295 ON db.`table` FOR SELECT USING a = b CREATE ROW POLICY p5_01295 ON db2.table2 FOR SELECT USING a = b -CREATE ROW POLICY p1_01295 ON db.table FOR SELECT USING 1 TO ALL -CREATE ROW POLICY p2_01295 ON db.table FOR SELECT USING 1 TO ALL +CREATE ROW POLICY p1_01295 ON db.`table` FOR SELECT USING 1 TO ALL +CREATE ROW POLICY p2_01295 ON db.`table` FOR SELECT USING 1 TO ALL -- system.row_policies -p1_01295 ON db.table p1_01295 db table local_directory (a < b) AND (c > d) 0 0 [] [] -p2_01295 ON db.table p2_01295 db table local_directory id = currentUser() 1 0 ['u1_01295'] [] -p3_01295 ON db.table p3_01295 db table local_directory 1 0 1 [] ['r1_01295'] +p1_01295 ON db.`table` p1_01295 db table local_directory (a < b) AND (c > d) 0 0 [] [] +p2_01295 ON db.`table` p2_01295 db table local_directory id = currentUser() 1 0 ['u1_01295'] [] +p3_01295 ON db.`table` p3_01295 db table local_directory 1 0 1 [] ['r1_01295'] diff --git a/tests/queries/0_stateless/01296_create_row_policy_in_current_database.reference b/tests/queries/0_stateless/01296_create_row_policy_in_current_database.reference index fa9c2f73021..bfca341bd7b 100644 --- a/tests/queries/0_stateless/01296_create_row_policy_in_current_database.reference +++ b/tests/queries/0_stateless/01296_create_row_policy_in_current_database.reference @@ -1,20 +1,20 @@ -- one policy -CREATE ROW POLICY p1_01296 ON db_01296.table -CREATE ROW POLICY p1_01296 ON db_01296.table -CREATE ROW POLICY p1_01296 ON db_01296.table FOR SELECT USING 1 -CREATE ROW POLICY p1_01296 ON db_01296.table FOR SELECT USING 1 +CREATE ROW POLICY p1_01296 ON db_01296.`table` +CREATE ROW POLICY p1_01296 ON db_01296.`table` +CREATE ROW POLICY p1_01296 ON db_01296.`table` FOR SELECT USING 1 +CREATE ROW POLICY p1_01296 ON db_01296.`table` FOR SELECT USING 1 -- multiple policies -CREATE ROW POLICY p1_01296 ON db_01296.table FOR SELECT USING 1 -CREATE ROW POLICY p2_01296 ON db_01296.table FOR SELECT USING 1 -CREATE ROW POLICY p3_01296 ON db_01296.table TO u1_01296 +CREATE ROW POLICY p1_01296 ON db_01296.`table` FOR SELECT USING 1 +CREATE ROW POLICY p2_01296 ON db_01296.`table` FOR SELECT USING 1 +CREATE ROW POLICY p3_01296 ON db_01296.`table` TO u1_01296 CREATE ROW POLICY p3_01296 ON db_01296.table2 TO u1_01296 -CREATE ROW POLICY p4_01296 ON db_01296.table FOR SELECT USING a = b +CREATE ROW POLICY p4_01296 ON db_01296.`table` FOR SELECT USING a = b CREATE ROW POLICY p5_01296 ON db_01296.table2 FOR SELECT 
USING a = b -CREATE ROW POLICY p1_01296 ON db_01296.table FOR SELECT USING 1 -CREATE ROW POLICY p2_01296 ON db_01296.table FOR SELECT USING 1 -CREATE ROW POLICY p3_01296 ON db_01296.table TO u1_01296 +CREATE ROW POLICY p1_01296 ON db_01296.`table` FOR SELECT USING 1 +CREATE ROW POLICY p2_01296 ON db_01296.`table` FOR SELECT USING 1 +CREATE ROW POLICY p3_01296 ON db_01296.`table` TO u1_01296 CREATE ROW POLICY p3_01296 ON db_01296.table2 TO u1_01296 -CREATE ROW POLICY p4_01296 ON db_01296.table FOR SELECT USING a = b +CREATE ROW POLICY p4_01296 ON db_01296.`table` FOR SELECT USING a = b CREATE ROW POLICY p5_01296 ON db_01296.table2 FOR SELECT USING a = b -CREATE ROW POLICY p1_01296 ON db_01296.table FOR SELECT USING 1 TO ALL -CREATE ROW POLICY p2_01296 ON db_01296.table FOR SELECT USING 1 TO ALL +CREATE ROW POLICY p1_01296 ON db_01296.`table` FOR SELECT USING 1 TO ALL +CREATE ROW POLICY p2_01296 ON db_01296.`table` FOR SELECT USING 1 TO ALL diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh index 57409d782ae..1d5f5d54853 100755 --- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # Tags: race, zookeeper, no-parallel +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01383_log_broken_table.sh b/tests/queries/0_stateless/01383_log_broken_table.sh index 5cc0f24a87f..4d82fa8547d 100755 --- a/tests/queries/0_stateless/01383_log_broken_table.sh +++ b/tests/queries/0_stateless/01383_log_broken_table.sh @@ -20,6 +20,7 @@ function test_func() MAX_MEM=$((2 * $MAX_MEM)) $CLICKHOUSE_CLIENT --query "INSERT INTO log SELECT number, number, number FROM numbers(1000000)" --max_memory_usage $MAX_MEM > "${CLICKHOUSE_TMP}"/insert_result 2>&1 + RES=$? 
grep -o -F 'Memory limit' "${CLICKHOUSE_TMP}"/insert_result || cat "${CLICKHOUSE_TMP}"/insert_result @@ -27,7 +28,7 @@ function test_func() cat "${CLICKHOUSE_TMP}"/select_result - [[ $MAX_MEM -gt 200000000 ]] && break; + { [[ $RES -eq 0 ]] || [[ $MAX_MEM -gt 200000000 ]]; } && break; done $CLICKHOUSE_CLIENT --query "DROP TABLE log"; diff --git a/tests/queries/0_stateless/01418_custom_settings.reference b/tests/queries/0_stateless/01418_custom_settings.reference index 8484a5d0e6f..923d43077d8 100644 --- a/tests/queries/0_stateless/01418_custom_settings.reference +++ b/tests/queries/0_stateless/01418_custom_settings.reference @@ -30,10 +30,10 @@ custom_f \'word\' --- compound identifier --- test String custom_compound.identifier.v1 \'test\' -CREATE SETTINGS PROFILE s1_01418 SETTINGS custom_compound.identifier.v2 = 100 +CREATE SETTINGS PROFILE `s1_01418` SETTINGS custom_compound.identifier.v2 = 100 --- null type --- \N Nullable(Nothing) custom_null NULL \N Nullable(Nothing) custom_null NULL -CREATE SETTINGS PROFILE s2_01418 SETTINGS custom_null = NULL +CREATE SETTINGS PROFILE `s2_01418` SETTINGS custom_null = NULL diff --git a/tests/queries/0_stateless/01516_create_table_primary_key.sql b/tests/queries/0_stateless/01516_create_table_primary_key.sql index 630c573c2cc..1e5a0b9cddf 100644 --- a/tests/queries/0_stateless/01516_create_table_primary_key.sql +++ b/tests/queries/0_stateless/01516_create_table_primary_key.sql @@ -1,7 +1,10 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + DROP DATABASE IF EXISTS test_01516; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE test_01516 ENGINE=Ordinary; -- Full ATTACH requires UUID with Atomic USE test_01516; diff --git a/tests/queries/0_stateless/01517_drop_mv_with_inner_table.sql b/tests/queries/0_stateless/01517_drop_mv_with_inner_table.sql index 67a2009b913..167625629a5 100644 --- a/tests/queries/0_stateless/01517_drop_mv_with_inner_table.sql +++ b/tests/queries/0_stateless/01517_drop_mv_with_inner_table.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + -- -- Atomic no SYNC -- (should go first to check that thread for DROP TABLE does not hang) @@ -33,6 +35,7 @@ show tables from db_01517_atomic_sync; --- drop database if exists db_01517_ordinary; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. create database db_01517_ordinary Engine=Ordinary; create table db_01517_ordinary.source (key Int) engine=Null; diff --git a/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh index 538d712ad9c..d3fe6d16559 100755 --- a/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh +++ b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh @@ -1,5 +1,8 @@ #!/usr/bin/env bash +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01600_detach_permanently.sh b/tests/queries/0_stateless/01600_detach_permanently.sh index 0d1815a75e8..036706d2fe8 100755 --- a/tests/queries/0_stateless/01600_detach_permanently.sh +++ b/tests/queries/0_stateless/01600_detach_permanently.sh @@ -1,6 +1,9 @@ #!/usr/bin/env bash # Tags: no-parallel +# Creation of a database with Ordinary engine emits a warning. 
+CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql b/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql index c25f308eda8..b3739af93f8 100644 --- a/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql +++ b/tests/queries/0_stateless/01600_remerge_sort_lowered_memory_bytes_ratio.sql @@ -2,7 +2,7 @@ -- Check remerge_sort_lowered_memory_bytes_ratio setting -set max_memory_usage='300Mi'; +set max_memory_usage='200Mi'; -- enter remerge once limit*2 is reached set max_bytes_before_remerge_sort='10Mi'; -- more blocks diff --git a/tests/queries/0_stateless/01601_detach_permanently.sql b/tests/queries/0_stateless/01601_detach_permanently.sql index 95c80e77213..6ab3a7f9b21 100644 --- a/tests/queries/0_stateless/01601_detach_permanently.sql +++ b/tests/queries/0_stateless/01601_detach_permanently.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + SELECT 'database atomic tests'; DROP DATABASE IF EXISTS test1601_detach_permanently_atomic; @@ -73,6 +75,7 @@ SELECT 'database ordinary tests'; DROP DATABASE IF EXISTS test1601_detach_permanently_ordinary; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. CREATE DATABASE test1601_detach_permanently_ordinary Engine=Ordinary; create table test1601_detach_permanently_ordinary.test_name_reuse (number UInt64) engine=MergeTree order by tuple(); diff --git a/tests/queries/0_stateless/01602_show_create_view.reference b/tests/queries/0_stateless/01602_show_create_view.reference index 5fe11a38db3..b3a345f2742 100644 --- a/tests/queries/0_stateless/01602_show_create_view.reference +++ b/tests/queries/0_stateless/01602_show_create_view.reference @@ -1,6 +1,6 @@ -CREATE VIEW test_1602.v\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl -CREATE MATERIALIZED VIEW test_1602.vv\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nENGINE = MergeTree\nPARTITION BY toYYYYMM(EventDate)\nORDER BY (CounterID, EventDate, intHash32(UserID))\nSETTINGS index_granularity = 8192 AS\nSELECT *\nFROM test_1602.tbl -CREATE VIEW test_1602.VIEW\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl -CREATE VIEW test_1602.DATABASE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl -CREATE VIEW test_1602.DICTIONARY\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl -CREATE VIEW test_1602.TABLE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n) AS\nSELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.v\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nAS SELECT *\nFROM test_1602.tbl +CREATE MATERIALIZED VIEW test_1602.vv\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nENGINE = MergeTree\nPARTITION BY toYYYYMM(EventDate)\nORDER BY (CounterID, EventDate, intHash32(UserID))\nSETTINGS index_granularity = 8192\nAS SELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.VIEW\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nAS SELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.DATABASE\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nAS SELECT *\nFROM 
test_1602.tbl +CREATE VIEW test_1602.DICTIONARY\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nAS SELECT *\nFROM test_1602.tbl +CREATE VIEW test_1602.`TABLE`\n(\n `EventDate` DateTime,\n `CounterID` UInt32,\n `UserID` UInt32\n)\nAS SELECT *\nFROM test_1602.tbl diff --git a/tests/queries/0_stateless/01603_rename_overwrite_bug.sql b/tests/queries/0_stateless/01603_rename_overwrite_bug.sql index acf9f520709..cc283ab4292 100644 --- a/tests/queries/0_stateless/01603_rename_overwrite_bug.sql +++ b/tests/queries/0_stateless/01603_rename_overwrite_bug.sql @@ -1,7 +1,10 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + DROP database IF EXISTS test_1603_rename_bug_ordinary; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. create database test_1603_rename_bug_ordinary engine=Ordinary; create table test_1603_rename_bug_ordinary.foo engine=Memory as select * from numbers(100); create table test_1603_rename_bug_ordinary.bar engine=Log as select * from numbers(200); diff --git a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql index 3d6a25fe799..74bafe6e4cd 100644 --- a/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql +++ b/tests/queries/0_stateless/01730_distributed_group_by_no_merge_order_by_long.sql @@ -12,7 +12,7 @@ select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by n -- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block, -- so the initiator will first receive all blocks from remotes and only after start merging, -- and will hit the memory limit. -select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='100Mi', max_block_size=1e12; -- { serverError 241 } +select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError 241 } -- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently, -- since they don't need to wait until the aggregation will be finished, diff --git a/tests/queries/0_stateless/01732_explain_syntax_union_query.reference b/tests/queries/0_stateless/01732_explain_syntax_union_query.reference index ccafa916b9f..5246cfec7aa 100644 --- a/tests/queries/0_stateless/01732_explain_syntax_union_query.reference +++ b/tests/queries/0_stateless/01732_explain_syntax_union_query.reference @@ -54,7 +54,6 @@ SELECT 1 - SELECT 1 - - ( SELECT 1 UNION DISTINCT diff --git a/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh b/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh index 87153a4bd58..3782a7d3ad6 100755 --- a/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh +++ b/tests/queries/0_stateless/01810_max_part_removal_threads_long.sh @@ -7,6 +7,9 @@ # and we can do it compatible with parallel run only in .sh # (via $CLICKHOUSE_DATABASE) +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01874_select_from_trailing_whitespaces.reference b/tests/queries/0_stateless/01874_select_from_trailing_whitespaces.reference index a52505659d1..4dc5ccac840 100644 --- a/tests/queries/0_stateless/01874_select_from_trailing_whitespaces.reference +++ b/tests/queries/0_stateless/01874_select_from_trailing_whitespaces.reference @@ -46,10 +46,12 @@ SELECT * FROM system.one UNION ALL SELECT * FROM system.one SELECT * FROM system.one UNION ALL -SELECT * -FROM system.one +( + SELECT * + FROM system.one +) # /* oneline */ select * from system.one union all (select * from system.one) -SELECT * FROM system.one UNION ALL SELECT * FROM system.one +SELECT * FROM system.one UNION ALL (SELECT * FROM system.one) # select 1 union all (select 1 union distinct select 1) SELECT 1 UNION ALL diff --git a/tests/queries/0_stateless/01890_materialized_distributed_join.sh b/tests/queries/0_stateless/01890_materialized_distributed_join.sh index 0d761f2defa..88f7dcf9a69 100755 --- a/tests/queries/0_stateless/01890_materialized_distributed_join.sh +++ b/tests/queries/0_stateless/01890_materialized_distributed_join.sh @@ -20,7 +20,7 @@ $CLICKHOUSE_CLIENT -nm -q " insert into test_shard values (1, 1); insert into test_local values (1, 2); - create materialized view test_distributed engine Distributed('test_cluster_two_shards', $CLICKHOUSE_DATABASE, 'test_shard', k) as select k, v from test_source; + create materialized view $CLICKHOUSE_DATABASE.test_distributed engine Distributed('test_cluster_two_shards', $CLICKHOUSE_DATABASE, 'test_shard', k) as select k, v from test_source; select * from test_distributed td asof join $CLICKHOUSE_DATABASE.test_local tl on td.k = tl.k and td.v < tl.v; select td.v, td.k, td.v, tl.v, tl.k, td.v from test_distributed td asof join $CLICKHOUSE_DATABASE.test_local tl on td.k = tl.k and td.v < tl.v FORMAT TSVWithNamesAndTypes; diff --git a/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference b/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference index c2ebb7fa4f4..33be11c07d5 100644 --- a/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference +++ b/tests/queries/0_stateless/01913_fix_column_transformer_replace_format.reference @@ -1 +1 @@ -CREATE VIEW default.my_view\n(\n `Id` UInt32,\n `Object.Key` Array(UInt16),\n `Object.Value` Array(String)\n) AS\nSELECT * REPLACE arrayMap(x -> (x + 1), `Object.Key`) AS `Object.Key`\nFROM default.my_table +CREATE VIEW default.my_view\n(\n `Id` UInt32,\n `Object.Key` Array(UInt16),\n `Object.Value` Array(String)\n)\nAS SELECT * REPLACE arrayMap(x -> (x + 1), `Object.Key`) AS `Object.Key`\nFROM default.my_table diff --git a/tests/queries/0_stateless/01999_grant_with_replace.reference b/tests/queries/0_stateless/01999_grant_with_replace.reference index 740c55d5325..dc2047ab73c 100644 --- a/tests/queries/0_stateless/01999_grant_with_replace.reference +++ b/tests/queries/0_stateless/01999_grant_with_replace.reference @@ -4,12 +4,12 @@ B GRANT SELECT ON db1.* TO test_user_01999 GRANT SHOW TABLES, SHOW COLUMNS, SHOW DICTIONARIES ON db2.tb2 TO test_user_01999 C -GRANT SELECT(col1) ON db3.table TO test_user_01999 +GRANT SELECT(col1) ON db3.`table` TO test_user_01999 D GRANT SELECT(col3) ON db3.table3 TO test_user_01999 GRANT SELECT(col1, col2) ON db4.table4 TO test_user_01999 E -GRANT SELECT(cola) ON db5.table TO test_user_01999 +GRANT SELECT(cola) ON db5.`table` TO test_user_01999 GRANT INSERT(colb) ON db6.tb61 TO 
test_user_01999 GRANT SHOW ON db7.* TO test_user_01999 F diff --git a/tests/queries/0_stateless/02021_create_database_with_comment.sh b/tests/queries/0_stateless/02021_create_database_with_comment.sh index 11e62e790b7..8432963e059 100755 --- a/tests/queries/0_stateless/02021_create_database_with_comment.sh +++ b/tests/queries/0_stateless/02021_create_database_with_comment.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02096_rename_atomic_hang.sql b/tests/queries/0_stateless/02096_rename_atomic_hang.sql index dec5f3f9506..32d7efec6c2 100644 --- a/tests/queries/0_stateless/02096_rename_atomic_hang.sql +++ b/tests/queries/0_stateless/02096_rename_atomic_hang.sql @@ -1,8 +1,9 @@ -- Tags: no-parallel - +SET send_logs_level = 'fatal'; drop database if exists db_hang; drop database if exists db_hang_temp; set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. create database db_hang engine=Ordinary; use db_hang; create table db_hang.test(A Int64) Engine=MergeTree order by A; diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index beda9e36223..0bb8966cbe4 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.table\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') -CREATE TABLE foo.table\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') diff --git a/tests/queries/0_stateless/02152_http_external_tables_memory_tracking.reference b/tests/queries/0_stateless/02152_http_external_tables_memory_tracking.reference deleted file mode 100644 index 1fc09c8d154..00000000000 --- a/tests/queries/0_stateless/02152_http_external_tables_memory_tracking.reference +++ /dev/null @@ -1,16 +0,0 @@ -Checking input_format_parallel_parsing=false& -1 -Checking input_format_parallel_parsing=false&cancel_http_readonly_queries_on_client_close=1&readonly=1 -1 -Checking input_format_parallel_parsing=false&send_progress_in_http_headers=true -1 -Checking input_format_parallel_parsing=false&cancel_http_readonly_queries_on_client_close=1&readonly=1&send_progress_in_http_headers=true -1 -Checking input_format_parallel_parsing=true& -1 -Checking input_format_parallel_parsing=true&cancel_http_readonly_queries_on_client_close=1&readonly=1 -1 -Checking input_format_parallel_parsing=true&send_progress_in_http_headers=true -1 -Checking input_format_parallel_parsing=true&cancel_http_readonly_queries_on_client_close=1&readonly=1&send_progress_in_http_headers=true -1 diff --git a/tests/queries/0_stateless/02152_http_external_tables_memory_tracking.sh b/tests/queries/0_stateless/02152_http_external_tables_memory_tracking.sh deleted file mode 100755 index 5f9eb460e44..00000000000 --- a/tests/queries/0_stateless/02152_http_external_tables_memory_tracking.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-tsan, no-cpu-aarch64, no-parallel -# TSan does not 
supports tracing. -# trace_log doesn't work on aarch64 - -# Regression for proper release of Context, -# via tracking memory of external tables. - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. "$CURDIR"/../shell_config.sh - -tmp_file=$(mktemp "$CURDIR/clickhouse.XXXXXX.csv") -trap 'rm $tmp_file' EXIT - -$CLICKHOUSE_CLIENT -q "SELECT toString(number) FROM numbers(1e6) FORMAT TSV" > "$tmp_file" - -function run_and_check() -{ - local query_id - query_id="$(${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @- <<<'SELECT generateUUIDv4()')" - - echo "Checking $*" - - # Run query with external table (implicit StorageMemory user) - $CLICKHOUSE_CURL -sS -F "s=@$tmp_file;" "$CLICKHOUSE_URL&s_structure=key+Int&query=SELECT+count()+FROM+s&memory_profiler_sample_probability=1&max_untracked_memory=0&query_id=$query_id&$*" -o /dev/null - - ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @- <<<'SYSTEM FLUSH LOGS' - - # Check that temporary table had been destroyed. - ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&allow_introspection_functions=1" --data-binary @- <<<" - WITH arrayStringConcat(arrayMap(x -> demangle(addressToSymbol(x)), trace), '\n') AS sym - SELECT count()>0 FROM system.trace_log - WHERE - sym LIKE '%DB::StorageMemory::drop%\n%TemporaryTableHolder::~TemporaryTableHolder%' AND - query_id = '$query_id' - " -} - -for input_format_parallel_parsing in false true; do - query_args_variants=( - "" - "cancel_http_readonly_queries_on_client_close=1&readonly=1" - "send_progress_in_http_headers=true" - # nested progress callback - "cancel_http_readonly_queries_on_client_close=1&readonly=1&send_progress_in_http_headers=true" - ) - for query_args in "${query_args_variants[@]}"; do - run_and_check "input_format_parallel_parsing=$input_format_parallel_parsing&$query_args" - done -done diff --git a/tests/queries/0_stateless/02181_format_describe_query.reference b/tests/queries/0_stateless/02181_format_describe_query.reference index 328ea19bd37..238a83df50a 100644 --- a/tests/queries/0_stateless/02181_format_describe_query.reference +++ b/tests/queries/0_stateless/02181_format_describe_query.reference @@ -1,3 +1,3 @@ DESCRIBE TABLE file('data.csv') -DESCRIBE TABLE table +DESCRIBE TABLE `table` DESCRIBE TABLE file('data.csv') diff --git a/tests/queries/0_stateless/02184_default_table_engine.reference b/tests/queries/0_stateless/02184_default_table_engine.reference index 495b9627acb..83760a178bd 100644 --- a/tests/queries/0_stateless/02184_default_table_engine.reference +++ b/tests/queries/0_stateless/02184_default_table_engine.reference @@ -9,7 +9,7 @@ CREATE TABLE default.numbers1\n(\n `number` UInt64\n)\nENGINE = Memory CREATE TABLE default.numbers2\n(\n `number` UInt64\n)\nENGINE = MergeTree\nORDER BY intHash32(number)\nSAMPLE BY intHash32(number)\nSETTINGS index_granularity = 8192 45 CREATE TABLE default.numbers3\n(\n `number` UInt64\n)\nENGINE = Log -CREATE MATERIALIZED VIEW default.test_view_filtered\n(\n `EventDate` Date,\n `CounterID` UInt32\n)\nENGINE = Memory AS\nSELECT\n CounterID,\n EventDate\nFROM default.test_table\nWHERE EventDate < \'2013-01-01\' +CREATE MATERIALIZED VIEW default.test_view_filtered\n(\n `EventDate` Date,\n `CounterID` UInt32\n)\nENGINE = Memory\nAS SELECT\n CounterID,\n EventDate\nFROM default.test_table\nWHERE EventDate < \'2013-01-01\' 2014-01-02 0 0 1969-12-31 16:00:00 2014-01-02 03:04:06 1 2014-01-01 19:04:06 CREATE TABLE default.t1\n(\n `Rows` UInt64,\n `MaxHitTime` DateTime(\'UTC\')\n)\nENGINE = 
MergeTree\nORDER BY Rows\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02206_information_schema_show_database.reference b/tests/queries/0_stateless/02206_information_schema_show_database.reference index fcc41e771b3..2cc93c56e82 100644 --- a/tests/queries/0_stateless/02206_information_schema_show_database.reference +++ b/tests/queries/0_stateless/02206_information_schema_show_database.reference @@ -1,6 +1,6 @@ CREATE DATABASE INFORMATION_SCHEMA\nENGINE = Memory -CREATE VIEW INFORMATION_SCHEMA.COLUMNS\n(\n `table_catalog` String,\n `table_schema` String,\n `table_name` String,\n `column_name` String,\n `ordinal_position` UInt64,\n `column_default` String,\n `is_nullable` String,\n `data_type` String,\n `character_maximum_length` Nullable(UInt64),\n `character_octet_length` Nullable(UInt64),\n `numeric_precision` Nullable(UInt64),\n `numeric_precision_radix` Nullable(UInt64),\n `numeric_scale` Nullable(UInt64),\n `datetime_precision` Nullable(UInt64),\n `character_set_catalog` Nullable(String),\n `character_set_schema` Nullable(String),\n `character_set_name` Nullable(String),\n `collation_catalog` Nullable(String),\n `collation_schema` Nullable(String),\n `collation_name` Nullable(String),\n `domain_catalog` Nullable(String),\n `domain_schema` Nullable(String),\n `domain_name` Nullable(String),\n `extra` Nullable(String),\n `column_comment` String,\n `column_type` String,\n `TABLE_CATALOG` String,\n `TABLE_SCHEMA` String,\n `TABLE_NAME` String,\n `COLUMN_NAME` String,\n `ORDINAL_POSITION` UInt64,\n `COLUMN_DEFAULT` String,\n `IS_NULLABLE` String,\n `DATA_TYPE` String,\n `CHARACTER_MAXIMUM_LENGTH` Nullable(UInt64),\n `CHARACTER_OCTET_LENGTH` Nullable(UInt64),\n `NUMERIC_PRECISION` Nullable(UInt64),\n `NUMERIC_PRECISION_RADIX` Nullable(UInt64),\n `NUMERIC_SCALE` Nullable(UInt64),\n `DATETIME_PRECISION` Nullable(UInt64),\n `CHARACTER_SET_CATALOG` Nullable(String),\n `CHARACTER_SET_SCHEMA` Nullable(String),\n `CHARACTER_SET_NAME` Nullable(String),\n `COLLATION_CATALOG` Nullable(String),\n `COLLATION_SCHEMA` Nullable(String),\n `COLLATION_NAME` Nullable(String),\n `DOMAIN_CATALOG` Nullable(String),\n `DOMAIN_SCHEMA` Nullable(String),\n `DOMAIN_NAME` Nullable(String),\n `EXTRA` Nullable(String),\n `COLUMN_COMMENT` String,\n `COLUMN_TYPE` String\n) AS\nSELECT\n database AS table_catalog,\n database AS table_schema,\n table AS table_name,\n name AS column_name,\n position AS ordinal_position,\n default_expression AS column_default,\n type LIKE \'Nullable(%)\' AS is_nullable,\n type AS data_type,\n character_octet_length AS character_maximum_length,\n character_octet_length,\n numeric_precision,\n numeric_precision_radix,\n numeric_scale,\n datetime_precision,\n NULL AS character_set_catalog,\n NULL AS character_set_schema,\n NULL AS character_set_name,\n NULL AS collation_catalog,\n NULL AS collation_schema,\n NULL AS collation_name,\n NULL AS domain_catalog,\n NULL AS domain_schema,\n NULL AS domain_name,\n multiIf(default_kind = \'DEFAULT\', \'DEFAULT_GENERATED\', default_kind = \'MATERIALIZED\', \'STORED GENERATED\', default_kind = \'ALIAS\', \'VIRTUAL GENERATED\', \'\') AS extra,\n comment AS column_comment,\n type AS column_type,\n table_catalog AS TABLE_CATALOG,\n table_schema AS TABLE_SCHEMA,\n table_name AS TABLE_NAME,\n column_name AS COLUMN_NAME,\n ordinal_position AS ORDINAL_POSITION,\n column_default AS COLUMN_DEFAULT,\n is_nullable AS IS_NULLABLE,\n data_type AS DATA_TYPE,\n character_maximum_length AS CHARACTER_MAXIMUM_LENGTH,\n character_octet_length AS 
CHARACTER_OCTET_LENGTH,\n numeric_precision AS NUMERIC_PRECISION,\n numeric_precision_radix AS NUMERIC_PRECISION_RADIX,\n numeric_scale AS NUMERIC_SCALE,\n datetime_precision AS DATETIME_PRECISION,\n character_set_catalog AS CHARACTER_SET_CATALOG,\n character_set_schema AS CHARACTER_SET_SCHEMA,\n character_set_name AS CHARACTER_SET_NAME,\n collation_catalog AS COLLATION_CATALOG,\n collation_schema AS COLLATION_SCHEMA,\n collation_name AS COLLATION_NAME,\n domain_catalog AS DOMAIN_CATALOG,\n domain_schema AS DOMAIN_SCHEMA,\n domain_name AS DOMAIN_NAME,\n extra AS EXTRA,\n column_comment AS COLUMN_COMMENT,\n column_type AS COLUMN_TYPE\nFROM system.columns -CREATE VIEW INFORMATION_SCHEMA.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables -CREATE VIEW INFORMATION_SCHEMA.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables -CREATE VIEW information_schema.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', 
\'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables -CREATE VIEW information_schema.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables +CREATE VIEW INFORMATION_SCHEMA.COLUMNS\n(\n `table_catalog` String,\n `table_schema` String,\n `table_name` String,\n `column_name` String,\n `ordinal_position` UInt64,\n `column_default` String,\n `is_nullable` String,\n `data_type` String,\n `character_maximum_length` Nullable(UInt64),\n `character_octet_length` Nullable(UInt64),\n `numeric_precision` Nullable(UInt64),\n `numeric_precision_radix` Nullable(UInt64),\n `numeric_scale` Nullable(UInt64),\n `datetime_precision` Nullable(UInt64),\n `character_set_catalog` Nullable(String),\n `character_set_schema` Nullable(String),\n `character_set_name` Nullable(String),\n `collation_catalog` Nullable(String),\n `collation_schema` Nullable(String),\n `collation_name` Nullable(String),\n `domain_catalog` Nullable(String),\n `domain_schema` Nullable(String),\n `domain_name` Nullable(String),\n `extra` Nullable(String),\n `column_comment` String,\n `column_type` String,\n `TABLE_CATALOG` String,\n `TABLE_SCHEMA` String,\n `TABLE_NAME` String,\n `COLUMN_NAME` String,\n `ORDINAL_POSITION` UInt64,\n `COLUMN_DEFAULT` String,\n `IS_NULLABLE` String,\n `DATA_TYPE` String,\n `CHARACTER_MAXIMUM_LENGTH` Nullable(UInt64),\n `CHARACTER_OCTET_LENGTH` Nullable(UInt64),\n `NUMERIC_PRECISION` Nullable(UInt64),\n `NUMERIC_PRECISION_RADIX` Nullable(UInt64),\n `NUMERIC_SCALE` Nullable(UInt64),\n `DATETIME_PRECISION` Nullable(UInt64),\n `CHARACTER_SET_CATALOG` Nullable(String),\n `CHARACTER_SET_SCHEMA` Nullable(String),\n `CHARACTER_SET_NAME` Nullable(String),\n `COLLATION_CATALOG` Nullable(String),\n `COLLATION_SCHEMA` Nullable(String),\n `COLLATION_NAME` Nullable(String),\n `DOMAIN_CATALOG` Nullable(String),\n `DOMAIN_SCHEMA` Nullable(String),\n `DOMAIN_NAME` Nullable(String),\n `EXTRA` Nullable(String),\n `COLUMN_COMMENT` String,\n `COLUMN_TYPE` String\n)\nSQL SECURITY INVOKER\nAS SELECT\n database AS table_catalog,\n database AS table_schema,\n `table` AS 
table_name,\n name AS column_name,\n position AS ordinal_position,\n default_expression AS column_default,\n type LIKE \'Nullable(%)\' AS is_nullable,\n type AS data_type,\n character_octet_length AS character_maximum_length,\n character_octet_length,\n numeric_precision,\n numeric_precision_radix,\n numeric_scale,\n datetime_precision,\n NULL AS character_set_catalog,\n NULL AS character_set_schema,\n NULL AS character_set_name,\n NULL AS collation_catalog,\n NULL AS collation_schema,\n NULL AS collation_name,\n NULL AS domain_catalog,\n NULL AS domain_schema,\n NULL AS domain_name,\n multiIf(default_kind = \'DEFAULT\', \'DEFAULT_GENERATED\', default_kind = \'MATERIALIZED\', \'STORED GENERATED\', default_kind = \'ALIAS\', \'VIRTUAL GENERATED\', \'\') AS extra,\n comment AS column_comment,\n type AS column_type,\n table_catalog AS TABLE_CATALOG,\n table_schema AS TABLE_SCHEMA,\n table_name AS TABLE_NAME,\n column_name AS COLUMN_NAME,\n ordinal_position AS ORDINAL_POSITION,\n column_default AS COLUMN_DEFAULT,\n is_nullable AS IS_NULLABLE,\n data_type AS DATA_TYPE,\n character_maximum_length AS CHARACTER_MAXIMUM_LENGTH,\n character_octet_length AS CHARACTER_OCTET_LENGTH,\n numeric_precision AS NUMERIC_PRECISION,\n numeric_precision_radix AS NUMERIC_PRECISION_RADIX,\n numeric_scale AS NUMERIC_SCALE,\n datetime_precision AS DATETIME_PRECISION,\n character_set_catalog AS CHARACTER_SET_CATALOG,\n character_set_schema AS CHARACTER_SET_SCHEMA,\n character_set_name AS CHARACTER_SET_NAME,\n collation_catalog AS COLLATION_CATALOG,\n collation_schema AS COLLATION_SCHEMA,\n collation_name AS COLLATION_NAME,\n domain_catalog AS DOMAIN_CATALOG,\n domain_schema AS DOMAIN_SCHEMA,\n domain_name AS DOMAIN_NAME,\n extra AS EXTRA,\n column_comment AS COLUMN_COMMENT,\n column_type AS COLUMN_TYPE\nFROM system.columns +CREATE VIEW INFORMATION_SCHEMA.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) SQL SECURITY INVOKER AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables +CREATE VIEW INFORMATION_SCHEMA.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) SQL SECURITY INVOKER AS SELECT database AS table_catalog, database AS table_schema, name AS 
table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables +CREATE VIEW information_schema.TABLES (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) SQL SECURITY INVOKER AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables +CREATE VIEW information_schema.tables (`table_catalog` String, `table_schema` String, `table_name` String, `table_type` String, `table_rows` Nullable(UInt64), `data_length` Nullable(UInt64), `table_collation` Nullable(String), `table_comment` Nullable(String), `TABLE_CATALOG` String, `TABLE_SCHEMA` String, `TABLE_NAME` String, `TABLE_TYPE` String, `TABLE_ROWS` Nullable(UInt64), `DATA_LENGTH` Nullable(UInt64), `TABLE_COLLATION` Nullable(String), `TABLE_COMMENT` Nullable(String)) SQL SECURITY INVOKER AS SELECT database AS table_catalog, database AS table_schema, name AS table_name, multiIf(is_temporary, \'LOCAL TEMPORARY\', engine LIKE \'%View\', \'VIEW\', engine LIKE \'System%\', \'SYSTEM VIEW\', has_own_data = 0, \'FOREIGN TABLE\', \'BASE TABLE\') AS table_type, total_rows AS table_rows, total_bytes AS data_length, \'utf8mb4_0900_ai_ci\' AS table_collation, comment AS table_comment, table_catalog AS TABLE_CATALOG, table_schema AS TABLE_SCHEMA, table_name AS TABLE_NAME, table_type AS TABLE_TYPE, table_rows AS TABLE_ROWS, data_length AS DATA_LENGTH, table_collation AS TABLE_COLLATION, table_comment AS TABLE_COMMENT FROM system.tables diff --git a/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql index 041cb887647..235fc86f828 100644 --- a/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql +++ b/tests/queries/0_stateless/02265_rename_join_ordinary_to_atomic.sql @@ -1,5 +1,7 @@ -- Tags: no-parallel +SET send_logs_level = 'fatal'; + set allow_deprecated_database_ordinary=1; DROP DATABASE IF EXISTS 02265_atomic_db; DROP DATABASE IF EXISTS 02265_ordinary_db; diff --git a/tests/queries/0_stateless/02343_create_empty_as_select.reference b/tests/queries/0_stateless/02343_create_empty_as_select.reference 
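The regenerated INFORMATION_SCHEMA reference output above now carries a SQL SECURITY INVOKER clause immediately before each view's AS SELECT, so the underlying SELECT over system.columns / system.tables is authorized with the grants of the querying user rather than those of the view's owner. A minimal sketch of the same clause, with a hypothetical view name and a trimmed column list (assumed 24.2 syntax; not the exact definition used by the reference files):

    -- hypothetical view; db.v and the selected columns are illustrative only
    CREATE VIEW db.v
    SQL SECURITY INVOKER
    AS SELECT name, engine
    FROM system.tables;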
index 3b0d34c5863..8a21a716bd1 100644 --- a/tests/queries/0_stateless/02343_create_empty_as_select.reference +++ b/tests/queries/0_stateless/02343_create_empty_as_select.reference @@ -1,4 +1,4 @@ CREATE TABLE default.t\n(\n `1` UInt8\n)\nENGINE = Memory 0 -CREATE MATERIALIZED VIEW default.mv\n(\n `1` UInt8\n)\nENGINE = Memory AS\nSELECT 1 +CREATE MATERIALIZED VIEW default.mv\n(\n `1` UInt8\n)\nENGINE = Memory\nAS SELECT 1 0 diff --git a/tests/queries/0_stateless/02346_inverted_index_search.sql b/tests/queries/0_stateless/02346_inverted_index_search.sql index be56f24d5da..d225d3463d1 100644 --- a/tests/queries/0_stateless/02346_inverted_index_search.sql +++ b/tests/queries/0_stateless/02346_inverted_index_search.sql @@ -243,40 +243,6 @@ CREATE TABLE tab (row_id UInt32, str String, INDEX idx str TYPE inverted) ENGINE INSERT INTO tab VALUES (0, 'a'); SELECT * FROM tab WHERE str == 'b' AND 1.0; - --- Tests with parameter max_digestion_size_per_segment are flaky in CI, not clear why --> comment out for the time being: - --- ---------------------------------------------------- --- SELECT 'Test max_digestion_size_per_segment'; --- --- DROP TABLE IF EXISTS tab; --- --- CREATE TABLE tab(k UInt64, s String, INDEX af(s) TYPE inverted(0)) --- Engine=MergeTree --- ORDER BY (k) --- SETTINGS max_digestion_size_per_segment = 1024, index_granularity = 256 --- AS --- SELECT --- number, --- format('{},{},{},{}', hex(12345678), hex(87654321), hex(number/17 + 5), hex(13579012)) as s --- FROM numbers(10240); --- --- -- check inverted index was created --- SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; --- --- -- search inverted index --- SELECT s FROM tab WHERE hasToken(s, '6969696969898240'); --- --- -- check the query only read 1 granule (1 row total; each granule has 256 rows) --- SYSTEM FLUSH LOGS; --- SELECT read_rows==256 from system.query_log --- WHERE query_kind ='Select' --- AND current_database = currentDatabase() --- AND endsWith(trimRight(query), 'SELECT s FROM tab WHERE hasToken(s, \'6969696969898240\');') --- AND type='QueryFinish' --- AND result_rows==1 --- LIMIT 1; --- SELECT 'Test max_rows_per_postings_list'; DROP TABLE IF EXISTS tab; -- create table 'tab' with inverted index parameter (ngrams, max_rows_per_most_list) which is (0, 10240) diff --git a/tests/queries/0_stateless/02346_non_negative_derivative.reference b/tests/queries/0_stateless/02346_non_negative_derivative.reference index b81af45962e..22e5f609ad7 100644 --- a/tests/queries/0_stateless/02346_non_negative_derivative.reference +++ b/tests/queries/0_stateless/02346_non_negative_derivative.reference @@ -1,63 +1,63 @@ 1 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 340.00000000000006 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 0.20550000000000002 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 0.0000010200000000000004 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 6.165000000000001e-10 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 
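The reference rows above change because the corresponding .sql test (shown further below) adds metric as a second sort key in every window definition: several input rows share the same millisecond timestamp (two rows at 1979-12-12 21:21:21.123), so ORDER BY ts alone did not pin a unique row order within the frame and the computed derivative was not deterministic. Restating one of the updated queries from that test, reformatted across lines for readability (table nnd and its columns come from the test itself):

    SELECT
        ts,
        metric,
        nonNegativeDerivative(metric, ts, INTERVAL 5 MILLISECOND)
            OVER (PARTITION BY id > 3 ORDER BY ts, metric ASC
                  Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv
    FROM nnd;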
+1979-12-12 21:21:21.127 3.7 0.00136 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 8.22e-7 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 1.7000000000000004 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 0.0010275000000000002 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 2040.0000000000005 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 1.233 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 142800.00000000003 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 86.31 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 9792000.000000002 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 5918.400000000001 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 264384000.00000003 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 159796.80000000002 1979-12-12 21:21:23.000 1.54 0 -1979-12-12 21:21:21.127 3.7 0 -2299-12-31 23:37:36.788 1.1 0 -2299-12-31 23:37:36.789 2.34 0 +1979-12-12 21:21:21.123 1.1 0 +1979-12-12 21:21:21.123 2.34 0 +1979-12-12 21:21:21.127 3.7 2056320000.0000002 1979-12-12 21:21:21.129 2.1 0 1979-12-12 21:21:22.000 1.3345 0 1979-12-12 21:21:23.000 1.54 1242864 diff --git a/tests/queries/0_stateless/02346_non_negative_derivative.sql b/tests/queries/0_stateless/02346_non_negative_derivative.sql index 265a8afb2cb..704241da16c 100644 --- a/tests/queries/0_stateless/02346_non_negative_derivative.sql +++ b/tests/queries/0_stateless/02346_non_negative_derivative.sql @@ -18,7 +18,7 @@ SELECT ( SELECT ts, metric, - nonNegativeDerivative(metric, ts) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv + nonNegativeDerivative(metric, ts) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd LIMIT 5, 1 ) = ( @@ -29,37 +29,37 @@ SELECT ( FROM nnd LIMIT 5, 1 ); -SELECT ts, metric, nonNegativeDerivative(metric, ts) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; -- Nanosecond -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows 
BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd; -- Microsecond -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 4 MICROSECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 4 MICROSECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; -- Millisecond -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 5 MILLISECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 5 MILLISECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; -- Second -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 6 SECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 6 SECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- Minute -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 7 MINUTE) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 7 MINUTE) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd; -- Hour -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 8 HOUR) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 8 HOUR) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- Day -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 9 DAY) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 3 PRECEDING AND 3 FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 9 DAY) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 3 PRECEDING AND 3 FOLLOWING) AS deriv FROM nnd; -- Week -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 10 WEEK) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 10 WEEK) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- shall not work for month, quarter, year (intervals with floating number of seconds) -- Month -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 11 MONTH) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 11 MONTH) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -- Quarter -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 12 QUARTER) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT ts, metric, 
nonNegativeDerivative(metric, ts, INTERVAL 12 QUARTER) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -- Year -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 13 YEAR) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 13 YEAR) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -- test against wrong arguments/types -SELECT ts, metric, nonNegativeDerivative(metric, 1, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } -SELECT ts, metric, nonNegativeDerivative('string not datetime', ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } -SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } -SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative(metric, 1, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative('string not datetime', ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } -- cleanup DROP TABLE IF EXISTS nnd; diff --git a/tests/queries/0_stateless/02352_rwlock.sh b/tests/queries/0_stateless/02352_rwlock.sh index 08551794c2e..b4a77e0b08a 100755 --- a/tests/queries/0_stateless/02352_rwlock.sh +++ b/tests/queries/0_stateless/02352_rwlock.sh @@ -6,6 +6,9 @@ # In other words to ensure that after WRITE lock failure (DROP), # READ lock (SELECT) available instantly. +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02406_minmax_behaviour.reference b/tests/queries/0_stateless/02406_minmax_behaviour.reference index d52ba640a0e..5a715e1c7f7 100644 --- a/tests/queries/0_stateless/02406_minmax_behaviour.reference +++ b/tests/queries/0_stateless/02406_minmax_behaviour.reference @@ -56,6 +56,10 @@ SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, 22 SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); 26 +SELECT max(number) from (Select if(number % 2 == 1, NULL, -number::Int8) as number FROM numbers(128)); +0 +SELECT min(number) from (Select if(number % 2 == 1, NULL, -number::Int8) as number FROM numbers(128)); +-126 SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; 10 SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; @@ -190,3 +194,7 @@ SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, 22 SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); 26 +SELECT max(number::Nullable(Decimal64(3))) from numbers(11) settings max_block_size=10; +10 +SELECT min(-number::Nullable(Decimal64(3))) from numbers(11) settings max_block_size=10; +-10 diff --git a/tests/queries/0_stateless/02406_minmax_behaviour.sql b/tests/queries/0_stateless/02406_minmax_behaviour.sql index a3afe7d40b0..314374a260d 100644 --- a/tests/queries/0_stateless/02406_minmax_behaviour.sql +++ b/tests/queries/0_stateless/02406_minmax_behaviour.sql @@ -48,6 +48,9 @@ SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1 SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +SELECT max(number) from (Select if(number % 2 == 1, NULL, -number::Int8) as number FROM numbers(128)); +SELECT min(number) from (Select if(number % 2 == 1, NULL, -number::Int8) as number FROM numbers(128)); + SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; @@ -138,3 +141,6 @@ SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1 SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); + +SELECT max(number::Nullable(Decimal64(3))) from numbers(11) settings max_block_size=10; +SELECT min(-number::Nullable(Decimal64(3))) from numbers(11) settings max_block_size=10; diff --git a/tests/queries/0_stateless/02428_combinators_with_over_statement.sql b/tests/queries/0_stateless/02428_combinators_with_over_statement.sql index 7946b997b00..2b82839d6eb 100644 --- a/tests/queries/0_stateless/02428_combinators_with_over_statement.sql +++ 
b/tests/queries/0_stateless/02428_combinators_with_over_statement.sql @@ -1,5 +1,6 @@ drop table if exists test; create table test (x AggregateFunction(uniq, UInt64), y Int64) engine=Memory; +set max_insert_threads = 1; insert into test select uniqState(number) as x, number as y from numbers(10) group by number order by x, y; select uniqStateMap(map(1, x)) OVER (PARTITION BY y) from test; select uniqStateForEach([x]) OVER (PARTITION BY y) from test; diff --git a/tests/queries/0_stateless/02449_check_dependencies_and_table_shutdown.reference b/tests/queries/0_stateless/02449_check_dependencies_and_table_shutdown.reference index 37cd3d93e39..317dbe1c61c 100644 --- a/tests/queries/0_stateless/02449_check_dependencies_and_table_shutdown.reference +++ b/tests/queries/0_stateless/02449_check_dependencies_and_table_shutdown.reference @@ -1,5 +1,5 @@ CREATE DICTIONARY default.dict\n(\n `id` UInt32,\n `value` String\n)\nPRIMARY KEY id\nSOURCE(CLICKHOUSE(HOST \'localhost\' PORT 9000 USER \'default\' DB \'default\' TABLE \'view\'))\nLIFETIME(MIN 600 MAX 600)\nLAYOUT(HASHED()) -CREATE TABLE default.table\n(\n `col` String MATERIALIZED dictGet(\'default.dict\', \'value\', toUInt32(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.`table`\n(\n `col` String MATERIALIZED dictGet(\'default.dict\', \'value\', toUInt32(1))\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 1 v 1 v 1 v diff --git a/tests/queries/0_stateless/02454_set_parameters_formatting.reference b/tests/queries/0_stateless/02454_set_parameters_formatting.reference index 1098a8159f3..c438c068635 100644 --- a/tests/queries/0_stateless/02454_set_parameters_formatting.reference +++ b/tests/queries/0_stateless/02454_set_parameters_formatting.reference @@ -1,3 +1,3 @@ -SET param_a = 1 -SET max_threads = 1, param_a = 1 -SET max_threads = 1, param_a = 1 +SET param_a = '1' +SET max_threads = 1, param_a = '1' +SET max_threads = 1, param_a = '1' diff --git a/tests/queries/0_stateless/02457_insert_select_progress_http.sh b/tests/queries/0_stateless/02457_insert_select_progress_http.sh index ae62ee4b77e..ccf37dfb327 100755 --- a/tests/queries/0_stateless/02457_insert_select_progress_http.sh +++ b/tests/queries/0_stateless/02457_insert_select_progress_http.sh @@ -4,6 +4,6 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d @- <<< "insert into function null('_ Int') select * from numbers(5) settings max_block_size=1" -v |& { +${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d @- <<< "insert into function null('_ Int') select * from numbers(5) settings max_block_size=1, max_insert_threads=1" -v |& { grep -F -e X-ClickHouse-Progress: -e X-ClickHouse-Summary: | sed 's/,\"elapsed_ns[^}]*//' } diff --git a/tests/queries/0_stateless/02476_fix_lambda_parsing.reference b/tests/queries/0_stateless/02476_fix_lambda_parsing.reference index de508c7a0d3..04e64792b23 100644 --- a/tests/queries/0_stateless/02476_fix_lambda_parsing.reference +++ b/tests/queries/0_stateless/02476_fix_lambda_parsing.reference @@ -1,4 +1,4 @@ -SELECT f(x, y -> z) +SELECT f(x, (y -> z)) SELECT f((x, y) -> z) SELECT f((x, y) -> z) -SELECT f(x, (x, y) -> z) +SELECT f(x, ((x, y) -> z)) diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql index 5c90313b6b8..507ac7289f5 100644 --- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql +++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql @@ -5,7 +5,7 @@ create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02468/{database}', ' system stop cleanup rmt; system stop merges rmt1; -insert into rmt select * from numbers(10) settings max_block_size=1; +insert into rmt select * from numbers(10) settings max_block_size=1, max_insert_threads=1; alter table rmt drop partition id '0'; truncate table rmt1; @@ -31,7 +31,7 @@ create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02468/{database}2', system stop cleanup rmt; system stop merges rmt1; -insert into rmt select * from numbers(10) settings max_block_size=1; +insert into rmt select * from numbers(10) settings max_block_size=1, max_insert_threads=1; system sync replica rmt1 lightweight; alter table rmt replace partition id '0' from rmt2; diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index e7c169cf45e..bc22ae23ee1 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -220,13 +220,16 @@ FROM ) GROUP BY number ORDER BY number +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test -- explain Expression (Projection) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) Aggregating - Expression ((Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY))))) - ReadFromSystemNumbers + Expression ((Before GROUP BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + ReadFromSystemNumbers -- execute 0 1 @@ -289,13 +292,16 @@ FROM GROUP BY number ) ORDER BY a ASC +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test -- explain Expression (Projection) Sorting (Sorting for ORDER BY) Expression ((Before ORDER BY + (Projection + Before ORDER BY))) Aggregating - Expression ((Before GROUP BY + (Projection + Before ORDER BY))) - ReadFromSystemNumbers + Expression ((Before GROUP BY + Projection)) + Sorting 
(Sorting for ORDER BY) + Expression (Before ORDER BY) + ReadFromSystemNumbers -- execute 0 1 @@ -321,14 +327,18 @@ FROM ) WHERE a > 0 ORDER BY a +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test -- explain Expression (Projection) Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + )) - Aggregating - Filter - Filter (( + (Before GROUP BY + (Projection + (Before ORDER BY + (Projection + Before ORDER BY)))))) - ReadFromSystemNumbers + Expression (Before ORDER BY) + Filter ((WHERE + (Projection + Before ORDER BY))) + Filter (HAVING) + Aggregating + Expression ((Before GROUP BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + ReadFromSystemNumbers -- execute 1 2 diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index c676e0340b1..23eccb4e782 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -157,7 +157,8 @@ FROM ORDER BY number DESC ) GROUP BY number -ORDER BY number" +ORDER BY number +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test" run_query "$query" echo "-- query with aggregation function but w/o GROUP BY -> remove sorting" @@ -200,7 +201,8 @@ FROM ) GROUP BY number ) -ORDER BY a ASC" +ORDER BY a ASC +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test" run_query "$query" echo "-- Check that optimization works for subqueries as well, - main query have neither ORDER BY nor GROUP BY" @@ -222,7 +224,8 @@ FROM GROUP BY number ) WHERE a > 0 -ORDER BY a" +ORDER BY a +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test" run_query "$query" echo "-- GROUP BY in most inner query makes execution parallelized, and removing inner sorting steps will keep it that way. 
But need to correctly update data streams sorting properties after removing sorting steps" diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference index 16d3327b9c2..47348651025 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference @@ -220,6 +220,7 @@ FROM ) GROUP BY number ORDER BY number +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test -- explain Expression (Project names) Sorting (Sorting for ORDER BY) @@ -291,6 +292,7 @@ FROM GROUP BY number ) ORDER BY a ASC +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test -- explain Expression (Project names) Sorting (Sorting for ORDER BY) @@ -325,6 +327,7 @@ FROM ) WHERE a > 0 ORDER BY a +SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it depends on order and we need it for the test -- explain Expression (Project names) Sorting (Sorting for ORDER BY) diff --git a/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh b/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh index 0ae44ec0c01..b58db7c87ad 100755 --- a/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh +++ b/tests/queries/0_stateless/02521_incorrect_dealy_for_insert_bug_44902.sh @@ -14,7 +14,7 @@ $CLICKHOUSE_CLIENT -q "SYSTEM STOP MERGES test_02521_insert_delay" for i in {0..4} do query_id="${CLICKHOUSE_DATABASE}_02521_${i}_$RANDOM$RANDOM" - $CLICKHOUSE_CLIENT --query_id="$query_id" -q "INSERT INTO test_02521_insert_delay SELECT number, toString(number) FROM numbers(${i}, 1)" + $CLICKHOUSE_CLIENT --query_id="$query_id" --max_insert_threads 1 -q "INSERT INTO test_02521_insert_delay SELECT number, toString(number) FROM numbers(${i}, 1)" $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" $CLICKHOUSE_CLIENT --param_query_id="$query_id" -q "select ProfileEvents['DelayedInsertsMilliseconds'] as delay from system.query_log where event_date >= yesterday() and current_database = '$CLICKHOUSE_DATABASE' and query_id = {query_id:String} order by delay desc limit 1" done diff --git a/tests/queries/0_stateless/02539_settings_alias.reference b/tests/queries/0_stateless/02539_settings_alias.reference index db17cf631de..a4b3d996674 100644 --- a/tests/queries/0_stateless/02539_settings_alias.reference +++ b/tests/queries/0_stateless/02539_settings_alias.reference @@ -18,7 +18,7 @@ Using HTTP with query params Using client options 0 2 -CREATE VIEW default.`02539_settings_alias_view`\n(\n `1` UInt8\n) AS\nSELECT 1\nSETTINGS replication_alter_partitions_sync = 2 +CREATE VIEW default.`02539_settings_alias_view`\n(\n `1` UInt8\n)\nAS SELECT 1\nSETTINGS replication_alter_partitions_sync = 2 replication_alter_partitions_sync 0 1 alter_sync replication_alter_partitions_sync 2 1 alter_sync alter_sync 0 1 diff --git a/tests/queries/0_stateless/02703_row_policy_for_database.reference b/tests/queries/0_stateless/02703_row_policy_for_database.reference index ec03e538d95..b67ea69ae72 100644 --- a/tests/queries/0_stateless/02703_row_policy_for_database.reference +++ b/tests/queries/0_stateless/02703_row_policy_for_database.reference @@ -3,7 +3,7 @@ CREATE ROW POLICY db1_02703 ON db1_02703.* FOR SELECT USING 1 TO ALL -- SHOW CREATE POLICY ON db1_02703.* CREATE ROW POLICY 
db1_02703 ON db1_02703.* FOR SELECT USING 1 TO ALL -CREATE ROW POLICY tbl1_02703 ON db1_02703.table FOR SELECT USING 1 TO ALL +CREATE ROW POLICY tbl1_02703 ON db1_02703.`table` FOR SELECT USING 1 TO ALL -- SHOW CREATE POLICY ON db1_02703.`*` R1, R2: (x == 1) OR (x == 2) 1 diff --git a/tests/queries/0_stateless/02714_read_bytes_aggregateFunction.sql b/tests/queries/0_stateless/02714_read_bytes_aggregateFunction.sql index 26bc9ebe62b..1c70a77c4d1 100644 --- a/tests/queries/0_stateless/02714_read_bytes_aggregateFunction.sql +++ b/tests/queries/0_stateless/02714_read_bytes_aggregateFunction.sql @@ -42,12 +42,12 @@ ORDER BY event_time_microseconds; -- 1 * 8 + AggregateFunction(argMax, String, DateTime) -- -- Size of AggregateFunction(argMax, String, DateTime): --- SingleValueDataString() + SingleValueDataFixed(DateTime) --- SingleValueDataString = 64B for small strings, 64B + string size + 1 for larger --- SingleValueDataFixed(DateTime) = 1 + 4. With padding = 8 --- SingleValueDataString Total: 72B +-- 1 Base class + 1 specific/value class: +-- Base class: MAX(sizeOf(SingleValueDataFixed), sizeOf(SingleValueDataString), sizeOf(SingleValueDataGeneric)) = 64 +-- Specific class: SingleValueDataFixed(DateTime) = 4 + 1. With padding = 8 +-- Total: 8 + 64 + 8 = 80 -- --- ColumnAggregateFunction total: 8 + 72 = 80 +-- ColumnAggregateFunction total: 8 + 2 * 64 = 136 SELECT 'AggregateFunction(argMax, String, DateTime)', read_rows, read_bytes diff --git a/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh b/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh index cded0b28409..84031ad9081 100755 --- a/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh +++ b/tests/queries/0_stateless/02765_queries_with_subqueries_profile_events.sh @@ -11,7 +11,7 @@ $CLICKHOUSE_CLIENT -n -q " CREATE TABLE input (key Int) Engine=Null; CREATE TABLE output AS input Engine=Null; - CREATE MATERIALIZED VIEW mv TO output AS SELECT * FROM input; + CREATE MATERIALIZED VIEW mv TO output SQL SECURITY NONE AS SELECT * FROM input; " for allow_experimental_analyzer in 0 1; do diff --git a/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.reference b/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.reference index 1340b3affe3..ef9d076449a 100644 --- a/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.reference +++ b/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.reference @@ -4,34 +4,34 @@ The argument is before the reference time point ─────────────────────────────────────────────── Row 1: ────── -syslog_arg: Jun 30 23:58:30 -res: 2023-06-30 23:58:30 -res_null: 2023-06-30 23:58:30 -res_zero: 2023-06-30 23:58:30 -res_us: 2023-06-30 23:58:30 -res_us_null: 2023-06-30 23:58:30 -res_us_zero: 2023-06-30 23:58:30 -res64: 2023-06-30 23:58:30.000 -res64_null: 2023-06-30 23:58:30.000 -res64_zero: 2023-06-30 23:58:30.000 -res64_us: 2023-06-30 23:58:30.000 -res64_us_null: 2023-06-30 23:58:30.000 -res64_us_zero: 2023-06-30 23:58:30.000 +syslog_arg: Jun 29 23:59:30 +res: 2023-06-29 23:59:30 +res_null: 2023-06-29 23:59:30 +res_zero: 2023-06-29 23:59:30 +res_us: 2023-06-29 23:59:30 +res_us_null: 2023-06-29 23:59:30 +res_us_zero: 2023-06-29 23:59:30 +res64: 2023-06-29 23:59:30.000 +res64_null: 2023-06-29 23:59:30.000 +res64_zero: 2023-06-29 23:59:30.000 +res64_us: 2023-06-29 23:59:30.000 +res64_us_null: 2023-06-29 23:59:30.000 +res64_us_zero: 2023-06-29 23:59:30.000 ────────────────────────────────────────────── The argument is 
after the reference time point ────────────────────────────────────────────── Row 1: ────── -syslog_arg: Jul 1 00:00:30 -res: 2022-07-01 00:00:30 -res_null: 2022-07-01 00:00:30 -res_zero: 2022-07-01 00:00:30 -res_us: 2022-07-01 00:00:30 -res_us_null: 2022-07-01 00:00:30 -res_us_zero: 2022-07-01 00:00:30 -res64: 2022-07-01 00:00:30.000 -res64_null: 2022-07-01 00:00:30.000 -res64_zero: 2022-07-01 00:00:30.000 -res64_us: 2022-07-01 00:00:30.000 -res64_us_null: 2022-07-01 00:00:30.000 -res64_us_zero: 2022-07-01 00:00:30.000 +syslog_arg: Jul 1 23:59:30 +res: 2022-06-30 23:59:30 +res_null: 2022-06-30 23:59:30 +res_zero: 2022-06-30 23:59:30 +res_us: 2022-06-30 23:59:30 +res_us_null: 2022-06-30 23:59:30 +res_us_zero: 2022-06-30 23:59:30 +res64: 2022-06-30 23:59:30.000 +res64_null: 2022-06-30 23:59:30.000 +res64_zero: 2022-06-30 23:59:30.000 +res64_us: 2022-06-30 23:59:30.000 +res64_us_null: 2022-06-30 23:59:30.000 +res64_us_zero: 2022-06-30 23:59:30.000 diff --git a/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.sql b/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.sql index c67722393ab..ecaec9f99bf 100644 --- a/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.sql +++ b/tests/queries/0_stateless/02783_parsedatetimebesteffort_syslog.sql @@ -8,7 +8,7 @@ SELECT '──────────────────────── WITH toDateTime('2023-06-30 23:59:30') AS dt_ref, now() AS dt_now, - date_sub(MINUTE, 1, dt_now) as dt_before, + date_sub(DAY, 1, dt_now) as dt_before, dateDiff(SECOND, dt_ref, dt_now) AS time_shift, formatDateTime(dt_before, '%b %e %T') AS syslog_before SELECT @@ -34,7 +34,7 @@ SELECT '──────────────────────── WITH toDateTime('2023-06-30 23:59:30') AS dt_ref, now() AS dt_now, - date_add(MINUTE, 1, dt_now) as dt_after, + date_add(DAY, 1, dt_now) as dt_after, dateDiff(SECOND, dt_ref, dt_now) AS time_shift, formatDateTime(dt_after, '%b %e %T') AS syslog_after SELECT diff --git a/tests/queries/0_stateless/02797_aggregator_huge_mem_usage_bug.sql b/tests/queries/0_stateless/02797_aggregator_huge_mem_usage_bug.sql index e204d968382..3532f617e89 100644 --- a/tests/queries/0_stateless/02797_aggregator_huge_mem_usage_bug.sql +++ b/tests/queries/0_stateless/02797_aggregator_huge_mem_usage_bug.sql @@ -3,9 +3,9 @@ DROP TABLE IF EXISTS v; create view v (s LowCardinality(String), n UInt8) as select 'test' as s, toUInt8(number) as n from numbers(10000000); -- this is what allows mem usage to go really high -set max_block_size=10000000000; +set max_block_size=4294967296; -set max_memory_usage = '1Gi'; +set max_memory_usage = '420Mi'; select s, sum(n) from v group by s format Null; diff --git a/tests/queries/0_stateless/02813_create_index_noop.sql b/tests/queries/0_stateless/02813_create_index_noop.sql index 3d65f81af9d..0f32dc6bdf3 100644 --- a/tests/queries/0_stateless/02813_create_index_noop.sql +++ b/tests/queries/0_stateless/02813_create_index_noop.sql @@ -997,4 +997,4 @@ CREATE INDEX idx_tab4_5 ON tab4 (col4,col0 DESC); CREATE INDEX idx_tab4_5 ON tab4 (col4,col1 DESC); CREATE INDEX idx_tab4_5 ON tab4 (col4,col1 DESC,col3); CREATE INDEX idx_tab4_5 ON tab4 (col4,col3 DESC,col1 DESC); -CREATE INDEX idx_tab4_5 ON tab4 (col4,col3); \ No newline at end of file +CREATE INDEX idx_tab4_5 ON tab4 (col4,col3); diff --git a/tests/queries/0_stateless/02813_seriesOutliersDetectTukey.sql b/tests/queries/0_stateless/02813_seriesOutliersDetectTukey.sql index ca116e8b7ed..0030929e6a3 100644 --- a/tests/queries/0_stateless/02813_seriesOutliersDetectTukey.sql +++ 
b/tests/queries/0_stateless/02813_seriesOutliersDetectTukey.sql @@ -8,7 +8,7 @@ INSERT INTO tb1 VALUES (1, [-3, 2.40, 15, 3.90, 5, 6, 4.50, 5.20, 3, 4, 5, 16, 7 -- non-const inputs SELECT seriesOutliersDetectTukey(a) FROM tb1 ORDER BY n; -SELECT seriesOutliersDetectTukey(a,10,90,1.5) FROM tb1 ORDER BY n; +SELECT seriesOutliersDetectTukey(a,.10,.90,1.5) FROM tb1 ORDER BY n; DROP TABLE IF EXISTS tb1; -- const inputs @@ -16,17 +16,17 @@ SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, SELECT seriesOutliersDetectTukey([-3, 2.40, 15, 3.90, 5, 6, 4.50, 5.20, 12, 60, 12, 3.40, 3, 4, 5, 6, 3.40, 2.7]); -- const inputs with optional arguments -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], 25, 75, 1.5); -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], 10, 90, 1.5); -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], 2, 98, 1.5); -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], 2, 98, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], .25, .75, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], .10, .90, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], .02, .98, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], 0.02, 0.98, 1.5); SELECT seriesOutliersDetectTukey(arrayMap(x -> sin(x / 10), range(30))); -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4, 5, 12, 45, 12, 3, 3, 4, 5, 6], 25, 75, 3); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4, 5, 12, 45, 12, 3, 3, 4, 5, 6], .25, .75, 3); -- negative tests -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4, 5, 12, 45, 12, 3, 3, 4, 5, 6], 25, 75, -1); -- { serverError BAD_ARGUMENTS} -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], 33, 53); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} -SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], 33); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4, 5, 12, 45, 12, 3, 3, 4, 5, 6], .25, .75, -1); -- { serverError BAD_ARGUMENTS} +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], .33, .53); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], .33); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} SELECT seriesOutliersDetectTukey([-3, 2.4, 15, NULL]); -- { serverError ILLEGAL_COLUMN} SELECT seriesOutliersDetectTukey([]); -- { serverError ILLEGAL_COLUMN} -SELECT seriesOutliersDetectTukey([-3, 2.4, 15]); -- { serverError BAD_ARGUMENTS} \ No newline at end of file +SELECT seriesOutliersDetectTukey([-3, 2.4, 15]); -- { serverError BAD_ARGUMENTS} diff --git a/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.reference b/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.reference new file mode 100644 index 00000000000..84119736fe9 --- /dev/null +++ b/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.reference @@ -0,0 +1,526 @@ +set optimize_aggregators_of_group_by_keys = 1 +0 0 +0 1 +0 2 +1 0 +1 1 +1 2 +0 0 +0 1 +0 2 +1 0 +1 1 +1 2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +2 +3 +3 +4 +4 +4 +5 +6 +6 +6 +8 +8 +9 +10 +12 +12 +12 +15 +16 +18 +20 +24 +0 +0 +QUERY id: 0 + PROJECTION COLUMNS + a UInt8 + b UInt8 + PROJECTION + LIST id: 1, nodes: 2 + FUNCTION id: 2, function_name: modulo, 
function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 6, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 7, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 9, constant_value: UInt64_3, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 5, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 10, nodes: 1 + CONSTANT id: 11, constant_value: UInt64_10000000, constant_value_type: UInt32 + GROUP BY + LIST id: 12, nodes: 2 + FUNCTION id: 13, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 14, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 15, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 16, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 17, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 + ORDER BY + LIST id: 19, nodes: 2 + SORT id: 20, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 2, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 6, constant_value: UInt64_2, constant_value_type: UInt8 + SORT id: 21, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 7, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 9, constant_value: UInt64_3, constant_value_type: UInt8 +QUERY id: 0 + PROJECTION COLUMNS + a UInt8 + b UInt8 + PROJECTION + LIST id: 1, nodes: 2 + FUNCTION id: 2, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 6, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 7, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 9, constant_value: UInt64_3, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 5, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 10, nodes: 1 + CONSTANT id: 11, constant_value: UInt64_10000000, constant_value_type: UInt32 + GROUP BY + LIST id: 12, nodes: 2 + FUNCTION id: 13, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 14, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 15, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 16, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 17, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 18, constant_value: UInt64_3, constant_value_type: UInt8 + ORDER BY + LIST id: 19, nodes: 2 + SORT id: 20, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 2, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 2 + COLUMN id: 
4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 6, constant_value: UInt64_2, constant_value_type: UInt8 + SORT id: 21, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 7, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 8, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 9, constant_value: UInt64_3, constant_value_type: UInt8 +QUERY id: 0 + PROJECTION COLUMNS + a UInt16 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt16 + ARGUMENTS + LIST id: 3, nodes: 2 + FUNCTION id: 4, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 8, constant_value: UInt64_5, constant_value_type: UInt8 + FUNCTION id: 9, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 10, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 11, constant_value: UInt64_7, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 7, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 12, nodes: 1 + CONSTANT id: 13, constant_value: UInt64_10000000, constant_value_type: UInt32 + GROUP BY + LIST id: 14, nodes: 2 + FUNCTION id: 15, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 16, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 17, constant_value: UInt64_7, constant_value_type: UInt8 + FUNCTION id: 18, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 19, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 20, constant_value: UInt64_5, constant_value_type: UInt8 + ORDER BY + LIST id: 21, nodes: 1 + SORT id: 22, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt16 + ARGUMENTS + LIST id: 3, nodes: 2 + FUNCTION id: 4, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 8, constant_value: UInt64_5, constant_value_type: UInt8 + FUNCTION id: 9, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 10, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 11, constant_value: UInt64_7, constant_value_type: UInt8 +QUERY id: 0 + PROJECTION COLUMNS + foo UInt64 + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: foo, result_type: UInt64, source_id: 3 + JOIN TREE + QUERY id: 3, alias: __table1, is_subquery: 1 + PROJECTION COLUMNS + foo UInt64 + PROJECTION + LIST id: 4, nodes: 1 + COLUMN id: 5, column_name: number, result_type: UInt64, source_id: 6 + JOIN TREE + TABLE_FUNCTION id: 6, alias: __table2, table_function_name: numbers + ARGUMENTS + LIST id: 7, nodes: 1 + CONSTANT id: 8, constant_value: UInt64_1, constant_value_type: UInt8 + GROUP BY + LIST id: 9, nodes: 1 + COLUMN id: 5, column_name: number, result_type: UInt64, source_id: 6 +QUERY id: 0 + PROJECTION COLUMNS + min(number) OVER (PARTITION BY modulo(number, 2)) UInt64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: min, function_type: window, result_type: 
UInt64 + ARGUMENTS + LIST id: 3, nodes: 1 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + WINDOW + WINDOW id: 6, frame_type: RANGE, frame_begin_type: unbounded preceding, frame_end_type: current + PARTITION BY + LIST id: 7, nodes: 1 + FUNCTION id: 8, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 9, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 10, constant_value: UInt64_2, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 5, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 11, nodes: 1 + CONSTANT id: 12, constant_value: UInt64_3, constant_value_type: UInt8 + GROUP BY + LIST id: 13, nodes: 1 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 +set optimize_aggregators_of_group_by_keys = 0 +0 0 +0 1 +0 2 +1 0 +1 1 +1 2 +0 0 +0 1 +0 2 +1 0 +1 1 +1 2 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +2 +2 +3 +3 +4 +4 +4 +5 +6 +6 +6 +8 +8 +9 +10 +12 +12 +12 +15 +16 +18 +20 +24 +0 +QUERY id: 0 + PROJECTION COLUMNS + a UInt8 + b UInt8 + PROJECTION + LIST id: 1, nodes: 2 + FUNCTION id: 2, function_name: min, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 9, function_name: max, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 10, nodes: 1 + FUNCTION id: 11, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 13, constant_value: UInt64_3, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 7, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 14, nodes: 1 + CONSTANT id: 15, constant_value: UInt64_10000000, constant_value_type: UInt32 + GROUP BY + LIST id: 16, nodes: 2 + FUNCTION id: 17, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 18, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 19, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 20, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 21, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 22, constant_value: UInt64_3, constant_value_type: UInt8 + ORDER BY + LIST id: 23, nodes: 2 + SORT id: 24, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 2, function_name: min, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 + SORT id: 25, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 9, function_name: max, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 10, nodes: 1 + FUNCTION id: 11, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 13, 
constant_value: UInt64_3, constant_value_type: UInt8 +QUERY id: 0 + PROJECTION COLUMNS + a UInt8 + b UInt8 + PROJECTION + LIST id: 1, nodes: 2 + FUNCTION id: 2, function_name: any, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 9, function_name: anyLast, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 10, nodes: 1 + FUNCTION id: 11, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 13, constant_value: UInt64_3, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 7, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 14, nodes: 1 + CONSTANT id: 15, constant_value: UInt64_10000000, constant_value_type: UInt32 + GROUP BY + LIST id: 16, nodes: 2 + FUNCTION id: 17, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 18, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 19, constant_value: UInt64_2, constant_value_type: UInt8 + FUNCTION id: 20, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 21, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 22, constant_value: UInt64_3, constant_value_type: UInt8 + ORDER BY + LIST id: 23, nodes: 2 + SORT id: 24, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 2, function_name: any, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 5, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 8, constant_value: UInt64_2, constant_value_type: UInt8 + SORT id: 25, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 9, function_name: anyLast, function_type: aggregate, result_type: UInt8 + ARGUMENTS + LIST id: 10, nodes: 1 + FUNCTION id: 11, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7 + CONSTANT id: 13, constant_value: UInt64_3, constant_value_type: UInt8 +QUERY id: 0 + PROJECTION COLUMNS + a UInt16 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: max, function_type: aggregate, result_type: UInt16 + ARGUMENTS + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: multiply, function_type: ordinary, result_type: UInt16 + ARGUMENTS + LIST id: 5, nodes: 2 + FUNCTION id: 6, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9 + CONSTANT id: 10, constant_value: UInt64_5, constant_value_type: UInt8 + FUNCTION id: 11, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9 + CONSTANT id: 13, constant_value: UInt64_7, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 9, alias: __table1, table_function_name: numbers + 
ARGUMENTS + LIST id: 14, nodes: 1 + CONSTANT id: 15, constant_value: UInt64_10000000, constant_value_type: UInt32 + GROUP BY + LIST id: 16, nodes: 2 + FUNCTION id: 17, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 18, nodes: 2 + COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9 + CONSTANT id: 19, constant_value: UInt64_7, constant_value_type: UInt8 + FUNCTION id: 20, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 21, nodes: 2 + COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9 + CONSTANT id: 22, constant_value: UInt64_5, constant_value_type: UInt8 + ORDER BY + LIST id: 23, nodes: 1 + SORT id: 24, sort_direction: ASCENDING, with_fill: 0 + EXPRESSION + FUNCTION id: 2, function_name: max, function_type: aggregate, result_type: UInt16 + ARGUMENTS + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: multiply, function_type: ordinary, result_type: UInt16 + ARGUMENTS + LIST id: 5, nodes: 2 + FUNCTION id: 6, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 7, nodes: 2 + COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9 + CONSTANT id: 10, constant_value: UInt64_5, constant_value_type: UInt8 + FUNCTION id: 11, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 12, nodes: 2 + COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9 + CONSTANT id: 13, constant_value: UInt64_7, constant_value_type: UInt8 +QUERY id: 0 + PROJECTION COLUMNS + foo UInt64 + PROJECTION + LIST id: 1, nodes: 1 + COLUMN id: 2, column_name: foo, result_type: UInt64, source_id: 3 + JOIN TREE + QUERY id: 3, alias: __table1, is_subquery: 1 + PROJECTION COLUMNS + foo UInt64 + PROJECTION + LIST id: 4, nodes: 1 + FUNCTION id: 5, function_name: anyLast, function_type: aggregate, result_type: UInt64 + ARGUMENTS + LIST id: 6, nodes: 1 + COLUMN id: 7, column_name: number, result_type: UInt64, source_id: 8 + JOIN TREE + TABLE_FUNCTION id: 8, alias: __table2, table_function_name: numbers + ARGUMENTS + LIST id: 9, nodes: 1 + CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8 + GROUP BY + LIST id: 11, nodes: 1 + COLUMN id: 7, column_name: number, result_type: UInt64, source_id: 8 +QUERY id: 0 + PROJECTION COLUMNS + min(number) OVER (PARTITION BY modulo(number, 2)) UInt64 + PROJECTION + LIST id: 1, nodes: 1 + FUNCTION id: 2, function_name: min, function_type: window, result_type: UInt64 + ARGUMENTS + LIST id: 3, nodes: 1 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + WINDOW + WINDOW id: 6, frame_type: RANGE, frame_begin_type: unbounded preceding, frame_end_type: current + PARTITION BY + LIST id: 7, nodes: 1 + FUNCTION id: 8, function_name: modulo, function_type: ordinary, result_type: UInt8 + ARGUMENTS + LIST id: 9, nodes: 2 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 + CONSTANT id: 10, constant_value: UInt64_2, constant_value_type: UInt8 + JOIN TREE + TABLE_FUNCTION id: 5, alias: __table1, table_function_name: numbers + ARGUMENTS + LIST id: 11, nodes: 1 + CONSTANT id: 12, constant_value: UInt64_3, constant_value_type: UInt8 + GROUP BY + LIST id: 13, nodes: 1 + COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 diff --git a/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql b/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql new file mode 100644 index 
00000000000..ca03cbb6f9f --- /dev/null +++ b/tests/queries/0_stateless/02815_analyzer_aggregate_functions_of_group_by_keys.sql @@ -0,0 +1,39 @@ +set allow_experimental_analyzer = 1; +set optimize_move_functions_out_of_any = 0; + +SELECT 'set optimize_aggregators_of_group_by_keys = 1'; +set optimize_aggregators_of_group_by_keys = 1; + +SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); +SELECT anyLast(number) FROM numbers(1) GROUP BY number; + +EXPLAIN QUERY TREE SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +EXPLAIN QUERY TREE SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN QUERY TREE +SELECT min(number) OVER (PARTITION BY number % 2) +FROM numbers(3) +GROUP BY number; + +SELECT 'set optimize_aggregators_of_group_by_keys = 0'; +set optimize_aggregators_of_group_by_keys = 0; + +SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN QUERY TREE SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +EXPLAIN QUERY TREE SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN QUERY TREE +SELECT min(number) OVER (PARTITION BY number % 2) +FROM numbers(3) +GROUP BY number; diff --git a/tests/queries/0_stateless/02868_select_support_from_keywords.reference b/tests/queries/0_stateless/02868_select_support_from_keywords.reference index d2dcb047cf0..6782e51e0e9 100644 --- a/tests/queries/0_stateless/02868_select_support_from_keywords.reference +++ b/tests/queries/0_stateless/02868_select_support_from_keywords.reference @@ -1 +1 @@ -CREATE VIEW default.test_view\n(\n `date` Date,\n `__sign` Int8,\n `from` Float64,\n `to` Float64\n) AS\nWITH cte AS\n (\n SELECT\n date,\n __sign,\n from,\n to\n FROM default.test_table\n FINAL\n )\nSELECT\n date,\n __sign,\n from,\n to\nFROM cte +CREATE VIEW default.test_view\n(\n `date` Date,\n `__sign` Int8,\n `from` Float64,\n `to` Float64\n)\nAS WITH cte AS\n (\n SELECT\n date,\n __sign,\n from,\n to\n FROM default.test_table\n FINAL\n )\nSELECT\n date,\n __sign,\n from,\n to\nFROM cte diff --git 
a/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference new file mode 100644 index 00000000000..79728fadc04 --- /dev/null +++ b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.reference @@ -0,0 +1,32 @@ +===== StorageView ===== +OK +OK +OK +2 +2 +OK +OK +2 +2 +OK +2 +2 +OK +===== MaterializedView ===== +OK +0 +0 +OK +OK +OK +2 +OK +OK +===== TestGrants ===== +OK +OK +===== TestRowPolicy ===== +1 1 +2 2 +6 6 +9 9 diff --git a/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh new file mode 100755 index 00000000000..a4ab3ed0024 --- /dev/null +++ b/tests/queries/0_stateless/02884_create_view_with_sql_security_option.sh @@ -0,0 +1,226 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +user1="user02884_1_$RANDOM$RANDOM" +user2="user02884_2_$RANDOM$RANDOM" +user3="user02884_3_$RANDOM$RANDOM" +db="db02884_$RANDOM$RANDOM" + +${CLICKHOUSE_CLIENT} --multiquery <&1 | grep -c "INVOKER") >= 1 )) && echo "OK" || echo "UNEXPECTED" +(( $(${CLICKHOUSE_CLIENT} --query "SHOW TABLE $db.test_view_2" 2>&1 | grep -c "DEFINER = $user1") >= 1 )) && echo "OK" || echo "UNEXPECTED" + +${CLICKHOUSE_CLIENT} --multiquery <&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_view_2" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_view_3" +(( $(${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_view_4" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +(( $(${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_view_5" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_view_6" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_view_7" +(( $(${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_view_8" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_view_9" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_view_10" + +${CLICKHOUSE_CLIENT} --query "ALTER TABLE $db.test_view_10 MODIFY SQL SECURITY INVOKER" +(( $(${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_view_10" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" + + +echo "===== MaterializedView =====" +${CLICKHOUSE_CLIENT} --query " + CREATE MATERIALIZED VIEW $db.test_mv_1 (s String) + ENGINE = MergeTree ORDER BY s + DEFINER = $user1 SQL SECURITY DEFINER + AS SELECT * FROM $db.test_table; +" + +(( $(${CLICKHOUSE_CLIENT} --query " + CREATE MATERIALIZED VIEW $db.test_mv_2 (s String) + ENGINE = MergeTree ORDER BY s + SQL SECURITY INVOKER + AS SELECT * FROM $db.test_table; +" 2>&1 | grep -c "SQL SECURITY INVOKER can't be specified for MATERIALIZED VIEW") >= 1 )) && echo "OK" || echo "UNEXPECTED" + +${CLICKHOUSE_CLIENT} --query " + CREATE MATERIALIZED VIEW $db.test_mv_3 (s String) + ENGINE = MergeTree ORDER BY s + SQL SECURITY NONE + AS SELECT * FROM $db.test_table; +" + +${CLICKHOUSE_CLIENT} --query "CREATE TABLE $db.test_mv_data (s String) ENGINE = 
MergeTree ORDER BY s;" + +${CLICKHOUSE_CLIENT} --query " + CREATE MATERIALIZED VIEW $db.test_mv_4 + TO $db.test_mv_data + DEFINER = $user1 SQL SECURITY DEFINER + AS SELECT * FROM $db.test_table; +" + +${CLICKHOUSE_CLIENT} --query " + CREATE MATERIALIZED VIEW $db.test_mv_5 (s String) + ENGINE = MergeTree ORDER BY s + DEFINER = $user2 SQL SECURITY DEFINER + AS SELECT * FROM $db.test_table; +" + +${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_mv_5 TO $user2" + +${CLICKHOUSE_CLIENT} --query "ALTER TABLE $db.test_mv_5 MODIFY SQL SECURITY NONE" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_mv_5" + +${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_mv_1 TO $user2" +${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_mv_3 TO $user2" +${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_mv_4 TO $user2" + +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_mv_1" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_mv_3" + +${CLICKHOUSE_CLIENT} --query "REVOKE SELECT ON $db.test_mv_data FROM $user1" +(( $(${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_mv_4" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +(( $(${CLICKHOUSE_CLIENT} --query "INSERT INTO $db.test_table VALUES ('foo'), ('bar');" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +(( $(${CLICKHOUSE_CLIENT} --materialized_views_ignore_errors 1 --query "INSERT INTO $db.test_table VALUES ('foo'), ('bar');" 2>&1 | grep -c "Failed to push block to view") >= 1 )) && echo "OK" || echo "UNEXPECTED" + +${CLICKHOUSE_CLIENT} --query "GRANT INSERT ON $db.test_mv_data TO $user1" +${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_mv_data TO $user1" +${CLICKHOUSE_CLIENT} --query "INSERT INTO $db.test_table VALUES ('foo'), ('bar');" +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT count() FROM $db.test_mv_4" + +${CLICKHOUSE_CLIENT} --query "REVOKE SELECT ON $db.test_table FROM $user1" +(( $(${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_mv_4" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" +(( $(${CLICKHOUSE_CLIENT} --query "INSERT INTO $db.test_table VALUES ('foo'), ('bar');" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" + + +echo "===== TestGrants =====" +${CLICKHOUSE_CLIENT} --query "GRANT CREATE ON *.* TO $user1" +${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON $db.test_table TO $user1, $user2" + +${CLICKHOUSE_CLIENT} --user $user1 --query " + CREATE VIEW $db.test_view_g_1 + DEFINER = CURRENT_USER SQL SECURITY DEFINER + AS SELECT * FROM $db.test_table; +" + +(( $(${CLICKHOUSE_CLIENT} --user $user1 --query " + CREATE VIEW $db.test_view_g_2 + DEFINER = $user2 + AS SELECT * FROM $db.test_table; +" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" + +${CLICKHOUSE_CLIENT} --query "GRANT SET DEFINER ON $user2 TO $user1" + +${CLICKHOUSE_CLIENT} --user $user1 --query " + CREATE VIEW $db.test_view_g_2 + DEFINER = $user2 + AS SELECT * FROM $db.test_table; +" + +(( $(${CLICKHOUSE_CLIENT} --user $user1 --query " + CREATE VIEW $db.test_view_g_3 + SQL SECURITY NONE + AS SELECT * FROM $db.test_table; +" 2>&1 | grep -c "Not enough privileges") >= 1 )) && echo "OK" || echo "UNEXPECTED" + +${CLICKHOUSE_CLIENT} --query "GRANT SET DEFINER ON $user2 TO $user1" + + +echo "===== TestRowPolicy =====" +${CLICKHOUSE_CLIENT} --multiquery <= z TO $user2; + +INSERT INTO $db.test_row_t 
VALUES (1, 2), (1, 1), (2, 2), (3, 2), (4, 0); + +GRANT SELECT ON $db.test_view_row_1 to $user2; +EOF + +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_view_row_1" + +${CLICKHOUSE_CLIENT} --multiquery <= z TO $user2; + +INSERT INTO $db.test_row_t2 VALUES (5, 6), (6, 5), (6, 6), (8, 7), (9, 9); + +GRANT SELECT ON $db.test_mv_row_2 to $user2; +EOF + +${CLICKHOUSE_CLIENT} --user $user2 --query "SELECT * FROM $db.test_mv_row_2" + + +${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS $db;" +${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS $user1, $user2, $user3"; diff --git a/tests/queries/0_stateless/02885_arg_min_max_combinator.reference b/tests/queries/0_stateless/02885_arg_min_max_combinator.reference index c4e850c0b99..f7ae9e7d104 100644 --- a/tests/queries/0_stateless/02885_arg_min_max_combinator.reference +++ b/tests/queries/0_stateless/02885_arg_min_max_combinator.reference @@ -1,3 +1,4 @@ 200 295 200 245 200 290 +999 diff --git a/tests/queries/0_stateless/02885_arg_min_max_combinator.sql b/tests/queries/0_stateless/02885_arg_min_max_combinator.sql index 86ee73d8f3b..8502234acfc 100644 --- a/tests/queries/0_stateless/02885_arg_min_max_combinator.sql +++ b/tests/queries/0_stateless/02885_arg_min_max_combinator.sql @@ -1,3 +1,10 @@ select sumArgMin(number, number % 20), sumArgMax(number, number % 20) from numbers(100); select sumArgMin(number, toString(number % 20)), sumArgMax(number, toString(number % 20)) from numbers(100); select sumArgMinIf(number, number % 20, number % 2 = 0), sumArgMaxIf(number, number % 20, number % 2 = 0) from numbers(100); +select sumArgMin() from numbers(100); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select sumArgMin(number) from numbers(100); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +-- Try passing a non comparable type, for example an AggregationState +select sumArgMin(number, unhex('0000000000000000')::AggregateFunction(sum, UInt64)) from numbers(100); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- ASAN (data leak) +SELECT sumArgMax(number, tuple(number, repeat('a', (10 * (number % 100))::Int32))) FROM numbers(1000); diff --git a/tests/queries/0_stateless/02916_set_formatting.reference b/tests/queries/0_stateless/02916_set_formatting.reference index 34ff52365f9..46d30053970 100644 --- a/tests/queries/0_stateless/02916_set_formatting.reference +++ b/tests/queries/0_stateless/02916_set_formatting.reference @@ -5,7 +5,7 @@ Row 1: statement: CREATE VIEW default.v1 ( `v` UInt64 -) AS -SELECT v +) +AS SELECT v FROM default.t1 SETTINGS additional_table_filters = {'default.t1':'s != \'s1%\''} diff --git a/tests/queries/0_stateless/02931_alter_materialized_view_query_inconsistent.reference b/tests/queries/0_stateless/02931_alter_materialized_view_query_inconsistent.reference index 45e4b958f4b..0d6874fbb59 100644 --- a/tests/queries/0_stateless/02931_alter_materialized_view_query_inconsistent.reference +++ b/tests/queries/0_stateless/02931_alter_materialized_view_query_inconsistent.reference @@ -1,3 +1,3 @@ v UInt64 v2 UInt8 -CREATE MATERIALIZED VIEW default.pipe TO default.dest\n(\n `v` UInt64,\n `v2` UInt8\n) AS\nSELECT\n v * 2 AS v,\n 1 AS v2\nFROM default.src +CREATE MATERIALIZED VIEW default.pipe TO default.dest\n(\n `v` UInt64,\n `v2` UInt8\n)\nAS SELECT\n v * 2 AS v,\n 1 AS v2\nFROM default.src diff --git a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference index 4c5b678cfa5..b52d0847ff9 100644 --- 
a/tests/queries/0_stateless/02932_refreshable_materialized_views.reference +++ b/tests/queries/0_stateless/02932_refreshable_materialized_views.reference @@ -1,14 +1,14 @@ <1: created view> a [] 1 -CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 1 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory AS\nSELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x +CREATE MATERIALIZED VIEW default.a\nREFRESH AFTER 1 SECOND\n(\n `x` UInt64\n)\nENGINE = Memory\nAS SELECT number AS x\nFROM numbers(2)\nUNION ALL\nSELECT rand64() AS x <2: refreshed> 3 1 1 <3: time difference at least> 500 <4: next refresh in> 1 <4.5: altered> Scheduled Finished 2052-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` Int16\n)\nENGINE = Memory AS\nSELECT x * 2 AS x\nFROM default.src +CREATE MATERIALIZED VIEW default.a\nREFRESH EVERY 2 YEAR\n(\n `x` Int16\n)\nENGINE = Memory\nAS SELECT x * 2 AS x\nFROM default.src <5: no refresh> 3 <6: refreshed> 2 <7: refreshed> Scheduled Finished 2054-01-01 00:00:00 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192 AS\nSELECT x * 10 AS y\nFROM default.a +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a <8: refreshed> 20 <9: refreshed> a Scheduled Finished 2054-01-01 00:00:00 <9: refreshed> b Scheduled Finished 2054-01-01 00:00:00 @@ -25,7 +25,7 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR DEPENDS ON default.a\n( <17: chain-refreshed> a Scheduled 2062-01-01 00:00:00 <17: chain-refreshed> b Scheduled 2062-01-01 00:00:00 <18: removed dependency> b Scheduled [] 2062-03-03 03:03:03 2064-01-01 00:00:00 5 -CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192 AS\nSELECT x * 10 AS y\nFROM default.a +CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nENGINE = MergeTree\nORDER BY y\nSETTINGS index_granularity = 8192\nAS SELECT x * 10 AS y\nFROM default.a <19: exception> 1 <20: unexception> 1 <21: rename> 1 @@ -34,9 +34,9 @@ CREATE MATERIALIZED VIEW default.b\nREFRESH EVERY 2 YEAR\n(\n `y` Int32\n)\nE <24: rename during refresh> 1 <25: rename during refresh> f Running <27: cancelled> f Scheduled -CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory AS\nSELECT 42 +CREATE MATERIALIZED VIEW default.g\nREFRESH EVERY 1 WEEK OFFSET 3 DAY 4 HOUR RANDOMIZE FOR 4 DAY 1 HOUR\n(\n `x` Int64\n)\nENGINE = Memory\nAS SELECT 42 <29: randomize> 1 1 -CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n) AS\nSELECT x * 10 AS x\nFROM default.src +CREATE MATERIALIZED VIEW default.h\nREFRESH EVERY 1 SECOND TO default.dest\n(\n `x` Int64\n)\nAS SELECT x * 10 AS x\nFROM default.src <30: to existing table> 10 <31: to existing table> 10 <31: to existing table> 20 diff --git a/tests/queries/0_stateless/02940_variant_text_deserialization.sql b/tests/queries/0_stateless/02940_variant_text_deserialization.sql index 041d02088ef..b909b2b6790 100644 --- a/tests/queries/0_stateless/02940_variant_text_deserialization.sql +++ b/tests/queries/0_stateless/02940_variant_text_deserialization.sql @@ -1,4 +1,5 @@ set allow_experimental_variant_type = 1; +set allow_suspicious_variant_types = 
1; set session_timezone = 'UTC'; select 'JSON'; @@ -263,4 +264,4 @@ select v, variantElement(v, 'Array(LowCardinality(Nullable(String)))') from form select 'Nullable'; select v, variantElement(v, 'Array(Nullable(String))') from format(Values, 'v Variant(String, Array(Nullable(String)))', '(NULL), (''string''), ([''hello'', null, ''world''])') format Values; -select ''; \ No newline at end of file +select ''; diff --git a/tests/queries/0_stateless/02941_variant_type_1.sh b/tests/queries/0_stateless/02941_variant_type_1.sh index ed365bbd244..773a8c4a5e4 100755 --- a/tests/queries/0_stateless/02941_variant_type_1.sh +++ b/tests/queries/0_stateless/02941_variant_type_1.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test1_insert() { diff --git a/tests/queries/0_stateless/02941_variant_type_2.sh b/tests/queries/0_stateless/02941_variant_type_2.sh index 23666a9b4a8..509c537e7fc 100755 --- a/tests/queries/0_stateless/02941_variant_type_2.sh +++ b/tests/queries/0_stateless/02941_variant_type_2.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test4_insert() { diff --git a/tests/queries/0_stateless/02941_variant_type_3.sh b/tests/queries/0_stateless/02941_variant_type_3.sh index d6309e26414..a0efead280a 100755 --- a/tests/queries/0_stateless/02941_variant_type_3.sh +++ b/tests/queries/0_stateless/02941_variant_type_3.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test5_insert() { diff --git a/tests/queries/0_stateless/02941_variant_type_4.sh b/tests/queries/0_stateless/02941_variant_type_4.sh index 5ea04db4bb4..336540d1e79 100755 --- a/tests/queries/0_stateless/02941_variant_type_4.sh +++ b/tests/queries/0_stateless/02941_variant_type_4.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1" +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --allow_suspicious_variant_types=1" function test6_insert() { diff --git a/tests/queries/0_stateless/02943_variant_read_subcolumns.sh b/tests/queries/0_stateless/02943_variant_read_subcolumns.sh index 88be09c2036..b816a20c818 100755 --- a/tests/queries/0_stateless/02943_variant_read_subcolumns.sh +++ b/tests/queries/0_stateless/02943_variant_read_subcolumns.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 " +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_suspicious_variant_types=1" function test() diff --git a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh index e4c1206263f..3bb37719a3f 100755 --- a/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh +++ b/tests/queries/0_stateless/02943_variant_type_with_different_local_and_global_order.sh @@ -7,7 +7,7 @@ CLICKHOUSE_LOG_COMMENT= # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 " +CH_CLIENT="$CLICKHOUSE_CLIENT --allow_experimental_variant_type=1 --use_variant_as_common_type=1 --allow_suspicious_variant_types=1" function test1_insert() diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_1.reference b/tests/queries/0_stateless/02947_merge_tree_index_table_1.reference new file mode 100644 index 00000000000..0ce9de91293 --- /dev/null +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_1.reference @@ -0,0 +1,84 @@ +0 0 v0 +0 5 v25 +1 1 v1 +1 6 v36 +2 2 v4 +2 7 v49 +3 3 v9 +3 8 v64 +4 4 v16 +4 9 v81 +0 10 v100 +0 15 v225 +1 11 v121 +1 16 v256 +2 12 v144 +2 17 v289 +3 13 v169 +3 18 v324 +4 14 v196 +4 19 v361 +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─a─┬──b─┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ 0 │ +│ all_1_1_0 │ 1 │ 3 │ 1 │ 6 │ +│ all_1_1_0 │ 2 │ 3 │ 3 │ 3 │ +│ all_1_1_0 │ 3 │ 1 │ 4 │ 9 │ +│ all_1_1_0 │ 4 │ 0 │ 4 │ 9 │ +│ all_2_2_0 │ 0 │ 3 │ 0 │ 10 │ +│ all_2_2_0 │ 1 │ 3 │ 1 │ 16 │ +│ all_2_2_0 │ 2 │ 3 │ 3 │ 13 │ +│ all_2_2_0 │ 3 │ 1 │ 4 │ 19 │ +│ all_2_2_0 │ 4 │ 0 │ 4 │ 19 │ +└───────────┴─────────────┴─────────────────┴───┴────┘ +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─a─┬──b─┬─a.mark─┬─b.mark─┬─s.mark─┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ 0 │ (0,0) │ (0,0) │ (0,0) │ +│ all_1_1_0 │ 1 │ 3 │ 1 │ 6 │ (0,24) │ (0,24) │ (0,10) │ +│ all_1_1_0 │ 2 │ 3 │ 3 │ 3 │ (0,48) │ (0,48) │ (0,21) │ +│ all_1_1_0 │ 3 │ 1 │ 4 │ 9 │ (0,72) │ (0,72) │ (0,32) │ +│ all_1_1_0 │ 4 │ 0 │ 4 │ 9 │ (0,80) │ (0,80) │ (0,36) │ +│ all_2_2_0 │ 0 │ 3 │ 0 │ 10 │ (0,0) │ (0,0) │ (0,0) │ +│ all_2_2_0 │ 1 │ 3 │ 1 │ 16 │ (0,24) │ (0,24) │ (0,15) │ +│ all_2_2_0 │ 2 │ 3 │ 3 │ 13 │ (0,48) │ (0,48) │ (0,30) │ +│ all_2_2_0 │ 3 │ 1 │ 4 │ 19 │ (0,72) │ (0,72) │ (0,45) │ +│ all_2_2_0 │ 4 │ 0 │ 4 │ 19 │ (0,80) │ (0,80) │ (0,50) │ +└───────────┴─────────────┴─────────────────┴───┴────┴────────┴────────┴────────┘ +0 0 v0 +0 4 v16 +0 8 v64 +1 1 v1 +1 5 v25 +1 9 v81 +2 2 v4 +2 6 v36 +3 3 v9 +3 7 v49 +0 12 v144 +0 16 v256 +1 13 v169 +1 17 v289 +2 10 v100 +2 14 v196 +2 18 v324 +3 11 v121 +3 15 v225 +3 19 v361 +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─a─┬──b─┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ 0 │ +│ all_1_1_0 │ 1 │ 3 │ 1 │ 1 │ +│ all_1_1_0 │ 2 │ 4 │ 2 │ 2 │ +│ all_1_1_0 │ 3 │ 0 │ 3 │ 7 │ +│ all_2_2_0 │ 0 │ 3 │ 0 │ 12 │ +│ all_2_2_0 │ 1 │ 3 │ 1 │ 17 │ +│ all_2_2_0 │ 2 │ 4 │ 2 │ 18 │ +│ all_2_2_0 │ 3 │ 0 │ 3 │ 19 │ +└───────────┴─────────────┴─────────────────┴───┴────┘ +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─a─┬──b─┬─a.mark──┬─b.mark──┬─s.mark──┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ 0 │ (0,0) │ (35,0) │ (77,0) │ +│ all_1_1_0 │ 1 │ 3 │ 1 │ 1 │ (114,0) │ (153,0) │ (197,0) │ +│ all_1_1_0 │ 2 │ 4 │ 2 │ 2 │ (234,0) │ (281,0) │ (329,0) 
│ +│ all_1_1_0 │ 3 │ 0 │ 3 │ 7 │ (369,0) │ (369,0) │ (369,0) │ +│ all_2_2_0 │ 0 │ 3 │ 0 │ 12 │ (0,0) │ (38,0) │ (82,0) │ +│ all_2_2_0 │ 1 │ 3 │ 1 │ 17 │ (124,0) │ (168,0) │ (212,0) │ +│ all_2_2_0 │ 2 │ 4 │ 2 │ 18 │ (254,0) │ (297,0) │ (345,0) │ +│ all_2_2_0 │ 3 │ 0 │ 3 │ 19 │ (392,0) │ (392,0) │ (392,0) │ +└───────────┴─────────────┴─────────────────┴───┴────┴─────────┴─────────┴─────────┘ diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_1.sql b/tests/queries/0_stateless/02947_merge_tree_index_table_1.sql new file mode 100644 index 00000000000..412fd476413 --- /dev/null +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_1.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS t_merge_tree_index; + +CREATE TABLE t_merge_tree_index (a UInt64, b UInt64, s String) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = 0, + ratio_of_defaults_for_sparse_serialization = 1.0; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, 'v' || toString(number * number) FROM numbers(10); +INSERT INTO t_merge_tree_index SELECT number % 5, number, 'v' || toString(number * number) FROM numbers(10, 10); + +SELECT * FROM t_merge_tree_index ORDER BY _part, a, b; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +DROP TABLE t_merge_tree_index; + +CREATE TABLE t_merge_tree_index (a UInt64, b UInt64, s String) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = '1G', + ratio_of_defaults_for_sparse_serialization = 1.0; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index SELECT number % 4, number, 'v' || toString(number * number) FROM numbers(10); +INSERT INTO t_merge_tree_index SELECT number % 4, number, 'v' || toString(number * number) FROM numbers(10, 10); + +SELECT * FROM t_merge_tree_index ORDER BY _part, a, b; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +DROP TABLE t_merge_tree_index; diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_2.reference b/tests/queries/0_stateless/02947_merge_tree_index_table_2.reference new file mode 100644 index 00000000000..7bfcb7b2822 --- /dev/null +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_2.reference @@ -0,0 +1,51 @@ +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─a─┬─b─┬─modulo(sipHash64(sp), 100)─┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ 0 │ 19 │ +│ all_1_1_0 │ 1 │ 3 │ 1 │ 6 │ 19 │ +│ all_1_1_0 │ 2 │ 3 │ 3 │ 3 │ 19 │ +│ all_1_1_0 │ 3 │ 1 │ 4 │ 9 │ 19 │ +│ all_1_1_0 │ 4 │ 0 │ 4 │ 9 │ 19 │ +│ all_2_2_0 │ 0 │ 3 │ 0 │ 0 │ 96 │ +│ all_2_2_0 │ 1 │ 2 │ 3 │ 3 │ 96 │ +│ all_2_2_0 │ 2 │ 0 │ 4 │ 4 │ 96 │ +│ all_3_3_0 │ 0 │ 3 │ 0 │ 0 │ 96 │ +│ all_3_3_0 │ 1 │ 3 │ 1 │ 6 │ 96 │ +│ all_3_3_0 │ 2 │ 3 │ 3 │ 3 │ 96 │ +│ all_3_3_0 │ 3 │ 1 │ 4 │ 9 │ 96 │ +│ all_3_3_0 │ 4 │ 0 │ 4 │ 9 │ 96 │ +└───────────┴─────────────┴─────────────────┴───┴───┴────────────────────────────┘ +┌─part_name─┬─mark_number─┬─rows_in_granule─┬─a─┬─b─┬─modulo(sipHash64(sp), 
100)─┬─a.mark──┬─b.mark──┬─c.mark──────┬─sp.sparse.idx.mark─┬─sp.mark─┬─arr.size0.mark─┬─arr.dict.mark─┬─arr.mark─┬─n.size0.mark─┬─n%2Ec1.mark─┬─n%2Ec2.mark─┬─t%2Ec2.mark─┬─t%2Ec1.mark─┬─t.mark──────┬─column%2Ewith%2Edots.mark─┐ +│ all_1_1_0 │ 0 │ 3 │ 0 │ 0 │ 19 │ (0,0) │ (0,0) │ (NULL,NULL) │ (0,0) │ (0,0) │ (0,0) │ (0,8) │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ (NULL,NULL) │ (0,0) │ +│ all_1_1_0 │ 1 │ 3 │ 1 │ 6 │ 19 │ (0,24) │ (0,24) │ (NULL,NULL) │ (0,9) │ (0,0) │ (0,24) │ (0,8) │ (0,22) │ (0,24) │ (0,36) │ (0,72) │ (0,24) │ (0,24) │ (NULL,NULL) │ (0,24) │ +│ all_1_1_0 │ 2 │ 3 │ 3 │ 3 │ 19 │ (0,48) │ (0,48) │ (NULL,NULL) │ (0,18) │ (0,0) │ (0,48) │ (0,8) │ (0,44) │ (0,48) │ (0,72) │ (0,144) │ (0,48) │ (0,48) │ (NULL,NULL) │ (0,48) │ +│ all_1_1_0 │ 3 │ 1 │ 4 │ 9 │ 19 │ (0,72) │ (0,72) │ (NULL,NULL) │ (0,27) │ (0,0) │ (0,72) │ (0,8) │ (0,66) │ (0,72) │ (0,108) │ (0,216) │ (0,72) │ (0,72) │ (NULL,NULL) │ (0,72) │ +│ all_1_1_0 │ 4 │ 0 │ 4 │ 9 │ 19 │ (0,80) │ (0,80) │ (NULL,NULL) │ (0,36) │ (0,0) │ (0,80) │ (0,25) │ (0,84) │ (0,80) │ (0,120) │ (0,240) │ (0,80) │ (0,80) │ (NULL,NULL) │ (0,80) │ +│ all_2_2_0 │ 0 │ 3 │ 0 │ 0 │ 96 │ (0,0) │ (42,0) │ (84,0) │ (NULL,NULL) │ (126,0) │ (NULL,NULL) │ (NULL,NULL) │ (165,0) │ (NULL,NULL) │ (232,0) │ (286,0) │ (NULL,NULL) │ (NULL,NULL) │ (342,0) │ (391,0) │ +│ all_2_2_0 │ 1 │ 2 │ 3 │ 3 │ 96 │ (433,0) │ (472,0) │ (511,0) │ (NULL,NULL) │ (550,0) │ (NULL,NULL) │ (NULL,NULL) │ (589,0) │ (NULL,NULL) │ (659,0) │ (717,0) │ (NULL,NULL) │ (NULL,NULL) │ (773,0) │ (817,0) │ +│ all_2_2_0 │ 2 │ 0 │ 4 │ 4 │ 96 │ (856,0) │ (856,0) │ (856,0) │ (NULL,NULL) │ (856,0) │ (NULL,NULL) │ (NULL,NULL) │ (856,0) │ (NULL,NULL) │ (856,0) │ (856,0) │ (NULL,NULL) │ (NULL,NULL) │ (856,0) │ (856,0) │ +│ all_3_3_0 │ 0 │ 3 │ 0 │ 0 │ 96 │ (0,0) │ (0,0) │ (0,0) │ (NULL,NULL) │ (0,0) │ (0,0) │ (0,8) │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ (0,0) │ (NULL,NULL) │ (0,0) │ +│ all_3_3_0 │ 1 │ 3 │ 1 │ 6 │ 96 │ (0,24) │ (0,24) │ (0,24) │ (NULL,NULL) │ (0,24) │ (0,24) │ (0,8) │ (0,22) │ (0,24) │ (0,36) │ (0,72) │ (0,24) │ (0,24) │ (NULL,NULL) │ (0,24) │ +│ all_3_3_0 │ 2 │ 3 │ 3 │ 3 │ 96 │ (0,48) │ (0,48) │ (0,48) │ (NULL,NULL) │ (0,48) │ (0,48) │ (0,8) │ (0,44) │ (0,48) │ (0,72) │ (0,144) │ (0,48) │ (0,48) │ (NULL,NULL) │ (0,48) │ +│ all_3_3_0 │ 3 │ 1 │ 4 │ 9 │ 96 │ (0,72) │ (0,72) │ (0,72) │ (NULL,NULL) │ (0,72) │ (0,72) │ (0,8) │ (0,66) │ (0,72) │ (0,108) │ (0,216) │ (0,72) │ (0,72) │ (NULL,NULL) │ (0,72) │ +│ all_3_3_0 │ 4 │ 0 │ 4 │ 9 │ 96 │ (0,80) │ (0,80) │ (0,80) │ (NULL,NULL) │ (0,80) │ (0,80) │ (0,25) │ (0,84) │ (0,80) │ (0,120) │ (0,240) │ (0,80) │ (0,80) │ (NULL,NULL) │ (0,80) │ +└───────────┴─────────────┴─────────────────┴───┴───┴────────────────────────────┴─────────┴─────────┴─────────────┴────────────────────┴─────────┴────────────────┴───────────────┴──────────┴──────────────┴─────────────┴─────────────┴─────────────┴─────────────┴─────────────┴───────────────────────────┘ +part_name String +mark_number UInt64 +rows_in_granule UInt64 +a UInt64 +b UInt64 +modulo(sipHash64(sp), 100) UInt8 +a.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +b.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +c.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +sp.sparse.idx.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +sp.mark Tuple(offset_in_compressed_file Nullable(UInt64), 
offset_in_decompressed_block Nullable(UInt64)) +arr.size0.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +arr.dict.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +arr.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +n.size0.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +n%2Ec1.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +n%2Ec2.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +t%2Ec2.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +t%2Ec1.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +t.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) +column%2Ewith%2Edots.mark Tuple(offset_in_compressed_file Nullable(UInt64), offset_in_decompressed_block Nullable(UInt64)) diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_2.sql b/tests/queries/0_stateless/02947_merge_tree_index_table_2.sql new file mode 100644 index 00000000000..5520962fb7a --- /dev/null +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_2.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS t_merge_tree_index; + +SET print_pretty_type_names = 0; + +CREATE TABLE t_merge_tree_index +( + `a` UInt64, + `b` UInt64, + `sp` UInt64, + `arr` Array(LowCardinality(String)), + `n` Nested(c1 String, c2 UInt64), + `t` Tuple(c1 UInt64, c2 UInt64), + `column.with.dots` UInt64 +) +ENGINE = MergeTree +ORDER BY (a, b, sipHash64(sp) % 100) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 6, + ratio_of_defaults_for_sparse_serialization = 0.9; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, 0, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(10); + +ALTER TABLE t_merge_tree_index ADD COLUMN c UInt64 AFTER b; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, number, 10, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(5); +INSERT INTO t_merge_tree_index SELECT number % 5, number, number, 10, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(10); + +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +SET describe_compact_output = 1; +DESCRIBE mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true); + +DROP TABLE t_merge_tree_index; diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_3.reference b/tests/queries/0_stateless/02947_merge_tree_index_table_3.reference new file mode 100644 index 00000000000..7f508505ab8 --- /dev/null +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_3.reference @@ -0,0 +1,10 @@ +ACCESS_DENIED +ACCESS_DENIED +ACCESS_DENIED +OK +ACCESS_DENIED +ACCESS_DENIED +ACCESS_DENIED +ACCESS_DENIED +OK +OK diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh 
b/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh new file mode 100755 index 00000000000..6cb184cb1fe --- /dev/null +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +user_name="${CLICKHOUSE_DATABASE}_test_user_02947" + +$CLICKHOUSE_CLIENT -n -q " +DROP TABLE IF EXISTS t_merge_tree_index; +DROP USER IF EXISTS $user_name; + +CREATE TABLE t_merge_tree_index +( + a UInt64, + b UInt64, + arr Array(LowCardinality(String)), +) +ENGINE = MergeTree +ORDER BY (a, b) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 6, + ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_merge_tree_index (a) VALUES (1); + +CREATE USER $user_name IDENTIFIED WITH plaintext_password BY 'password'; +REVOKE SELECT ON $CLICKHOUSE_DATABASE.t_merge_tree_index FROM $user_name; +GRANT SELECT (b) ON $CLICKHOUSE_DATABASE.t_merge_tree_index TO $user_name; +" + +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT a FROM t_merge_tree_index" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr FROM t_merge_tree_index" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr.size0 FROM t_merge_tree_index" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b FROM t_merge_tree_index" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" + +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT a FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT a.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr.size0.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" + +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" +$CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" + +$CLICKHOUSE_CLIENT -n -q " +DROP TABLE IF EXISTS t_merge_tree_index; +DROP USER IF EXISTS $user_name; +" diff --git a/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference index 30365d83930..b33c7b90245 100644 --- a/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference +++ b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.reference @@ -1 +1,2 @@ test all_1_1_0 1 +test2 all_1_1_0 1 diff --git a/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh 
b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh index e9d8eb081fb..dac0cc2b865 100755 --- a/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh +++ b/tests/queries/0_stateless/02956_clickhouse_local_system_parts.sh @@ -5,4 +5,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --multiquery "CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY (); INSERT INTO test SELECT 1; SELECT table, name, rows FROM system.parts WHERE database = currentDatabase();" +$CLICKHOUSE_LOCAL --multiquery " + CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY (); + INSERT INTO test SELECT 1; + + CREATE TABLE test2 (x UInt8) ENGINE = MergeTree ORDER BY (); + INSERT INTO test2 SELECT 1; + + SELECT table, name, rows FROM system.parts WHERE database = currentDatabase(); +" diff --git a/tests/queries/0_stateless/02963_single_value_destructor.reference b/tests/queries/0_stateless/02963_single_value_destructor.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02963_single_value_destructor.sql b/tests/queries/0_stateless/02963_single_value_destructor.sql new file mode 100644 index 00000000000..ee8f9164aef --- /dev/null +++ b/tests/queries/0_stateless/02963_single_value_destructor.sql @@ -0,0 +1,8 @@ +-- When we use SingleValueDataBaseMemoryBlock we must ensure we call the class destructor on destroy + +Select argMax((number, number), (number, number)) FROM numbers(100000) format Null; +Select argMin((number, number), (number, number)) FROM numbers(100000) format Null; +Select anyHeavy((number, number)) FROM numbers(100000) format Null; +Select singleValueOrNull(number::Date32) FROM numbers(100000) format Null; +Select anyArgMax(number, (number, number)) FROM numbers(100000) format Null; +Select anyArgMin(number, (number, number)) FROM numbers(100000) format Null; diff --git a/tests/queries/0_stateless/02966_topk_counts_approx_count_sum.reference b/tests/queries/0_stateless/02966_topk_counts_approx_count_sum.reference new file mode 100644 index 00000000000..0474f8e3fc9 --- /dev/null +++ b/tests/queries/0_stateless/02966_topk_counts_approx_count_sum.reference @@ -0,0 +1,2 @@ +[('6_0',476),('6_1',207),('5_0',154),('5_1',63),('4_0',47)] [('6_0',313208),('6_1',136206),('5_0',32032),('5_1',13104),('4_0',3026)] [('6_0',476,5),('6_1',207,15),('5_0',154,2)] [('6_0',313208,0),('6_1',136206,0),('5_0',32032,0)] [('6_0',476,5),('6_1',207,15),('5_0',154,2)] [('6_0',476,5),('6_1',207,15),('5_0',154,2)] [('6_0',313208,0),('6_1',136206,0),('5_0',32032,0)] +[(6,683,0),(5,217,0),(4,68,0),(3,22,0)] diff --git a/tests/queries/0_stateless/02966_topk_counts_approx_count_sum.sql b/tests/queries/0_stateless/02966_topk_counts_approx_count_sum.sql new file mode 100644 index 00000000000..9ed4baba171 --- /dev/null +++ b/tests/queries/0_stateless/02966_topk_counts_approx_count_sum.sql @@ -0,0 +1,26 @@ +WITH + arraySlice(arrayReverseSort(x -> (x.2, x.1), arrayZip(untuple(sumMap(([k], [1]))))), 1, 5) AS topKExact, + arraySlice(arrayReverseSort(x -> (x.2, x.1), arrayZip(untuple(sumMap(([k], [w]))))), 1, 5) AS topKWeightedExact +SELECT + topKExact, + topKWeightedExact, + topK(3, 2, 'counts')(k) AS topK_counts, + topKWeighted(3, 2, 'counts')(k, w) AS topKWeighted_counts, + approx_top_count(3, 6)(k) AS approx_top_count, + approx_top_k(3, 6)(k) AS approx_top_k, + approx_top_sum(3, 6)(k, w) AS approx_top_sum +FROM +( + SELECT + concat(countDigits(number * number), '_', 
intDiv((number % 10), 7)) AS k, + number AS w + FROM numbers(1000) +); + +SELECT topKMerge(4, 2, 'counts')(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); + +SELECT topKMerge(4, 3, 'counts')(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT topKMerge(4, 2)(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT topKMerge(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } \ No newline at end of file diff --git a/tests/queries/0_stateless/02968_url_args.reference b/tests/queries/0_stateless/02968_url_args.reference index 1c3693e4a66..e7e9e2c0d94 100644 --- a/tests/queries/0_stateless/02968_url_args.reference +++ b/tests/queries/0_stateless/02968_url_args.reference @@ -2,7 +2,7 @@ CREATE TABLE default.a\n(\n `x` Int64\n)\nENGINE = URL(\'https://example.com/ CREATE TABLE default.b\n(\n `x` Int64\n)\nENGINE = URL(\'https://example.com/\', \'CSV\', headers()) CREATE TABLE default.c\n(\n `x` Int64\n)\nENGINE = S3(\'https://example.s3.amazonaws.com/a.csv\', \'NOSIGN\', \'CSV\', headers(\'foo\' = \'[HIDDEN]\')) CREATE TABLE default.d\n(\n `x` Int64\n)\nENGINE = S3(\'https://example.s3.amazonaws.com/a.csv\', \'NOSIGN\', headers(\'foo\' = \'[HIDDEN]\')) -CREATE VIEW default.e\n(\n `x` Int64\n) AS\nSELECT count()\nFROM url(\'https://example.com/\', CSV, headers(\'foo\' = \'[HIDDEN]\', \'a\' = \'[HIDDEN]\')) -CREATE VIEW default.f\n(\n `x` Int64\n) AS\nSELECT count()\nFROM url(\'https://example.com/\', CSV, headers()) -CREATE VIEW default.g\n(\n `x` Int64\n) AS\nSELECT count()\nFROM s3(\'https://example.s3.amazonaws.com/a.csv\', CSV, headers(\'foo\' = \'[HIDDEN]\')) -CREATE VIEW default.h\n(\n `x` Int64\n) AS\nSELECT count()\nFROM s3(\'https://example.s3.amazonaws.com/a.csv\', headers(\'foo\' = \'[HIDDEN]\')) +CREATE VIEW default.e\n(\n `x` Int64\n)\nAS SELECT count()\nFROM url(\'https://example.com/\', CSV, headers(\'foo\' = \'[HIDDEN]\', \'a\' = \'[HIDDEN]\')) +CREATE VIEW default.f\n(\n `x` Int64\n)\nAS SELECT count()\nFROM url(\'https://example.com/\', CSV, headers()) +CREATE VIEW default.g\n(\n `x` Int64\n)\nAS SELECT count()\nFROM s3(\'https://example.s3.amazonaws.com/a.csv\', CSV, headers(\'foo\' = \'[HIDDEN]\')) +CREATE VIEW default.h\n(\n `x` Int64\n)\nAS SELECT count()\nFROM s3(\'https://example.s3.amazonaws.com/a.csv\', headers(\'foo\' = \'[HIDDEN]\')) diff --git a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh index 386c29704b6..12d08159012 100755 --- a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh +++ b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_MergeTree.sh @@ -3,6 +3,9 @@ # Tag no-fasttest: requires S3 # Tag no-random-settings, no-random-merge-tree-settings: to avoid creating extra files like serialization.json, this test too exocit anyway +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh index bf20247c7aa..b079e67a000 100755 --- a/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh +++ b/tests/queries/0_stateless/02980_s3_plain_DROP_TABLE_ReplicatedMergeTree.sh @@ -3,6 +3,9 @@ # Tag no-fasttest: requires S3 # Tag no-random-settings, no-random-merge-tree-settings: to avoid creating extra files like serialization.json, this test too exocit anyway +# Creation of a database with Ordinary engine emits a warning. +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal + CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.reference b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.reference new file mode 100644 index 00000000000..1c6235170d5 --- /dev/null +++ b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.reference @@ -0,0 +1,6 @@ +inserting into a remote table from local with concurrency equal to max_insert_threads +9 +inserting into a remote table from remote with concurrency max_insert_threads +9 +inserting into a remote table from remote (reading with parallel replicas) with concurrency max_insert_threads +9 diff --git a/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh new file mode 100755 index 00000000000..e65c9654c9c --- /dev/null +++ b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# Tags: no-random-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} -nq """ + CREATE TABLE t1_local + ( + n UInt64, + ) + ENGINE = MergeTree + ORDER BY n; + + CREATE TABLE t3_dist + ( + n UInt64, + ) + ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 't1_local', rand()); + + CREATE TABLE t4_pr + ( + n UInt64, + ) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/02981_insert_select', '1') + ORDER BY n; + + SYSTEM STOP MERGES t1_local; + + INSERT INTO t1_local SELECT * FROM numbers_mt(1e6); +""" + +max_insert_threads=9 + +echo "inserting into a remote table from local with concurrency equal to max_insert_threads" +${CLICKHOUSE_CLIENT} --max_insert_threads "$max_insert_threads" -q """ + EXPLAIN PIPELINE + INSERT INTO t3_dist + SELECT * FROM t1_local; +""" | grep -v EmptySink | grep -c Sink + +echo "inserting into a remote table from remote with concurrency max_insert_threads" +${CLICKHOUSE_CLIENT} --max_insert_threads "$max_insert_threads" --parallel_distributed_insert_select 0 -q """ + EXPLAIN PIPELINE + INSERT INTO t3_dist + SELECT * FROM t3_dist; +""" | grep -v EmptySink | grep -c Sink + +echo "inserting into a remote table from remote (reading with parallel replicas) with concurrency max_insert_threads" +${CLICKHOUSE_CLIENT} --max_insert_threads "$max_insert_threads" --allow_experimental_parallel_reading_from_replicas 2 --cluster_for_parallel_replicas 'parallel_replicas' --max_parallel_replicas 3 -q """ + EXPLAIN PIPELINE + INSERT INTO t3_dist + SELECT * FROM t4_pr; +""" | grep -v EmptySink | grep -c Sink diff --git a/tests/queries/0_stateless/02981_nested_bad_types.sql b/tests/queries/0_stateless/02981_nested_bad_types.sql index 87bc80693c8..1620eca590d 100644 --- a/tests/queries/0_stateless/02981_nested_bad_types.sql +++ b/tests/queries/0_stateless/02981_nested_bad_types.sql @@ -2,6 +2,7 @@ set allow_suspicious_low_cardinality_types=0; set allow_suspicious_fixed_string_types=0; set allow_experimental_variant_type=0; + select [42]::Array(LowCardinality(UInt64)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} select [[[42]]]::Array(Array(Array(LowCardinality(UInt64)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} select map('a', 42)::Map(String, LowCardinality(UInt64)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} @@ -48,9 +49,9 @@ create table test (x Tuple(String, Array(Map(String, Variant(String, UInt64))))) set allow_experimental_variant_type=1; select 42::Variant(String, LowCardinality(UInt64)) settings allow_experimental_variant_type=1; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} -select tuple('a', [map('b', 42)])::Tuple(String, Array(Map(String, Variant(LowCardinality(UInt64), UInt8)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} -create table test (x Variant(LowCardinality(UInt64), UInt8)) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} -create table test (x Tuple(String, Array(Map(String, Variant(LowCardinality(UInt64), UInt8))))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select tuple('a', [map('b', 42)])::Tuple(String, Array(Map(String, Variant(LowCardinality(UInt64), String)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Variant(LowCardinality(UInt64), String)) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Tuple(String, Array(Map(String, Variant(LowCardinality(UInt64), String))))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} select '42'::Variant(UInt64, 
FixedString(1000000)); -- {serverError ILLEGAL_COLUMN} select tuple('a', [map('b', '42')])::Tuple(String, Array(Map(String, Variant(UInt32, FixedString(1000000))))); -- {serverError ILLEGAL_COLUMN} diff --git a/tests/queries/0_stateless/02983_empty_map_hasToken.reference b/tests/queries/0_stateless/02983_empty_map_hasToken.reference new file mode 100644 index 00000000000..75378377541 --- /dev/null +++ b/tests/queries/0_stateless/02983_empty_map_hasToken.reference @@ -0,0 +1,10 @@ +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 diff --git a/tests/queries/0_stateless/02983_empty_map_hasToken.sql b/tests/queries/0_stateless/02983_empty_map_hasToken.sql new file mode 100644 index 00000000000..6d146150ac8 --- /dev/null +++ b/tests/queries/0_stateless/02983_empty_map_hasToken.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/60223 + +CREATE TABLE test +( + t String, + id String, + h Map(String, String) +) +ENGINE = MergeTree +ORDER BY (t, id) SETTINGS index_granularity = 4096 ; + +insert into test values ('xxx', 'x', {'content-type':'text/plain','user-agent':'bulk-tests'}); +insert into test values ('xxx', 'y', {'content-type':'application/json','user-agent':'bulk-tests'}); +insert into test select 'xxx', number, map('content-type', 'x' ) FROM numbers(1e2); + +optimize table test final; + +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') WHERE hasToken(h['user-agent'], 'tests') and t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'tests') WHERE hasToken(h['user-agent'], 'bulk') and t = 'xxx'; +SELECT count() FROM test WHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests') and t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests') and t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests') WHERE t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'tests') and hasToken(h['user-agent'], 'bulk') WHERE t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'tests') and hasToken(h['user-agent'], 'bulk'); +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests'); +SELECT count() FROM test WHERE hasToken(h['user-agent'], 'tests') and hasToken(h['user-agent'], 'bulk'); +SELECT count() FROM test WHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests'); diff --git a/tests/queries/0_stateless/02985_dialects_with_distributed_tables.reference b/tests/queries/0_stateless/02985_dialects_with_distributed_tables.reference new file mode 100644 index 00000000000..f22e294ce86 --- /dev/null +++ b/tests/queries/0_stateless/02985_dialects_with_distributed_tables.reference @@ -0,0 +1,9 @@ +123 +234 +315 +123 +234 +315 +123 +234 +315 diff --git a/tests/queries/0_stateless/02985_dialects_with_distributed_tables.sql b/tests/queries/0_stateless/02985_dialects_with_distributed_tables.sql new file mode 100644 index 00000000000..6ac36cf5835 --- /dev/null +++ b/tests/queries/0_stateless/02985_dialects_with_distributed_tables.sql @@ -0,0 +1,30 @@ +-- Tags: no-fasttest, distributed + +DROP TABLE IF EXISTS shared_test_table; +DROP TABLE IF EXISTS distributed_test_table; + +CREATE TABLE shared_test_table (id UInt64) +ENGINE = MergeTree +ORDER BY (id); + +CREATE TABLE distributed_test_table +ENGINE = Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), shared_test_table); + +INSERT INTO shared_test_table 
VALUES (123), (651), (446), (315), (234), (764); + +SELECT id FROM distributed_test_table LIMIT 3; + +SET dialect = 'kusto'; + +distributed_test_table | take 3; + +SET dialect = 'prql'; + +from distributed_test_table +select {id} +take 1..3; + +SET dialect = 'clickhouse'; + +DROP TABLE distributed_test_table; +DROP TABLE shared_test_table; diff --git a/tests/queries/0_stateless/02985_parser_check_stack_size.reference b/tests/queries/0_stateless/02985_parser_check_stack_size.reference new file mode 100644 index 00000000000..f83e0818db2 --- /dev/null +++ b/tests/queries/0_stateless/02985_parser_check_stack_size.reference @@ -0,0 +1 @@ +TOO_DEEP diff --git a/tests/queries/0_stateless/02985_parser_check_stack_size.sh b/tests/queries/0_stateless/02985_parser_check_stack_size.sh new file mode 100755 index 00000000000..c91a0a3eacc --- /dev/null +++ b/tests/queries/0_stateless/02985_parser_check_stack_size.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "select 'create table test (x ' || repeat('Array(', 10000) || 'UInt64' || repeat(')', 10000) || ') engine=Memory' format TSVRaw" | $CLICKHOUSE_CURL "${CLICKHOUSE_URL}&max_parser_depth=100000" --data-binary @- | grep -o -F 'TOO_DEEP' diff --git a/tests/queries/0_stateless/02988_ordinary_database_warning.reference b/tests/queries/0_stateless/02988_ordinary_database_warning.reference new file mode 100644 index 00000000000..587579af915 --- /dev/null +++ b/tests/queries/0_stateless/02988_ordinary_database_warning.reference @@ -0,0 +1 @@ +Ok. diff --git a/tests/queries/0_stateless/02988_ordinary_database_warning.sql b/tests/queries/0_stateless/02988_ordinary_database_warning.sql new file mode 100644 index 00000000000..2a9756d6f66 --- /dev/null +++ b/tests/queries/0_stateless/02988_ordinary_database_warning.sql @@ -0,0 +1,10 @@ +DROP DATABASE IF EXISTS 02988_ordinary; + +SET send_logs_level = 'fatal'; +SET allow_deprecated_database_ordinary = 1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE 02988_ordinary ENGINE=Ordinary; + +SELECT DISTINCT 'Ok.' 
FROM system.warnings WHERE message ILIKE '%Ordinary%' and message ILIKE '%deprecated%'; + +DROP DATABASE IF EXISTS 02988_ordinary; diff --git a/tests/queries/0_stateless/02989_variant_comparison.reference b/tests/queries/0_stateless/02989_variant_comparison.reference new file mode 100644 index 00000000000..df192c6fe8c --- /dev/null +++ b/tests/queries/0_stateless/02989_variant_comparison.reference @@ -0,0 +1,299 @@ +order by v1 nulls first +\N +\N +\N +\N +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,4] +abc +abc +abc +abc +abc +abd +42 +42 +42 +42 +42 +43 +order by v1 nulls last +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,4] +abc +abc +abc +abc +abc +abd +42 +42 +42 +42 +42 +43 +\N +\N +\N +\N +order by v2 nulls first +\N +\N +\N +\N +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,4] +abc +abc +abc +abc +abc +abd +42 +42 +42 +42 +42 +43 +order by v2 nulls last +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,3] +[1,2,4] +abc +abc +abc +abc +abc +abd +42 +42 +42 +42 +42 +43 +\N +\N +\N +\N +order by v1, v2 nulls first +[1,2,3] \N +[1,2,3] [1,2,3] +[1,2,3] [1,2,4] +[1,2,3] abc +[1,2,3] 42 +[1,2,4] [1,2,3] +abc \N +abc [1,2,3] +abc abc +abc abd +abc 42 +abd abc +42 \N +42 [1,2,3] +42 abc +42 42 +42 43 +43 42 +\N \N +\N [1,2,3] +\N abc +\N 42 +order by v1, v2 nulls last +[1,2,3] [1,2,3] +[1,2,3] [1,2,4] +[1,2,3] abc +[1,2,3] 42 +[1,2,3] \N +[1,2,4] [1,2,3] +abc [1,2,3] +abc abc +abc abd +abc 42 +abc \N +abd abc +42 [1,2,3] +42 abc +42 42 +42 43 +42 \N +43 42 +\N [1,2,3] +\N abc +\N 42 +\N \N +order by v2, v1 nulls first +\N [1,2,3] +[1,2,3] [1,2,3] +[1,2,4] [1,2,3] +abc [1,2,3] +42 [1,2,3] +[1,2,3] [1,2,4] +\N abc +[1,2,3] abc +abc abc +abd abc +42 abc +abc abd +\N 42 +[1,2,3] 42 +abc 42 +42 42 +43 42 +42 43 +\N \N +[1,2,3] \N +abc \N +42 \N +order by v2, v1 nulls last +[1,2,3] [1,2,3] +[1,2,4] [1,2,3] +abc [1,2,3] +42 [1,2,3] +\N [1,2,3] +[1,2,3] [1,2,4] +[1,2,3] abc +abc abc +abd abc +42 abc +\N abc +abc abd +[1,2,3] 42 +abc 42 +42 42 +43 42 +\N 42 +42 43 +[1,2,3] \N +abc \N +42 \N +\N \N +v1 = v2 +[1,2,3] [1,2,3] 1 +[1,2,3] [1,2,4] 0 +[1,2,3] abc 0 +[1,2,3] 42 0 +[1,2,3] \N 0 +[1,2,4] [1,2,3] 0 +abc [1,2,3] 0 +abc abc 1 +abc abd 0 +abc 42 0 +abc \N 0 +abd abc 0 +42 [1,2,3] 0 +42 abc 0 +42 42 1 +42 43 0 +42 \N 0 +43 42 0 +\N [1,2,3] 0 +\N abc 0 +\N 42 0 +\N \N 1 +v1 < v2 +[1,2,3] [1,2,3] 0 +[1,2,3] [1,2,4] 1 +[1,2,3] abc 1 +[1,2,3] 42 1 +[1,2,3] \N 1 +[1,2,4] [1,2,3] 0 +abc [1,2,3] 0 +abc abc 0 +abc abd 1 +abc 42 1 +abc \N 1 +abd abc 0 +42 [1,2,3] 0 +42 abc 0 +42 42 0 +42 43 1 +42 \N 1 +43 42 0 +\N [1,2,3] 0 +\N abc 0 +\N 42 0 +\N \N 0 +v1 <= v2 +[1,2,3] [1,2,3] 1 +[1,2,3] [1,2,4] 1 +[1,2,3] abc 1 +[1,2,3] 42 1 +[1,2,3] \N 1 +[1,2,4] [1,2,3] 0 +abc [1,2,3] 0 +abc abc 1 +abc abd 1 +abc 42 1 +abc \N 1 +abd abc 0 +42 [1,2,3] 0 +42 abc 0 +42 42 1 +42 43 1 +42 \N 1 +43 42 0 +\N [1,2,3] 0 +\N abc 0 +\N 42 0 +\N \N 1 +v1 > v2 +[1,2,3] [1,2,3] 0 +[1,2,3] [1,2,4] 0 +[1,2,3] abc 0 +[1,2,3] 42 0 +[1,2,3] \N 0 +[1,2,4] [1,2,3] 1 +abc [1,2,3] 1 +abc abc 0 +abc abd 0 +abc 42 0 +abc \N 0 +abd abc 1 +42 [1,2,3] 1 +42 abc 1 +42 42 0 +42 43 0 +42 \N 0 +43 42 1 +\N [1,2,3] 1 +\N abc 1 +\N 42 1 +\N \N 0 +v1 >= v2 +[1,2,3] [1,2,3] 1 +[1,2,3] [1,2,4] 1 +[1,2,3] abc 1 +[1,2,3] 42 1 +[1,2,3] \N 1 +[1,2,4] [1,2,3] 1 +abc [1,2,3] 1 +abc abc 1 +abc abd 1 +abc 42 1 +abc \N 1 +abd abc 1 +42 [1,2,3] 1 +42 abc 1 +42 42 1 +42 43 1 +42 \N 1 +43 42 1 +\N [1,2,3] 1 +\N abc 1 +\N 42 1 +\N \N 1 diff --git a/tests/queries/0_stateless/02989_variant_comparison.sql b/tests/queries/0_stateless/02989_variant_comparison.sql 
new file mode 100644 index 00000000000..e0dcbc97c27 --- /dev/null +++ b/tests/queries/0_stateless/02989_variant_comparison.sql @@ -0,0 +1,79 @@ +set allow_experimental_variant_type=1; + +create table test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) engine=Memory; + +insert into test values (42, 42); +insert into test values (42, 43); +insert into test values (43, 42); + +insert into test values ('abc', 'abc'); +insert into test values ('abc', 'abd'); +insert into test values ('abd', 'abc'); + +insert into test values ([1,2,3], [1,2,3]); +insert into test values ([1,2,3], [1,2,4]); +insert into test values ([1,2,4], [1,2,3]); + +insert into test values (NULL, NULL); + +insert into test values (42, 'abc'); +insert into test values ('abc', 42); + +insert into test values (42, [1,2,3]); +insert into test values ([1,2,3], 42); + +insert into test values (42, NULL); +insert into test values (NULL, 42); + +insert into test values ('abc', [1,2,3]); +insert into test values ([1,2,3], 'abc'); + +insert into test values ('abc', NULL); +insert into test values (NULL, 'abc'); + +insert into test values ([1,2,3], NULL); +insert into test values (NULL, [1,2,3]); + + +select 'order by v1 nulls first'; +select v1 from test order by v1 nulls first; + +select 'order by v1 nulls last'; +select v1 from test order by v1 nulls last; + +select 'order by v2 nulls first'; +select v2 from test order by v2 nulls first; + +select 'order by v2 nulls last'; +select v2 from test order by v2 nulls last; + + +select 'order by v1, v2 nulls first'; +select * from test order by v1, v2 nulls first; + +select 'order by v1, v2 nulls last'; +select * from test order by v1, v2 nulls last; + +select 'order by v2, v1 nulls first'; +select * from test order by v2, v1 nulls first; + +select 'order by v2, v1 nulls last'; +select * from test order by v2, v1 nulls last; + +select 'v1 = v2'; +select v1, v2, v1 = v2 from test order by v1, v2; + +select 'v1 < v2'; +select v1, v2, v1 < v2 from test order by v1, v2; + +select 'v1 <= v2'; +select v1, v2, v1 <= v2 from test order by v1, v2; + +select 'v1 > v2'; +select v1, v2, v1 > v2 from test order by v1, v2; + +select 'v1 >= v2'; +select v1, v2, v2 >= v2 from test order by v1, v2; + +drop table test; + diff --git a/tests/queries/0_stateless/02990_parts_splitter_invalid_ranges.reference b/tests/queries/0_stateless/02990_parts_splitter_invalid_ranges.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02990_parts_splitter_invalid_ranges.sql b/tests/queries/0_stateless/02990_parts_splitter_invalid_ranges.sql new file mode 100644 index 00000000000..e19c23acc2e --- /dev/null +++ b/tests/queries/0_stateless/02990_parts_splitter_invalid_ranges.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + `eventType` String, + `timestamp` UInt64, + `key` UInt64 +) +ENGINE = ReplacingMergeTree +PRIMARY KEY (eventType, timestamp) +ORDER BY (eventType, timestamp, key) +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES test_table; + +INSERT INTO test_table VALUES ('1', 1704472004759, 1), ('3', 1704153600000, 2), ('3', 1704153600000, 3), ('5', 1700161822134, 4); + +INSERT INTO test_table VALUES ('1', 1704468357009, 1), ('3', 1704153600000, 2), ('3', 1704153600000, 3), ('5', 1701458520878, 4); + +INSERT INTO test_table VALUES ('1', 1704470704762, 1), ('3', 1704153600000, 2), ('3', 1704153600000, 3), ('5', 1702609856302, 4); + +SELECT eventType, timestamp, key FROM test_table +WHERE 
(eventType IN ('2', '4')) AND + ((timestamp >= max2(toInt64('1698938519999'), toUnixTimestamp64Milli(now64() - toIntervalDay(90)))) AND + (timestamp <= (toInt64('1707143315452') - 1))); + +SELECT eventType, timestamp, key FROM test_table FINAL +WHERE (eventType IN ('2', '4')) AND + ((timestamp >= max2(toInt64('1698938519999'), toUnixTimestamp64Milli(now64() - toIntervalDay(90)))) AND + (timestamp <= (toInt64('1707143315452') - 1))); + +DROP TABLE test_table; diff --git a/tests/queries/0_stateless/02990_variant_where_cond.reference b/tests/queries/0_stateless/02990_variant_where_cond.reference new file mode 100644 index 00000000000..73b397be12d --- /dev/null +++ b/tests/queries/0_stateless/02990_variant_where_cond.reference @@ -0,0 +1,2 @@ +Hello +42 diff --git a/tests/queries/0_stateless/02990_variant_where_cond.sql b/tests/queries/0_stateless/02990_variant_where_cond.sql new file mode 100644 index 00000000000..8149988f1f2 --- /dev/null +++ b/tests/queries/0_stateless/02990_variant_where_cond.sql @@ -0,0 +1,11 @@ +set allow_experimental_variant_type=1; + +create table test (v Variant(String, UInt64)) engine=Memory; +insert into test values (42), ('Hello'), (NULL); + +select * from test where v = 'Hello'; +select * from test where v = 42; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select * from test where v = 42::UInt64::Variant(String, UInt64); + +drop table test; + diff --git a/tests/queries/0_stateless/02994_inconsistent_formatting.reference b/tests/queries/0_stateless/02994_inconsistent_formatting.reference new file mode 100644 index 00000000000..b9db04e880c --- /dev/null +++ b/tests/queries/0_stateless/02994_inconsistent_formatting.reference @@ -0,0 +1,4 @@ +1 +2 +3 +(1) \ No newline at end of file diff --git a/tests/queries/0_stateless/02994_inconsistent_formatting.sql b/tests/queries/0_stateless/02994_inconsistent_formatting.sql new file mode 100644 index 00000000000..f22f81513f6 --- /dev/null +++ b/tests/queries/0_stateless/02994_inconsistent_formatting.sql @@ -0,0 +1,10 @@ +CREATE TEMPORARY TABLE table (x UInt8); +INSERT INTO `table` FORMAT Values (1); +INSERT INTO TABLE `table` FORMAT Values (2); +INSERT INTO TABLE table FORMAT Values (3); +SELECT * FROM table ORDER BY x; +DROP TABLE table; + +CREATE TEMPORARY TABLE FORMAT (x UInt8); +INSERT INTO table FORMAT Values (1); +SELECT * FROM FORMAT FORMAT Values; diff --git a/tests/queries/0_stateless/02994_sanity_check_settings.reference b/tests/queries/0_stateless/02994_sanity_check_settings.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02994_sanity_check_settings.sql b/tests/queries/0_stateless/02994_sanity_check_settings.sql new file mode 100644 index 00000000000..073cd9749e6 --- /dev/null +++ b/tests/queries/0_stateless/02994_sanity_check_settings.sql @@ -0,0 +1,24 @@ +CREATE TABLE data_02052_1_wide0__fuzz_48 +( + `key` Nullable(Int64), + `value` UInt8 +) + ENGINE = MergeTree + ORDER BY key + SETTINGS min_bytes_for_wide_part = 0, allow_nullable_key = 1 AS +SELECT + number, + repeat(toString(number), 5) +FROM numbers(1); + +SELECT * APPLY max +FROM data_02052_1_wide0__fuzz_48 +GROUP BY toFixedString(toFixedString(toFixedString(toFixedString(toFixedString(toLowCardinality('UInt256'), toFixedString(toNullable(toNullable(2)), toFixedString(toFixedString(7), 7)), 7), 7), materialize(toNullable(7))), 7), materialize(7)) +WITH CUBE + SETTINGS max_read_buffer_size = 7, max_threads = 9223372036854775807; -- { serverError INVALID_SETTING_VALUE } + +SELECT zero + 1 AS x +FROM system.zeros 
+ SETTINGS max_block_size = 9223372036854775806, max_rows_to_read = 20, read_overflow_mode = 'break'; -- { serverError INVALID_SETTING_VALUE } + +EXPLAIN PIPELINE SELECT zero + 1 AS x FROM system.zeros SETTINGS max_block_size = 9223372036854775806, max_rows_to_read = 20, read_overflow_mode = 'break'; -- { serverError INVALID_SETTING_VALUE } diff --git a/tests/queries/0_stateless/02995_bad_formatting_union_intersect.reference b/tests/queries/0_stateless/02995_bad_formatting_union_intersect.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/02995_bad_formatting_union_intersect.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02995_bad_formatting_union_intersect.sql b/tests/queries/0_stateless/02995_bad_formatting_union_intersect.sql new file mode 100644 index 00000000000..227f407fc5c --- /dev/null +++ b/tests/queries/0_stateless/02995_bad_formatting_union_intersect.sql @@ -0,0 +1,2 @@ +create temporary table t1 engine=MergeTree() order by c as ( select 1 as c intersect (select 1 as c union all select 2 as c ) ); +SELECT * FROM t1; diff --git a/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns.reference b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns.reference new file mode 100644 index 00000000000..aa47d0d46d4 --- /dev/null +++ b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns.reference @@ -0,0 +1,2 @@ +0 +0 diff --git a/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns.sql b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns.sql new file mode 100644 index 00000000000..060f16f8945 --- /dev/null +++ b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns.sql @@ -0,0 +1,6 @@ +-- It is special because actions cannot be reused for SimpleAggregateFunction (see https://github.com/ClickHouse/ClickHouse/pull/54436) +drop table if exists data; +create table data (key Int) engine=AggregatingMergeTree() order by tuple(); +insert into data values (0); +select * from data final prewhere indexHint(_partition_id = 'all') or indexHint(_partition_id = 'all'); +select * from data final prewhere indexHint(_partition_id = 'all') or indexHint(_partition_id = 'all') or indexHint(_partition_id = 'all'); diff --git a/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction.reference b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction.reference new file mode 100644 index 00000000000..573541ac970 --- /dev/null +++ b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction.sql b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction.sql new file mode 100644 index 00000000000..97df883fa48 --- /dev/null +++ b/tests/queries/0_stateless/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction.sql @@ -0,0 +1,5 @@ +-- It is special because actions cannot be reused for SimpleAggregateFunction (see https://github.com/ClickHouse/ClickHouse/pull/54436) +drop table if exists data; +create table data (key SimpleAggregateFunction(max, Int)) engine=AggregatingMergeTree() order by tuple(); +insert into data values (0); +select * from data final prewhere indexHint(_partition_id = 'all') and key >= -1 where key >= 0; diff --git 
a/tests/queries/0_stateless/02997_fix_datetime64_scale_conversion.reference b/tests/queries/0_stateless/02997_fix_datetime64_scale_conversion.reference new file mode 100644 index 00000000000..c4ade2ace13 --- /dev/null +++ b/tests/queries/0_stateless/02997_fix_datetime64_scale_conversion.reference @@ -0,0 +1,100 @@ +2023-01-01 00:00:00 +2023-01-01 00:00:00 +2023-01-01 01:01:01 +2023-01-01 01:01:01 +2023-01-02 02:02:02 +2023-01-02 02:02:02 +2023-01-03 03:03:03 +2023-01-03 03:03:03 +2023-01-04 04:04:04 +2023-01-04 04:04:04 +2023-01-05 05:05:05 +2023-01-05 05:05:05 +2023-01-06 06:06:06 +2023-01-06 06:06:06 +2023-01-07 07:07:07 +2023-01-07 07:07:07 +2023-01-08 08:08:08 +2023-01-08 08:08:08 +2023-01-09 09:09:09 +2023-01-09 09:09:09 +2023-01-01 00:00:00.00 +2023-01-01 00:00:00.00 +2023-01-01 01:01:01.00 +2023-01-01 01:01:01.10 +2023-01-02 02:02:02.00 +2023-01-02 02:02:02.12 +2023-01-03 03:03:03.00 +2023-01-03 03:03:03.12 +2023-01-04 04:04:04.00 +2023-01-04 04:04:04.12 +2023-01-05 05:05:05.00 +2023-01-05 05:05:05.12 +2023-01-06 06:06:06.00 +2023-01-06 06:06:06.12 +2023-01-07 07:07:07.00 +2023-01-07 07:07:07.12 +2023-01-08 08:08:08.00 +2023-01-08 08:08:08.12 +2023-01-09 09:09:09.00 +2023-01-09 09:09:09.12 +2023-01-01 00:00:00.000 +2023-01-01 00:00:00.000 +2023-01-01 01:01:01.000 +2023-01-01 01:01:01.100 +2023-01-02 02:02:02.000 +2023-01-02 02:02:02.120 +2023-01-03 03:03:03.000 +2023-01-03 03:03:03.123 +2023-01-04 04:04:04.000 +2023-01-04 04:04:04.123 +2023-01-05 05:05:05.000 +2023-01-05 05:05:05.123 +2023-01-06 06:06:06.000 +2023-01-06 06:06:06.123 +2023-01-07 07:07:07.000 +2023-01-07 07:07:07.123 +2023-01-08 08:08:08.000 +2023-01-08 08:08:08.123 +2023-01-09 09:09:09.000 +2023-01-09 09:09:09.123 +2023-01-01 00:00:00.000000 +2023-01-01 00:00:00.000000 +2023-01-01 01:01:01.000000 +2023-01-01 01:01:01.100000 +2023-01-02 02:02:02.000000 +2023-01-02 02:02:02.120000 +2023-01-03 03:03:03.000000 +2023-01-03 03:03:03.123000 +2023-01-04 04:04:04.000000 +2023-01-04 04:04:04.123400 +2023-01-05 05:05:05.000000 +2023-01-05 05:05:05.123450 +2023-01-06 06:06:06.000000 +2023-01-06 06:06:06.123456 +2023-01-07 07:07:07.000000 +2023-01-07 07:07:07.123456 +2023-01-08 08:08:08.000000 +2023-01-08 08:08:08.123456 +2023-01-09 09:09:09.000000 +2023-01-09 09:09:09.123456 +2023-01-01 00:00:00.000000 +2023-01-01 00:00:00.000000 +2023-01-01 01:01:01.000000 +2023-01-01 01:01:01.100000 +2023-01-02 02:02:02.000000 +2023-01-02 02:02:02.120000 +2023-01-03 03:03:03.000000 +2023-01-03 03:03:03.123000 +2023-01-04 04:04:04.000000 +2023-01-04 04:04:04.123400 +2023-01-05 05:05:05.000000 +2023-01-05 05:05:05.123450 +2023-01-06 06:06:06.000000 +2023-01-06 06:06:06.123456 +2023-01-07 07:07:07.000000 +2023-01-07 07:07:07.123456 +2023-01-08 08:08:08.000000 +2023-01-08 08:08:08.123456 +2023-01-09 09:09:09.000000 +2023-01-09 09:09:09.123456 diff --git a/tests/queries/0_stateless/02997_fix_datetime64_scale_conversion.sql b/tests/queries/0_stateless/02997_fix_datetime64_scale_conversion.sql new file mode 100644 index 00000000000..b905ef2b972 --- /dev/null +++ b/tests/queries/0_stateless/02997_fix_datetime64_scale_conversion.sql @@ -0,0 +1,124 @@ +DROP TABLE IF EXISTS test_0; +CREATE TABLE IF NOT EXISTS test_0 (a DateTime64(0)) engine = MergeTree order by a; +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); 
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_0 ORDER BY a; +DROP TABLE test_0; + +DROP TABLE IF EXISTS test_2; +CREATE TABLE IF NOT EXISTS test_2 (a DateTime64(2)) engine = MergeTree order by a; +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_2 ORDER BY a; +DROP TABLE test_2; + +DROP TABLE IF EXISTS test_3; +CREATE TABLE IF NOT EXISTS test_3 (a DateTime64(3)) engine = MergeTree order by a; +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_3 VALUES 
(toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_3 ORDER BY a; +DROP TABLE test_3; + +DROP TABLE IF EXISTS test_6; +CREATE TABLE IF NOT EXISTS test_6 (a DateTime64(6)) engine = MergeTree order by a; +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_6 ORDER BY a; +DROP TABLE test_6; + +DROP TABLE IF EXISTS test_9; +CREATE TABLE IF NOT EXISTS test_9 (a DateTime64(6)) engine = MergeTree order by a; +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-05 
05:05:05', 5)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_9 ORDER BY a; +DROP TABLE test_9; diff --git a/tests/queries/0_stateless/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column.reference b/tests/queries/0_stateless/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column.sql b/tests/queries/0_stateless/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column.sql new file mode 100644 index 00000000000..c92d71893c4 --- /dev/null +++ b/tests/queries/0_stateless/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column.sql @@ -0,0 +1,21 @@ +CREATE TABLE attach_partition_t7 ( + a UInt32, + b UInt32 +) + ENGINE = MergeTree +PARTITION BY a ORDER BY a; + +ALTER TABLE attach_partition_t7 + ADD COLUMN mat_column + UInt32 MATERIALIZED a+b; + +insert into attach_partition_t7 values (1, 2); + +CREATE TABLE attach_partition_t8 ( + a UInt32, + b UInt32 +) + ENGINE = MergeTree +PARTITION BY a ORDER BY a; + +ALTER TABLE attach_partition_t8 ATTACH PARTITION ID '1' FROM attach_partition_t7; -- {serverError INCOMPATIBLE_COLUMNS}; diff --git a/tests/queries/0_stateless/02998_http_redirects.reference b/tests/queries/0_stateless/02998_http_redirects.reference new file mode 100644 index 00000000000..527e7df71c3 --- /dev/null +++ b/tests/queries/0_stateless/02998_http_redirects.reference @@ -0,0 +1,5 @@ +Ok. +HTTP/1.1 302 Found +Location: /?query=SELECT+'Pepyaka' +HTTP/1.1 404 Not Found +Pepyaka diff --git a/tests/queries/0_stateless/02998_http_redirects.sh b/tests/queries/0_stateless/02998_http_redirects.sh new file mode 100755 index 00000000000..8a8df884f9f --- /dev/null +++ b/tests/queries/0_stateless/02998_http_redirects.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +URL="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}" + +# Ping handler +${CLICKHOUSE_CURL} -s -S "${URL}/" + +# A handler that is configured to return a redirect +${CLICKHOUSE_CURL} -s -S -I "${URL}/upyachka" | grep -i -P '^HTTP|Location' + +# This handler is configured to not accept any query string +${CLICKHOUSE_CURL} -s -S -I "${URL}/upyachka?hello=world" | grep -i -P '^HTTP|Location' + +# Check that actual redirect works +${CLICKHOUSE_CURL} -s -S -L "${URL}/upyachka" diff --git a/tests/queries/0_stateless/02998_ipv6_hashing.reference b/tests/queries/0_stateless/02998_ipv6_hashing.reference new file mode 100644 index 00000000000..d9753de02c3 --- /dev/null +++ b/tests/queries/0_stateless/02998_ipv6_hashing.reference @@ -0,0 +1,20 @@ +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP +fe80::62:5aff:fed1:daf0 Tt{ֱPT£K{a;xdP diff --git a/tests/queries/0_stateless/02998_ipv6_hashing.sql b/tests/queries/0_stateless/02998_ipv6_hashing.sql new file mode 100644 index 00000000000..a836792748c --- /dev/null +++ b/tests/queries/0_stateless/02998_ipv6_hashing.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT toIPv6(materialize(toLowCardinality('fe80::62:5aff:fed1:daf0'))) AS ipv6, SHA256(ipv6) from numbers(10); +SELECT toIPv6(materialize('fe80::62:5aff:fed1:daf0')) AS ipv6, SHA256(ipv6) from numbers(10); + diff --git a/tests/queries/0_stateless/02998_operator_respect_nulls.reference b/tests/queries/0_stateless/02998_operator_respect_nulls.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02998_operator_respect_nulls.sql b/tests/queries/0_stateless/02998_operator_respect_nulls.sql new file mode 100644 index 00000000000..240992e1ff8 --- /dev/null +++ b/tests/queries/0_stateless/02998_operator_respect_nulls.sql @@ -0,0 +1 @@ +SELECT plus(1, 1) RESPECT NULLS; -- { serverError SYNTAX_ERROR } diff --git a/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.reference b/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.reference new file mode 100644 index 00000000000..496e0337209 --- /dev/null +++ b/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.reference @@ -0,0 +1,360 @@ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ +└─────────┘ +┌───────a─┐ +│ 1000000 │ +└─────────┘ +┌───────a─┐ +│ 1000000 │ +└─────────┘ +┌───────a─┐ +│ 1000000 │ +└─────────┘ +┌───────a─┐ +│ 1000000 │ +└─────────┘ + a + + 1000000 + a + + 1000000 + a + + 1000000 + a + + 1000000 +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ -- 1.00 million +└─────────┘ 
+┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ -- 1.00 million +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ -- 1.00 million +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000000 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000000 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000000 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000000 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000000 │ -- 1.00 million +└─────────┘ + a + + 1000000 -- 1.00 million + a + + 1000000 -- 1.00 million + a + + 1000000 -- 1.00 million + a + + 1000000 -- 1.00 million +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000001 │ -- 1.00 million +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000001 │ -- 1.00 million +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000001 │ -- 1.00 million +└─────────┘ +┏━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━┩ +│ 1000001 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000001 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000001 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000001 │ -- 1.00 million +└─────────┘ +┌───────a─┐ +│ 1000001 │ -- 1.00 million +└─────────┘ + a + + 1000001 -- 1.00 million + a + + 1000001 -- 1.00 million + a + + 1000001 -- 1.00 million + a + + 1000001 -- 1.00 million +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ -- 1.00 billion +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ -- 1.00 billion +└────────────┘ + a + + 1000000000 -- 1.00 billion + a + + 1000000000 -- 1.00 billion + a + + 1000000000 -- 1.00 billion + a + + 1000000000 -- 1.00 billion +┏━━━━━━━━━━━━┳━━━━━━━━━━━━┓ +┃ a ┃ b ┃ +┡━━━━━━━━━━━━╇━━━━━━━━━━━━┩ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┏━━━━━━━━━━━━┳━━━━━━━━━━━━┓ +┃ a ┃ b ┃ +┡━━━━━━━━━━━━╇━━━━━━━━━━━━┩ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┏━━━━━━━━━━━━┳━━━━━━━━━━━━┓ +┃ a ┃ b ┃ +┡━━━━━━━━━━━━╇━━━━━━━━━━━━┩ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┏━━━━━━━━━━━━┳━━━━━━━━━━━━┓ +┃ a ┃ b ┃ +┡━━━━━━━━━━━━╇━━━━━━━━━━━━┩ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┌──────────a─┬──────────b─┐ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┌──────────a─┬──────────b─┐ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┌──────────a─┬──────────b─┐ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ +┌──────────a─┬──────────b─┐ +│ 1000000000 │ 1000000000 │ +└────────────┴────────────┘ + a b + + 1000000000 1000000000 + a b + + 1000000000 1000000000 + a b + + 1000000000 1000000000 + a b + + 1000000000 1000000000 +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ +├────────────┤ +│ 1000000000 │ +└────────────┘ +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ +├────────────┤ +│ 1000000000 │ +└────────────┘ +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ +├────────────┤ +│ 1000000000 │ +└────────────┘ +┏━━━━━━━━━━━━┓ +┃ a ┃ +┡━━━━━━━━━━━━┩ +│ 1000000000 │ +├────────────┤ +│ 1000000000 │ +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ +│ 1000000000 │ +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ +│ 1000000000 │ +└────────────┘ +┌──────────a─┐ +│ 1000000000 │ +│ 1000000000 │ +└────────────┘ 
+┌──────────a─┐ +│ 1000000000 │ +│ 1000000000 │ +└────────────┘ + a + + 1000000000 + 1000000000 + a + + 1000000000 + 1000000000 + a + + 1000000000 + 1000000000 + a + + 1000000000 + 1000000000 +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date32') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└──────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date32') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└──────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date32') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└──────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29', 'Date32') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 │ +└──────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29 00:00:00', 'DateTime') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00 │ +└─────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29 00:00:00', 'DateTime') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00 │ +└─────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29 00:00:00', 'DateTime') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00 │ +└─────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST('2024-02-29 00:00:00', 'DateTime') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00 │ +└─────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST(CAST('2024-02-29 00:00:00', 'DateTime'), 'DateTime64') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00.000 │ +└─────────────────────────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST(CAST('2024-02-29 00:00:00', 'DateTime'), 'DateTime64') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00.000 │ +└─────────────────────────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST(CAST('2024-02-29 00:00:00', 'DateTime'), 'DateTime64') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00.000 │ +└─────────────────────────────────────────────────────────────┘ +┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ CAST(CAST('2024-02-29 00:00:00', 'DateTime'), 'DateTime64') ┃ +┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ 2024-02-29 00:00:00.000 │ +└─────────────────────────────────────────────────────────────┘ diff --git a/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql b/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql new file 
mode 100644 index 00000000000..5dc69488cea --- /dev/null +++ b/tests/queries/0_stateless/02998_pretty_format_print_readable_number_on_single_value.sql @@ -0,0 +1,96 @@ +SELECT 1_000_000 as a FORMAT Pretty; +SELECT 1_000_000 as a FORMAT PrettyNoEscapes; +SELECT 1_000_000 as a FORMAT PrettyMonoBlock; +SELECT 1_000_000 as a FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000 as a FORMAT PrettyCompact; +SELECT 1_000_000 as a FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000 as a FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000 as a FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000 as a FORMAT PrettySpace; +SELECT 1_000_000 as a FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000 as a FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000 as a FORMAT PrettySpaceNoEscapesMonoBlock; + + +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT Pretty; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyNoEscapes; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompact; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpace; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_001 as a FORMAT Pretty; +SELECT 1_000_001 as a FORMAT PrettyNoEscapes; +SELECT 1_000_001 as a FORMAT PrettyMonoBlock; +SELECT 1_000_001 as a FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_001 as a FORMAT PrettyCompact; +SELECT 1_000_001 as a FORMAT PrettyCompactNoEscapes; +SELECT 1_000_001 as a FORMAT PrettyCompactMonoBlock; +SELECT 1_000_001 as a FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_001 as a FORMAT PrettySpace; +SELECT 1_000_001 as a FORMAT PrettySpaceNoEscapes; +SELECT 1_000_001 as a FORMAT PrettySpaceMonoBlock; +SELECT 1_000_001 as a FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_000_000 as a FORMAT Pretty; +SELECT 1_000_000_000 as a FORMAT PrettyNoEscapes; +SELECT 1_000_000_000 as a FORMAT PrettyMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettyCompact; +SELECT 1_000_000_000 as a FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000_000 as a FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettySpace; +SELECT 1_000_000_000 as a FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000_000 as a FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_000_000 as a, 
1_000_000_000 as b FORMAT Pretty; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyNoEscapes; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompact; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpace; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT Pretty; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyNoEscapes; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompact; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpace; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpaceNoEscapesMonoBlock; + +SET output_format_pretty_single_large_number_tip_threshold=1; +SELECT '2024-02-29'::Date FORMAT Pretty; +SELECT '2024-02-29'::Date FORMAT PrettyNoEscapes; +SELECT '2024-02-29'::Date FORMAT PrettyMonoBlock; +SELECT '2024-02-29'::Date FORMAT PrettyNoEscapesMonoBlock; +SELECT '2024-02-29'::Date32 FORMAT Pretty; +SELECT '2024-02-29'::Date32 FORMAT PrettyNoEscapes; +SELECT '2024-02-29'::Date32 FORMAT PrettyMonoBlock; +SELECT '2024-02-29'::Date32 FORMAT PrettyNoEscapesMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT Pretty; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT PrettyNoEscapes; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT PrettyMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT PrettyNoEscapesMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT Pretty; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT PrettyNoEscapes; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT PrettyMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT PrettyNoEscapesMonoBlock; diff --git a/tests/queries/0_stateless/02998_projection_after_attach_partition.reference b/tests/queries/0_stateless/02998_projection_after_attach_partition.reference new file mode 100644 index 00000000000..1cb984f0f34 --- /dev/null +++ b/tests/queries/0_stateless/02998_projection_after_attach_partition.reference @@ -0,0 +1,31 @@ +-- { echoOn } +DROP TABLE IF EXISTS visits_order; +DROP TABLE IF EXISTS visits_order_dst; +CREATE TABLE visits_order +( + user_id UInt64, + user_name String, + some_int UInt64 +) ENGINE = MergeTree() PRIMARY KEY user_id PARTITION BY user_id; +CREATE TABLE visits_order_dst +( + user_id UInt64, + user_name 
String,
+    some_int UInt64
+) ENGINE = MergeTree() PRIMARY KEY user_id PARTITION BY user_id;
+ALTER TABLE visits_order ADD PROJECTION user_name_projection (SELECT * ORDER BY user_name);
+ALTER TABLE visits_order_dst ADD PROJECTION user_name_projection (SELECT * ORDER BY user_name);
+INSERT INTO visits_order SELECT 2, 'user2', number from numbers(1, 10);
+INSERT INTO visits_order SELECT 2, 'another_user2', number*2 from numbers(1, 10);
+INSERT INTO visits_order SELECT 2, 'yet_another_user2', number*3 from numbers(1, 10);
+ALTER TABLE visits_order_dst ATTACH PARTITION ID '2' FROM visits_order;
+SET allow_experimental_analyzer=0;
+EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2';
+Expression ((Projection + Before ORDER BY))
+  Filter
+    ReadFromMergeTree (user_name_projection)
+SET allow_experimental_analyzer=1;
+EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2';
+Expression ((Project names + Projection))
+  Filter
+    ReadFromMergeTree (user_name_projection)
diff --git a/tests/queries/0_stateless/02998_projection_after_attach_partition.sql b/tests/queries/0_stateless/02998_projection_after_attach_partition.sql
new file mode 100644
index 00000000000..4e0121dafe9
--- /dev/null
+++ b/tests/queries/0_stateless/02998_projection_after_attach_partition.sql
@@ -0,0 +1,34 @@
+-- { echoOn }
+DROP TABLE IF EXISTS visits_order;
+DROP TABLE IF EXISTS visits_order_dst;
+
+CREATE TABLE visits_order
+(
+    user_id UInt64,
+    user_name String,
+    some_int UInt64
+) ENGINE = MergeTree() PRIMARY KEY user_id PARTITION BY user_id;
+
+CREATE TABLE visits_order_dst
+(
+    user_id UInt64,
+    user_name String,
+    some_int UInt64
+) ENGINE = MergeTree() PRIMARY KEY user_id PARTITION BY user_id;
+
+ALTER TABLE visits_order ADD PROJECTION user_name_projection (SELECT * ORDER BY user_name);
+ALTER TABLE visits_order_dst ADD PROJECTION user_name_projection (SELECT * ORDER BY user_name);
+
+INSERT INTO visits_order SELECT 2, 'user2', number from numbers(1, 10);
+INSERT INTO visits_order SELECT 2, 'another_user2', number*2 from numbers(1, 10);
+INSERT INTO visits_order SELECT 2, 'yet_another_user2', number*3 from numbers(1, 10);
+
+ALTER TABLE visits_order_dst ATTACH PARTITION ID '2' FROM visits_order;
+
+SET allow_experimental_analyzer=0;
+
+EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2';
+
+SET allow_experimental_analyzer=1;
+
+EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2';
diff --git a/tests/queries/0_stateless/02999_analyzer_preimage_null.reference b/tests/queries/0_stateless/02999_analyzer_preimage_null.reference
new file mode 100644
index 00000000000..6f9afedfd07
--- /dev/null
+++ b/tests/queries/0_stateless/02999_analyzer_preimage_null.reference
@@ -0,0 +1,121 @@
+-- { echoOn }
+EXPLAIN QUERY TREE run_passes = 1
+SELECT *
+FROM date_t__fuzz_0
+WHERE ((toYear(date1) AS b) != toNullable(1993)) AND (id <= b);
+QUERY id: 0
+  PROJECTION COLUMNS
+    id UInt32
+    value1 String
+    date1 Date
+  PROJECTION
+    LIST id: 1, nodes: 3
+      COLUMN id: 2, column_name: id, result_type: UInt32, source_id: 3
+      COLUMN id: 4, column_name: value1, result_type: String, source_id: 3
+      COLUMN id: 5, column_name: date1, result_type: Date, source_id: 3
+  JOIN TREE
+    TABLE id: 3, alias: __table1, table_name: default.date_t__fuzz_0
+  WHERE
+    FUNCTION id: 6, function_name: and, function_type: ordinary, result_type: Nullable(UInt8)
+      ARGUMENTS
+        LIST id: 7, nodes: 2
+          FUNCTION id: 8, function_name: notEquals, function_type: ordinary, result_type: Nullable(UInt8)
+            ARGUMENTS
+              LIST id: 9, nodes: 2
+                FUNCTION id: 10, function_name: toYear, function_type: ordinary, result_type: UInt16
+                  ARGUMENTS
+                    LIST id: 11, nodes: 1
+                      COLUMN id: 12, column_name: date1, result_type: Date, source_id: 3
+                CONSTANT id: 13, constant_value: UInt64_1993, constant_value_type: Nullable(UInt16)
+                  EXPRESSION
+                    FUNCTION id: 14, function_name: toNullable, function_type: ordinary, result_type: Nullable(UInt16)
+                      ARGUMENTS
+                        LIST id: 15, nodes: 1
+                          CONSTANT id: 16, constant_value: UInt64_1993, constant_value_type: UInt16
+          FUNCTION id: 17, function_name: lessOrEquals, function_type: ordinary, result_type: UInt8
+            ARGUMENTS
+              LIST id: 18, nodes: 2
+                COLUMN id: 19, column_name: id, result_type: UInt32, source_id: 3
+                FUNCTION id: 10, function_name: toYear, function_type: ordinary, result_type: UInt16
+                  ARGUMENTS
+                    LIST id: 11, nodes: 1
+                      COLUMN id: 12, column_name: date1, result_type: Date, source_id: 3
+EXPLAIN QUERY TREE run_passes = 1
+SELECT *
+FROM date_t__fuzz_0
+WHERE ((toYear(date1) AS b) != 1993) AND (id <= b) SETTINGS optimize_time_filter_with_preimage=0;
+QUERY id: 0
+  PROJECTION COLUMNS
+    id UInt32
+    value1 String
+    date1 Date
+  PROJECTION
+    LIST id: 1, nodes: 3
+      COLUMN id: 2, column_name: id, result_type: UInt32, source_id: 3
+      COLUMN id: 4, column_name: value1, result_type: String, source_id: 3
+      COLUMN id: 5, column_name: date1, result_type: Date, source_id: 3
+  JOIN TREE
+    TABLE id: 3, alias: __table1, table_name: default.date_t__fuzz_0
+  WHERE
+    FUNCTION id: 6, function_name: and, function_type: ordinary, result_type: UInt8
+      ARGUMENTS
+        LIST id: 7, nodes: 2
+          FUNCTION id: 8, function_name: notEquals, function_type: ordinary, result_type: UInt8
+            ARGUMENTS
+              LIST id: 9, nodes: 2
+                FUNCTION id: 10, function_name: toYear, function_type: ordinary, result_type: UInt16
+                  ARGUMENTS
+                    LIST id: 11, nodes: 1
+                      COLUMN id: 12, column_name: date1, result_type: Date, source_id: 3
+                CONSTANT id: 13, constant_value: UInt64_1993, constant_value_type: UInt16
+          FUNCTION id: 14, function_name: lessOrEquals, function_type: ordinary, result_type: UInt8
+            ARGUMENTS
+              LIST id: 15, nodes: 2
+                COLUMN id: 16, column_name: id, result_type: UInt32, source_id: 3
+                FUNCTION id: 10, function_name: toYear, function_type: ordinary, result_type: UInt16
+                  ARGUMENTS
+                    LIST id: 11, nodes: 1
+                      COLUMN id: 12, column_name: date1, result_type: Date, source_id: 3
+  SETTINGS optimize_time_filter_with_preimage=0
+EXPLAIN QUERY TREE run_passes = 1
+SELECT *
+FROM date_t__fuzz_0
+WHERE ((toYear(date1) AS b) != 1993) AND (id <= b) SETTINGS optimize_time_filter_with_preimage=1;
+QUERY id: 0
+  PROJECTION COLUMNS
+    id UInt32
+    value1 String
+    date1 Date
+  PROJECTION
+    LIST id: 1, nodes: 3
+      COLUMN id: 2, column_name: id, result_type: UInt32, source_id: 3
+      COLUMN id: 4, column_name: value1, result_type: String, source_id: 3
+      COLUMN id: 5, column_name: date1, result_type: Date, source_id: 3
+  JOIN TREE
+    TABLE id: 3, alias: __table1, table_name: default.date_t__fuzz_0
+  WHERE
+    FUNCTION id: 6, function_name: and, function_type: ordinary, result_type: UInt8
+      ARGUMENTS
+        LIST id: 7, nodes: 2
+          FUNCTION id: 8, function_name: or, function_type: ordinary, result_type: UInt8
+            ARGUMENTS
+              LIST id: 9, nodes: 2
+                FUNCTION id: 10, function_name: less, function_type: ordinary, result_type: UInt8
+                  ARGUMENTS
+                    LIST id: 11, nodes: 2
+                      COLUMN id: 12, column_name: date1, result_type: Date, source_id: 3
+                      CONSTANT id: 13, constant_value: \'1993-01-01\', constant_value_type: String
+                FUNCTION id: 14, function_name: greaterOrEquals, function_type: ordinary, result_type: UInt8
+                  ARGUMENTS
+                    LIST id: 15, nodes: 2
+                      COLUMN id: 16, column_name: date1, result_type: Date, source_id: 3
+                      CONSTANT id: 17, constant_value: \'1994-01-01\', constant_value_type: String
+          FUNCTION id: 18, function_name: lessOrEquals, function_type: ordinary, result_type: UInt8
+            ARGUMENTS
+              LIST id: 19, nodes: 2
+                COLUMN id: 20, column_name: id, result_type: UInt32, source_id: 3
+                FUNCTION id: 21, function_name: toYear, function_type: ordinary, result_type: UInt16
+                  ARGUMENTS
+                    LIST id: 22, nodes: 1
+                      COLUMN id: 23, column_name: date1, result_type: Date, source_id: 3
+  SETTINGS optimize_time_filter_with_preimage=1
diff --git a/tests/queries/0_stateless/02999_analyzer_preimage_null.sql b/tests/queries/0_stateless/02999_analyzer_preimage_null.sql
new file mode 100644
index 00000000000..07d3a0f69c1
--- /dev/null
+++ b/tests/queries/0_stateless/02999_analyzer_preimage_null.sql
@@ -0,0 +1,20 @@
+SET allow_experimental_analyzer=1;
+SET optimize_time_filter_with_preimage=1;
+
+CREATE TABLE date_t__fuzz_0 (`id` UInt32, `value1` String, `date1` Date) ENGINE = ReplacingMergeTree ORDER BY id SETTINGS allow_nullable_key=1;
+
+-- { echoOn }
+EXPLAIN QUERY TREE run_passes = 1
+SELECT *
+FROM date_t__fuzz_0
+WHERE ((toYear(date1) AS b) != toNullable(1993)) AND (id <= b);
+
+EXPLAIN QUERY TREE run_passes = 1
+SELECT *
+FROM date_t__fuzz_0
+WHERE ((toYear(date1) AS b) != 1993) AND (id <= b) SETTINGS optimize_time_filter_with_preimage=0;
+
+EXPLAIN QUERY TREE run_passes = 1
+SELECT *
+FROM date_t__fuzz_0
+WHERE ((toYear(date1) AS b) != 1993) AND (id <= b) SETTINGS optimize_time_filter_with_preimage=1;
diff --git a/tests/queries/0_stateless/02999_variant_suspicious_types.reference b/tests/queries/0_stateless/02999_variant_suspicious_types.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02999_variant_suspicious_types.sql b/tests/queries/0_stateless/02999_variant_suspicious_types.sql
new file mode 100644
index 00000000000..8cdbfc13adb
--- /dev/null
+++ b/tests/queries/0_stateless/02999_variant_suspicious_types.sql
@@ -0,0 +1,7 @@
+set allow_suspicious_variant_types=0;
+select 42::Variant(UInt32, Int64); -- {serverError ILLEGAL_COLUMN}
+select [42]::Variant(Array(UInt32), Array(Int64)); -- {serverError ILLEGAL_COLUMN}
+select 'Hello'::Variant(String, LowCardinality(String)); -- {serverError ILLEGAL_COLUMN}
+select (1, 'Hello')::Variant(Tuple(UInt32, String), Tuple(Int64, String)); -- {serverError ILLEGAL_COLUMN}
+select map(42, 42)::Variant(Map(UInt64, UInt32), Map(UInt64, Int64)); -- {serverError ILLEGAL_COLUMN}
+
diff --git a/tests/queries/0_stateless/03000_too_big_max_execution_time_setting.reference b/tests/queries/0_stateless/03000_too_big_max_execution_time_setting.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03000_too_big_max_execution_time_setting.sql b/tests/queries/0_stateless/03000_too_big_max_execution_time_setting.sql
new file mode 100644
index 00000000000..7aa86891b42
--- /dev/null
+++ b/tests/queries/0_stateless/03000_too_big_max_execution_time_setting.sql
@@ -0,0 +1,2 @@
+select 1 settings max_execution_time = 9223372036854775808; -- {clientError BAD_ARGUMENTS}
+
diff --git a/tests/queries/0_stateless/03001_analyzer_nullable_nothing.reference b/tests/queries/0_stateless/03001_analyzer_nullable_nothing.reference
new file mode 100644
index 00000000000..2ad74d50ebd
--- /dev/null
+++ b/tests/queries/0_stateless/03001_analyzer_nullable_nothing.reference
@@ -0,0 +1 @@
+0	\N
diff --git a/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql b/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql
new file mode 100644
index 00000000000..32c378ebf0a
--- /dev/null
+++ b/tests/queries/0_stateless/03001_analyzer_nullable_nothing.sql
@@ -0,0 +1,6 @@
+--https://github.com/ClickHouse/ClickHouse/issues/58906
+SELECT
+    count(_CAST(NULL, 'Nullable(Nothing)')),
+    round(avg(_CAST(NULL, 'Nullable(Nothing)'))) AS k
+FROM numbers(256)
+    SETTINGS allow_experimental_analyzer = 1;
diff --git a/tests/queries/0_stateless/03001_bad_error_message_higher_order_functions.reference b/tests/queries/0_stateless/03001_bad_error_message_higher_order_functions.reference
new file mode 100644
index 00000000000..d4e027274e2
--- /dev/null
+++ b/tests/queries/0_stateless/03001_bad_error_message_higher_order_functions.reference
@@ -0,0 +1 @@
+Argument 3 has size 2 which differs with the size of another argument, 3
diff --git a/tests/queries/0_stateless/03001_bad_error_message_higher_order_functions.sh b/tests/queries/0_stateless/03001_bad_error_message_higher_order_functions.sh
new file mode 100755
index 00000000000..967453fd375
--- /dev/null
+++ b/tests/queries/0_stateless/03001_bad_error_message_higher_order_functions.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --query "SELECT arrayMap((x,y) -> x + y, [1,2,3], [1,2])" 2>&1 | grep -o -F --max-count 1 'Argument 3 has size 2 which differs with the size of another argument, 3'
\ No newline at end of file
diff --git a/tests/queries/0_stateless/03002_analyzer_prewhere.reference b/tests/queries/0_stateless/03002_analyzer_prewhere.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/03002_analyzer_prewhere.sql b/tests/queries/0_stateless/03002_analyzer_prewhere.sql
new file mode 100644
index 00000000000..0edf16f1cbe
--- /dev/null
+++ b/tests/queries/0_stateless/03002_analyzer_prewhere.sql
@@ -0,0 +1,12 @@
+SET max_threads = 16, receive_timeout = 10., receive_data_timeout_ms = 10000, allow_suspicious_low_cardinality_types = true, enable_positional_arguments = false, log_queries = true, table_function_remote_max_addresses = 200, any_join_distinct_right_table_keys = true, joined_subquery_requires_alias = false, allow_experimental_analyzer = true, max_execution_time = 10., max_memory_usage = 10000000000, log_comment = '/workspace/ch/tests/queries/0_stateless/01710_projection_in_index.sql', send_logs_level = 'fatal', enable_optimize_predicate_expression = false, prefer_localhost_replica = true, allow_introspection_functions = true, optimize_functions_to_subcolumns = false, transform_null_in = true, optimize_use_projections = true, allow_deprecated_syntax_for_merge_tree = true, parallelize_output_from_storages = false;
+
+CREATE TABLE t__fuzz_0 (`i` Int32, `j` Nullable(Int32), `k` Int32, PROJECTION p (SELECT * ORDER BY j)) ENGINE = MergeTree ORDER BY i SETTINGS index_granularity = 1, allow_nullable_key=1;
+
+INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3;
+INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3;
+INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3;
+INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3;
+INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3;
+
+SELECT * FROM t__fuzz_0 PREWHERE (i < 5) AND (j IN (1, 2)) WHERE i < 5;
+DROP TABLE t__fuzz_0;
diff --git a/tests/queries/1_stateful/00157_cache_dictionary.sql b/tests/queries/1_stateful/00157_cache_dictionary.sql
index 9699843af8f..3621ff82126 100644
--- a/tests/queries/1_stateful/00157_cache_dictionary.sql
+++ b/tests/queries/1_stateful/00157_cache_dictionary.sql
@@ -1,8 +1,5 @@
 -- Tags: no-tsan, no-parallel
 
--- Suppress "ReadWriteBufferFromHTTP: HTTP request to `{}` failed at try 1/10 with bytes read: 311149/378695. Error: DB::HTTPException: Received error from remote server {}. (Current backoff wait is 100/10000 ms)" errors
-SET send_logs_level='error';
-
 DROP TABLE IF EXISTS test.hits_1m;
 
 CREATE TABLE test.hits_1m AS test.hits
diff --git a/tests/tsan_suppressions.txt b/tests/tsan_suppressions.txt
index 67c7eae08f3..4f29925761b 100644
--- a/tests/tsan_suppressions.txt
+++ b/tests/tsan_suppressions.txt
@@ -1,2 +1,4 @@
 # https://github.com/ClickHouse/ClickHouse/issues/55629
 race:rd_kafka_broker_set_nodename
+# https://github.com/ClickHouse/ClickHouse/issues/60443
+race:rd_kafka_stats_emit_all
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index e05d8ea81ab..3614bcb7452 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -112,6 +112,7 @@ CDMA
 CESU
 CIDR
 CIDRToRange
+CKMAN
 CLOB
 CLion
 CMPLNT
@@ -772,7 +773,6 @@ ReferenceKeyed
 Refreshable
 RegexpTree
 RemoteRead
-SharedMergeTree
 ReplacingMergeTree
 ReplicasMaxAbsoluteDelay
 ReplicasMaxInsertsInQueue
@@ -841,6 +841,8 @@ Sematext
 SendExternalTables
 SendScalars
 ShareAlike
+SharedMergeTree
+Shortkeys
 Shortkeys
 SimHash
 Simhash
@@ -1078,6 +1080,8 @@ anyheavy
 anylast
 appendTrailingCharIfAbsent
 approximative
+approxtopk
+approxtopsum
 argMax
 argMin
 argmax
@@ -1238,6 +1242,7 @@ buildable
 builtins
 byteHammingDistance
 byteSize
+byteSlice
 byteSwap
 bytebase
 bytesToCutForIPv
@@ -1288,6 +1293,7 @@ cipherList
 ciphertext
 cityHash
 cityhash
+ckman
 clangd
 cli
 clickcache
@@ -1615,7 +1621,6 @@ greaterorequals
 greenspace
 groupArray
 groupArrayInsertAt
-grouparrayintersect
 groupArrayIntersect
 groupArrayLast
 groupArrayMovingAvg
@@ -1632,6 +1637,7 @@ groupBitmapXor
 groupUniqArray
 grouparray
 grouparrayinsertat
+grouparrayintersect
 grouparraylast
 grouparraymovingavg
 grouparraymovingsum
@@ -1697,6 +1703,7 @@ hyperscan
 hypot
 hyvor
 iTerm
+iTerm
 icosahedron
 icudata
 idempotency
@@ -1744,7 +1751,6 @@ isValidJSON
 isValidUTF
 isZeroOrNull
 iteratively
-iTerm
 jaccard
 jaccardIndex
 jaroSimilarity
@@ -1895,6 +1901,7 @@ mdadm
 meanZTest
 meanztest
 mebibytes
+mergeTreeIndex
 mergeable
 mergetree
 messageID
@@ -2317,7 +2324,6 @@ shardNum
 sharded
 sharding
 shortcircuit
-Shortkeys
 shortkeys
 shoutout
 simdjson
@@ -2423,6 +2429,7 @@ subranges
 subreddits
 subseconds
 subsequence
+substreams
 substring
 substringIndex
 substringIndexUTF
@@ -2486,6 +2493,7 @@ theilsu
 themself
 threadpool
 throwIf
+timeDiff
 timeSlot
 timeSlots
 timeZone
@@ -2722,6 +2730,7 @@ wordShingleSimHashCaseInsensitive
 wordShingleSimHashCaseInsensitiveUTF
 wordShingleSimHashUTF
 wordshingleMinHash
+writability
 wrt
 xcode
 xeus
diff --git a/utils/check-style/check-large-objects.sh b/utils/check-style/check-large-objects.sh
index 6b3fe86d310..5c1276e5732 100755
--- a/utils/check-style/check-large-objects.sh
+++ b/utils/check-style/check-large-objects.sh
@@ -4,8 +4,6 @@ TU_EXCLUDES=(
     CastOverloadResolver
-    AggregateFunctionMax
-    AggregateFunctionMin
     AggregateFunctionUniq
     FunctionsConversion
diff --git a/utils/check-style/check-style b/utils/check-style/check-style
index 6c12970c4bb..3a966daea41 100755
--- a/utils/check-style/check-style
+++ b/utils/check-style/check-style
@@ -50,11 +50,6 @@ find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/n
 # Broken symlinks
 find -L $ROOT_PATH -type l 2>/dev/null | grep -v contrib && echo "^ Broken symlinks found"
 
-# Double whitespaces
-find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
-    grep -vP $EXCLUDE_DIRS |
-    while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done
-
 # Unused/Undefined/Duplicates ErrorCodes/ProfileEvents/CurrentMetrics
 declare -A EXTERN_TYPES
 EXTERN_TYPES[ErrorCodes]=int
@@ -180,6 +175,8 @@ for test_case in "${tests_with_query_log[@]}"; do
     } || echo "Queries to system.query_log/system.query_thread_log does not have current_database = currentDatabase() condition in $test_case"
 done
 
+grep -iE 'SYSTEM STOP MERGES;?$' -R $ROOT_PATH/tests/queries && echo "Merges cannot be disabled globally in fast/stateful/stateless tests, because it will break concurrently running queries"
+
 # There shouldn't be large jumps between test numbers (since they should be consecutive)
 max_diff=$(
     find $ROOT_PATH/tests/queries -iname '*.sql' -or -iname '*.sh' -or -iname '*.py' -or -iname '*.j2' |
@@ -448,3 +445,8 @@ find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
     grep -vP $EXCLUDE_DIRS |
     xargs grep -P 'Sql|Html|Xml|Cpu|Tcp|Udp|Http|Db|Json|Yaml' |
     grep -v -P 'RabbitMQ|Azure|Aws|aws|Avro|IO/S3' && echo "Abbreviations such as SQL, XML, HTTP, should be in all caps. For example, SQL is right, Sql is wrong. XMLHttpRequest is very wrong."
+
+find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' |
+    grep -vP $EXCLUDE_DIRS |
+    xargs grep -F -i 'ErrorCodes::LOGICAL_ERROR, "Logical error:' &&
+    echo "If an exception has LOGICAL_ERROR code, there is no need to include the text 'Logical error' in the exception message, because then the phrase 'Logical error' will be printed twice."
diff --git a/utils/check-style/check-whitespaces b/utils/check-style/check-whitespaces
index 5a20569868d..507b1dd2ede 100755
--- a/utils/check-style/check-whitespaces
+++ b/utils/check-style/check-whitespaces
@@ -2,8 +2,9 @@
 
 ROOT_PATH=$(git rev-parse --show-toplevel)
 EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing/|Parsers/New'
+NPROC=$(($(nproc) + 3))
 
 # Double whitespaces
 find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' 2>/dev/null |
     grep -vP $EXCLUDE_DIRS |
-    while read i; do $ROOT_PATH/utils/check-style/double-whitespaces.pl < $i || echo -e "^ File $i contains double whitespaces\n"; done
+    xargs -P "$NPROC" -n 20 "${ROOT_PATH}/utils/check-style/double-whitespaces.pl"
diff --git a/utils/check-style/double-whitespaces.pl b/utils/check-style/double-whitespaces.pl
index 47b03cb74ab..daeddecbd27 100755
--- a/utils/check-style/double-whitespaces.pl
+++ b/utils/check-style/double-whitespaces.pl
@@ -5,27 +5,31 @@ use strict;
 # Find double whitespace such as "a,  b, c" that looks very ugly and annoying.
 # But skip double whitespaces if they are used as an alignment - by comparing to surrounding lines.
 
-my @array;
-
-while (<>)
-{
-    push @array, $_;
-}
-
 my $ret = 0;
-for (my $i = 1; $i < $#array; ++$i)
+foreach my $file (@ARGV)
 {
-    if ($array[$i] =~ ',( {2,3})[^ /]')
-    {
-        # https://stackoverflow.com/questions/87380/how-can-i-find-the-location-of-a-regex-match-in-perl
+    my @array;
 
-        if ((substr($array[$i - 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/) # whitespaces are not part of alignment
-            && (substr($array[$i + 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/)
-            && $array[$i] !~ /(-?\d+\w*,\s+){3,}/) # this is not a number table like { 10, -1, 2 }
+    open (FH,'<',$file);
+    while (<FH>)
+    {
+        push @array, $_;
+    }
+
+    for (my $i = 1; $i < $#array; ++$i)
+    {
+        if ($array[$i] =~ ',( {2,3})[^ /]')
         {
-            print(($i + 1) . ":" . $array[$i]);
-            $ret = 1;
+            # https://stackoverflow.com/questions/87380/how-can-i-find-the-location-of-a-regex-match-in-perl
+
+            if ((substr($array[$i - 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/) # whitespaces are not part of alignment
+                && (substr($array[$i + 1], $+[1] - 1, 2) !~ /^[ -][^ ]$/)
+                && $array[$i] !~ /(-?\d+\w*,\s+){3,}/) # this is not a number table like { 10, -1, 2 }
+            {
+                print($file . ":" . ($i + 1) . $array[$i]);
+                $ret = 1;
+            }
         }
     }
 }
 
diff --git a/utils/clickhouse-diagnostics/README.md b/utils/clickhouse-diagnostics/README.md
index 9a86ad535fd..01bb543c9a5 100644
--- a/utils/clickhouse-diagnostics/README.md
+++ b/utils/clickhouse-diagnostics/README.md
@@ -318,7 +318,7 @@ SHOW ACCESS
 ```
 **result**
 ```
-CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default
+CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE `default`
 CREATE SETTINGS PROFILE default SETTINGS max_memory_usage = 10000000000, load_balancing = 'random'
 CREATE SETTINGS PROFILE readonly SETTINGS readonly = 1
 CREATE QUOTA default KEYED BY user_name FOR INTERVAL 1 hour TRACKING ONLY TO default